savoji commited on
Commit
c139af0
·
verified ·
1 Parent(s): 6788dfb

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/overfit_base.yaml +79 -0
  2. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/overfit_singleseq_base.yaml +42 -0
  3. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/overfit_singleseq_nerf_blender.yaml +56 -0
  4. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_base.yaml +80 -0
  5. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_normed.yaml +18 -0
  6. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_transformer.yaml +18 -0
  7. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_unnormed.yaml +19 -0
  8. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_base.yaml +38 -0
  9. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_co3dv2_base.yaml +8 -0
  10. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_idr_ad.yaml +65 -0
  11. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerf_ad.yaml +12 -0
  12. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerf_wce.yaml +12 -0
  13. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerformer.yaml +18 -0
  14. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerformer_angle_w.yaml +7 -0
  15. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_ad_hypernet.yaml +35 -0
  16. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_ad_hypernet_noharm.yaml +11 -0
  17. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_wce.yaml +31 -0
  18. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_wce_noharm.yaml +11 -0
  19. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_nerf_wce.yaml +4 -0
  20. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_nerformer.yaml +4 -0
  21. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_srn_ad_hypernet.yaml +4 -0
  22. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_srn_wce.yaml +4 -0
  23. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_base.yaml +41 -0
  24. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_co3dv2_base.yaml +8 -0
  25. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_idr.yaml +57 -0
  26. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf.yaml +3 -0
  27. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf_blender.yaml +55 -0
  28. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf_wce.yaml +10 -0
  29. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerformer.yaml +18 -0
  30. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn.yaml +29 -0
  31. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_noharm.yaml +11 -0
  32. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_wce.yaml +30 -0
  33. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_wce_noharm.yaml +11 -0
  34. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_idr.yaml +4 -0
  35. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_nerf.yaml +4 -0
  36. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_nerformer.yaml +4 -0
  37. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_srn_noharm.yaml +4 -0
  38. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_wce_base.yaml +22 -0
  39. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/common/__init__.py +12 -0
  40. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/common/compat.py +45 -0
  41. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/common/datatypes.py +60 -0
  42. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/common/linear_with_repeat.py +95 -0
  43. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/common/workaround/__init__.py +10 -0
  44. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/common/workaround/symeig3x3.py +319 -0
  45. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/common/workaround/utils.py +33 -0
  46. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/ball_query/ball_query.cu +129 -0
  47. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/ball_query/ball_query.h +93 -0
  48. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/ball_query/ball_query_cpu.cpp +54 -0
  49. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/alpha_composite.cu +233 -0
  50. project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/alpha_composite.h +115 -0
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/overfit_base.yaml ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - default_config
3
+ - _self_
4
+ exp_dir: ./data/exps/overfit_base/
5
+ training_loop_ImplicitronTrainingLoop_args:
6
+ visdom_port: 8097
7
+ visualize_interval: 0
8
+ max_epochs: 1000
9
+ data_source_ImplicitronDataSource_args:
10
+ data_loader_map_provider_class_type: SequenceDataLoaderMapProvider
11
+ dataset_map_provider_class_type: JsonIndexDatasetMapProvider
12
+ data_loader_map_provider_SequenceDataLoaderMapProvider_args:
13
+ dataset_length_train: 1000
14
+ dataset_length_val: 1
15
+ num_workers: 8
16
+ dataset_map_provider_JsonIndexDatasetMapProvider_args:
17
+ dataset_root: ${oc.env:CO3D_DATASET_ROOT}
18
+ n_frames_per_sequence: -1
19
+ test_on_train: true
20
+ test_restrict_sequence_id: 0
21
+ dataset_JsonIndexDataset_args:
22
+ load_point_clouds: false
23
+ mask_depths: false
24
+ mask_images: false
25
+ model_factory_ImplicitronModelFactory_args:
26
+ model_class_type: "OverfitModel"
27
+ model_OverfitModel_args:
28
+ loss_weights:
29
+ loss_mask_bce: 1.0
30
+ loss_prev_stage_mask_bce: 1.0
31
+ loss_autodecoder_norm: 0.01
32
+ loss_rgb_mse: 1.0
33
+ loss_prev_stage_rgb_mse: 1.0
34
+ output_rasterized_mc: false
35
+ chunk_size_grid: 102400
36
+ render_image_height: 400
37
+ render_image_width: 400
38
+ share_implicit_function_across_passes: false
39
+ implicit_function_class_type: "NeuralRadianceFieldImplicitFunction"
40
+ implicit_function_NeuralRadianceFieldImplicitFunction_args:
41
+ n_harmonic_functions_xyz: 10
42
+ n_harmonic_functions_dir: 4
43
+ n_hidden_neurons_xyz: 256
44
+ n_hidden_neurons_dir: 128
45
+ n_layers_xyz: 8
46
+ append_xyz:
47
+ - 5
48
+ coarse_implicit_function_class_type: "NeuralRadianceFieldImplicitFunction"
49
+ coarse_implicit_function_NeuralRadianceFieldImplicitFunction_args:
50
+ n_harmonic_functions_xyz: 10
51
+ n_harmonic_functions_dir: 4
52
+ n_hidden_neurons_xyz: 256
53
+ n_hidden_neurons_dir: 128
54
+ n_layers_xyz: 8
55
+ append_xyz:
56
+ - 5
57
+ raysampler_AdaptiveRaySampler_args:
58
+ n_rays_per_image_sampled_from_mask: 1024
59
+ scene_extent: 8.0
60
+ n_pts_per_ray_training: 64
61
+ n_pts_per_ray_evaluation: 64
62
+ stratified_point_sampling_training: true
63
+ stratified_point_sampling_evaluation: false
64
+ renderer_MultiPassEmissionAbsorptionRenderer_args:
65
+ n_pts_per_ray_fine_training: 64
66
+ n_pts_per_ray_fine_evaluation: 64
67
+ append_coarse_samples_to_fine: true
68
+ density_noise_std_train: 1.0
69
+ optimizer_factory_ImplicitronOptimizerFactory_args:
70
+ breed: Adam
71
+ weight_decay: 0.0
72
+ lr_policy: MultiStepLR
73
+ multistep_lr_milestones: []
74
+ lr: 0.0005
75
+ gamma: 0.1
76
+ momentum: 0.9
77
+ betas:
78
+ - 0.9
79
+ - 0.999
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/overfit_singleseq_base.yaml ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - overfit_base
3
+ - _self_
4
+ data_source_ImplicitronDataSource_args:
5
+ data_loader_map_provider_SequenceDataLoaderMapProvider_args:
6
+ batch_size: 1
7
+ dataset_length_train: 1000
8
+ dataset_length_val: 1
9
+ num_workers: 8
10
+ dataset_map_provider_JsonIndexDatasetMapProvider_args:
11
+ assert_single_seq: true
12
+ n_frames_per_sequence: -1
13
+ test_restrict_sequence_id: 0
14
+ test_on_train: false
15
+ model_factory_ImplicitronModelFactory_args:
16
+ model_class_type: "OverfitModel"
17
+ model_OverfitModel_args:
18
+ render_image_height: 800
19
+ render_image_width: 800
20
+ log_vars:
21
+ - loss_rgb_psnr_fg
22
+ - loss_rgb_psnr
23
+ - loss_eikonal
24
+ - loss_prev_stage_rgb_psnr
25
+ - loss_mask_bce
26
+ - loss_prev_stage_mask_bce
27
+ - loss_rgb_mse
28
+ - loss_prev_stage_rgb_mse
29
+ - loss_depth_abs
30
+ - loss_depth_abs_fg
31
+ - loss_kl
32
+ - loss_mask_neg_iou
33
+ - objective
34
+ - epoch
35
+ - sec/it
36
+ optimizer_factory_ImplicitronOptimizerFactory_args:
37
+ lr: 0.0005
38
+ multistep_lr_milestones:
39
+ - 200
40
+ - 300
41
+ training_loop_ImplicitronTrainingLoop_args:
42
+ max_epochs: 400
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/overfit_singleseq_nerf_blender.yaml ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - overfit_singleseq_base
3
+ - _self_
4
+ exp_dir: "./data/overfit_nerf_blender_repro/${oc.env:BLENDER_SINGLESEQ_CLASS}"
5
+ data_source_ImplicitronDataSource_args:
6
+ data_loader_map_provider_SequenceDataLoaderMapProvider_args:
7
+ dataset_length_train: 100
8
+ dataset_map_provider_class_type: BlenderDatasetMapProvider
9
+ dataset_map_provider_BlenderDatasetMapProvider_args:
10
+ base_dir: ${oc.env:BLENDER_DATASET_ROOT}/${oc.env:BLENDER_SINGLESEQ_CLASS}
11
+ n_known_frames_for_test: null
12
+ object_name: ${oc.env:BLENDER_SINGLESEQ_CLASS}
13
+ path_manager_factory_class_type: PathManagerFactory
14
+ path_manager_factory_PathManagerFactory_args:
15
+ silence_logs: true
16
+
17
+ model_factory_ImplicitronModelFactory_args:
18
+ model_class_type: "OverfitModel"
19
+ model_OverfitModel_args:
20
+ mask_images: false
21
+ raysampler_class_type: AdaptiveRaySampler
22
+ raysampler_AdaptiveRaySampler_args:
23
+ n_pts_per_ray_training: 64
24
+ n_pts_per_ray_evaluation: 64
25
+ n_rays_per_image_sampled_from_mask: 4096
26
+ stratified_point_sampling_training: true
27
+ stratified_point_sampling_evaluation: false
28
+ scene_extent: 2.0
29
+ scene_center:
30
+ - 0.0
31
+ - 0.0
32
+ - 0.0
33
+ renderer_MultiPassEmissionAbsorptionRenderer_args:
34
+ density_noise_std_train: 0.0
35
+ n_pts_per_ray_fine_training: 128
36
+ n_pts_per_ray_fine_evaluation: 128
37
+ raymarcher_EmissionAbsorptionRaymarcher_args:
38
+ blend_output: false
39
+ loss_weights:
40
+ loss_rgb_mse: 1.0
41
+ loss_prev_stage_rgb_mse: 1.0
42
+ loss_mask_bce: 0.0
43
+ loss_prev_stage_mask_bce: 0.0
44
+ loss_autodecoder_norm: 0.00
45
+
46
+ optimizer_factory_ImplicitronOptimizerFactory_args:
47
+ exponential_lr_step_size: 3001
48
+ lr_policy: LinearExponential
49
+ linear_exponential_lr_milestone: 200
50
+
51
+ training_loop_ImplicitronTrainingLoop_args:
52
+ max_epochs: 6000
53
+ metric_print_interval: 10
54
+ store_checkpoints_purge: 3
55
+ test_when_finished: true
56
+ validation_interval: 100
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_base.yaml ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - default_config
3
+ - _self_
4
+ exp_dir: ./data/exps/base/
5
+ training_loop_ImplicitronTrainingLoop_args:
6
+ visdom_port: 8097
7
+ visualize_interval: 0
8
+ max_epochs: 1000
9
+ data_source_ImplicitronDataSource_args:
10
+ data_loader_map_provider_class_type: SequenceDataLoaderMapProvider
11
+ dataset_map_provider_class_type: JsonIndexDatasetMapProvider
12
+ data_loader_map_provider_SequenceDataLoaderMapProvider_args:
13
+ dataset_length_train: 1000
14
+ dataset_length_val: 1
15
+ num_workers: 8
16
+ dataset_map_provider_JsonIndexDatasetMapProvider_args:
17
+ dataset_root: ${oc.env:CO3D_DATASET_ROOT}
18
+ n_frames_per_sequence: -1
19
+ test_on_train: true
20
+ test_restrict_sequence_id: 0
21
+ dataset_JsonIndexDataset_args:
22
+ load_point_clouds: false
23
+ mask_depths: false
24
+ mask_images: false
25
+ model_factory_ImplicitronModelFactory_args:
26
+ model_GenericModel_args:
27
+ loss_weights:
28
+ loss_mask_bce: 1.0
29
+ loss_prev_stage_mask_bce: 1.0
30
+ loss_autodecoder_norm: 0.01
31
+ loss_rgb_mse: 1.0
32
+ loss_prev_stage_rgb_mse: 1.0
33
+ output_rasterized_mc: false
34
+ chunk_size_grid: 102400
35
+ render_image_height: 400
36
+ render_image_width: 400
37
+ num_passes: 2
38
+ implicit_function_NeuralRadianceFieldImplicitFunction_args:
39
+ n_harmonic_functions_xyz: 10
40
+ n_harmonic_functions_dir: 4
41
+ n_hidden_neurons_xyz: 256
42
+ n_hidden_neurons_dir: 128
43
+ n_layers_xyz: 8
44
+ append_xyz:
45
+ - 5
46
+ raysampler_AdaptiveRaySampler_args:
47
+ n_rays_per_image_sampled_from_mask: 1024
48
+ scene_extent: 8.0
49
+ n_pts_per_ray_training: 64
50
+ n_pts_per_ray_evaluation: 64
51
+ stratified_point_sampling_training: true
52
+ stratified_point_sampling_evaluation: false
53
+ renderer_MultiPassEmissionAbsorptionRenderer_args:
54
+ n_pts_per_ray_fine_training: 64
55
+ n_pts_per_ray_fine_evaluation: 64
56
+ append_coarse_samples_to_fine: true
57
+ density_noise_std_train: 1.0
58
+ view_pooler_args:
59
+ view_sampler_args:
60
+ masked_sampling: false
61
+ image_feature_extractor_ResNetFeatureExtractor_args:
62
+ stages:
63
+ - 1
64
+ - 2
65
+ - 3
66
+ - 4
67
+ proj_dim: 16
68
+ image_rescale: 0.32
69
+ first_max_pool: false
70
+ optimizer_factory_ImplicitronOptimizerFactory_args:
71
+ breed: Adam
72
+ weight_decay: 0.0
73
+ lr_policy: MultiStepLR
74
+ multistep_lr_milestones: []
75
+ lr: 0.0005
76
+ gamma: 0.1
77
+ momentum: 0.9
78
+ betas:
79
+ - 0.9
80
+ - 0.999
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_normed.yaml ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_factory_ImplicitronModelFactory_args:
2
+ model_GenericModel_args:
3
+ image_feature_extractor_class_type: ResNetFeatureExtractor
4
+ image_feature_extractor_ResNetFeatureExtractor_args:
5
+ add_images: true
6
+ add_masks: true
7
+ first_max_pool: true
8
+ image_rescale: 0.375
9
+ l2_norm: true
10
+ name: resnet34
11
+ normalize_image: true
12
+ pretrained: true
13
+ stages:
14
+ - 1
15
+ - 2
16
+ - 3
17
+ - 4
18
+ proj_dim: 32
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_transformer.yaml ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_factory_ImplicitronModelFactory_args:
2
+ model_GenericModel_args:
3
+ image_feature_extractor_class_type: ResNetFeatureExtractor
4
+ image_feature_extractor_ResNetFeatureExtractor_args:
5
+ add_images: true
6
+ add_masks: true
7
+ first_max_pool: false
8
+ image_rescale: 0.375
9
+ l2_norm: true
10
+ name: resnet34
11
+ normalize_image: true
12
+ pretrained: true
13
+ stages:
14
+ - 1
15
+ - 2
16
+ - 3
17
+ - 4
18
+ proj_dim: 16
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_unnormed.yaml ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model_factory_ImplicitronModelFactory_args:
2
+ model_GenericModel_args:
3
+ image_feature_extractor_class_type: ResNetFeatureExtractor
4
+ image_feature_extractor_ResNetFeatureExtractor_args:
5
+ stages:
6
+ - 1
7
+ - 2
8
+ - 3
9
+ first_max_pool: false
10
+ proj_dim: -1
11
+ l2_norm: false
12
+ image_rescale: 0.375
13
+ name: resnet34
14
+ normalize_image: true
15
+ pretrained: true
16
+ view_pooler_args:
17
+ feature_aggregator_AngleWeightedReductionFeatureAggregator_args:
18
+ reduction_functions:
19
+ - AVG
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_base.yaml ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - repro_base.yaml
3
+ - _self_
4
+ data_source_ImplicitronDataSource_args:
5
+ data_loader_map_provider_SequenceDataLoaderMapProvider_args:
6
+ batch_size: 10
7
+ dataset_length_train: 1000
8
+ dataset_length_val: 1
9
+ num_workers: 8
10
+ train_conditioning_type: SAME
11
+ val_conditioning_type: SAME
12
+ test_conditioning_type: SAME
13
+ images_per_seq_options:
14
+ - 2
15
+ - 3
16
+ - 4
17
+ - 5
18
+ - 6
19
+ - 7
20
+ - 8
21
+ - 9
22
+ - 10
23
+ dataset_map_provider_JsonIndexDatasetMapProvider_args:
24
+ assert_single_seq: false
25
+ task_str: multisequence
26
+ n_frames_per_sequence: -1
27
+ test_on_train: true
28
+ test_restrict_sequence_id: 0
29
+ optimizer_factory_ImplicitronOptimizerFactory_args:
30
+ multistep_lr_milestones:
31
+ - 1000
32
+ training_loop_ImplicitronTrainingLoop_args:
33
+ max_epochs: 3000
34
+ evaluator_ImplicitronEvaluator_args:
35
+ camera_difficulty_bin_breaks:
36
+ - 0.666667
37
+ - 0.833334
38
+ is_multisequence: true
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_co3dv2_base.yaml ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ data_source_ImplicitronDataSource_args:
2
+ dataset_map_provider_class_type: JsonIndexDatasetMapProviderV2
3
+ dataset_map_provider_JsonIndexDatasetMapProviderV2_args:
4
+ category: teddybear
5
+ subset_name: fewview_dev
6
+ training_loop_ImplicitronTrainingLoop_args:
7
+ evaluator_ImplicitronEvaluator_args:
8
+ is_multisequence: true
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_idr_ad.yaml ADDED
@@ -0,0 +1,65 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - repro_multiseq_base.yaml
3
+ - _self_
4
+ model_factory_ImplicitronModelFactory_args:
5
+ model_GenericModel_args:
6
+ loss_weights:
7
+ loss_mask_bce: 100.0
8
+ loss_kl: 0.0
9
+ loss_rgb_mse: 1.0
10
+ loss_eikonal: 0.1
11
+ chunk_size_grid: 65536
12
+ num_passes: 1
13
+ output_rasterized_mc: true
14
+ sampling_mode_training: mask_sample
15
+ global_encoder_class_type: SequenceAutodecoder
16
+ global_encoder_SequenceAutodecoder_args:
17
+ autodecoder_args:
18
+ n_instances: 20000
19
+ init_scale: 1.0
20
+ encoding_dim: 256
21
+ implicit_function_IdrFeatureField_args:
22
+ n_harmonic_functions_xyz: 6
23
+ bias: 0.6
24
+ d_in: 3
25
+ d_out: 1
26
+ dims:
27
+ - 512
28
+ - 512
29
+ - 512
30
+ - 512
31
+ - 512
32
+ - 512
33
+ - 512
34
+ - 512
35
+ geometric_init: true
36
+ pooled_feature_dim: 0
37
+ skip_in:
38
+ - 6
39
+ weight_norm: true
40
+ renderer_SignedDistanceFunctionRenderer_args:
41
+ ray_tracer_args:
42
+ line_search_step: 0.5
43
+ line_step_iters: 3
44
+ n_secant_steps: 8
45
+ n_steps: 100
46
+ sdf_threshold: 5.0e-05
47
+ ray_normal_coloring_network_args:
48
+ d_in: 9
49
+ d_out: 3
50
+ dims:
51
+ - 512
52
+ - 512
53
+ - 512
54
+ - 512
55
+ mode: idr
56
+ n_harmonic_functions_dir: 4
57
+ pooled_feature_dim: 0
58
+ weight_norm: true
59
+ raysampler_AdaptiveRaySampler_args:
60
+ n_rays_per_image_sampled_from_mask: 1024
61
+ n_pts_per_ray_training: 0
62
+ n_pts_per_ray_evaluation: 0
63
+ scene_extent: 8.0
64
+ renderer_class_type: SignedDistanceFunctionRenderer
65
+ implicit_function_class_type: IdrFeatureField
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerf_ad.yaml ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - repro_multiseq_base.yaml
3
+ - _self_
4
+ model_factory_ImplicitronModelFactory_args:
5
+ model_GenericModel_args:
6
+ chunk_size_grid: 16000
7
+ view_pooler_enabled: false
8
+ global_encoder_class_type: SequenceAutodecoder
9
+ global_encoder_SequenceAutodecoder_args:
10
+ autodecoder_args:
11
+ n_instances: 20000
12
+ encoding_dim: 256
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerf_wce.yaml ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - repro_multiseq_base.yaml
3
+ - repro_feat_extractor_unnormed.yaml
4
+ - _self_
5
+ model_factory_ImplicitronModelFactory_args:
6
+ model_GenericModel_args:
7
+ chunk_size_grid: 16000
8
+ view_pooler_enabled: true
9
+ raysampler_AdaptiveRaySampler_args:
10
+ n_rays_per_image_sampled_from_mask: 850
11
+ training_loop_ImplicitronTrainingLoop_args:
12
+ clip_grad: 1.0
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerformer.yaml ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - repro_multiseq_base.yaml
3
+ - repro_feat_extractor_transformer.yaml
4
+ - _self_
5
+ model_factory_ImplicitronModelFactory_args:
6
+ model_GenericModel_args:
7
+ chunk_size_grid: 16000
8
+ raysampler_AdaptiveRaySampler_args:
9
+ n_rays_per_image_sampled_from_mask: 800
10
+ n_pts_per_ray_training: 32
11
+ n_pts_per_ray_evaluation: 32
12
+ renderer_MultiPassEmissionAbsorptionRenderer_args:
13
+ n_pts_per_ray_fine_training: 16
14
+ n_pts_per_ray_fine_evaluation: 16
15
+ implicit_function_class_type: NeRFormerImplicitFunction
16
+ view_pooler_enabled: true
17
+ view_pooler_args:
18
+ feature_aggregator_class_type: IdentityFeatureAggregator
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerformer_angle_w.yaml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - repro_multiseq_nerformer.yaml
3
+ - _self_
4
+ model_factory_ImplicitronModelFactory_args:
5
+ model_GenericModel_args:
6
+ view_pooler_args:
7
+ feature_aggregator_class_type: AngleWeightedIdentityFeatureAggregator
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_ad_hypernet.yaml ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - repro_multiseq_base.yaml
3
+ - _self_
4
+ model_factory_ImplicitronModelFactory_args:
5
+ model_GenericModel_args:
6
+ chunk_size_grid: 16000
7
+ view_pooler_enabled: false
8
+ n_train_target_views: -1
9
+ num_passes: 1
10
+ loss_weights:
11
+ loss_rgb_mse: 200.0
12
+ loss_prev_stage_rgb_mse: 0.0
13
+ loss_mask_bce: 1.0
14
+ loss_prev_stage_mask_bce: 0.0
15
+ loss_autodecoder_norm: 0.001
16
+ depth_neg_penalty: 10000.0
17
+ global_encoder_class_type: SequenceAutodecoder
18
+ global_encoder_SequenceAutodecoder_args:
19
+ autodecoder_args:
20
+ encoding_dim: 256
21
+ n_instances: 20000
22
+ raysampler_class_type: NearFarRaySampler
23
+ raysampler_NearFarRaySampler_args:
24
+ n_rays_per_image_sampled_from_mask: 2048
25
+ min_depth: 0.05
26
+ max_depth: 0.05
27
+ n_pts_per_ray_training: 1
28
+ n_pts_per_ray_evaluation: 1
29
+ stratified_point_sampling_training: false
30
+ stratified_point_sampling_evaluation: false
31
+ renderer_class_type: LSTMRenderer
32
+ implicit_function_class_type: SRNHyperNetImplicitFunction
33
+ optimizer_factory_ImplicitronOptimizerFactory_args:
34
+ breed: Adam
35
+ lr: 5.0e-05
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_ad_hypernet_noharm.yaml ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - repro_multiseq_srn_ad_hypernet.yaml
3
+ - _self_
4
+ model_factory_ImplicitronModelFactory_args:
5
+ model_GenericModel_args:
6
+ num_passes: 1
7
+ implicit_function_SRNHyperNetImplicitFunction_args:
8
+ pixel_generator_args:
9
+ n_harmonic_functions: 0
10
+ hypernet_args:
11
+ n_harmonic_functions: 0
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_wce.yaml ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - repro_multiseq_base.yaml
3
+ - repro_feat_extractor_normed.yaml
4
+ - _self_
5
+ model_factory_ImplicitronModelFactory_args:
6
+ model_GenericModel_args:
7
+ chunk_size_grid: 32000
8
+ num_passes: 1
9
+ n_train_target_views: -1
10
+ loss_weights:
11
+ loss_rgb_mse: 200.0
12
+ loss_prev_stage_rgb_mse: 0.0
13
+ loss_mask_bce: 1.0
14
+ loss_prev_stage_mask_bce: 0.0
15
+ loss_autodecoder_norm: 0.0
16
+ depth_neg_penalty: 10000.0
17
+ raysampler_class_type: NearFarRaySampler
18
+ raysampler_NearFarRaySampler_args:
19
+ n_rays_per_image_sampled_from_mask: 2048
20
+ min_depth: 0.05
21
+ max_depth: 0.05
22
+ n_pts_per_ray_training: 1
23
+ n_pts_per_ray_evaluation: 1
24
+ stratified_point_sampling_training: false
25
+ stratified_point_sampling_evaluation: false
26
+ renderer_class_type: LSTMRenderer
27
+ implicit_function_class_type: SRNImplicitFunction
28
+ view_pooler_enabled: true
29
+ optimizer_factory_ImplicitronOptimizerFactory_args:
30
+ breed: Adam
31
+ lr: 5.0e-05
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_wce_noharm.yaml ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - repro_multiseq_srn_wce.yaml
3
+ - _self_
4
+ model_factory_ImplicitronModelFactory_args:
5
+ model_GenericModel_args:
6
+ num_passes: 1
7
+ implicit_function_SRNImplicitFunction_args:
8
+ pixel_generator_args:
9
+ n_harmonic_functions: 0
10
+ raymarch_function_args:
11
+ n_harmonic_functions: 0
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_nerf_wce.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ defaults:
2
+ - repro_multiseq_nerf_wce.yaml
3
+ - repro_multiseq_co3dv2_base.yaml
4
+ - _self_
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_nerformer.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ defaults:
2
+ - repro_multiseq_nerformer.yaml
3
+ - repro_multiseq_co3dv2_base.yaml
4
+ - _self_
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_srn_ad_hypernet.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ defaults:
2
+ - repro_multiseq_srn_ad_hypernet.yaml
3
+ - repro_multiseq_co3dv2_base.yaml
4
+ - _self_
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_srn_wce.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ defaults:
2
+ - repro_multiseq_srn_wce.yaml
3
+ - repro_multiseq_co3dv2_base.yaml
4
+ - _self_
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_base.yaml ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - repro_base
3
+ - _self_
4
+ data_source_ImplicitronDataSource_args:
5
+ data_loader_map_provider_SequenceDataLoaderMapProvider_args:
6
+ batch_size: 1
7
+ dataset_length_train: 1000
8
+ dataset_length_val: 1
9
+ num_workers: 8
10
+ dataset_map_provider_JsonIndexDatasetMapProvider_args:
11
+ assert_single_seq: true
12
+ n_frames_per_sequence: -1
13
+ test_restrict_sequence_id: 0
14
+ test_on_train: false
15
+ model_factory_ImplicitronModelFactory_args:
16
+ model_GenericModel_args:
17
+ render_image_height: 800
18
+ render_image_width: 800
19
+ log_vars:
20
+ - loss_rgb_psnr_fg
21
+ - loss_rgb_psnr
22
+ - loss_eikonal
23
+ - loss_prev_stage_rgb_psnr
24
+ - loss_mask_bce
25
+ - loss_prev_stage_mask_bce
26
+ - loss_rgb_mse
27
+ - loss_prev_stage_rgb_mse
28
+ - loss_depth_abs
29
+ - loss_depth_abs_fg
30
+ - loss_kl
31
+ - loss_mask_neg_iou
32
+ - objective
33
+ - epoch
34
+ - sec/it
35
+ optimizer_factory_ImplicitronOptimizerFactory_args:
36
+ lr: 0.0005
37
+ multistep_lr_milestones:
38
+ - 200
39
+ - 300
40
+ training_loop_ImplicitronTrainingLoop_args:
41
+ max_epochs: 400
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_co3dv2_base.yaml ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ data_source_ImplicitronDataSource_args:
2
+ dataset_map_provider_class_type: JsonIndexDatasetMapProviderV2
3
+ dataset_map_provider_JsonIndexDatasetMapProviderV2_args:
4
+ category: teddybear
5
+ subset_name: manyview_dev_0
6
+ training_loop_ImplicitronTrainingLoop_args:
7
+ evaluator_ImplicitronEvaluator_args:
8
+ is_multisequence: false
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_idr.yaml ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - repro_singleseq_base
3
+ - _self_
4
+ model_factory_ImplicitronModelFactory_args:
5
+ model_GenericModel_args:
6
+ loss_weights:
7
+ loss_mask_bce: 100.0
8
+ loss_kl: 0.0
9
+ loss_rgb_mse: 1.0
10
+ loss_eikonal: 0.1
11
+ chunk_size_grid: 65536
12
+ num_passes: 1
13
+ view_pooler_enabled: false
14
+ implicit_function_IdrFeatureField_args:
15
+ n_harmonic_functions_xyz: 6
16
+ bias: 0.6
17
+ d_in: 3
18
+ d_out: 1
19
+ dims:
20
+ - 512
21
+ - 512
22
+ - 512
23
+ - 512
24
+ - 512
25
+ - 512
26
+ - 512
27
+ - 512
28
+ geometric_init: true
29
+ pooled_feature_dim: 0
30
+ skip_in:
31
+ - 6
32
+ weight_norm: true
33
+ renderer_SignedDistanceFunctionRenderer_args:
34
+ ray_tracer_args:
35
+ line_search_step: 0.5
36
+ line_step_iters: 3
37
+ n_secant_steps: 8
38
+ n_steps: 100
39
+ sdf_threshold: 5.0e-05
40
+ ray_normal_coloring_network_args:
41
+ d_in: 9
42
+ d_out: 3
43
+ dims:
44
+ - 512
45
+ - 512
46
+ - 512
47
+ - 512
48
+ mode: idr
49
+ n_harmonic_functions_dir: 4
50
+ pooled_feature_dim: 0
51
+ weight_norm: true
52
+ raysampler_AdaptiveRaySampler_args:
53
+ n_rays_per_image_sampled_from_mask: 1024
54
+ n_pts_per_ray_training: 0
55
+ n_pts_per_ray_evaluation: 0
56
+ renderer_class_type: SignedDistanceFunctionRenderer
57
+ implicit_function_class_type: IdrFeatureField
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf.yaml ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ defaults:
2
+ - repro_singleseq_base
3
+ - _self_
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf_blender.yaml ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - repro_singleseq_base
3
+ - _self_
4
+ exp_dir: "./data/nerf_blender_repro/${oc.env:BLENDER_SINGLESEQ_CLASS}"
5
+ data_source_ImplicitronDataSource_args:
6
+ data_loader_map_provider_SequenceDataLoaderMapProvider_args:
7
+ dataset_length_train: 100
8
+ dataset_map_provider_class_type: BlenderDatasetMapProvider
9
+ dataset_map_provider_BlenderDatasetMapProvider_args:
10
+ base_dir: ${oc.env:BLENDER_DATASET_ROOT}/${oc.env:BLENDER_SINGLESEQ_CLASS}
11
+ n_known_frames_for_test: null
12
+ object_name: ${oc.env:BLENDER_SINGLESEQ_CLASS}
13
+ path_manager_factory_class_type: PathManagerFactory
14
+ path_manager_factory_PathManagerFactory_args:
15
+ silence_logs: true
16
+
17
+ model_factory_ImplicitronModelFactory_args:
18
+ model_GenericModel_args:
19
+ mask_images: false
20
+ raysampler_class_type: AdaptiveRaySampler
21
+ raysampler_AdaptiveRaySampler_args:
22
+ n_pts_per_ray_training: 64
23
+ n_pts_per_ray_evaluation: 64
24
+ n_rays_per_image_sampled_from_mask: 4096
25
+ stratified_point_sampling_training: true
26
+ stratified_point_sampling_evaluation: false
27
+ scene_extent: 2.0
28
+ scene_center:
29
+ - 0.0
30
+ - 0.0
31
+ - 0.0
32
+ renderer_MultiPassEmissionAbsorptionRenderer_args:
33
+ density_noise_std_train: 0.0
34
+ n_pts_per_ray_fine_training: 128
35
+ n_pts_per_ray_fine_evaluation: 128
36
+ raymarcher_EmissionAbsorptionRaymarcher_args:
37
+ blend_output: false
38
+ loss_weights:
39
+ loss_rgb_mse: 1.0
40
+ loss_prev_stage_rgb_mse: 1.0
41
+ loss_mask_bce: 0.0
42
+ loss_prev_stage_mask_bce: 0.0
43
+ loss_autodecoder_norm: 0.00
44
+
45
+ optimizer_factory_ImplicitronOptimizerFactory_args:
46
+ exponential_lr_step_size: 3001
47
+ lr_policy: LinearExponential
48
+ linear_exponential_lr_milestone: 200
49
+
50
+ training_loop_ImplicitronTrainingLoop_args:
51
+ max_epochs: 6000
52
+ metric_print_interval: 10
53
+ store_checkpoints_purge: 3
54
+ test_when_finished: true
55
+ validation_interval: 100
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf_wce.yaml ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - repro_singleseq_wce_base.yaml
3
+ - repro_feat_extractor_unnormed.yaml
4
+ - _self_
5
+ model_factory_ImplicitronModelFactory_args:
6
+ model_GenericModel_args:
7
+ chunk_size_grid: 16000
8
+ view_pooler_enabled: true
9
+ raysampler_AdaptiveRaySampler_args:
10
+ n_rays_per_image_sampled_from_mask: 850
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerformer.yaml ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - repro_singleseq_wce_base.yaml
3
+ - repro_feat_extractor_transformer.yaml
4
+ - _self_
5
+ model_factory_ImplicitronModelFactory_args:
6
+ model_GenericModel_args:
7
+ chunk_size_grid: 16000
8
+ view_pooler_enabled: true
9
+ implicit_function_class_type: NeRFormerImplicitFunction
10
+ raysampler_AdaptiveRaySampler_args:
11
+ n_rays_per_image_sampled_from_mask: 800
12
+ n_pts_per_ray_training: 32
13
+ n_pts_per_ray_evaluation: 32
14
+ renderer_MultiPassEmissionAbsorptionRenderer_args:
15
+ n_pts_per_ray_fine_training: 16
16
+ n_pts_per_ray_fine_evaluation: 16
17
+ view_pooler_args:
18
+ feature_aggregator_class_type: IdentityFeatureAggregator
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn.yaml ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - repro_singleseq_base.yaml
3
+ - _self_
4
+ model_factory_ImplicitronModelFactory_args:
5
+ model_GenericModel_args:
6
+ num_passes: 1
7
+ chunk_size_grid: 32000
8
+ view_pooler_enabled: false
9
+ loss_weights:
10
+ loss_rgb_mse: 200.0
11
+ loss_prev_stage_rgb_mse: 0.0
12
+ loss_mask_bce: 1.0
13
+ loss_prev_stage_mask_bce: 0.0
14
+ loss_autodecoder_norm: 0.0
15
+ depth_neg_penalty: 10000.0
16
+ raysampler_class_type: NearFarRaySampler
17
+ raysampler_NearFarRaySampler_args:
18
+ n_rays_per_image_sampled_from_mask: 2048
19
+ min_depth: 0.05
20
+ max_depth: 0.05
21
+ n_pts_per_ray_training: 1
22
+ n_pts_per_ray_evaluation: 1
23
+ stratified_point_sampling_training: false
24
+ stratified_point_sampling_evaluation: false
25
+ renderer_class_type: LSTMRenderer
26
+ implicit_function_class_type: SRNImplicitFunction
27
+ optimizer_factory_ImplicitronOptimizerFactory_args:
28
+ breed: Adam
29
+ lr: 5.0e-05
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_noharm.yaml ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - repro_singleseq_srn.yaml
3
+ - _self_
4
+ model_factory_ImplicitronModelFactory_args:
5
+ model_GenericModel_args:
6
+ num_passes: 1
7
+ implicit_function_SRNImplicitFunction_args:
8
+ pixel_generator_args:
9
+ n_harmonic_functions: 0
10
+ raymarch_function_args:
11
+ n_harmonic_functions: 0
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_wce.yaml ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - repro_singleseq_wce_base
3
+ - repro_feat_extractor_normed.yaml
4
+ - _self_
5
+ model_factory_ImplicitronModelFactory_args:
6
+ model_GenericModel_args:
7
+ num_passes: 1
8
+ chunk_size_grid: 32000
9
+ view_pooler_enabled: true
10
+ loss_weights:
11
+ loss_rgb_mse: 200.0
12
+ loss_prev_stage_rgb_mse: 0.0
13
+ loss_mask_bce: 1.0
14
+ loss_prev_stage_mask_bce: 0.0
15
+ loss_autodecoder_norm: 0.0
16
+ depth_neg_penalty: 10000.0
17
+ raysampler_class_type: NearFarRaySampler
18
+ raysampler_NearFarRaySampler_args:
19
+ n_rays_per_image_sampled_from_mask: 2048
20
+ min_depth: 0.05
21
+ max_depth: 0.05
22
+ n_pts_per_ray_training: 1
23
+ n_pts_per_ray_evaluation: 1
24
+ stratified_point_sampling_training: false
25
+ stratified_point_sampling_evaluation: false
26
+ renderer_class_type: LSTMRenderer
27
+ implicit_function_class_type: SRNImplicitFunction
28
+ optimizer_factory_ImplicitronOptimizerFactory_args:
29
+ breed: Adam
30
+ lr: 5.0e-05
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_wce_noharm.yaml ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - repro_singleseq_srn_wce.yaml
3
+ - _self_
4
+ model_factory_ImplicitronModelFactory_args:
5
+ model_GenericModel_args:
6
+ num_passes: 1
7
+ implicit_function_SRNImplicitFunction_args:
8
+ pixel_generator_args:
9
+ n_harmonic_functions: 0
10
+ raymarch_function_args:
11
+ n_harmonic_functions: 0
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_idr.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ defaults:
2
+ - repro_singleseq_idr.yaml
3
+ - repro_singleseq_co3dv2_base.yaml
4
+ - _self_
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_nerf.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ defaults:
2
+ - repro_singleseq_nerf.yaml
3
+ - repro_singleseq_co3dv2_base.yaml
4
+ - _self_
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_nerformer.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ defaults:
2
+ - repro_singleseq_nerformer.yaml
3
+ - repro_singleseq_co3dv2_base.yaml
4
+ - _self_
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_srn_noharm.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ defaults:
2
+ - repro_singleseq_srn_noharm.yaml
3
+ - repro_singleseq_co3dv2_base.yaml
4
+ - _self_
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_wce_base.yaml ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ defaults:
2
+ - repro_singleseq_base
3
+ - _self_
4
+ data_source_ImplicitronDataSource_args:
5
+ data_loader_map_provider_SequenceDataLoaderMapProvider_args:
6
+ batch_size: 10
7
+ dataset_length_train: 1000
8
+ dataset_length_val: 1
9
+ num_workers: 8
10
+ train_conditioning_type: SAME
11
+ val_conditioning_type: SAME
12
+ test_conditioning_type: SAME
13
+ images_per_seq_options:
14
+ - 2
15
+ - 3
16
+ - 4
17
+ - 5
18
+ - 6
19
+ - 7
20
+ - 8
21
+ - 9
22
+ - 10
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/common/__init__.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ from .datatypes import Device, get_device, make_device
10
+
11
+
12
+ __all__ = [k for k in globals().keys() if not k.startswith("_")]
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/common/compat.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ from typing import Sequence, Tuple, Union
10
+
11
+ import torch
12
+
13
+
14
+ """
15
+ Some functions which depend on PyTorch or Python versions.
16
+ """
17
+
18
+
19
+ def meshgrid_ij(
20
+ *A: Union[torch.Tensor, Sequence[torch.Tensor]],
21
+ ) -> Tuple[torch.Tensor, ...]: # pragma: no cover
22
+ """
23
+ Like torch.meshgrid was before PyTorch 1.10.0, i.e. with indexing set to ij
24
+ """
25
+ if (
26
+ # pyre-fixme[16]: Callable `meshgrid` has no attribute `__kwdefaults__`.
27
+ torch.meshgrid.__kwdefaults__ is not None
28
+ and "indexing" in torch.meshgrid.__kwdefaults__
29
+ ):
30
+ # PyTorch >= 1.10.0
31
+ # pyre-fixme[6]: For 1st param expected `Union[List[Tensor], Tensor]` but
32
+ # got `Union[Sequence[Tensor], Tensor]`.
33
+ return torch.meshgrid(*A, indexing="ij")
34
+ # pyre-fixme[6]: For 1st param expected `Union[List[Tensor], Tensor]` but got
35
+ # `Union[Sequence[Tensor], Tensor]`.
36
+ return torch.meshgrid(*A)
37
+
38
+
39
+ def prod(iterable, *, start=1):
40
+ """
41
+ Like math.prod in Python 3.8 and later.
42
+ """
43
+ for i in iterable:
44
+ start *= i
45
+ return start
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/common/datatypes.py ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ from typing import Optional, Union
10
+
11
+ import torch
12
+
13
+
14
+ Device = Union[str, torch.device]
15
+
16
+
17
+ def make_device(device: Device) -> torch.device:
18
+ """
19
+ Makes an actual torch.device object from the device specified as
20
+ either a string or torch.device object. If the device is `cuda` without
21
+ a specific index, the index of the current device is assigned.
22
+
23
+ Args:
24
+ device: Device (as str or torch.device)
25
+
26
+ Returns:
27
+ A matching torch.device object
28
+ """
29
+ device = torch.device(device) if isinstance(device, str) else device
30
+ if device.type == "cuda" and device.index is None:
31
+ # If cuda but with no index, then the current cuda device is indicated.
32
+ # In that case, we fix to that device
33
+ device = torch.device(f"cuda:{torch.cuda.current_device()}")
34
+ return device
35
+
36
+
37
+ def get_device(x, device: Optional[Device] = None) -> torch.device:
38
+ """
39
+ Gets the device of the specified variable x if it is a tensor, or
40
+ falls back to a default CPU device otherwise. Allows overriding by
41
+ providing an explicit device.
42
+
43
+ Args:
44
+ x: a torch.Tensor to get the device from or another type
45
+ device: Device (as str or torch.device) to fall back to
46
+
47
+ Returns:
48
+ A matching torch.device object
49
+ """
50
+
51
+ # User overrides device
52
+ if device is not None:
53
+ return make_device(device)
54
+
55
+ # Set device based on input tensor
56
+ if torch.is_tensor(x):
57
+ return x.device
58
+
59
+ # Default device is cpu
60
+ return torch.device("cpu")
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/common/linear_with_repeat.py ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ import math
10
+ from typing import Tuple
11
+
12
+ import torch
13
+ import torch.nn.functional as F
14
+ from torch.nn import init, Parameter
15
+
16
+
17
+ class LinearWithRepeat(torch.nn.Module):
18
+ """
19
+ if x has shape (..., k, n1)
20
+ and y has shape (..., n2)
21
+ then
22
+ LinearWithRepeat(n1 + n2, out_features).forward((x,y))
23
+ is equivalent to
24
+ Linear(n1 + n2, out_features).forward(
25
+ torch.cat([x, y.unsqueeze(-2).expand(..., k, n2)], dim=-1)
26
+ )
27
+
28
+ Or visually:
29
+ Given the following, for each ray,
30
+
31
+ feature ->
32
+
33
+ ray xxxxxxxx
34
+ position xxxxxxxx
35
+ | xxxxxxxx
36
+ v xxxxxxxx
37
+
38
+
39
+ and
40
+ yyyyyyyy
41
+
42
+ where the y's do not depend on the position
43
+ but only on the ray,
44
+ we want to evaluate a Linear layer on both
45
+ types of data at every position.
46
+
47
+ It's as if we constructed
48
+
49
+ xxxxxxxxyyyyyyyy
50
+ xxxxxxxxyyyyyyyy
51
+ xxxxxxxxyyyyyyyy
52
+ xxxxxxxxyyyyyyyy
53
+
54
+ and sent that through the Linear.
55
+ """
56
+
57
+ def __init__(
58
+ self,
59
+ in_features: int,
60
+ out_features: int,
61
+ bias: bool = True,
62
+ device=None,
63
+ dtype=None,
64
+ ) -> None:
65
+ """
66
+ Copied from torch.nn.Linear.
67
+ """
68
+ factory_kwargs = {"device": device, "dtype": dtype}
69
+ super().__init__()
70
+ self.in_features = in_features
71
+ self.out_features = out_features
72
+ self.weight = Parameter(
73
+ torch.empty((out_features, in_features), **factory_kwargs)
74
+ )
75
+ if bias:
76
+ self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
77
+ else:
78
+ self.register_parameter("bias", None)
79
+ self.reset_parameters()
80
+
81
+ def reset_parameters(self) -> None:
82
+ """
83
+ Copied from torch.nn.Linear.
84
+ """
85
+ init.kaiming_uniform_(self.weight, a=math.sqrt(5))
86
+ if self.bias is not None:
87
+ fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
88
+ bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
89
+ init.uniform_(self.bias, -bound, bound)
90
+
91
+ def forward(self, input: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
92
+ n1 = input[0].shape[-1]
93
+ output1 = F.linear(input[0], self.weight[:, :n1], self.bias)
94
+ output2 = F.linear(input[1], self.weight[:, n1:], None)
95
+ return output1 + output2.unsqueeze(-2)
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/common/workaround/__init__.py ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ from .symeig3x3 import symeig3x3
10
+ from .utils import _safe_det_3x3
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/common/workaround/symeig3x3.py ADDED
@@ -0,0 +1,319 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+ import math
10
+ from typing import Optional, Tuple
11
+
12
+ import torch
13
+ import torch.nn.functional as F
14
+ from torch import nn
15
+
16
+
17
+ class _SymEig3x3(nn.Module):
18
+ """
19
+ Optimized implementation of eigenvalues and eigenvectors computation for symmetric 3x3
20
+ matrices.
21
+
22
+ Please see https://en.wikipedia.org/wiki/Eigenvalue_algorithm#3.C3.973_matrices
23
+ and https://www.geometrictools.com/Documentation/RobustEigenSymmetric3x3.pdf
24
+ """
25
+
26
+ def __init__(self, eps: Optional[float] = None) -> None:
27
+ """
28
+ Args:
29
+ eps: epsilon to specify, if None then use torch.float eps
30
+ """
31
+ super().__init__()
32
+
33
+ self.register_buffer("_identity", torch.eye(3))
34
+ self.register_buffer("_rotation_2d", torch.tensor([[0.0, -1.0], [1.0, 0.0]]))
35
+ self.register_buffer(
36
+ "_rotations_3d", self._create_rotation_matrices(self._rotation_2d)
37
+ )
38
+
39
+ self._eps = eps or torch.finfo(torch.float).eps
40
+
41
+ @staticmethod
42
+ def _create_rotation_matrices(rotation_2d) -> torch.Tensor:
43
+ """
44
+ Compute rotations for later use in U V computation
45
+
46
+ Args:
47
+ rotation_2d: a π/2 rotation matrix.
48
+
49
+ Returns:
50
+ a (3, 3, 3) tensor containing 3 rotation matrices around each of the coordinate axes
51
+ by π/2
52
+ """
53
+
54
+ rotations_3d = torch.zeros((3, 3, 3))
55
+ rotation_axes = set(range(3))
56
+ for rotation_axis in rotation_axes:
57
+ rest = list(rotation_axes - {rotation_axis})
58
+ rotations_3d[rotation_axis][rest[0], rest] = rotation_2d[0]
59
+ rotations_3d[rotation_axis][rest[1], rest] = rotation_2d[1]
60
+
61
+ return rotations_3d
62
+
63
+ def forward(
64
+ self, inputs: torch.Tensor, eigenvectors: bool = True
65
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
66
+ """
67
+ Compute eigenvalues and (optionally) eigenvectors
68
+
69
+ Args:
70
+ inputs: symmetric matrices with shape of (..., 3, 3)
71
+ eigenvectors: whether should we compute only eigenvalues or eigenvectors as well
72
+
73
+ Returns:
74
+ A tuple of (eigenvalues, eigenvectors), where eigenvectors is None when not
75
+ requested. Eigenvalues are of shape (..., 3) and eigenvectors (..., 3, 3)
76
+ """
77
+ if inputs.shape[-2:] != (3, 3):
78
+ raise ValueError("Only inputs of shape (..., 3, 3) are supported.")
79
+
80
+ inputs_diag = inputs.diagonal(dim1=-2, dim2=-1)
81
+ inputs_trace = inputs_diag.sum(-1)
82
+ q = inputs_trace / 3.0
83
+
84
+ # Calculate squared sum of elements outside the main diagonal / 2
85
+ # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
86
+ p1 = ((inputs**2).sum(dim=(-1, -2)) - (inputs_diag**2).sum(-1)) / 2
87
+ # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
88
+ p2 = ((inputs_diag - q[..., None]) ** 2).sum(dim=-1) + 2.0 * p1.clamp(self._eps)
89
+
90
+ p = torch.sqrt(p2 / 6.0)
91
+ B = (inputs - q[..., None, None] * self._identity) / p[..., None, None]
92
+
93
+ r = torch.det(B) / 2.0
94
+ # Keep r within (-1.0, 1.0) boundaries with a margin to prevent exploding gradients.
95
+ r = r.clamp(-1.0 + self._eps, 1.0 - self._eps)
96
+
97
+ phi = torch.acos(r) / 3.0
98
+ eig1 = q + 2 * p * torch.cos(phi)
99
+ eig2 = q + 2 * p * torch.cos(phi + 2 * math.pi / 3)
100
+ eig3 = 3 * q - eig1 - eig2
101
+ # eigenvals[..., i] is the i-th eigenvalue of the input, α0 ≤ α1 ≤ α2.
102
+ eigenvals = torch.stack((eig2, eig3, eig1), dim=-1)
103
+
104
+ # Soft dispatch between the degenerate case (diagonal A) and general.
105
+ # diag_soft_cond -> 1.0 when p1 < 6 * eps and diag_soft_cond -> 0.0 otherwise.
106
+ # We use 6 * eps to take into account the error accumulated during the p1 summation
107
+ diag_soft_cond = torch.exp(-((p1 / (6 * self._eps)) ** 2)).detach()[..., None]
108
+
109
+ # Eigenvalues are the ordered elements of main diagonal in the degenerate case
110
+ diag_eigenvals, _ = torch.sort(inputs_diag, dim=-1)
111
+ eigenvals = diag_soft_cond * diag_eigenvals + (1.0 - diag_soft_cond) * eigenvals
112
+
113
+ if eigenvectors:
114
+ eigenvecs = self._construct_eigenvecs_set(inputs, eigenvals)
115
+ else:
116
+ eigenvecs = None
117
+
118
+ return eigenvals, eigenvecs
119
+
120
+ def _construct_eigenvecs_set(
121
+ self, inputs: torch.Tensor, eigenvals: torch.Tensor
122
+ ) -> torch.Tensor:
123
+ """
124
+ Construct orthonormal set of eigenvectors by given inputs and pre-computed eigenvalues
125
+
126
+ Args:
127
+ inputs: tensor of symmetric matrices of shape (..., 3, 3)
128
+ eigenvals: tensor of pre-computed eigenvalues of shape (..., 3)
129
+
130
+ Returns:
131
+ Tensor of eigenvectors of shape (..., 3, 3), whose last-dimension columns
132
+ compose an orthonormal set
133
+ """
134
+ eigenvecs_tuple_for_01 = self._construct_eigenvecs(
135
+ inputs, eigenvals[..., 0], eigenvals[..., 1]
136
+ )
137
+ eigenvecs_for_01 = torch.stack(eigenvecs_tuple_for_01, dim=-1)
138
+
139
+ eigenvecs_tuple_for_21 = self._construct_eigenvecs(
140
+ inputs, eigenvals[..., 2], eigenvals[..., 1]
141
+ )
142
+ eigenvecs_for_21 = torch.stack(eigenvecs_tuple_for_21[::-1], dim=-1)
143
+
144
+ # The result will be smooth here even if both parts of comparison
145
+ # are close, because eigenvecs_01 and eigenvecs_21 would be mostly equal as well
146
+ eigenvecs_cond = (
147
+ eigenvals[..., 1] - eigenvals[..., 0]
148
+ > eigenvals[..., 2] - eigenvals[..., 1]
149
+ ).detach()
150
+ eigenvecs = torch.where(
151
+ eigenvecs_cond[..., None, None], eigenvecs_for_01, eigenvecs_for_21
152
+ )
153
+
154
+ return eigenvecs
155
+
156
+ def _construct_eigenvecs(
157
+ self, inputs: torch.Tensor, alpha0: torch.Tensor, alpha1: torch.Tensor
158
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
159
+ """
160
+ Construct an orthonormal set of eigenvectors by given pair of eigenvalues.
161
+
162
+ Args:
163
+ inputs: tensor of symmetric matrices of shape (..., 3, 3)
164
+ alpha0: first eigenvalues of shape (...)
165
+ alpha1: second eigenvalues of shape (...)
166
+
167
+ Returns:
168
+ Tuple of three eigenvector tensors of shape (..., 3, 3), composing an orthonormal
169
+ set
170
+ """
171
+
172
+ # Find the eigenvector corresponding to alpha0, its eigenvalue is distinct
173
+ ev0 = self._get_ev0(inputs - alpha0[..., None, None] * self._identity)
174
+ u, v = self._get_uv(ev0)
175
+ ev1 = self._get_ev1(inputs - alpha1[..., None, None] * self._identity, u, v)
176
+ # Third eigenvector is computed as the cross-product of the other two
177
+ ev2 = torch.cross(ev0, ev1, dim=-1)
178
+
179
+ return ev0, ev1, ev2
180
+
181
+ def _get_ev0(self, char_poly: torch.Tensor) -> torch.Tensor:
182
+ """
183
+ Construct the first normalized eigenvector given a characteristic polynomial
184
+
185
+ Args:
186
+ char_poly: a characteristic polynomials of the input matrices of shape (..., 3, 3)
187
+
188
+ Returns:
189
+ Tensor of first eigenvectors of shape (..., 3)
190
+ """
191
+
192
+ r01 = torch.cross(char_poly[..., 0, :], char_poly[..., 1, :], dim=-1)
193
+ r12 = torch.cross(char_poly[..., 1, :], char_poly[..., 2, :], dim=-1)
194
+ r02 = torch.cross(char_poly[..., 0, :], char_poly[..., 2, :], dim=-1)
195
+
196
+ cross_products = torch.stack((r01, r12, r02), dim=-2)
197
+ # Regularize it with + or -eps depending on the sign of the first vector
198
+ cross_products += self._eps * self._sign_without_zero(
199
+ cross_products[..., :1, :]
200
+ )
201
+
202
+ # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
203
+ norms_sq = (cross_products**2).sum(dim=-1)
204
+ max_norms_index = norms_sq.argmax(dim=-1)
205
+
206
+ # Pick only the cross-product with highest squared norm for each input
207
+ max_cross_products = self._gather_by_index(
208
+ cross_products, max_norms_index[..., None, None], -2
209
+ )
210
+ # Pick corresponding squared norms for each cross-product
211
+ max_norms_sq = self._gather_by_index(norms_sq, max_norms_index[..., None], -1)
212
+
213
+ # Normalize cross-product vectors by their norms
214
+ return max_cross_products / torch.sqrt(max_norms_sq[..., None])
215
+
216
+ def _gather_by_index(
217
+ self, source: torch.Tensor, index: torch.Tensor, dim: int
218
+ ) -> torch.Tensor:
219
+ """
220
+ Selects elements from the given source tensor by provided index tensor.
221
+ Number of dimensions should be the same for source and index tensors.
222
+
223
+ Args:
224
+ source: input tensor to gather from
225
+ index: index tensor with indices to gather from source
226
+ dim: dimension to gather across
227
+
228
+ Returns:
229
+ Tensor of shape same as the source with exception of specified dimension.
230
+ """
231
+
232
+ index_shape = list(source.shape)
233
+ index_shape[dim] = 1
234
+
235
+ return source.gather(dim, index.expand(index_shape)).squeeze(dim)
236
+
237
+ def _get_uv(self, w: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
238
+ """
239
+ Computes unit-length vectors U and V such that {U, V, W} is a right-handed
240
+ orthonormal set.
241
+
242
+ Args:
243
+ w: eigenvector tensor of shape (..., 3)
244
+
245
+ Returns:
246
+ Tuple of U and V unit-length vector tensors of shape (..., 3)
247
+ """
248
+
249
+ min_idx = w.abs().argmin(dim=-1)
250
+ rotation_2d = self._rotations_3d[min_idx].to(w)
251
+
252
+ u = F.normalize((rotation_2d @ w[..., None])[..., 0], dim=-1)
253
+ v = torch.cross(w, u, dim=-1)
254
+ return u, v
255
+
256
+ def _get_ev1(
257
+ self, char_poly: torch.Tensor, u: torch.Tensor, v: torch.Tensor
258
+ ) -> torch.Tensor:
259
+ """
260
+ Computes the second normalized eigenvector given a characteristic polynomial
261
+ and U and V vectors
262
+
263
+ Args:
264
+ char_poly: a characteristic polynomials of the input matrices of shape (..., 3, 3)
265
+ u: unit-length vectors from _get_uv method
266
+ v: unit-length vectors from _get_uv method
267
+
268
+ Returns:
269
+ Tensor of second eigenvectors of shape (..., 3)
270
+ """
271
+
272
+ j = torch.stack((u, v), dim=-1)
273
+ m = j.transpose(-1, -2) @ char_poly @ j
274
+
275
+ # If angle between those vectors is acute, take their sum = m[..., 0, :] + m[..., 1, :],
276
+ # otherwise take the difference = m[..., 0, :] - m[..., 1, :]
277
+ # m is in theory of rank 1 (or 0), so it snaps only when one of the rows is close to 0
278
+ is_acute_sign = self._sign_without_zero(
279
+ (m[..., 0, :] * m[..., 1, :]).sum(dim=-1)
280
+ ).detach()
281
+
282
+ rowspace = m[..., 0, :] + is_acute_sign[..., None] * m[..., 1, :]
283
+ # rowspace will be near zero for second-order eigenvalues
284
+ # this regularization guarantees abs(rowspace[0]) >= eps in a smooth'ish way
285
+ rowspace += self._eps * self._sign_without_zero(rowspace[..., :1])
286
+
287
+ return (
288
+ j
289
+ @ F.normalize(rowspace @ self._rotation_2d.to(rowspace), dim=-1)[..., None]
290
+ )[..., 0]
291
+
292
+ @staticmethod
293
+ def _sign_without_zero(tensor):
294
+ """
295
+ Args:
296
+ tensor: an arbitrary shaped tensor
297
+
298
+ Returns:
299
+ Tensor of the same shape as an input, but with 1.0 if tensor > 0.0 and -1.0
300
+ otherwise
301
+ """
302
+ return 2.0 * (tensor > 0.0).to(tensor.dtype) - 1.0
303
+
304
+
305
+ def symeig3x3(
306
+ inputs: torch.Tensor, eigenvectors: bool = True
307
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
308
+ """
309
+ Compute eigenvalues and (optionally) eigenvectors
310
+
311
+ Args:
312
+ inputs: symmetric matrices with shape of (..., 3, 3)
313
+ eigenvectors: whether should we compute only eigenvalues or eigenvectors as well
314
+
315
+ Returns:
316
+ A tuple of (eigenvalues, eigenvectors), where eigenvectors is None when not
317
+ requested. Eigenvalues are of shape (..., 3) and eigenvectors (..., 3, 3)
318
+ """
319
+ return _SymEig3x3().to(inputs.device)(inputs, eigenvectors=eigenvectors)
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/common/workaround/utils.py ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # pyre-unsafe
8
+
9
+
10
+ import torch
11
+
12
+
13
+ def _safe_det_3x3(t: torch.Tensor):
14
+ """
15
+ Fast determinant calculation for a batch of 3x3 matrices.
16
+
17
+ Note, result of this function might not be the same as `torch.det()`.
18
+ The differences might be in the last significant digit.
19
+
20
+ Args:
21
+ t: Tensor of shape (N, 3, 3).
22
+
23
+ Returns:
24
+ Tensor of shape (N) with determinants.
25
+ """
26
+
27
+ det = (
28
+ t[..., 0, 0] * (t[..., 1, 1] * t[..., 2, 2] - t[..., 1, 2] * t[..., 2, 1])
29
+ - t[..., 0, 1] * (t[..., 1, 0] * t[..., 2, 2] - t[..., 2, 0] * t[..., 1, 2])
30
+ + t[..., 0, 2] * (t[..., 1, 0] * t[..., 2, 1] - t[..., 2, 0] * t[..., 1, 1])
31
+ )
32
+
33
+ return det
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/ball_query/ball_query.cu ADDED
@@ -0,0 +1,129 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ * All rights reserved.
4
+ *
5
+ * This source code is licensed under the BSD-style license found in the
6
+ * LICENSE file in the root directory of this source tree.
7
+ */
8
+
9
+ #include <ATen/ATen.h>
10
+ #include <ATen/cuda/CUDAContext.h>
11
+ #include <c10/cuda/CUDAGuard.h>
12
+ #include <math.h>
13
+ #include <stdio.h>
14
+ #include <stdlib.h>
15
+
16
+ // A chunk of work is blocksize-many points of P1.
17
+ // The number of potential chunks to do is N*(1+(P1-1)/blocksize)
18
+ // call (1+(P1-1)/blocksize) chunks_per_cloud
19
+ // These chunks are divided among the gridSize-many blocks.
20
+ // In block b, we work on chunks b, b+gridSize, b+2*gridSize etc .
21
+ // In chunk i, we work on cloud i/chunks_per_cloud on points starting from
22
+ // blocksize*(i%chunks_per_cloud).
23
+
24
// Templated on scalar_t so the host side can instantiate it for each
// floating point dtype of the input point clouds.
//
// For each valid point p1[n][i], scans the points of p2[n] in order and
// records (index, squared distance) for up to K points lying strictly inside
// the squared radius `radius2`. Output slots for which no neighbor is found
// are never written here, so they keep whatever padding the caller
// initialized them with (the host fills idxs with -1 and dists with 0).
template <typename scalar_t>
__global__ void BallQueryKernel(
    const at::PackedTensorAccessor64<scalar_t, 3, at::RestrictPtrTraits> p1,
    const at::PackedTensorAccessor64<scalar_t, 3, at::RestrictPtrTraits> p2,
    const at::PackedTensorAccessor64<int64_t, 1, at::RestrictPtrTraits>
        lengths1,
    const at::PackedTensorAccessor64<int64_t, 1, at::RestrictPtrTraits>
        lengths2,
    at::PackedTensorAccessor64<int64_t, 3, at::RestrictPtrTraits> idxs,
    at::PackedTensorAccessor64<scalar_t, 3, at::RestrictPtrTraits> dists,
    const int64_t K,
    const float radius2) {
  const int64_t N = p1.size(0);
  // Work is divided into chunks of blockDim.x points of one cloud; see the
  // chunking scheme described in the comment above this kernel.
  const int64_t chunks_per_cloud = (1 + (p1.size(1) - 1) / blockDim.x);
  const int64_t chunks_to_do = N * chunks_per_cloud;
  const int D = p1.size(2);

  for (int64_t chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) {
    const int64_t n = chunk / chunks_per_cloud; // batch_index
    const int64_t start_point = blockDim.x * (chunk % chunks_per_cloud);
    int64_t i = start_point + threadIdx.x;

    // Check if point is valid in heterogeneous tensor
    if (i >= lengths1[n]) {
      continue;
    }

    // Iterate over points in p2 until desired count is reached or
    // all points have been considered
    for (int64_t j = 0, count = 0; j < lengths2[n] && count < K; ++j) {
      // Calculate the distance between the points
      scalar_t dist2 = 0.0;
      for (int d = 0; d < D; ++d) {
        scalar_t diff = p1[n][i][d] - p2[n][j][d];
        dist2 += (diff * diff);
      }

      if (dist2 < radius2) {
        // If the point is within the radius
        // Set the value of the index to the point index
        idxs[n][i][count] = j;
        dists[n][i][count] = dist2;

        // increment the number of selected samples for the point i
        ++count;
      }
    }
  }
}
73
+
74
// CUDA entry point for ball query. Validates that all inputs share a device
// (and p1/p2 a dtype), allocates the outputs (idxs padded with -1, dists
// with 0), and launches BallQueryKernel via AT_DISPATCH_FLOATING_TYPES.
//
// Fix: the packed accessors previously hard-coded `float`, which defeats the
// dtype dispatch — constructing a float accessor over a double tensor throws,
// even though AT_DISPATCH_FLOATING_TYPES also covers double and the kernel is
// templated. The accessors now use the dispatched `scalar_t`.
std::tuple<at::Tensor, at::Tensor> BallQueryCuda(
    const at::Tensor& p1, // (N, P1, 3)
    const at::Tensor& p2, // (N, P2, 3)
    const at::Tensor& lengths1, // (N,)
    const at::Tensor& lengths2, // (N,)
    int K,
    float radius) {
  // Check inputs are on the same device
  at::TensorArg p1_t{p1, "p1", 1}, p2_t{p2, "p2", 2},
      lengths1_t{lengths1, "lengths1", 3}, lengths2_t{lengths2, "lengths2", 4};
  at::CheckedFrom c = "BallQueryCuda";
  at::checkAllSameGPU(c, {p1_t, p2_t, lengths1_t, lengths2_t});
  at::checkAllSameType(c, {p1_t, p2_t});

  // Set the device for the kernel launch based on the device of p1
  at::cuda::CUDAGuard device_guard(p1.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  TORCH_CHECK(
      p2.size(2) == p1.size(2), "Point sets must have the same last dimension");

  const int N = p1.size(0);
  const int P1 = p1.size(1);
  const int64_t K_64 = K;
  // Compare squared distances against the squared radius in the kernel.
  const float radius2 = radius * radius;

  // Output tensors: idxs is pre-filled with the -1 sentinel and dists with 0;
  // the kernel only overwrites the slots of neighbors it actually finds.
  auto long_dtype = lengths1.options().dtype(at::kLong);
  auto idxs = at::full({N, P1, K}, -1, long_dtype);
  auto dists = at::zeros({N, P1, K}, p1.options());

  if (idxs.numel() == 0) {
    AT_CUDA_CHECK(cudaGetLastError());
    return std::make_tuple(idxs, dists);
  }

  const size_t blocks = 256;
  const size_t threads = 256;

  AT_DISPATCH_FLOATING_TYPES(
      p1.scalar_type(), "ball_query_kernel_cuda", ([&] {
        BallQueryKernel<scalar_t><<<blocks, threads, 0, stream>>>(
            p1.packed_accessor64<scalar_t, 3, at::RestrictPtrTraits>(),
            p2.packed_accessor64<scalar_t, 3, at::RestrictPtrTraits>(),
            lengths1.packed_accessor64<int64_t, 1, at::RestrictPtrTraits>(),
            lengths2.packed_accessor64<int64_t, 1, at::RestrictPtrTraits>(),
            idxs.packed_accessor64<int64_t, 3, at::RestrictPtrTraits>(),
            dists.packed_accessor64<scalar_t, 3, at::RestrictPtrTraits>(),
            K_64,
            radius2);
      }));

  AT_CUDA_CHECK(cudaGetLastError());

  return std::make_tuple(idxs, dists);
}
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/ball_query/ball_query.h ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ * All rights reserved.
4
+ *
5
+ * This source code is licensed under the BSD-style license found in the
6
+ * LICENSE file in the root directory of this source tree.
7
+ */
8
+
9
+ #pragma once
10
+ #include <torch/extension.h>
11
+ #include <tuple>
12
+ #include "utils/pytorch3d_cutils.h"
13
+
14
+ // Compute indices of K neighbors in pointcloud p2 to points
15
+ // in pointcloud p1 which fall within a specified radius
16
+ //
17
+ // Args:
18
+ // p1: FloatTensor of shape (N, P1, D) giving a batch of pointclouds each
19
+ // containing P1 points of dimension D.
20
+ // p2: FloatTensor of shape (N, P2, D) giving a batch of pointclouds each
21
+ // containing P2 points of dimension D.
22
+ // lengths1: LongTensor, shape (N,), giving actual length of each P1 cloud.
23
+ // lengths2: LongTensor, shape (N,), giving actual length of each P2 cloud.
24
+ // K: Integer giving the upper bound on the number of samples to take
25
+ // within the radius
26
+ // radius: the radius around each point within which the neighbors need to be
27
+ // located
28
+ //
29
+ // Returns:
30
+ // p1_neighbor_idx: LongTensor of shape (N, P1, K), where
31
+ // p1_neighbor_idx[n, i, k] = j means that the kth
32
+ // neighbor to p1[n, i] in the cloud p2[n] is p2[n, j].
33
+ // This is padded with -1s both where a cloud in p2 has fewer than
34
+ // S points and where a cloud in p1 has fewer than P1 points and
35
+ // also if there are fewer than K points which satisfy the radius
36
+ // threshold.
37
+ //
38
+ // p1_neighbor_dists: FloatTensor of shape (N, P1, K) containing the squared
39
+ // distance from each point p1[n, p, :] to its K neighbors
40
+ // p2[n, p1_neighbor_idx[n, p, k], :].
41
+
42
+ // CPU implementation
43
+ std::tuple<at::Tensor, at::Tensor> BallQueryCpu(
44
+ const at::Tensor& p1,
45
+ const at::Tensor& p2,
46
+ const at::Tensor& lengths1,
47
+ const at::Tensor& lengths2,
48
+ const int K,
49
+ const float radius);
50
+
51
+ // CUDA implementation
52
+ std::tuple<at::Tensor, at::Tensor> BallQueryCuda(
53
+ const at::Tensor& p1,
54
+ const at::Tensor& p2,
55
+ const at::Tensor& lengths1,
56
+ const at::Tensor& lengths2,
57
+ const int K,
58
+ const float radius);
59
+
60
+ // Implementation which is exposed
61
+ // Note: the backward pass reuses the KNearestNeighborBackward kernel
62
+ inline std::tuple<at::Tensor, at::Tensor> BallQuery(
63
+ const at::Tensor& p1,
64
+ const at::Tensor& p2,
65
+ const at::Tensor& lengths1,
66
+ const at::Tensor& lengths2,
67
+ int K,
68
+ float radius) {
69
+ if (p1.is_cuda() || p2.is_cuda()) {
70
+ #ifdef WITH_CUDA
71
+ CHECK_CUDA(p1);
72
+ CHECK_CUDA(p2);
73
+ return BallQueryCuda(
74
+ p1.contiguous(),
75
+ p2.contiguous(),
76
+ lengths1.contiguous(),
77
+ lengths2.contiguous(),
78
+ K,
79
+ radius);
80
+ #else
81
+ AT_ERROR("Not compiled with GPU support.");
82
+ #endif
83
+ }
84
+ CHECK_CPU(p1);
85
+ CHECK_CPU(p2);
86
+ return BallQueryCpu(
87
+ p1.contiguous(),
88
+ p2.contiguous(),
89
+ lengths1.contiguous(),
90
+ lengths2.contiguous(),
91
+ K,
92
+ radius);
93
+ }
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/ball_query/ball_query_cpu.cpp ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ * All rights reserved.
4
+ *
5
+ * This source code is licensed under the BSD-style license found in the
6
+ * LICENSE file in the root directory of this source tree.
7
+ */
8
+
9
+ #include <torch/extension.h>
10
+ #include <tuple>
11
+
12
+ std::tuple<at::Tensor, at::Tensor> BallQueryCpu(
13
+ const at::Tensor& p1,
14
+ const at::Tensor& p2,
15
+ const at::Tensor& lengths1,
16
+ const at::Tensor& lengths2,
17
+ int K,
18
+ float radius) {
19
+ const int N = p1.size(0);
20
+ const int P1 = p1.size(1);
21
+ const int D = p1.size(2);
22
+
23
+ auto long_opts = lengths1.options().dtype(torch::kInt64);
24
+ torch::Tensor idxs = torch::full({N, P1, K}, -1, long_opts);
25
+ torch::Tensor dists = torch::full({N, P1, K}, 0, p1.options());
26
+ const float radius2 = radius * radius;
27
+
28
+ auto p1_a = p1.accessor<float, 3>();
29
+ auto p2_a = p2.accessor<float, 3>();
30
+ auto lengths1_a = lengths1.accessor<int64_t, 1>();
31
+ auto lengths2_a = lengths2.accessor<int64_t, 1>();
32
+ auto idxs_a = idxs.accessor<int64_t, 3>();
33
+ auto dists_a = dists.accessor<float, 3>();
34
+
35
+ for (int n = 0; n < N; ++n) {
36
+ const int64_t length1 = lengths1_a[n];
37
+ const int64_t length2 = lengths2_a[n];
38
+ for (int64_t i = 0; i < length1; ++i) {
39
+ for (int64_t j = 0, count = 0; j < length2 && count < K; ++j) {
40
+ float dist2 = 0;
41
+ for (int d = 0; d < D; ++d) {
42
+ float diff = p1_a[n][i][d] - p2_a[n][j][d];
43
+ dist2 += diff * diff;
44
+ }
45
+ if (dist2 < radius2) {
46
+ dists_a[n][i][count] = dist2;
47
+ idxs_a[n][i][count] = j;
48
+ ++count;
49
+ }
50
+ }
51
+ }
52
+ }
53
+ return std::make_tuple(idxs, dists);
54
+ }
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/alpha_composite.cu ADDED
@@ -0,0 +1,233 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ * All rights reserved.
4
+ *
5
+ * This source code is licensed under the BSD-style license found in the
6
+ * LICENSE file in the root directory of this source tree.
7
+ */
8
+
9
+ #include <ATen/ATen.h>
10
+ #include <ATen/core/TensorAccessor.h>
11
+ #include <ATen/cuda/CUDAContext.h>
12
+ #include <c10/cuda/CUDAGuard.h>
13
+
14
+ #include <cuda.h>
15
+ #include <cuda_runtime.h>
16
+
17
+ #include <stdio.h>
18
+ #include <vector>
19
+
20
+ __constant__ const float kEpsilon = 1e-9;
21
+
22
+ // TODO(gkioxari) support all data types once AtomicAdd supports doubles.
23
+ // Currently, support is for floats only.
24
// Forward pass of alpha compositing. For every (channel, pixel) entry of
// every image in the batch, walks the z-sorted list of the K nearest points
// (points_idx) front to back and accumulates
//   result[b][ch][j][i] += features[ch][idx_k] * alpha_k * prod_{l<k}(1 - alpha_l)
// via atomicAdd. Blocks with the same blockIdx.x work on batch element
// blockIdx.x; the (blockIdx.y, threadIdx.x) threads stride over all
// C * H * W entries of that image.
__global__ void alphaCompositeCudaForwardKernel(
    // clang-format off
    at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> result,
    const at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> features,
    const at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> alphas,
    const at::PackedTensorAccessor64<int64_t, 4, at::RestrictPtrTraits> points_idx) {
  // clang-format on
  const int64_t C = features.size(0);
  const int64_t H = points_idx.size(2);
  const int64_t W = points_idx.size(3);

  // Get the batch and index
  const auto batch = blockIdx.x;

  const int num_pixels = C * H * W;
  const auto num_threads = gridDim.y * blockDim.x;
  const auto tid = blockIdx.y * blockDim.x + threadIdx.x;

  // Iterate over each feature in each pixel
  for (int pid = tid; pid < num_pixels; pid += num_threads) {
    // Decompose the flat work-item id into (channel, row, column).
    int ch = pid / (H * W);
    int j = (pid % (H * W)) / W;
    int i = (pid % (H * W)) % W;

    // alphacomposite the different values
    float cum_alpha = 1.; // running product of (1 - alpha) over closer points
    // Iterate through the closest K points for this pixel
    for (int k = 0; k < points_idx.size(1); ++k) {
      int n_idx = points_idx[batch][k][j][i];

      // Sentinel value is -1 indicating no point overlaps the pixel
      if (n_idx < 0) {
        continue;
      }

      float alpha = alphas[batch][k][j][i];
      // TODO(gkioxari) It might be more efficient to have threads write in a
      // local variable, and move atomicAdd outside of the loop such that
      // atomicAdd is executed once per thread.
      atomicAdd(
          &result[batch][ch][j][i], features[ch][n_idx] * cum_alpha * alpha);
      cum_alpha = cum_alpha * (1 - alpha);
    }
  }
}
69
+
70
+ // TODO(gkioxari) support all data types once AtomicAdd supports doubles.
71
+ // Currently, support is for floats only.
72
// Backward pass of alpha compositing: given grad_outputs = d(loss)/d(result),
// accumulates gradients w.r.t. the per-point features and per-pixel alphas.
// atomicAdd is required because distinct work items can write the same
// gradient entry: many pixels can reference the same point in grad_features,
// and all channels of one pixel write the same grad_alphas entry (its index
// does not include ch).
__global__ void alphaCompositeCudaBackwardKernel(
    // clang-format off
    at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> grad_features,
    at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> grad_alphas,
    const at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> grad_outputs,
    const at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> features,
    const at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> alphas,
    const at::PackedTensorAccessor64<int64_t, 4, at::RestrictPtrTraits> points_idx) {
  // clang-format on
  const int64_t C = features.size(0);
  const int64_t H = points_idx.size(2);
  const int64_t W = points_idx.size(3);

  // Get the batch and index
  const auto batch = blockIdx.x;

  const int num_pixels = C * H * W;
  const auto num_threads = gridDim.y * blockDim.x;
  const auto tid = blockIdx.y * blockDim.x + threadIdx.x;

  // Parallelize over each feature in each pixel in images of size H * W,
  // for each image in the batch of size batch_size
  for (int pid = tid; pid < num_pixels; pid += num_threads) {
    // Decompose the flat work-item id into (channel, row, column).
    int ch = pid / (H * W);
    int j = (pid % (H * W)) / W;
    int i = (pid % (H * W)) % W;

    // alphacomposite the different values
    float cum_alpha = 1.; // running product of (1 - alpha) over closer points
    // Iterate through the closest K points for this pixel
    for (int k = 0; k < points_idx.size(1); ++k) {
      int n_idx = points_idx[batch][k][j][i];

      // Sentinel value is -1 indicating no point overlaps the pixel
      if (n_idx < 0) {
        continue;
      }
      float alpha = alphas[batch][k][j][i];

      // d(result)/d(alpha_k)   = cum_alpha * feature  (for this channel)
      // d(result)/d(feature_k) = cum_alpha * alpha_k
      // TODO(gkioxari) It might be more efficient to have threads write in a
      // local variable, and move atomicAdd outside of the loop such that
      // atomicAdd is executed once per thread.
      atomicAdd(
          &grad_alphas[batch][k][j][i],
          cum_alpha * features[ch][n_idx] * grad_outputs[batch][ch][j][i]);
      atomicAdd(
          &grad_features[ch][n_idx],
          cum_alpha * alpha * grad_outputs[batch][ch][j][i]);

      // Iterate over all (K-1) nearest points to update gradient
      // Each closer point t < k contributes the factor (1 - alpha_t) to
      // cum_alpha, so d(cum_alpha)/d(alpha_t) = -cum_alpha / (1 - alpha_t);
      // kEpsilon guards the division when alpha_t == 1.
      for (int t = 0; t < k; ++t) {
        int t_idx = points_idx[batch][t][j][i];
        // Sentinel value is -1, indicating no point overlaps this pixel
        if (t_idx < 0) {
          continue;
        }
        float alpha_tvalue = alphas[batch][t][j][i];
        // TODO(gkioxari) It might be more efficient to have threads write in a
        // local variable, and move atomicAdd outside of the loop such that
        // atomicAdd is executed once per thread.
        atomicAdd(
            &grad_alphas[batch][t][j][i],
            -grad_outputs[batch][ch][j][i] * features[ch][n_idx] * cum_alpha *
                alpha / (1 - alpha_tvalue + kEpsilon));
      }

      cum_alpha = cum_alpha * (1 - alphas[batch][k][j][i]);
    }
  }
}
142
+
143
// Host-side wrapper for the forward pass: validates that all inputs live on
// the same GPU (and features/alphas share a dtype), allocates the
// (N, C, H, W) output, and launches alphaCompositeCudaForwardKernel.
at::Tensor alphaCompositeCudaForward(
    const at::Tensor& features,
    const at::Tensor& alphas,
    const at::Tensor& points_idx) {
  // Check inputs are on the same device
  at::TensorArg features_t{features, "features", 1},
      alphas_t{alphas, "alphas", 2}, points_idx_t{points_idx, "points_idx", 3};
  at::CheckedFrom c = "alphaCompositeCudaForward";
  at::checkAllSameGPU(c, {features_t, alphas_t, points_idx_t});
  at::checkAllSameType(c, {features_t, alphas_t});

  // Set the device for the kernel launch based on the device of the input
  at::cuda::CUDAGuard device_guard(features.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  const int64_t batch_size = points_idx.size(0);
  const int64_t C = features.size(0);
  const int64_t H = points_idx.size(2);
  const int64_t W = points_idx.size(3);

  // The kernel accumulates into this buffer with atomicAdd, so it must
  // start zeroed.
  auto result = at::zeros({batch_size, C, H, W}, features.options());

  // Empty output: skip the launch (avoids a zero-sized grid).
  if (result.numel() == 0) {
    AT_CUDA_CHECK(cudaGetLastError());
    return result;
  }

  // 2D grid: blockIdx.x selects the batch element; the y-dimension supplies
  // (1024 / batch_size + 1) blocks of 64 threads that together stride over
  // the C * H * W entries of each image.
  const dim3 threadsPerBlock(64);
  const dim3 numBlocks(batch_size, 1024 / batch_size + 1);

  // TODO(gkioxari) add AT_DISPATCH_FLOATING_TYPES once atomicAdd supports
  // doubles. Currently, support is for floats only.
  alphaCompositeCudaForwardKernel<<<numBlocks, threadsPerBlock, 0, stream>>>(
      // clang-format off
      // As we are using packed accessors here the tensors
      // do not need to be made contiguous.
      result.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
      features.packed_accessor64<float, 2, at::RestrictPtrTraits>(),
      alphas.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
      points_idx.packed_accessor64<int64_t, 4, at::RestrictPtrTraits>());
  // clang-format on
  AT_CUDA_CHECK(cudaGetLastError());
  return result;
}
187
+
188
// Host-side wrapper for the backward pass: validates devices/dtypes,
// allocates zero-initialized gradient buffers matching features and alphas,
// and launches alphaCompositeCudaBackwardKernel.
std::tuple<at::Tensor, at::Tensor> alphaCompositeCudaBackward(
    const at::Tensor& grad_outputs,
    const at::Tensor& features,
    const at::Tensor& alphas,
    const at::Tensor& points_idx) {
  // Check inputs are on the same device
  at::TensorArg grad_outputs_t{grad_outputs, "grad_outputs", 1},
      features_t{features, "features", 2}, alphas_t{alphas, "alphas", 3},
      points_idx_t{points_idx, "points_idx", 4};
  at::CheckedFrom c = "alphaCompositeCudaBackward";
  at::checkAllSameGPU(c, {grad_outputs_t, features_t, alphas_t, points_idx_t});
  at::checkAllSameType(c, {grad_outputs_t, features_t, alphas_t});

  // Set the device for the kernel launch based on the device of the input
  at::cuda::CUDAGuard device_guard(features.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  // The kernel accumulates with atomicAdd, so both buffers must start zeroed.
  auto grad_features = at::zeros_like(features);
  auto grad_alphas = at::zeros_like(alphas);

  // Nothing to compute: skip the launch (avoids a zero-sized grid).
  if (grad_features.numel() == 0 || grad_alphas.numel() == 0) {
    AT_CUDA_CHECK(cudaGetLastError());
    return std::make_tuple(grad_features, grad_alphas);
  }

  const int64_t bs = alphas.size(0);

  // 2D grid: blockIdx.x selects the batch element; the y-dimension supplies
  // (1024 / bs + 1) blocks of 64 threads striding over the per-image entries.
  const dim3 threadsPerBlock(64);
  const dim3 numBlocks(bs, 1024 / bs + 1);

  // TODO(gkioxari) add AT_DISPATCH_FLOATING_TYPES once atomicAdd supports
  // doubles. Currently, support is for floats only.
  alphaCompositeCudaBackwardKernel<<<numBlocks, threadsPerBlock, 0, stream>>>(
      // clang-format off
      // As we are using packed accessors here the tensors
      // do not need to be made contiguous.
      grad_features.packed_accessor64<float, 2, at::RestrictPtrTraits>(),
      grad_alphas.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
      grad_outputs.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
      features.packed_accessor64<float, 2, at::RestrictPtrTraits>(),
      alphas.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
      points_idx.packed_accessor64<int64_t, 4, at::RestrictPtrTraits>());
  // clang-format on
  AT_CUDA_CHECK(cudaGetLastError());
  return std::make_tuple(grad_features, grad_alphas);
}
project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/alpha_composite.h ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
3
+ * All rights reserved.
4
+ *
5
+ * This source code is licensed under the BSD-style license found in the
6
+ * LICENSE file in the root directory of this source tree.
7
+ */
8
+
9
+ #include <torch/extension.h>
10
+ #include "utils/pytorch3d_cutils.h"
11
+
12
+ #include <vector>
13
+
14
+ // Perform alpha compositing of points in a z-buffer.
15
+ //
16
+ // Inputs:
17
+ // features: FloatTensor of shape (C, P) which gives the features
18
+ // of each point where C is the size of the feature and
19
+ // P the number of points.
20
+ // alphas: FloatTensor of shape (N, points_per_pixel, H, W) where
21
+ // points_per_pixel is the number of points in the z-buffer
22
+ // sorted in z-order, and (H, W) is the image size.
23
+ // points_idx: IntTensor of shape (N, points_per_pixel, H, W) giving the
24
+ // indices of the nearest points at each pixel, sorted in z-order.
25
+ // Returns:
26
+ // weighted_fs: FloatTensor of shape (N, C, H, W) giving the accumulated
27
+ // feature for each point. Concretely, it gives:
28
+ // weighted_fs[b,c,i,j] = sum_k cum_alpha_k *
29
+ // features[c,points_idx[b,k,i,j]]
30
+ // where cum_alpha_k =
31
+ // alphas[b,k,i,j] * prod_l=0..k-1 (1 - alphas[b,l,i,j])
32
+
33
+ // CUDA declarations
34
+ #ifdef WITH_CUDA
35
+ torch::Tensor alphaCompositeCudaForward(
36
+ const torch::Tensor& features,
37
+ const torch::Tensor& alphas,
38
+ const torch::Tensor& points_idx);
39
+
40
+ std::tuple<torch::Tensor, torch::Tensor> alphaCompositeCudaBackward(
41
+ const torch::Tensor& grad_outputs,
42
+ const torch::Tensor& features,
43
+ const torch::Tensor& alphas,
44
+ const torch::Tensor& points_idx);
45
+ #endif
46
+
47
+ // C++ declarations
48
+ torch::Tensor alphaCompositeCpuForward(
49
+ const torch::Tensor& features,
50
+ const torch::Tensor& alphas,
51
+ const torch::Tensor& points_idx);
52
+
53
+ std::tuple<torch::Tensor, torch::Tensor> alphaCompositeCpuBackward(
54
+ const torch::Tensor& grad_outputs,
55
+ const torch::Tensor& features,
56
+ const torch::Tensor& alphas,
57
+ const torch::Tensor& points_idx);
58
+
59
+ torch::Tensor alphaCompositeForward(
60
+ torch::Tensor& features,
61
+ torch::Tensor& alphas,
62
+ torch::Tensor& points_idx) {
63
+ features = features.contiguous();
64
+ alphas = alphas.contiguous();
65
+ points_idx = points_idx.contiguous();
66
+
67
+ if (features.is_cuda()) {
68
+ #ifdef WITH_CUDA
69
+ CHECK_CUDA(features);
70
+ CHECK_CUDA(alphas);
71
+ CHECK_CUDA(points_idx);
72
+ return alphaCompositeCudaForward(features, alphas, points_idx);
73
+ #else
74
+ AT_ERROR("Not compiled with GPU support");
75
+ #endif
76
+ } else {
77
+ CHECK_CPU(features);
78
+ CHECK_CPU(alphas);
79
+ CHECK_CPU(points_idx);
80
+ return alphaCompositeCpuForward(features, alphas, points_idx);
81
+ }
82
+ }
83
+
84
+ std::tuple<torch::Tensor, torch::Tensor> alphaCompositeBackward(
85
+ torch::Tensor& grad_outputs,
86
+ torch::Tensor& features,
87
+ torch::Tensor& alphas,
88
+ torch::Tensor& points_idx) {
89
+ grad_outputs = grad_outputs.contiguous();
90
+ features = features.contiguous();
91
+ alphas = alphas.contiguous();
92
+ points_idx = points_idx.contiguous();
93
+
94
+ if (grad_outputs.is_cuda()) {
95
+ #ifdef WITH_CUDA
96
+ CHECK_CUDA(grad_outputs);
97
+ CHECK_CUDA(features);
98
+ CHECK_CUDA(alphas);
99
+ CHECK_CUDA(points_idx);
100
+
101
+ return alphaCompositeCudaBackward(
102
+ grad_outputs, features, alphas, points_idx);
103
+ #else
104
+ AT_ERROR("Not compiled with GPU support");
105
+ #endif
106
+ } else {
107
+ CHECK_CPU(grad_outputs);
108
+ CHECK_CPU(features);
109
+ CHECK_CPU(alphas);
110
+ CHECK_CPU(points_idx);
111
+
112
+ return alphaCompositeCpuBackward(
113
+ grad_outputs, features, alphas, points_idx);
114
+ }
115
+ }