diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/overfit_base.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/overfit_base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d5cc0cccbad843c750a4653c3b72fbe90a57547e --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/overfit_base.yaml @@ -0,0 +1,79 @@ +defaults: +- default_config +- _self_ +exp_dir: ./data/exps/overfit_base/ +training_loop_ImplicitronTrainingLoop_args: + visdom_port: 8097 + visualize_interval: 0 + max_epochs: 1000 +data_source_ImplicitronDataSource_args: + data_loader_map_provider_class_type: SequenceDataLoaderMapProvider + dataset_map_provider_class_type: JsonIndexDatasetMapProvider + data_loader_map_provider_SequenceDataLoaderMapProvider_args: + dataset_length_train: 1000 + dataset_length_val: 1 + num_workers: 8 + dataset_map_provider_JsonIndexDatasetMapProvider_args: + dataset_root: ${oc.env:CO3D_DATASET_ROOT} + n_frames_per_sequence: -1 + test_on_train: true + test_restrict_sequence_id: 0 + dataset_JsonIndexDataset_args: + load_point_clouds: false + mask_depths: false + mask_images: false +model_factory_ImplicitronModelFactory_args: + model_class_type: "OverfitModel" + model_OverfitModel_args: + loss_weights: + loss_mask_bce: 1.0 + loss_prev_stage_mask_bce: 1.0 + loss_autodecoder_norm: 0.01 + loss_rgb_mse: 1.0 + loss_prev_stage_rgb_mse: 1.0 + output_rasterized_mc: false + chunk_size_grid: 102400 + render_image_height: 400 + render_image_width: 400 + share_implicit_function_across_passes: false + implicit_function_class_type: "NeuralRadianceFieldImplicitFunction" + implicit_function_NeuralRadianceFieldImplicitFunction_args: + n_harmonic_functions_xyz: 10 + n_harmonic_functions_dir: 4 + n_hidden_neurons_xyz: 256 + 
n_hidden_neurons_dir: 128 + n_layers_xyz: 8 + append_xyz: + - 5 + coarse_implicit_function_class_type: "NeuralRadianceFieldImplicitFunction" + coarse_implicit_function_NeuralRadianceFieldImplicitFunction_args: + n_harmonic_functions_xyz: 10 + n_harmonic_functions_dir: 4 + n_hidden_neurons_xyz: 256 + n_hidden_neurons_dir: 128 + n_layers_xyz: 8 + append_xyz: + - 5 + raysampler_AdaptiveRaySampler_args: + n_rays_per_image_sampled_from_mask: 1024 + scene_extent: 8.0 + n_pts_per_ray_training: 64 + n_pts_per_ray_evaluation: 64 + stratified_point_sampling_training: true + stratified_point_sampling_evaluation: false + renderer_MultiPassEmissionAbsorptionRenderer_args: + n_pts_per_ray_fine_training: 64 + n_pts_per_ray_fine_evaluation: 64 + append_coarse_samples_to_fine: true + density_noise_std_train: 1.0 +optimizer_factory_ImplicitronOptimizerFactory_args: + breed: Adam + weight_decay: 0.0 + lr_policy: MultiStepLR + multistep_lr_milestones: [] + lr: 0.0005 + gamma: 0.1 + momentum: 0.9 + betas: + - 0.9 + - 0.999 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/overfit_singleseq_base.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/overfit_singleseq_base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0349fd27a1ab25d7155f1d05c6258545acd6a5f7 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/overfit_singleseq_base.yaml @@ -0,0 +1,42 @@ +defaults: +- overfit_base +- _self_ +data_source_ImplicitronDataSource_args: + data_loader_map_provider_SequenceDataLoaderMapProvider_args: + batch_size: 1 + dataset_length_train: 1000 + dataset_length_val: 1 + num_workers: 8 + dataset_map_provider_JsonIndexDatasetMapProvider_args: + assert_single_seq: true + n_frames_per_sequence: -1 + test_restrict_sequence_id: 0 + 
test_on_train: false +model_factory_ImplicitronModelFactory_args: + model_class_type: "OverfitModel" + model_OverfitModel_args: + render_image_height: 800 + render_image_width: 800 + log_vars: + - loss_rgb_psnr_fg + - loss_rgb_psnr + - loss_eikonal + - loss_prev_stage_rgb_psnr + - loss_mask_bce + - loss_prev_stage_mask_bce + - loss_rgb_mse + - loss_prev_stage_rgb_mse + - loss_depth_abs + - loss_depth_abs_fg + - loss_kl + - loss_mask_neg_iou + - objective + - epoch + - sec/it +optimizer_factory_ImplicitronOptimizerFactory_args: + lr: 0.0005 + multistep_lr_milestones: + - 200 + - 300 +training_loop_ImplicitronTrainingLoop_args: + max_epochs: 400 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/overfit_singleseq_nerf_blender.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/overfit_singleseq_nerf_blender.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c61d759f382beb27da12d8e9655599f367161fd9 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/overfit_singleseq_nerf_blender.yaml @@ -0,0 +1,56 @@ +defaults: +- overfit_singleseq_base +- _self_ +exp_dir: "./data/overfit_nerf_blender_repro/${oc.env:BLENDER_SINGLESEQ_CLASS}" +data_source_ImplicitronDataSource_args: + data_loader_map_provider_SequenceDataLoaderMapProvider_args: + dataset_length_train: 100 + dataset_map_provider_class_type: BlenderDatasetMapProvider + dataset_map_provider_BlenderDatasetMapProvider_args: + base_dir: ${oc.env:BLENDER_DATASET_ROOT}/${oc.env:BLENDER_SINGLESEQ_CLASS} + n_known_frames_for_test: null + object_name: ${oc.env:BLENDER_SINGLESEQ_CLASS} + path_manager_factory_class_type: PathManagerFactory + path_manager_factory_PathManagerFactory_args: + silence_logs: true + +model_factory_ImplicitronModelFactory_args: + 
model_class_type: "OverfitModel" + model_OverfitModel_args: + mask_images: false + raysampler_class_type: AdaptiveRaySampler + raysampler_AdaptiveRaySampler_args: + n_pts_per_ray_training: 64 + n_pts_per_ray_evaluation: 64 + n_rays_per_image_sampled_from_mask: 4096 + stratified_point_sampling_training: true + stratified_point_sampling_evaluation: false + scene_extent: 2.0 + scene_center: + - 0.0 + - 0.0 + - 0.0 + renderer_MultiPassEmissionAbsorptionRenderer_args: + density_noise_std_train: 0.0 + n_pts_per_ray_fine_training: 128 + n_pts_per_ray_fine_evaluation: 128 + raymarcher_EmissionAbsorptionRaymarcher_args: + blend_output: false + loss_weights: + loss_rgb_mse: 1.0 + loss_prev_stage_rgb_mse: 1.0 + loss_mask_bce: 0.0 + loss_prev_stage_mask_bce: 0.0 + loss_autodecoder_norm: 0.00 + +optimizer_factory_ImplicitronOptimizerFactory_args: + exponential_lr_step_size: 3001 + lr_policy: LinearExponential + linear_exponential_lr_milestone: 200 + +training_loop_ImplicitronTrainingLoop_args: + max_epochs: 6000 + metric_print_interval: 10 + store_checkpoints_purge: 3 + test_when_finished: true + validation_interval: 100 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_base.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9d6af2608fe23be8924a354e3cf5f20d690bdac9 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_base.yaml @@ -0,0 +1,80 @@ +defaults: +- default_config +- _self_ +exp_dir: ./data/exps/base/ +training_loop_ImplicitronTrainingLoop_args: + visdom_port: 8097 + visualize_interval: 0 + max_epochs: 1000 +data_source_ImplicitronDataSource_args: + data_loader_map_provider_class_type: SequenceDataLoaderMapProvider + 
dataset_map_provider_class_type: JsonIndexDatasetMapProvider + data_loader_map_provider_SequenceDataLoaderMapProvider_args: + dataset_length_train: 1000 + dataset_length_val: 1 + num_workers: 8 + dataset_map_provider_JsonIndexDatasetMapProvider_args: + dataset_root: ${oc.env:CO3D_DATASET_ROOT} + n_frames_per_sequence: -1 + test_on_train: true + test_restrict_sequence_id: 0 + dataset_JsonIndexDataset_args: + load_point_clouds: false + mask_depths: false + mask_images: false +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + loss_weights: + loss_mask_bce: 1.0 + loss_prev_stage_mask_bce: 1.0 + loss_autodecoder_norm: 0.01 + loss_rgb_mse: 1.0 + loss_prev_stage_rgb_mse: 1.0 + output_rasterized_mc: false + chunk_size_grid: 102400 + render_image_height: 400 + render_image_width: 400 + num_passes: 2 + implicit_function_NeuralRadianceFieldImplicitFunction_args: + n_harmonic_functions_xyz: 10 + n_harmonic_functions_dir: 4 + n_hidden_neurons_xyz: 256 + n_hidden_neurons_dir: 128 + n_layers_xyz: 8 + append_xyz: + - 5 + raysampler_AdaptiveRaySampler_args: + n_rays_per_image_sampled_from_mask: 1024 + scene_extent: 8.0 + n_pts_per_ray_training: 64 + n_pts_per_ray_evaluation: 64 + stratified_point_sampling_training: true + stratified_point_sampling_evaluation: false + renderer_MultiPassEmissionAbsorptionRenderer_args: + n_pts_per_ray_fine_training: 64 + n_pts_per_ray_fine_evaluation: 64 + append_coarse_samples_to_fine: true + density_noise_std_train: 1.0 + view_pooler_args: + view_sampler_args: + masked_sampling: false + image_feature_extractor_ResNetFeatureExtractor_args: + stages: + - 1 + - 2 + - 3 + - 4 + proj_dim: 16 + image_rescale: 0.32 + first_max_pool: false +optimizer_factory_ImplicitronOptimizerFactory_args: + breed: Adam + weight_decay: 0.0 + lr_policy: MultiStepLR + multistep_lr_milestones: [] + lr: 0.0005 + gamma: 0.1 + momentum: 0.9 + betas: + - 0.9 + - 0.999 diff --git 
a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_base.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..578fe1a2ccfef253ed268fc84eaf202a1c88c91c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_base.yaml @@ -0,0 +1,38 @@ +defaults: +- repro_base.yaml +- _self_ +data_source_ImplicitronDataSource_args: + data_loader_map_provider_SequenceDataLoaderMapProvider_args: + batch_size: 10 + dataset_length_train: 1000 + dataset_length_val: 1 + num_workers: 8 + train_conditioning_type: SAME + val_conditioning_type: SAME + test_conditioning_type: SAME + images_per_seq_options: + - 2 + - 3 + - 4 + - 5 + - 6 + - 7 + - 8 + - 9 + - 10 + dataset_map_provider_JsonIndexDatasetMapProvider_args: + assert_single_seq: false + task_str: multisequence + n_frames_per_sequence: -1 + test_on_train: true + test_restrict_sequence_id: 0 +optimizer_factory_ImplicitronOptimizerFactory_args: + multistep_lr_milestones: + - 1000 +training_loop_ImplicitronTrainingLoop_args: + max_epochs: 3000 + evaluator_ImplicitronEvaluator_args: + camera_difficulty_bin_breaks: + - 0.666667 + - 0.833334 + is_multisequence: true diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerf_ad.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerf_ad.yaml new file mode 100644 index 0000000000000000000000000000000000000000..aa4291d3503cd731255a364db19f82b6f707f729 --- /dev/null +++ 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerf_ad.yaml @@ -0,0 +1,12 @@ +defaults: +- repro_multiseq_base.yaml +- _self_ +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + chunk_size_grid: 16000 + view_pooler_enabled: false + global_encoder_class_type: SequenceAutodecoder + global_encoder_SequenceAutodecoder_args: + autodecoder_args: + n_instances: 20000 + encoding_dim: 256 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerformer.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerformer.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9aa9f4c5fd0839bc4e3c6fc74f3db3190d559fb5 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerformer.yaml @@ -0,0 +1,18 @@ +defaults: +- repro_multiseq_base.yaml +- repro_feat_extractor_transformer.yaml +- _self_ +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + chunk_size_grid: 16000 + raysampler_AdaptiveRaySampler_args: + n_rays_per_image_sampled_from_mask: 800 + n_pts_per_ray_training: 32 + n_pts_per_ray_evaluation: 32 + renderer_MultiPassEmissionAbsorptionRenderer_args: + n_pts_per_ray_fine_training: 16 + n_pts_per_ray_fine_evaluation: 16 + implicit_function_class_type: NeRFormerImplicitFunction + view_pooler_enabled: true + view_pooler_args: + feature_aggregator_class_type: IdentityFeatureAggregator diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerformer_angle_w.yaml 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerformer_angle_w.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9c9a30fe79dd25afded6cffb80c29610a45803c0 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerformer_angle_w.yaml @@ -0,0 +1,7 @@ +defaults: +- repro_multiseq_nerformer.yaml +- _self_ +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + view_pooler_args: + feature_aggregator_class_type: AngleWeightedIdentityFeatureAggregator diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_ad_hypernet_noharm.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_ad_hypernet_noharm.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9f29cbbe82ede4f4610949849433a67f91aff07f --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_ad_hypernet_noharm.yaml @@ -0,0 +1,11 @@ +defaults: +- repro_multiseq_srn_ad_hypernet.yaml +- _self_ +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + num_passes: 1 + implicit_function_SRNHyperNetImplicitFunction_args: + pixel_generator_args: + n_harmonic_functions: 0 + hypernet_args: + n_harmonic_functions: 0 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_wce.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_wce.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..4a72c32621d063276a2b765d34e1edd707c87eac --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_wce.yaml @@ -0,0 +1,31 @@ +defaults: +- repro_multiseq_base.yaml +- repro_feat_extractor_normed.yaml +- _self_ +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + chunk_size_grid: 32000 + num_passes: 1 + n_train_target_views: -1 + loss_weights: + loss_rgb_mse: 200.0 + loss_prev_stage_rgb_mse: 0.0 + loss_mask_bce: 1.0 + loss_prev_stage_mask_bce: 0.0 + loss_autodecoder_norm: 0.0 + depth_neg_penalty: 10000.0 + raysampler_class_type: NearFarRaySampler + raysampler_NearFarRaySampler_args: + n_rays_per_image_sampled_from_mask: 2048 + min_depth: 0.05 + max_depth: 0.05 + n_pts_per_ray_training: 1 + n_pts_per_ray_evaluation: 1 + stratified_point_sampling_training: false + stratified_point_sampling_evaluation: false + renderer_class_type: LSTMRenderer + implicit_function_class_type: SRNImplicitFunction + view_pooler_enabled: true +optimizer_factory_ImplicitronOptimizerFactory_args: + breed: Adam + lr: 5.0e-05 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_nerf_wce.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_nerf_wce.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0f3ac0553a9a05574626c1228873cd8ac370ec5a --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_nerf_wce.yaml @@ -0,0 +1,4 @@ +defaults: +- repro_multiseq_nerf_wce.yaml +- repro_multiseq_co3dv2_base.yaml +- _self_ diff --git 
a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_nerformer.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_nerformer.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ee7ef332310d444b377798faaf7b67e8575d5b0f --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_nerformer.yaml @@ -0,0 +1,4 @@ +defaults: +- repro_multiseq_nerformer.yaml +- repro_multiseq_co3dv2_base.yaml +- _self_ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_srn_ad_hypernet.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_srn_ad_hypernet.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bdb544f3217e329a8940b117ceb2f47cdc501692 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_srn_ad_hypernet.yaml @@ -0,0 +1,4 @@ +defaults: +- repro_multiseq_srn_ad_hypernet.yaml +- repro_multiseq_co3dv2_base.yaml +- _self_ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_srn_wce.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_srn_wce.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b8ae36746035bc35c93867fc01399c61476e14a6 --- /dev/null +++ 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_srn_wce.yaml @@ -0,0 +1,4 @@ +defaults: +- repro_multiseq_srn_wce.yaml +- repro_multiseq_co3dv2_base.yaml +- _self_ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_co3dv2_base.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_co3dv2_base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..54e1e2a42037013e0a55f8ad13ca11973d68d6b7 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_co3dv2_base.yaml @@ -0,0 +1,8 @@ +data_source_ImplicitronDataSource_args: + dataset_map_provider_class_type: JsonIndexDatasetMapProviderV2 + dataset_map_provider_JsonIndexDatasetMapProviderV2_args: + category: teddybear + subset_name: manyview_dev_0 +training_loop_ImplicitronTrainingLoop_args: + evaluator_ImplicitronEvaluator_args: + is_multisequence: false diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_idr.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_idr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7224b9d5d9cecd791262a50dde5432cac0d7ed88 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_idr.yaml @@ -0,0 +1,57 @@ +defaults: +- repro_singleseq_base +- _self_ +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + loss_weights: + loss_mask_bce: 100.0 + loss_kl: 0.0 + loss_rgb_mse: 1.0 + loss_eikonal: 
0.1 + chunk_size_grid: 65536 + num_passes: 1 + view_pooler_enabled: false + implicit_function_IdrFeatureField_args: + n_harmonic_functions_xyz: 6 + bias: 0.6 + d_in: 3 + d_out: 1 + dims: + - 512 + - 512 + - 512 + - 512 + - 512 + - 512 + - 512 + - 512 + geometric_init: true + pooled_feature_dim: 0 + skip_in: + - 6 + weight_norm: true + renderer_SignedDistanceFunctionRenderer_args: + ray_tracer_args: + line_search_step: 0.5 + line_step_iters: 3 + n_secant_steps: 8 + n_steps: 100 + sdf_threshold: 5.0e-05 + ray_normal_coloring_network_args: + d_in: 9 + d_out: 3 + dims: + - 512 + - 512 + - 512 + - 512 + mode: idr + n_harmonic_functions_dir: 4 + pooled_feature_dim: 0 + weight_norm: true + raysampler_AdaptiveRaySampler_args: + n_rays_per_image_sampled_from_mask: 1024 + n_pts_per_ray_training: 0 + n_pts_per_ray_evaluation: 0 + renderer_class_type: SignedDistanceFunctionRenderer + implicit_function_class_type: IdrFeatureField diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fd85af5e7af23f5acd2abec6dae3255e7087cd7c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf.yaml @@ -0,0 +1,3 @@ +defaults: +- repro_singleseq_base +- _self_ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1f60f0b9480348a6660b90244600e7d59622470a --- /dev/null 
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn.yaml @@ -0,0 +1,29 @@ +defaults: +- repro_singleseq_base.yaml +- _self_ +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + num_passes: 1 + chunk_size_grid: 32000 + view_pooler_enabled: false + loss_weights: + loss_rgb_mse: 200.0 + loss_prev_stage_rgb_mse: 0.0 + loss_mask_bce: 1.0 + loss_prev_stage_mask_bce: 0.0 + loss_autodecoder_norm: 0.0 + depth_neg_penalty: 10000.0 + raysampler_class_type: NearFarRaySampler + raysampler_NearFarRaySampler_args: + n_rays_per_image_sampled_from_mask: 2048 + min_depth: 0.05 + max_depth: 0.05 + n_pts_per_ray_training: 1 + n_pts_per_ray_evaluation: 1 + stratified_point_sampling_training: false + stratified_point_sampling_evaluation: false + renderer_class_type: LSTMRenderer + implicit_function_class_type: SRNImplicitFunction +optimizer_factory_ImplicitronOptimizerFactory_args: + breed: Adam + lr: 5.0e-05 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_wce.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_wce.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d190c28084f905a08d106976b45de7eb8560b3a0 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_wce.yaml @@ -0,0 +1,30 @@ +defaults: +- repro_singleseq_wce_base +- repro_feat_extractor_normed.yaml +- _self_ +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + num_passes: 1 + chunk_size_grid: 32000 + view_pooler_enabled: true + loss_weights: + loss_rgb_mse: 200.0 + loss_prev_stage_rgb_mse: 0.0 + loss_mask_bce: 1.0 + loss_prev_stage_mask_bce: 0.0 + loss_autodecoder_norm: 0.0 + 
depth_neg_penalty: 10000.0 + raysampler_class_type: NearFarRaySampler + raysampler_NearFarRaySampler_args: + n_rays_per_image_sampled_from_mask: 2048 + min_depth: 0.05 + max_depth: 0.05 + n_pts_per_ray_training: 1 + n_pts_per_ray_evaluation: 1 + stratified_point_sampling_training: false + stratified_point_sampling_evaluation: false + renderer_class_type: LSTMRenderer + implicit_function_class_type: SRNImplicitFunction +optimizer_factory_ImplicitronOptimizerFactory_args: + breed: Adam + lr: 5.0e-05 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_wce_noharm.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_wce_noharm.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3fc1254bd14e42266a1b8894d19bf081edced575 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_wce_noharm.yaml @@ -0,0 +1,11 @@ +defaults: +- repro_singleseq_srn_wce.yaml +- _self_ +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + num_passes: 1 + implicit_function_SRNImplicitFunction_args: + pixel_generator_args: + n_harmonic_functions: 0 + raymarch_function_args: + n_harmonic_functions: 0 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_nerf.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_nerf.yaml new file mode 100644 index 0000000000000000000000000000000000000000..89999cde6b2869bb4ba773e6f09819bdc4554cd4 --- /dev/null +++ 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_nerf.yaml @@ -0,0 +1,4 @@ +defaults: +- repro_singleseq_nerf.yaml +- repro_singleseq_co3dv2_base.yaml +- _self_ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/__pycache__/__init__.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3a1d6eb22213cdcbad25088366dd18ab8c1cb86 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/__pycache__/__init__.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/blending/sigmoid_alpha_blend.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/blending/sigmoid_alpha_blend.cu new file mode 100644 index 0000000000000000000000000000000000000000..76912c441b155e03e2470144835850cd567cb060 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/blending/sigmoid_alpha_blend.cu @@ -0,0 +1,216 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include +#include +#include +#include +#include + +template +__global__ void SigmoidAlphaBlendForwardKernel( + // clang-format off + const at::PackedTensorAccessor64 distances, // (N, H, W, K) + const at::PackedTensorAccessor64 pix_to_face, // (N, H, W, K) + at::PackedTensorAccessor64 alphas, // (N, H, W) + // clang-format on + const scalar_t sigma, + const int N, + const int H, + const int W, + const int K) { + // Parallelize over each pixel in images of + // size H * W, for each image in the batch of size N. + const int num_threads = gridDim.x * blockDim.x; + const int tid = blockIdx.x * blockDim.x + threadIdx.x; + + // TODO: revisit performance of this kernel with shared memory usage + + for (int t_i = tid; t_i < N * H * W; t_i += num_threads) { + // Convert linear index to 3D index + const int n = t_i / (H * W); // batch index. + const int pix_idx = t_i % (H * W); + + // TODO: fix index calculation for non square images. + const int yi = pix_idx / W; + const int xi = pix_idx % W; + scalar_t alpha = 1.0; + + // Loop over all the faces for this pixel. + for (int k = 0; k < K; k++) { + // Index into (N, H, W, K) tensors + const int f = pix_to_face[n][yi][xi][k]; + if (f < 0) { + // Sentinel value is -1 indicating no face overlaps the pixel. + continue; + } + // The distance is negative if a pixel is inside a face and positive + // outside the face. Therefore use -1.0 * the distance to get the + // correct sign. + scalar_t dist = -1.0 * distances[n][yi][xi][k]; + + // Calculate the sigmoid probability. + scalar_t prob = 1. / (1. + exp(-dist / sigma)); + + // The cumulative product ensures that alpha will be 0.0 if at least 1 + // face fully covers the pixel as for that face, prob will be 1.0. + // This results in a multiplication by 0.0 because of the (1.0 - prob) + // term. Therefore the final result of (1.0 - alpha) will be 1.0. 
+      alpha *= (1.0 - prob);
+    }
+    alphas[n][yi][xi] = 1.0 - alpha;
+  }
+}
+
+// Launcher for the forward sigmoid alpha blend kernel.
+// distances/pix_to_face are (N, H, W, K); returns alphas of shape (N, H, W).
+at::Tensor SigmoidAlphaBlendForwardCuda(
+    const at::Tensor& distances, // (N, H, W, K)
+    const at::Tensor& pix_to_face, // (N, H, W, K)
+    const float sigma) {
+  const int N = distances.size(0);
+  const int H = distances.size(1);
+  const int W = distances.size(2);
+  const int K = distances.size(3);
+
+  at::Tensor alphas = at::zeros({N, H, W}, distances.options());
+  const size_t blocks = 1024;
+  const size_t threads = 128;
+
+  // Check inputs are on the same device
+  at::TensorArg distances_t{distances, "distances", 1},
+      pix_to_face_t{pix_to_face, "pix_to_face", 2};
+  at::CheckedFrom c = "SigmoidAlphaBlendForwardCuda";
+  at::checkAllSameGPU(c, {distances_t, pix_to_face_t});
+
+  // Set the device for the kernel launch based on the device of distances
+  at::cuda::CUDAGuard device_guard(distances.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  if (distances.numel() == 0) {
+    AT_CUDA_CHECK(cudaGetLastError());
+    return alphas;
+  }
+
+  AT_DISPATCH_FLOATING_TYPES(
+      distances.scalar_type(), "sigmoid_alpha_blend_kernel", ([&] {
+        // clang-format off
+      SigmoidAlphaBlendForwardKernel<scalar_t><<<blocks, threads, 0, stream>>>(
+        distances.packed_accessor64<scalar_t, 4, at::RestrictPtrTraits>(),
+        pix_to_face.packed_accessor64<int64_t, 4, at::RestrictPtrTraits>(),
+        alphas.packed_accessor64<scalar_t, 3, at::RestrictPtrTraits>(),
+        sigma,
+        N,
+        H,
+        W,
+        K);
+        // clang-format on
+      }));
+
+  AT_CUDA_CHECK(cudaGetLastError());
+  return alphas;
+}
+
+template <typename scalar_t>
+__global__ void SigmoidAlphaBlendBackwardKernel(
+    // clang-format off
+    const at::PackedTensorAccessor64<scalar_t, 3, at::RestrictPtrTraits> grad_alphas, // (N, H, W)
+    const at::PackedTensorAccessor64<scalar_t, 3, at::RestrictPtrTraits> alphas, // (N, H, W)
+    const at::PackedTensorAccessor64<scalar_t, 4, at::RestrictPtrTraits> distances, // (N, H, W, K)
+    const at::PackedTensorAccessor64<int64_t, 4, at::RestrictPtrTraits> pix_to_face, // (N, H, W, K)
+    at::PackedTensorAccessor64<scalar_t, 4, at::RestrictPtrTraits> grad_distances, // (N, H, W, K)
+    // clang-format on
+    const scalar_t sigma,
+    const int N,
+    const int H,
+    const int W,
+    const int K) {
+  // Parallelize over each of the top K faces for each pixel in images of
+  // size H * W * K, for each image in the batch of size N.
+
+  // Get block and thread index.
+  const int n = blockIdx.x;
+  const int num_pixels = H * W * K;
+  const int num_threads = gridDim.y * blockDim.x;
+  const int tid = blockIdx.y * blockDim.x + threadIdx.x;
+
+  for (int t_i = tid; t_i < num_pixels; t_i += num_threads) {
+    // Convert linear index to 3D index.
+    int yi = t_i / (W * K);
+    int xi = (t_i % (W * K)) / K;
+    int k = (t_i % (W * K)) % K;
+
+    const scalar_t alpha = 1.0 - alphas[n][yi][xi];
+    const scalar_t grad_alpha = grad_alphas[n][yi][xi];
+    const int f = pix_to_face[n][yi][xi][k];
+
+    // Sentinel value is -1 indicating no face overlaps the pixel.
+    if (f >= 0) {
+      // The distance is negative if a pixel is inside a face and positive
+      // outside the face. Therefore use -1.0 * the distance to get the
+      // correct sign.
+      scalar_t dist = -1.0 * distances[n][yi][xi][k];
+
+      // Calculate the sigmoid probability.
+      scalar_t prob = 1. / (1. + exp(-dist / sigma));
+
+      // d(alpha)/d(dist) = (-1/sigma) * prob * (1 - prob) * alpha/(1 - prob)
+      //                  = (-1/sigma) * prob * alpha (see CPU impl derivation).
+      grad_distances[n][yi][xi][k] = grad_alpha * (-1.0 / sigma) * prob * alpha;
+    }
+  }
+}
+
+at::Tensor SigmoidAlphaBlendBackwardCuda(
+    const at::Tensor& grad_alphas, // (N, H, W)
+    const at::Tensor& alphas, // (N, H, W)
+    const at::Tensor& distances, // (N, H, W, K)
+    const at::Tensor& pix_to_face, // (N, H, W, K)
+    float sigma) {
+  const int N = distances.size(0);
+  const int H = distances.size(1);
+  const int W = distances.size(2);
+  const int K = distances.size(3);
+
+  at::Tensor grad_distances = at::zeros({N, H, W, K}, distances.options());
+
+  const dim3 threads(512);
+  const dim3 blocks(N, 1024 / N + 1);
+
+  at::TensorArg grad_alphas_t{grad_alphas, "grad_alphas", 1},
+      alphas_t{alphas, "alphas", 2}, distances_t{distances, "distances", 3},
+      pix_to_face_t{pix_to_face, "pix_to_face", 4};
+  at::CheckedFrom c = "SigmoidAlphaBlendBackwardCuda";
+  at::checkAllSameGPU(c, {grad_alphas_t, alphas_t, distances_t, pix_to_face_t});
+
+  // Set the device for the kernel launch based on the device of distances
+  at::cuda::CUDAGuard device_guard(alphas.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  if (alphas.numel() == 0) {
+    AT_CUDA_CHECK(cudaGetLastError());
+    // NOTE(review): return the (empty) gradient w.r.t. distances here, not
+    // grad_alphas -- callers expect a tensor of shape (N, H, W, K).
+    return grad_distances;
+  }
+
+  AT_DISPATCH_FLOATING_TYPES(
+      distances.scalar_type(), "sigmoid_alpha_blend_backward_kernel", ([&] {
+        SigmoidAlphaBlendBackwardKernel<
+            scalar_t><<<blocks, threads, 0, stream>>>(
+            // clang-format off
+            grad_alphas.packed_accessor64<scalar_t, 3, at::RestrictPtrTraits>(),
+            alphas.packed_accessor64<scalar_t, 3, at::RestrictPtrTraits>(),
+            distances.packed_accessor64<scalar_t, 4, at::RestrictPtrTraits>(),
+            pix_to_face.packed_accessor64<int64_t, 4, at::RestrictPtrTraits>(),
+            grad_distances.packed_accessor64<scalar_t, 4, at::RestrictPtrTraits>(),
+            // clang-format on
+            sigma,
+            N,
+            H,
+            W,
+            K);
+      }));
+
+  AT_CUDA_CHECK(cudaGetLastError());
+  return grad_distances;
+}
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/blending/sigmoid_alpha_blend.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/blending/sigmoid_alpha_blend.h
new file mode 100644
index 0000000000000000000000000000000000000000..d424c769c03c7df8b9bd32d6ac1d52b25befb2de
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/blending/sigmoid_alpha_blend.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+#pragma once
+#include <torch/extension.h>
+#include <cmath>
+
+// clang-format off
+// Function to blend the top K faces per pixel based on the 2d euclidean distance
+// from the center of the pixel to the face. This method is adapted from [1].
+// The output can be used to set the alpha value in an RGBA image.
+// Args:
+//     pix_to_face: LongTensor of shape (N, H, W, K), indices of faces overlapping
+//         with each pixel, where N is the batch size, H, W are the dimensions of the
+//         image and K is the number of faces rasterized per pixel.
+// distances: FloatTensor of shape (N, H, W, K), 2d euclidean distance of each pixel +// relative to the faces in pix_to_face +// sigma: float, parameter which controls the width of the sigmoid for blending +// Returns: +// alphas: FloatTensor of shape (N, H, W), the blended values for each pixel +// in the image. +// +// [1] Shichen Liu et al, 'Soft Rasterizer: A Differentiable Renderer for +// Image-based 3D Reasoning' +// clang-format on +at::Tensor SigmoidAlphaBlendForwardCpu( + const at::Tensor& distances, + const at::Tensor& pix_to_face, + const float sigma); + +#ifdef WITH_CUDA +at::Tensor SigmoidAlphaBlendForwardCuda( + const at::Tensor& distances, + const at::Tensor& pix_to_face, + const float sigma); +#endif + +// clang-format off +// Args: +// grad_alphas: FloatTensor of shape (N, H, W), upstream gradients for alphas +// alphas: FloatTensor of shape (N, H, W), the alpha values from the forward pass +// pix_to_face: LongTensor of shape (N, H, W, K), indices of faces overlapping +// with each pixel, where N is the batch size, H, W are the dimensions of the +// image, and K is the number of faces rasterized per pixel +// distances: FloatTensor of shape (N, H, W, K), 2d euclidean distance of each pixel +// to the corresponding faces in pix_to_face +// sigma: float, parameter which controls the width of the sigmoid for blending +// Returns: +// grad_distances: FloatTensor of shape (N, H, W, K) +// clang-format on +at::Tensor SigmoidAlphaBlendBackwardCpu( + const at::Tensor& grad_alphas, + const at::Tensor& alphas, + const at::Tensor& distances, + const at::Tensor& pix_to_face, + const float sigma); + +#ifdef WITH_CUDA +at::Tensor SigmoidAlphaBlendBackwardCuda( + const at::Tensor& grad_alphas, + const at::Tensor& alphas, + const at::Tensor& distances, + const at::Tensor& pix_to_face, + const float sigma); +#endif + +// Implementation which is exposed. 
+at::Tensor +SigmoidAlphaBlend(at::Tensor& distances, at::Tensor& pix_to_face, float sigma) { + if (distances.is_cuda() && pix_to_face.is_cuda()) { +#ifdef WITH_CUDA + return SigmoidAlphaBlendForwardCuda(distances, pix_to_face, sigma); +#else + AT_ERROR("Not compiled with GPU support."); +#endif + } + return SigmoidAlphaBlendForwardCpu(distances, pix_to_face, sigma); +} + +// Implementation which is exposed. +at::Tensor SigmoidAlphaBlendBackward( + const at::Tensor& grad_alphas, + const at::Tensor& alphas, + const at::Tensor& distances, + const at::Tensor& pix_to_face, + const float sigma) { + if (distances.is_cuda() && pix_to_face.is_cuda() && alphas.is_cuda() && + grad_alphas.is_cuda()) { +#ifdef WITH_CUDA + return SigmoidAlphaBlendBackwardCuda( + grad_alphas, alphas, distances, pix_to_face, sigma); +#else + AT_ERROR("Not compiled with GPU support."); +#endif + } + return SigmoidAlphaBlendBackwardCpu( + grad_alphas, alphas, distances, pix_to_face, sigma); +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/blending/sigmoid_alpha_blend_cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/blending/sigmoid_alpha_blend_cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8a19516726f320e206402f7e78a37661603be76b --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/blending/sigmoid_alpha_blend_cpu.cpp @@ -0,0 +1,129 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include +#include +#include + +at::Tensor SigmoidAlphaBlendForwardCpu( + const at::Tensor& distances, // (N, H, W, K) + const at::Tensor& pix_to_face, // (N, H, W, K) + const float sigma) { + const int N = distances.size(0); + const int H = distances.size(1); + const int W = distances.size(2); + const int K = distances.size(3); + + torch::Tensor out = torch::empty({N, H, W}, distances.options()); + + auto distances_a = distances.accessor(); + auto pix_to_face_a = pix_to_face.accessor(); + auto out_a = out.accessor(); + + // Iterate over the images in the batch. + for (int n = 0; n < N; ++n) { + // Iterate through the horizontal lines of the image from top to bottom. + for (int h = 0; h < H; ++h) { + // Iterate over the pixels on this horizontal line, left to right. + for (int w = 0; w < W; ++w) { + float alpha = 1.0; + + // Loop through the top K faces for each pixel. + for (int k = 0; k < K; ++k) { + const int f = pix_to_face_a[n][h][w][k]; + if (f < 0) { + // Sentinel value is -1 indicating no face overlaps the pixel. + continue; + } + // The distance is negative if a pixel is inside a face and positive + // outside the face. Therefore use -1.0 * the distance to get the + // correct sign. + float dist = -1.0 * distances_a[n][h][w][k]; + + // Calculate the sigmoid probability. + float prob = 1. / (1. + exp(-dist / sigma)); + + // The product ensures that alpha will be 0.0 if at least 1 + // face fully covers the pixel as for that face, prob will be 1.0. + // This results in a multiplication by 0.0 because of the (1.0 - prob) + // term. Therefore 1.0 - alpha will be 1.0. 
+ alpha *= 1.0 - prob; + } + out_a[n][h][w] = 1.0 - alpha; + } + } + } + return out; +} + +at::Tensor SigmoidAlphaBlendBackwardCpu( + const at::Tensor& grad_alphas, // (N, H, W) + const at::Tensor& alphas, // (N, H, W) + const at::Tensor& distances, // (N, H, W, K) + const at::Tensor& pix_to_face, // (N, H, W, K) + const float sigma) { + const int N = distances.size(0); + const int H = distances.size(1); + const int W = distances.size(2); + const int K = distances.size(3); + + auto distances_a = distances.accessor(); + auto pix_to_face_a = pix_to_face.accessor(); + auto alphas_a = alphas.accessor(); + auto grad_alphas_a = grad_alphas.accessor(); + + torch::Tensor grad_distances = + torch::zeros({N, H, W, K}, distances.options()); + auto grad_distances_a = grad_distances.accessor(); + + // Iterate over the images in the batch. + for (int n = 0; n < N; ++n) { + // Iterate through the horizontal lines of the image from top to bottom. + for (int h = 0; h < H; ++h) { + // Iterate over the pixels on this horizontal line, left to right. + for (int w = 0; w < W; ++w) { + // Get the alpha value from the forward pass and the + // upstream gradient. + const float alpha = 1.0 - alphas_a[n][h][w]; + const float grad_alpha = grad_alphas_a[n][h][w]; + + // Loop through the top K faces for each pixel. + for (int k = 0; k < K; ++k) { + const int f = pix_to_face_a[n][h][w][k]; + if (f < 0) { + // Sentinel value is -1 indicating no face overlaps the pixel + continue; + } + // The distance is negative if a pixel is inside a face and positive + // outside the face. Therefore use -1.0 * distance to get the + // correct sign. + float dist = -1.0 * distances_a[n][h][w][k]; + + // Calculate the sigmoid probability. + float prob = 1. / (1. + exp(-dist / sigma)); + + // clang-format off + // We need to take the derivative of alpha w.r.t to the distance. + // alpha = 1.0 - (1.0- sigmoid(-x)) * (1.0 - sigmoid(-x2)) * ... 
* (1.0 - sigmoid(-xn)) + // + // Note that d/dx sigmoid(x) = sigmoid(x) * (1.0 - sigmoid(x)) + // + // This gives: + // d_alpha/d_dist = -1.0 * -1.0 * sigmoid(-x)(1. - sigmoid(-x)) * (-1.0/sigma) + // * ((1.0 - sigmoid(-x2) * ... * (1.0 - sigmoid(-xn)) + // = (-1.0/sigma) * prob * (1.0 - prob) * alpha/(1.0 - prob) + // = (-1.0/sigma) * prob * alpha + // clang-format on + grad_distances_a[n][h][w][k] = + grad_alpha * (-1.0 / sigma) * prob * alpha; + } + } + } + } + return grad_distances; +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/ext.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/ext.cpp new file mode 100644 index 0000000000000000000000000000000000000000..49ec02c684edcace9118cbb8db09b74ed98e19dc --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/ext.cpp @@ -0,0 +1,193 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +// clang-format off +#if !defined(USE_ROCM) +#include "./pulsar/global.h" // Include before . 
+#endif +#include +// clang-format on +#if !defined(USE_ROCM) +#include "./pulsar/pytorch/renderer.h" +#include "./pulsar/pytorch/tensor_util.h" +#endif +#include "ball_query/ball_query.h" +#include "blending/sigmoid_alpha_blend.h" +#include "compositing/alpha_composite.h" +#include "compositing/norm_weighted_sum.h" +#include "compositing/weighted_sum.h" +#include "face_areas_normals/face_areas_normals.h" +#include "gather_scatter/gather_scatter.h" +#include "interp_face_attrs/interp_face_attrs.h" +#include "iou_box3d/iou_box3d.h" +#include "knn/knn.h" +#include "marching_cubes/marching_cubes.h" +#include "mesh_normal_consistency/mesh_normal_consistency.h" +#include "packed_to_padded_tensor/packed_to_padded_tensor.h" +#include "point_mesh/point_mesh_cuda.h" +#include "points_to_volumes/points_to_volumes.h" +#include "rasterize_meshes/rasterize_meshes.h" +#include "rasterize_points/rasterize_points.h" +#include "sample_farthest_points/sample_farthest_points.h" +#include "sample_pdf/sample_pdf.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("face_areas_normals_forward", &FaceAreasNormalsForward); + m.def("face_areas_normals_backward", &FaceAreasNormalsBackward); + m.def("packed_to_padded", &PackedToPadded); + m.def("padded_to_packed", &PaddedToPacked); + m.def("interp_face_attrs_forward", &InterpFaceAttrsForward); + m.def("interp_face_attrs_backward", &InterpFaceAttrsBackward); +#ifdef WITH_CUDA + m.def("knn_check_version", &KnnCheckVersion); +#endif + m.def("knn_points_idx", &KNearestNeighborIdx); + m.def("knn_points_backward", &KNearestNeighborBackward); + m.def("ball_query", &BallQuery); + m.def("sample_farthest_points", &FarthestPointSampling); + m.def( + "mesh_normal_consistency_find_verts", &MeshNormalConsistencyFindVertices); + m.def("gather_scatter", &GatherScatter); + m.def("points_to_volumes_forward", PointsToVolumesForward); + m.def("points_to_volumes_backward", PointsToVolumesBackward); + m.def("rasterize_points", &RasterizePoints); + 
m.def("rasterize_points_backward", &RasterizePointsBackward); + m.def("rasterize_meshes_backward", &RasterizeMeshesBackward); + m.def("rasterize_meshes", &RasterizeMeshes); + m.def("sigmoid_alpha_blend", &SigmoidAlphaBlend); + m.def("sigmoid_alpha_blend_backward", &SigmoidAlphaBlendBackward); + + // Accumulation functions + m.def("accum_weightedsumnorm", &weightedSumNormForward); + m.def("accum_weightedsum", &weightedSumForward); + m.def("accum_alphacomposite", &alphaCompositeForward); + m.def("accum_weightedsumnorm_backward", &weightedSumNormBackward); + m.def("accum_weightedsum_backward", &weightedSumBackward); + m.def("accum_alphacomposite_backward", &alphaCompositeBackward); + + // These are only visible for testing; users should not call them directly + m.def("_rasterize_points_coarse", &RasterizePointsCoarse); + m.def("_rasterize_points_naive", &RasterizePointsNaive); + m.def("_rasterize_meshes_naive", &RasterizeMeshesNaive); + m.def("_rasterize_meshes_coarse", &RasterizeMeshesCoarse); + m.def("_rasterize_meshes_fine", &RasterizeMeshesFine); + + // PointEdge distance functions + m.def("point_edge_dist_forward", &PointEdgeDistanceForward); + m.def("point_edge_dist_backward", &PointEdgeDistanceBackward); + m.def("edge_point_dist_forward", &EdgePointDistanceForward); + m.def("edge_point_dist_backward", &EdgePointDistanceBackward); + m.def("point_edge_array_dist_forward", &PointEdgeArrayDistanceForward); + m.def("point_edge_array_dist_backward", &PointEdgeArrayDistanceBackward); + + // PointFace distance functions + m.def("point_face_dist_forward", &PointFaceDistanceForward); + m.def("point_face_dist_backward", &PointFaceDistanceBackward); + m.def("face_point_dist_forward", &FacePointDistanceForward); + m.def("face_point_dist_backward", &FacePointDistanceBackward); + m.def("point_face_array_dist_forward", &PointFaceArrayDistanceForward); + m.def("point_face_array_dist_backward", &PointFaceArrayDistanceBackward); + + // Sample PDF + m.def("sample_pdf", 
&SamplePdf); + + // 3D IoU + m.def("iou_box3d", &IoUBox3D); + + // Marching cubes + m.def("marching_cubes", &MarchingCubes); + + // Pulsar. + // Pulsar not enabled on AMD. +#if !defined(USE_ROCM) +#ifdef PULSAR_LOGGING_ENABLED + c10::ShowLogInfoToStderr(); +#endif + py::class_< + pulsar::pytorch::Renderer, + std::shared_ptr>(m, "PulsarRenderer") + .def(py::init< + const uint&, + const uint&, + const uint&, + const bool&, + const bool&, + const float&, + const uint&, + const uint&>()) + .def( + "__eq__", + [](const pulsar::pytorch::Renderer& a, + const pulsar::pytorch::Renderer& b) { return a == b; }, + py::is_operator()) + .def( + "__ne__", + [](const pulsar::pytorch::Renderer& a, + const pulsar::pytorch::Renderer& b) { return !(a == b); }, + py::is_operator()) + .def( + "__repr__", + [](const pulsar::pytorch::Renderer& self) { + std::stringstream ss; + ss << self; + return ss.str(); + }) + .def( + "forward", + &pulsar::pytorch::Renderer::forward, + py::arg("vert_pos"), + py::arg("vert_col"), + py::arg("vert_radii"), + + py::arg("cam_pos"), + py::arg("pixel_0_0_center"), + py::arg("pixel_vec_x"), + py::arg("pixel_vec_y"), + py::arg("focal_length"), + py::arg("principal_point_offsets"), + + py::arg("gamma"), + py::arg("max_depth"), + py::arg("min_depth") /* = 0.f*/, + py::arg( + "bg_col") /* = at::nullopt not exposed properly in pytorch 1.1. */ + , + py::arg("opacity") /* = at::nullopt ... 
*/, + py::arg("percent_allowed_difference") = 0.01f, + py::arg("max_n_hits") = MAX_UINT, + py::arg("mode") = 0) + .def("backward", &pulsar::pytorch::Renderer::backward) + .def_property( + "device_tracker", + [](const pulsar::pytorch::Renderer& self) { + return self.device_tracker; + }, + [](pulsar::pytorch::Renderer& self, const torch::Tensor& val) { + self.device_tracker = val; + }) + .def_property_readonly("width", &pulsar::pytorch::Renderer::width) + .def_property_readonly("height", &pulsar::pytorch::Renderer::height) + .def_property_readonly( + "max_num_balls", &pulsar::pytorch::Renderer::max_num_balls) + .def_property_readonly( + "orthogonal", &pulsar::pytorch::Renderer::orthogonal) + .def_property_readonly( + "right_handed", &pulsar::pytorch::Renderer::right_handed) + .def_property_readonly("n_track", &pulsar::pytorch::Renderer::n_track); + m.def( + "pulsar_sphere_ids_from_result_info_nograd", + &pulsar::pytorch::sphere_ids_from_result_info_nograd); + // Constants. + m.attr("EPS") = py::float_(EPS); + m.attr("MAX_FLOAT") = py::float_(MAX_FLOAT); + m.attr("MAX_INT") = py::int_(MAX_INT); + m.attr("MAX_UINT") = py::int_(MAX_UINT); + m.attr("MAX_USHORT") = py::int_(MAX_USHORT); + m.attr("PULSAR_MAX_GRAD_SPHERES") = py::int_(MAX_GRAD_SPHERES); +#endif +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/face_areas_normals/face_areas_normals.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/face_areas_normals/face_areas_normals.cu new file mode 100644 index 0000000000000000000000000000000000000000..58aeb20fcfd2e5d51ab93054f176f9a2e4962ca4 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/face_areas_normals/face_areas_normals.cu @@ -0,0 +1,301 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. 
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+// NOTE(review): the four include targets were lost in transit; this is the
+// standard set for pytorch3d .cu files -- confirm against upstream.
+#include <ATen/ATen.h>
+#include <ATen/cuda/CUDAContext.h>
+#include <c10/cuda/CUDAGuard.h>
+#include <tuple>
+
+template <typename scalar_t>
+__global__ void FaceAreasNormalsForwardKernel(
+    const scalar_t* __restrict__ verts,
+    const int64_t* __restrict__ faces,
+    scalar_t* __restrict__ face_areas,
+    scalar_t* __restrict__ face_normals,
+    const size_t V,
+    const size_t F) {
+  const size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
+  const size_t stride = gridDim.x * blockDim.x;
+
+  // Faces split evenly over the number of threads in the grid.
+  // Each thread computes the area & normal of its respective faces and adds it
+  // to the global face_areas tensor.
+  for (size_t f = tid; f < F; f += stride) {
+    const int64_t i0 = faces[3 * f + 0];
+    const int64_t i1 = faces[3 * f + 1];
+    const int64_t i2 = faces[3 * f + 2];
+
+    const scalar_t v0_x = verts[3 * i0 + 0];
+    const scalar_t v0_y = verts[3 * i0 + 1];
+    const scalar_t v0_z = verts[3 * i0 + 2];
+
+    const scalar_t v1_x = verts[3 * i1 + 0];
+    const scalar_t v1_y = verts[3 * i1 + 1];
+    const scalar_t v1_z = verts[3 * i1 + 2];
+
+    const scalar_t v2_x = verts[3 * i2 + 0];
+    const scalar_t v2_y = verts[3 * i2 + 1];
+    const scalar_t v2_z = verts[3 * i2 + 2];
+
+    const scalar_t ax = v1_x - v0_x;
+    const scalar_t ay = v1_y - v0_y;
+    const scalar_t az = v1_z - v0_z;
+
+    const scalar_t bx = v2_x - v0_x;
+    const scalar_t by = v2_y - v0_y;
+    const scalar_t bz = v2_z - v0_z;
+
+    const scalar_t cx = ay * bz - az * by;
+    const scalar_t cy = az * bx - ax * bz;
+    const scalar_t cz = ax * by - ay * bx;
+
+    scalar_t norm = sqrt(cx * cx + cy * cy + cz * cz);
+    face_areas[f] = norm / 2.0;
+    norm = (norm < 1e-6) ? 1e-6 : norm; // max(norm, 1e-6)
+    face_normals[3 * f + 0] = cx / norm;
+    face_normals[3 * f + 1] = cy / norm;
+    face_normals[3 * f + 2] = cz / norm;
+  }
+}
+
+// TODO(gkioxari) support all data types once AtomicAdd supports doubles.
+// Currently, support is for floats only.
+__global__ void FaceAreasNormalsBackwardKernel(
+    const float* __restrict__ grad_areas,
+    const float* __restrict__ grad_normals,
+    const float* __restrict__ verts,
+    const int64_t* __restrict__ faces,
+    float* __restrict__ grad_verts,
+    const size_t V,
+    const size_t F) {
+  const size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
+  const size_t stride = gridDim.x * blockDim.x;
+
+  // Faces split evenly over the number of threads in the grid.
+  // Each thread computes the area & normal of its respective faces and adds it
+  // to the global face_areas tensor.
+  for (size_t f = tid; f < F; f += stride) {
+    const int64_t i0 = faces[3 * f + 0];
+    const int64_t i1 = faces[3 * f + 1];
+    const int64_t i2 = faces[3 * f + 2];
+
+    const float v0_x = verts[3 * i0 + 0];
+    const float v0_y = verts[3 * i0 + 1];
+    const float v0_z = verts[3 * i0 + 2];
+
+    const float v1_x = verts[3 * i1 + 0];
+    const float v1_y = verts[3 * i1 + 1];
+    const float v1_z = verts[3 * i1 + 2];
+
+    const float v2_x = verts[3 * i2 + 0];
+    const float v2_y = verts[3 * i2 + 1];
+    const float v2_z = verts[3 * i2 + 2];
+
+    const float ax = v1_x - v0_x;
+    const float ay = v1_y - v0_y;
+    const float az = v1_z - v0_z;
+
+    const float bx = v2_x - v0_x;
+    const float by = v2_y - v0_y;
+    const float bz = v2_z - v0_z;
+
+    const float cx = ay * bz - az * by;
+    const float cy = az * bx - ax * bz;
+    const float cz = ax * by - ay * bx;
+
+    float norm = sqrt(cx * cx + cy * cy + cz * cz);
+    norm = (norm < 1e-6) ? 1e-6 : norm; // max(norm, 1e-6)
+    float inv_norm = 1. / norm;
+    float inv_norm_2 = pow(inv_norm, 2.0f);
+    float inv_norm_3 = pow(inv_norm, 3.0f);
+
+    // We compute gradients with respect to the input vertices.
+    // For each vertex, gradients come from grad_areas and grad_normals.
+    // eg, grad_v0_x = (d / d v0_x)
+    //     = \sum_f (d / d areas[f]) * (d areas[f] / d v0_x)
+    //        + (d / d normals[f, 0]) * (d normals[f, 0] / d v0_x)
+    //        + (d / d normals[f, 1]) * (d normals[f, 1] / d v0_x)
+    //        + (d / d normals[f, 2]) * (d normals[f, 2] / d v0_x)
+    // with (d / d areas[f]) = grad_areas[f] and
+    //      (d / d normals[f, j]) = grad_normals[f][j].
+    // The equations below are derived after taking
+    // derivatives wrt to the vertices (fun times!).
+
+    // grad v0 coming from grad areas and grad normals
+    const float grad_v0_x =
+        ((-az + bz) * cy + (-by + ay) * cz) / 2.0 * inv_norm * grad_areas[f] +
+        -cx * ((-az + bz) * cy + (-by + ay) * cz) * inv_norm_3 *
+            grad_normals[3 * f + 0] +
+        ((-az + bz) - cy * ((-az + bz) * cy + (-by + ay) * cz) * inv_norm_2) *
+            inv_norm * grad_normals[3 * f + 1] +
+        ((-by + ay) - cz * ((-az + bz) * cy + (-by + ay) * cz) * inv_norm_2) *
+            inv_norm * grad_normals[3 * f + 2];
+    atomicAdd(grad_verts + 3 * i0 + 0, grad_v0_x);
+
+    const float grad_v0_y =
+        ((-bz + az) * cx + (-ax + bx) * cz) / 2.0 * inv_norm * grad_areas[f] +
+        ((-bz + az) - cx * ((-bz + az) * cx + (-ax + bx) * cz) * inv_norm_2) *
+            inv_norm * grad_normals[3 * f + 0] +
+        -cy * ((-bz + az) * cx + (-ax + bx) * cz) * inv_norm_3 *
+            grad_normals[3 * f + 1] +
+        ((-ax + bx) - cz * ((-bz + az) * cx + (-ax + bx) * cz) * inv_norm_2) *
+            inv_norm * grad_normals[3 * f + 2];
+    atomicAdd(grad_verts + 3 * i0 + 1, grad_v0_y);
+
+    const float grad_v0_z =
+        ((-ay + by) * cx + (-bx + ax) * cy) / 2.0 * inv_norm * grad_areas[f] +
+        ((-ay + by) - cx * ((-ay + by) * cx + (-bx + ax) * cy) * inv_norm_2) *
+            inv_norm * grad_normals[3 * f + 0] +
+        ((-bx + ax) - cy * ((-ay + by) * cx + (-bx + ax) * cy) * inv_norm_2) *
+            inv_norm * grad_normals[3 * f + 1] +
+        -cz * ((-ay + by) * cx + (-bx + ax) * cy) * inv_norm_3 *
+            grad_normals[3 * f + 2];
+    atomicAdd(grad_verts + 3 * i0 + 2, grad_v0_z);
+
+    // grad v1 coming from grad areas and grad normals
+    const float grad_v1_x =
+        (by * cz - bz * cy) / 2.0 * inv_norm * grad_areas[f] +
+        -cx * (by * cz - bz * cy) * inv_norm_3 * grad_normals[3 * f + 0] +
+        (-bz - cy * (by * cz - bz * cy) * inv_norm_2) * inv_norm *
+            grad_normals[3 * f + 1] +
+        (by - cz * (by * cz - bz * cy) * inv_norm_2) * inv_norm *
+            grad_normals[3 * f + 2];
+    atomicAdd(grad_verts + 3 * i1 + 0, grad_v1_x);
+
+    const float grad_v1_y =
+        (bz * cx - bx * cz) / 2.0 * inv_norm * grad_areas[f] +
+        (bz - cx * (bz * cx - bx * cz) * inv_norm_2) * inv_norm *
+            grad_normals[3 * f + 0] +
+        -cy * (bz * cx - bx * cz) * inv_norm_3 * grad_normals[3 * f + 1] +
+        (-bx - cz * (bz * cx - bx * cz) * inv_norm_2) * inv_norm *
+            grad_normals[3 * f + 2];
+    atomicAdd(grad_verts + 3 * i1 + 1, grad_v1_y);
+
+    const float grad_v1_z =
+        (bx * cy - by * cx) / 2.0 * inv_norm * grad_areas[f] +
+        (-by - cx * (bx * cy - by * cx) * inv_norm_2) * inv_norm *
+            grad_normals[3 * f + 0] +
+        // NOTE(review): the second factor here must be cy (the y normal
+        // component), matching the d(normals[1]) pattern of every other
+        // term; the transcribed source had cx.
+        (bx - cy * (bx * cy - by * cx) * inv_norm_2) * inv_norm *
+            grad_normals[3 * f + 1] +
+        -cz * (bx * cy - by * cx) * inv_norm_3 * grad_normals[3 * f + 2];
+    atomicAdd(grad_verts + 3 * i1 + 2, grad_v1_z);
+
+    // grad v2 coming from grad areas
+    const float grad_v2_x =
+        (az * cy - ay * cz) / 2.0 * inv_norm * grad_areas[f] +
+        -cx * (az * cy - ay * cz) * inv_norm_3 * grad_normals[3 * f + 0] +
+        (az - cy * (az * cy - ay * cz) * inv_norm_2) * inv_norm *
+            grad_normals[3 * f + 1] +
+        (-ay - cz * (az * cy - ay * cz) * inv_norm_2) * inv_norm *
+            grad_normals[3 * f + 2];
+    atomicAdd(grad_verts + 3 * i2 + 0, grad_v2_x);
+
+    const float grad_v2_y =
+        (ax * cz - az * cx) / 2.0 * inv_norm * grad_areas[f] +
+        (-az - cx * (ax * cz - az * cx) * inv_norm_2) * inv_norm *
+            grad_normals[3 * f + 0] +
+        -cy * (ax * cz - az * cx) * inv_norm_3 * grad_normals[3 * f + 1] +
+        (ax - cz * (ax * cz - az * cx) * inv_norm_2) * inv_norm *
+            grad_normals[3 * f + 2];
+    atomicAdd(grad_verts + 3 * i2 + 1, grad_v2_y);
+
+    const float grad_v2_z =
+        (ay * cx - ax * cy) / 2.0 * inv_norm * grad_areas[f] +
+        (ay - cx * (ay * cx - ax * cy) * inv_norm_2) * inv_norm *
+            grad_normals[3 * f + 0] +
+        (-ax - cy * (ay * cx - ax * cy) * inv_norm_2) * inv_norm *
+            grad_normals[3 * f + 1] +
+        -cz * (ay * cx - ax * cy) * inv_norm_3 * grad_normals[3 * f + 2];
+    atomicAdd(grad_verts + 3 * i2 + 2, grad_v2_z);
+  }
+}
+
+std::tuple<at::Tensor, at::Tensor> FaceAreasNormalsForwardCuda(
+    const at::Tensor verts,
+    const at::Tensor faces) {
+  const auto V = verts.size(0);
+  const auto F = faces.size(0);
+
+  // Check inputs are on the same device
+  at::TensorArg verts_t{verts, "verts", 1}, faces_t{faces, "faces", 2};
+  at::CheckedFrom c = "FaceAreasNormalsForwardCuda";
+  at::checkAllSameGPU(c, {verts_t, faces_t});
+
+  // Set the device for the kernel launch based on the device of verts
+  at::cuda::CUDAGuard device_guard(verts.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  at::Tensor areas = at::empty({F}, verts.options());
+  at::Tensor normals = at::empty({F, 3}, verts.options());
+
+  if (areas.numel() == 0) {
+    AT_CUDA_CHECK(cudaGetLastError());
+    return std::make_tuple(areas, normals);
+  }
+
+  const int blocks = 64;
+  const int threads = 512;
+
+  AT_DISPATCH_FLOATING_TYPES(
+      verts.scalar_type(), "face_areas_normals_forward_cuda", ([&] {
+        FaceAreasNormalsForwardKernel<scalar_t><<<blocks, threads, 0, stream>>>(
+            verts.contiguous().data_ptr<scalar_t>(),
+            faces.contiguous().data_ptr<int64_t>(),
+            areas.data_ptr<scalar_t>(),
+            normals.data_ptr<scalar_t>(),
+            V,
+            F);
+      }));
+  AT_CUDA_CHECK(cudaGetLastError());
+  return std::make_tuple(areas, normals);
+}
+
+at::Tensor FaceAreasNormalsBackwardCuda(
+    const at::Tensor grad_areas,
+    const at::Tensor grad_normals,
+    const at::Tensor verts,
+    const at::Tensor faces) {
+  // Check inputs are on the same device
+  at::TensorArg verts_t{verts, "verts", 1}, faces_t{faces, "faces", 2},
+      grad_areas_t{grad_areas, "grad_areas", 3},
+      grad_normals_t{grad_normals, "grad_normals", 4};
+  at::CheckedFrom c = "FaceAreasNormalsBackwardCuda";
+  at::checkAllSameGPU(c, {verts_t, faces_t, grad_areas_t, grad_normals_t});
+  // This is nondeterministic because atomicAdd
+  at::globalContext().alertNotDeterministic("FaceAreasNormalsBackwardCuda");
+
+  // Set the device for the kernel launch based on the device of verts
+  at::cuda::CUDAGuard device_guard(verts.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  const auto V = verts.size(0);
+  const auto F = faces.size(0);
+
+  at::Tensor grad_verts = at::zeros({V, 3}, grad_areas.options());
+
+  if (grad_verts.numel() == 0) {
+    AT_CUDA_CHECK(cudaGetLastError());
+    return grad_verts;
+  }
+
+  const int blocks = 64;
+  const int threads = 512;
+  // TODO(gkioxari) add AT_DISPATCH_FLOATING_TYPES once atomicAdd supports
+  // doubles. Currently, support is for floats only.
+  FaceAreasNormalsBackwardKernel<<<blocks, threads, 0, stream>>>(
+      grad_areas.contiguous().data_ptr<float>(),
+      grad_normals.contiguous().data_ptr<float>(),
+      verts.contiguous().data_ptr<float>(),
+      faces.contiguous().data_ptr<int64_t>(),
+      grad_verts.data_ptr<float>(),
+      V,
+      F);
+
+  AT_CUDA_CHECK(cudaGetLastError());
+  return grad_verts;
+}
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/face_areas_normals/face_areas_normals.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/face_areas_normals/face_areas_normals.h
new file mode 100644
index 0000000000000000000000000000000000000000..6df37c12e4c81cc9c03375bad3751baafeb473aa
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/face_areas_normals/face_areas_normals.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+#pragma once
+#include <torch/extension.h>
+#include <tuple>
+#include "utils/pytorch3d_cutils.h"
+
+// Compute areas of mesh faces using packed representation.
+//
+// Inputs:
+//    verts: FloatTensor of shape (V, 3) giving vertex positions.
+// faces: LongTensor of shape (F, 3) giving faces. +// +// Returns: +// areas: FloatTensor of shape (F,) where areas[f] is the area of faces[f]. +// normals: FloatTensor of shape (F, 3) where normals[f] is the normal of +// faces[f] +// + +// Cpu implementation. +std::tuple FaceAreasNormalsForwardCpu( + const at::Tensor verts, + const at::Tensor faces); +// Cpu implementation +at::Tensor FaceAreasNormalsBackwardCpu( + const at::Tensor grad_areas, + const at::Tensor grad_normals, + const at::Tensor verts, + const at::Tensor faces); + +#ifdef WITH_CUDA +// Cuda implementation. +std::tuple FaceAreasNormalsForwardCuda( + const at::Tensor verts, + const at::Tensor faces); +// Cuda implementation. +at::Tensor FaceAreasNormalsBackwardCuda( + const at::Tensor grad_areas, + const at::Tensor grad_normals, + const at::Tensor verts, + const at::Tensor faces); +#endif + +// Implementation which is exposed. +std::tuple FaceAreasNormalsForward( + const at::Tensor verts, + const at::Tensor faces) { + if (verts.is_cuda() && faces.is_cuda()) { +#ifdef WITH_CUDA + CHECK_CUDA(verts); + CHECK_CUDA(faces); + return FaceAreasNormalsForwardCuda(verts, faces); +#else + AT_ERROR("Not compiled with GPU support."); +#endif + } + return FaceAreasNormalsForwardCpu(verts, faces); +} + +// Implementation which is exposed. 
+at::Tensor FaceAreasNormalsBackward( + const at::Tensor grad_areas, + const at::Tensor grad_normals, + const at::Tensor verts, + const at::Tensor faces) { + if (verts.is_cuda() && faces.is_cuda()) { +#ifdef WITH_CUDA + CHECK_CUDA(verts); + CHECK_CUDA(faces); + CHECK_CUDA(grad_areas); + CHECK_CUDA(grad_normals); + return FaceAreasNormalsBackwardCuda(grad_areas, grad_normals, verts, faces); +#else + AT_ERROR("Not compiled with GPU support."); +#endif + } + return FaceAreasNormalsBackwardCpu(grad_areas, grad_normals, verts, faces); +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/face_areas_normals/face_areas_normals_cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/face_areas_normals/face_areas_normals_cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1871ac7d4044467d1322ba32e300d513c1d5118e --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/face_areas_normals/face_areas_normals_cpu.cpp @@ -0,0 +1,215 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include +#include + +std::tuple FaceAreasNormalsForwardCpu( + const at::Tensor verts, + const at::Tensor faces) { + const int F = faces.size(0); + + at::Tensor areas = at::empty({F}, verts.options()); + at::Tensor normals = at::empty({F, 3}, verts.options()); + + auto verts_a = verts.accessor(); + auto faces_a = faces.accessor(); + auto areas_a = areas.accessor(); + auto normals_a = normals.accessor(); + + for (int f = 0; f < F; ++f) { + const int64_t i0 = faces_a[f][0]; + const int64_t i1 = faces_a[f][1]; + const int64_t i2 = faces_a[f][2]; + + const float v0_x = verts_a[i0][0]; + const float v0_y = verts_a[i0][1]; + const float v0_z = verts_a[i0][2]; + + const float v1_x = verts_a[i1][0]; + const float v1_y = verts_a[i1][1]; + const float v1_z = verts_a[i1][2]; + + const float v2_x = verts_a[i2][0]; + const float v2_y = verts_a[i2][1]; + const float v2_z = verts_a[i2][2]; + + const float ax = v1_x - v0_x; + const float ay = v1_y - v0_y; + const float az = v1_z - v0_z; + + const float bx = v2_x - v0_x; + const float by = v2_y - v0_y; + const float bz = v2_z - v0_z; + + const float cx = ay * bz - az * by; + const float cy = az * bx - ax * bz; + const float cz = ax * by - ay * bx; + + float norm = sqrt(cx * cx + cy * cy + cz * cz); + areas_a[f] = norm / 2.0; + norm = (norm < 1e-6) ? 
1e-6 : norm; // max(norm, 1e-6) + normals_a[f][0] = cx / norm; + normals_a[f][1] = cy / norm; + normals_a[f][2] = cz / norm; + } + return std::make_tuple(areas, normals); +} + +at::Tensor FaceAreasNormalsBackwardCpu( + const at::Tensor grad_areas, + const at::Tensor grad_normals, + const at::Tensor verts, + const at::Tensor faces) { + const int V = verts.size(0); + const int F = faces.size(0); + + at::Tensor grad_verts = at::zeros({V, 3}, grad_areas.options()); + + auto grad_areas_a = grad_areas.accessor(); + auto grad_normals_a = grad_normals.accessor(); + auto verts_a = verts.accessor(); + auto faces_a = faces.accessor(); + auto grad_verts_a = grad_verts.accessor(); + + for (int f = 0; f < F; ++f) { + const int64_t i0 = faces_a[f][0]; + const int64_t i1 = faces_a[f][1]; + const int64_t i2 = faces_a[f][2]; + + const float v0_x = verts_a[i0][0]; + const float v0_y = verts_a[i0][1]; + const float v0_z = verts_a[i0][2]; + + const float v1_x = verts_a[i1][0]; + const float v1_y = verts_a[i1][1]; + const float v1_z = verts_a[i1][2]; + + const float v2_x = verts_a[i2][0]; + const float v2_y = verts_a[i2][1]; + const float v2_z = verts_a[i2][2]; + + const float ax = v1_x - v0_x; + const float ay = v1_y - v0_y; + const float az = v1_z - v0_z; + + const float bx = v2_x - v0_x; + const float by = v2_y - v0_y; + const float bz = v2_z - v0_z; + + const float cx = ay * bz - az * by; + const float cy = az * bx - ax * bz; + const float cz = ax * by - ay * bx; + + float norm = sqrt(cx * cx + cy * cy + cz * cz); + norm = (norm < 1e-6) ? 1e-6 : norm; // max(norm, 1e-6) + float inv_norm = 1. / norm; + float inv_norm_2 = pow(inv_norm, 2.0f); + float inv_norm_3 = pow(inv_norm, 3.0f); + + // We compute gradients with respect to the input vertices. + // For each vertex, gradients come from grad_areas and grad_normals. 
+ // eg, grad_v0_x = (d / d v0_x) + // = \sum_f (d / d areas[f]) * (d areas[f] / d v0_x) + // + (d / d normals[f, 0]) * (d normals[f, 0] / d v0_x) + // + (d / d normals[f, 1]) * (d normals[f, 1] / d v0_x) + // + (d / d normals[f, 2]) * (d normals[f, 2] / d v0_x) + // with (d / d areas[f]) = grad_areas[f] and + // (d / d normals[f, j]) = grad_normals[f][j]. + // The equations below are derived after taking + // derivatives wrt to the vertices (fun times!). + + // grad v0 coming from grad areas and grad normals + const float grad_v0_x = + ((-az + bz) * cy + (-by + ay) * cz) / 2.0 * inv_norm * grad_areas_a[f] + + -cx * ((-az + bz) * cy + (-by + ay) * cz) * inv_norm_3 * + grad_normals_a[f][0] + + ((-az + bz) - cy * ((-az + bz) * cy + (-by + ay) * cz) * inv_norm_2) * + inv_norm * grad_normals_a[f][1] + + ((-by + ay) - cz * ((-az + bz) * cy + (-by + ay) * cz) * inv_norm_2) * + inv_norm * grad_normals_a[f][2]; + grad_verts_a[i0][0] += grad_v0_x; + + const float grad_v0_y = + ((-bz + az) * cx + (-ax + bx) * cz) / 2.0 * inv_norm * grad_areas_a[f] + + ((-bz + az) - cx * ((-bz + az) * cx + (-ax + bx) * cz) * inv_norm_2) * + inv_norm * grad_normals_a[f][0] + + -cy * ((-bz + az) * cx + (-ax + bx) * cz) * inv_norm_3 * + grad_normals_a[f][1] + + ((-ax + bx) - cz * ((-bz + az) * cx + (-ax + bx) * cz) * inv_norm_2) * + inv_norm * grad_normals_a[f][2]; + grad_verts[i0][1] += grad_v0_y; + + const float grad_v0_z = + ((-ay + by) * cx + (-bx + ax) * cy) / 2.0 * inv_norm * grad_areas_a[f] + + ((-ay + by) - cx * ((-ay + by) * cx + (-bx + ax) * cy) * inv_norm_2) * + inv_norm * grad_normals_a[f][0] + + ((-bx + ax) - cy * ((-ay + by) * cx + (-bx + ax) * cy) * inv_norm_2) * + inv_norm * grad_normals_a[f][1] + + -cz * ((-ay + by) * cx + (-bx + ax) * cy) * inv_norm_3 * + grad_normals_a[f][2]; + grad_verts[i0][2] += grad_v0_z; + + // grad v1 coming from grad areas and grad normals + const float grad_v1_x = + (by * cz - bz * cy) / 2.0 * inv_norm * grad_areas_a[f] + + -cx * (by * cz - bz * cy) * 
inv_norm_3 * grad_normals_a[f][0] + + (-bz - cy * (by * cz - bz * cy) * inv_norm_2) * inv_norm * + grad_normals_a[f][1] + + (by - cz * (by * cz - bz * cy) * inv_norm_2) * inv_norm * + grad_normals_a[f][2]; + grad_verts[i1][0] += grad_v1_x; + + const float grad_v1_y = + (bz * cx - bx * cz) / 2.0 * inv_norm * grad_areas_a[f] + + (bz - cx * (bz * cx - bx * cz) * inv_norm_2) * inv_norm * + grad_normals_a[f][0] + + -cy * (bz * cx - bx * cz) * inv_norm_3 * grad_normals_a[f][1] + + (-bx - cz * (bz * cx - bx * cz) * inv_norm_2) * inv_norm * + grad_normals_a[f][2]; + grad_verts[i1][1] += grad_v1_y; + + const float grad_v1_z = + (bx * cy - by * cx) / 2.0 * inv_norm * grad_areas_a[f] + + (-by - cx * (bx * cy - by * cx) * inv_norm_2) * inv_norm * + grad_normals_a[f][0] + + (bx - cx * (bx * cy - by * cx) * inv_norm_2) * inv_norm * + grad_normals_a[f][1] + + -cz * (bx * cy - by * cx) * inv_norm_3 * grad_normals_a[f][2]; + grad_verts[i1][2] += grad_v1_z; + + // grad v2 coming from grad areas + const float grad_v2_x = + (az * cy - ay * cz) / 2.0 * inv_norm * grad_areas_a[f] + + -cx * (az * cy - ay * cz) * inv_norm_3 * grad_normals_a[f][0] + + (az - cy * (az * cy - ay * cz) * inv_norm_2) * inv_norm * + grad_normals_a[f][1] + + (-ay - cz * (az * cy - ay * cz) * inv_norm_2) * inv_norm * + grad_normals_a[f][2]; + grad_verts[i2][0] += grad_v2_x; + + const float grad_v2_y = + (ax * cz - az * cx) / 2.0 * inv_norm * grad_areas_a[f] + + (-az - cx * (ax * cz - az * cx) * inv_norm_2) * inv_norm * + grad_normals_a[f][0] + + -cy * (ax * cz - az * cx) * inv_norm_3 * grad_normals_a[f][1] + + (ax - cz * (ax * cz - az * cx) * inv_norm_2) * inv_norm * + grad_normals_a[f][2]; + grad_verts[i2][1] += grad_v2_y; + + const float grad_v2_z = + (ay * cx - ax * cy) / 2.0 * inv_norm * grad_areas_a[f] + + (ay - cx * (ay * cx - ax * cy) * inv_norm_2) * inv_norm * + grad_normals_a[f][0] + + (-ax - cy * (ay * cx - ax * cy) * inv_norm_2) * inv_norm * + grad_normals_a[f][1] + + -cz * (ay * cx - ax * cy) * 
inv_norm_3 * grad_normals_a[f][2]; + grad_verts[i2][2] += grad_v2_z; + } + return grad_verts; +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/gather_scatter/gather_scatter.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/gather_scatter/gather_scatter.cu new file mode 100644 index 0000000000000000000000000000000000000000..1ec1a6f27a2476375f3d140c3c8fb440fb92c04f --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/gather_scatter/gather_scatter.cu @@ -0,0 +1,91 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include + +// TODO(T47953967) to make this cuda kernel support all datatypes. +__global__ void GatherScatterCudaKernel( + const float* __restrict__ input, + const int64_t* __restrict__ edges, + float* __restrict__ output, + bool directed, + bool backward, + const size_t V, + const size_t D, + const size_t E) { + const int tid = threadIdx.x; + + // Reverse the vertex order if backward. + const int v0_idx = backward ? 1 : 0; + const int v1_idx = backward ? 0 : 1; + + // Edges are split evenly across the blocks. + for (int e = blockIdx.x; e < E; e += gridDim.x) { + // Get indices of vertices which form the edge. + const int64_t v0 = edges[2 * e + v0_idx]; + const int64_t v1 = edges[2 * e + v1_idx]; + + // Split vertex features evenly across threads. + // This implementation will be quite wasteful when D<128 since there will be + // a lot of threads doing nothing. 
+ for (int d = tid; d < D; d += blockDim.x) { + const float val = input[v1 * D + d]; + float* address = output + v0 * D + d; + atomicAdd(address, val); + if (!directed) { + const float val = input[v0 * D + d]; + float* address = output + v1 * D + d; + atomicAdd(address, val); + } + } + __syncthreads(); + } +} + +at::Tensor GatherScatterCuda( + const at::Tensor& input, + const at::Tensor& edges, + bool directed, + bool backward) { + // Check inputs are on the same device + at::TensorArg input_t{input, "input", 1}, edges_t{edges, "edges", 2}; + at::CheckedFrom c = "GatherScatterCuda"; + at::checkAllSameGPU(c, {input_t, edges_t}); + + // Set the device for the kernel launch based on the device of the input + at::cuda::CUDAGuard device_guard(input.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + const auto num_vertices = input.size(0); + const auto input_feature_dim = input.size(1); + const auto num_edges = edges.size(0); + + auto output = at::zeros({num_vertices, input_feature_dim}, input.options()); + const size_t threads = 128; + const size_t max_blocks = 1920; + const size_t blocks = num_edges < max_blocks ? 
num_edges : max_blocks; + + if (output.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return output; + } + + GatherScatterCudaKernel<<>>( + input.contiguous().data_ptr(), + edges.contiguous().data_ptr(), + output.data_ptr(), + directed, + backward, + num_vertices, + input_feature_dim, + num_edges); + AT_CUDA_CHECK(cudaGetLastError()); + return output; +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/knn/knn_cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/knn/knn_cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9e3153a6669721240c36084a3a7a563dee250a42 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/knn/knn_cpu.cpp @@ -0,0 +1,128 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include +#include +#include + +std::tuple KNearestNeighborIdxCpu( + const at::Tensor& p1, + const at::Tensor& p2, + const at::Tensor& lengths1, + const at::Tensor& lengths2, + const int norm, + const int K) { + const int N = p1.size(0); + const int P1 = p1.size(1); + const int D = p1.size(2); + + auto long_opts = lengths1.options().dtype(torch::kInt64); + torch::Tensor idxs = torch::full({N, P1, K}, 0, long_opts); + torch::Tensor dists = torch::full({N, P1, K}, 0, p1.options()); + + auto p1_a = p1.accessor(); + auto p2_a = p2.accessor(); + auto lengths1_a = lengths1.accessor(); + auto lengths2_a = lengths2.accessor(); + auto idxs_a = idxs.accessor(); + auto dists_a = dists.accessor(); + + for (int n = 0; n < N; ++n) { + const int64_t length1 = lengths1_a[n]; + const int64_t length2 = lengths2_a[n]; + for (int64_t i1 = 0; i1 < length1; ++i1) { + // Use a priority queue to store (distance, index) tuples. + std::priority_queue> q; + for (int64_t i2 = 0; i2 < length2; ++i2) { + float dist = 0; + for (int d = 0; d < D; ++d) { + float diff = p1_a[n][i1][d] - p2_a[n][i2][d]; + if (norm == 1) { + dist += abs(diff); + } else { // norm is 2 (default) + dist += diff * diff; + } + } + int size = static_cast(q.size()); + if (size < K || dist < std::get<0>(q.top())) { + q.emplace(dist, i2); + if (size >= K) { + q.pop(); + } + } + } + while (!q.empty()) { + auto t = q.top(); + q.pop(); + const int k = q.size(); + dists_a[n][i1][k] = std::get<0>(t); + idxs_a[n][i1][k] = std::get<1>(t); + } + } + } + return std::make_tuple(idxs, dists); +} + +// ------------------------------------------------------------- // +// Backward Operators // +// ------------------------------------------------------------- // + +std::tuple KNearestNeighborBackwardCpu( + const at::Tensor& p1, + const at::Tensor& p2, + const at::Tensor& lengths1, + const at::Tensor& lengths2, + const at::Tensor& idxs, + const int norm, + const at::Tensor& grad_dists) { + const int N = p1.size(0); + const int P1 = 
p1.size(1); + const int D = p1.size(2); + const int P2 = p2.size(1); + const int K = idxs.size(2); + + torch::Tensor grad_p1 = torch::full({N, P1, D}, 0, p1.options()); + torch::Tensor grad_p2 = torch::full({N, P2, D}, 0, p2.options()); + + auto p1_a = p1.accessor(); + auto p2_a = p2.accessor(); + auto lengths1_a = lengths1.accessor(); + auto lengths2_a = lengths2.accessor(); + auto idxs_a = idxs.accessor(); + auto grad_dists_a = grad_dists.accessor(); + auto grad_p1_a = grad_p1.accessor(); + auto grad_p2_a = grad_p2.accessor(); + + for (int n = 0; n < N; ++n) { + const int64_t length1 = lengths1_a[n]; + int64_t length2 = lengths2_a[n]; + length2 = (length2 < K) ? length2 : K; + for (int64_t i1 = 0; i1 < length1; ++i1) { + for (int64_t k = 0; k < length2; ++k) { + const int64_t i2 = idxs_a[n][i1][k]; + // If the index is the pad value of -1 then ignore it + if (i2 == -1) { + continue; + } + for (int64_t d = 0; d < D; ++d) { + float diff = 0.0; + if (norm == 1) { + float sign = (p1_a[n][i1][d] > p2_a[n][i2][d]) ? 1.0 : -1.0; + diff = grad_dists_a[n][i1][k] * sign; + } else { // norm is 2 (default) + diff = 2.0f * grad_dists_a[n][i1][k] * + (p1_a[n][i1][d] - p2_a[n][i2][d]); + } + grad_p1_a[n][i1][d] += diff; + grad_p2_a[n][i2][d] += -1.0f * diff; + } + } + } + } + return std::make_tuple(grad_p1, grad_p2); +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes.cu new file mode 100644 index 0000000000000000000000000000000000000000..8d05ec80b27fd74122b94660bb4dedff585562cb --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes.cu @@ -0,0 +1,565 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. 
+ * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include +#include "marching_cubes/tables.h" + +/* +Parallelized marching cubes for pytorch extension +referenced and adapted from CUDA-Samples: +(https://github.com/NVIDIA/cuda-samples/tree/master/Samples/5_Domain_Specific/marchingCubes) +We divide the algorithm into two forward-passes: +(1) The first forward-pass executes "ClassifyVoxelKernel" to +evaluate volume scalar field for each cube and pre-compute +two arrays -- number of vertices per cube (d_voxelVerts) and +occupied or not per cube (d_voxelOccupied). + +Some prepration steps: +With d_voxelOccupied, an exclusive scan is performed to compute +the number of activeVoxels, which can be used to accelerate +computation. With d_voxelVerts, another exclusive scan +is performed to compute the accumulated sum of vertices in the 3d +grid and totalVerts. + +(2) The second forward-pass calls "GenerateFacesKernel" to +generate interpolated vertex positions and face indices by "marching +through" each cube in the grid. 
+ +*/ + +// EPS: Used to indicate if two float values are close +__constant__ const float EPSILON = 1e-5; + +// Linearly interpolate the position where an isosurface cuts an edge +// between two vertices, based on their scalar values +// +// Args: +// isolevel: float value used as threshold +// p1: position of point1 +// p2: position of point2 +// valp1: field value for p1 +// valp2: field value for p2 +// +// Returns: +// point: interpolated verte +// +__device__ float3 +vertexInterp(float isolevel, float3 p1, float3 p2, float valp1, float valp2) { + float ratio; + float3 p; + + if (abs(isolevel - valp1) < EPSILON) { + return p1; + } else if (abs(isolevel - valp2) < EPSILON) { + return p2; + } else if (abs(valp1 - valp2) < EPSILON) { + return p1; + } + + ratio = (isolevel - valp1) / (valp2 - valp1); + + p.x = p1.x * (1 - ratio) + p2.x * ratio; + p.y = p1.y * (1 - ratio) + p2.y * ratio; + p.z = p1.z * (1 - ratio) + p2.z * ratio; + + return p; +} + +// Determine if the triangle is degenerate +// A triangle is degenerate when at least two of the vertices +// share the same position. +// +// Args: +// p1: position of vertex p1 +// p2: position of vertex p2 +// p3: position of vertex p3 +// +// Returns: +// boolean indicator if the triangle is degenerate +__device__ bool isDegenerate(float3 p1, float3 p2, float3 p3) { + if ((abs(p1.x - p2.x) < EPSILON && abs(p1.y - p2.y) < EPSILON && + abs(p1.z - p2.z) < EPSILON) || + (abs(p2.x - p3.x) < EPSILON && abs(p2.y - p3.y) < EPSILON && + abs(p2.z - p3.z) < EPSILON) || + (abs(p3.x - p1.x) < EPSILON && abs(p3.y - p1.y) < EPSILON && + abs(p3.z - p1.z) < EPSILON)) { + return true; + } else { + return false; + } +} + +// Convert from local vertex id to global vertex id, given position +// of the cube where the vertex resides. The function ensures vertices +// shared from adjacent cubes are mapped to the same global id. 
+ +// Args: +// v: local vertex id +// x: x position of the cube where the vertex belongs +// y: y position of the cube where the vertex belongs +// z: z position of the cube where the vertex belongs +// W: width of x dimension +// H: height of y dimension + +// Returns: +// global vertex id represented by its x/y/z offsets +__device__ uint localToGlobal(int v, int x, int y, int z, int W, int H) { + const int dx = v & 1; + const int dy = v >> 1 & 1; + const int dz = v >> 2 & 1; + return (x + dx) + (y + dy) * W + (z + dz) * W * H; +} + +// Hash_combine a pair of global vertex id to a single integer. +// +// Args: +// v1_id: global id of vertex 1 +// v2_id: global id of vertex 2 +// W: width of the 3d grid +// H: height of the 3d grid +// Z: depth of the 3d grid +// +// Returns: +// hashing for a pair of vertex ids +// +__device__ int64_t hashVpair(uint v1_id, uint v2_id, int W, int H, int D) { + return (int64_t)v1_id * (W + W * H + W * H * D) + (int64_t)v2_id; +} + +// precompute number of vertices and occupancy +// for each voxel in the grid. 
+// +// Args: +// voxelVerts: pointer to device array to store number +// of verts per voxel +// voxelOccupied: pointer to device array to store +// occupancy state per voxel +// vol: torch tensor stored with 3D scalar field +// isolevel: threshold to determine isosurface intersection +// +__global__ void ClassifyVoxelKernel( + at::PackedTensorAccessor32 voxelVerts, + at::PackedTensorAccessor32 voxelOccupied, + const at::PackedTensorAccessor32 vol, + // const at::PackedTensorAccessor + // numVertsTable, + float isolevel) { + const int indexTable[8]{0, 1, 4, 5, 3, 2, 7, 6}; + const uint D = vol.size(0) - 1; + const uint H = vol.size(1) - 1; + const uint W = vol.size(2) - 1; + + // 1-d grid + uint id = blockIdx.x * blockDim.x + threadIdx.x; + uint num_threads = gridDim.x * blockDim.x; + + // Table mapping from cubeindex to number of vertices in the configuration + const unsigned char numVertsTable[256] = { + 0, 3, 3, 6, 3, 6, 6, 9, 3, 6, 6, 9, 6, 9, 9, 6, 3, 6, + 6, 9, 6, 9, 9, 12, 6, 9, 9, 12, 9, 12, 12, 9, 3, 6, 6, 9, + 6, 9, 9, 12, 6, 9, 9, 12, 9, 12, 12, 9, 6, 9, 9, 6, 9, 12, + 12, 9, 9, 12, 12, 9, 12, 15, 15, 6, 3, 6, 6, 9, 6, 9, 9, 12, + 6, 9, 9, 12, 9, 12, 12, 9, 6, 9, 9, 12, 9, 12, 12, 15, 9, 12, + 12, 15, 12, 15, 15, 12, 6, 9, 9, 12, 9, 12, 6, 9, 9, 12, 12, 15, + 12, 15, 9, 6, 9, 12, 12, 9, 12, 15, 9, 6, 12, 15, 15, 12, 15, 6, + 12, 3, 3, 6, 6, 9, 6, 9, 9, 12, 6, 9, 9, 12, 9, 12, 12, 9, + 6, 9, 9, 12, 9, 12, 12, 15, 9, 6, 12, 9, 12, 9, 15, 6, 6, 9, + 9, 12, 9, 12, 12, 15, 9, 12, 12, 15, 12, 15, 15, 12, 9, 12, 12, 9, + 12, 15, 15, 12, 12, 9, 15, 6, 15, 12, 6, 3, 6, 9, 9, 12, 9, 12, + 12, 15, 9, 12, 12, 15, 6, 9, 9, 6, 9, 12, 12, 15, 12, 15, 15, 6, + 12, 9, 15, 12, 9, 6, 12, 3, 9, 12, 12, 15, 12, 15, 9, 12, 12, 15, + 15, 6, 9, 12, 6, 3, 6, 9, 9, 6, 9, 12, 6, 3, 9, 6, 12, 3, + 6, 3, 3, 0, + }; + + for (uint tid = id; tid < D * H * W; tid += num_threads) { + // compute global location of the voxel + const int gx = tid % W; + const int gy = tid / W % H; + const 
int gz = tid / (W * H); + + int cubeindex = 0; + for (int i = 0; i < 8; i++) { + const int dx = i & 1; + const int dy = i >> 1 & 1; + const int dz = i >> 2 & 1; + + const int x = gx + dx; + const int y = gy + dy; + const int z = gz + dz; + + if (vol[z][y][x] < isolevel) { + cubeindex |= 1 << indexTable[i]; + } + } + // collect number of vertices for each voxel + unsigned char numVerts = numVertsTable[cubeindex]; + voxelVerts[tid] = numVerts; + voxelOccupied[tid] = (numVerts > 0); + } +} + +// extract compact voxel array for acceleration +// +// Args: +// compactedVoxelArray: tensor of shape (activeVoxels,) which maps +// from accumulated non-empty voxel index to original 3d grid index +// voxelOccupied: tensor of shape (numVoxels,) which stores +// the occupancy state per voxel +// voxelOccupiedScan: tensor of shape (numVoxels,) which +// stores the accumulated occupied voxel counts +// numVoxels: number of total voxels in the grid +// +__global__ void CompactVoxelsKernel( + at::PackedTensorAccessor32 + compactedVoxelArray, + const at::PackedTensorAccessor32 + voxelOccupied, + const at::PackedTensorAccessor32 + voxelOccupiedScan, + uint numVoxels) { + uint id = blockIdx.x * blockDim.x + threadIdx.x; + uint num_threads = gridDim.x * blockDim.x; + for (uint tid = id; tid < numVoxels; tid += num_threads) { + if (voxelOccupied[tid]) { + compactedVoxelArray[voxelOccupiedScan[tid]] = tid; + } + } +} + +// generate triangles for each voxel using marching cubes +// +// Args: +// verts: torch tensor of shape (V, 3) to store interpolated mesh vertices +// faces: torch tensor of shape (F, 3) to store indices for mesh faces +// ids: torch tensor of shape (V) to store id of each vertex +// compactedVoxelArray: tensor of shape (activeVoxels,) which stores +// non-empty voxel index. 
+// numVertsScanned: tensor of shape (numVoxels,) which stores accumulated +// vertices count in the voxel +// activeVoxels: number of active voxels used for acceleration +// vol: torch tensor stored with 3D scalar field +// isolevel: threshold to determine isosurface intersection +// +__global__ void GenerateFacesKernel( + at::PackedTensorAccessor32 verts, + at::PackedTensorAccessor faces, + at::PackedTensorAccessor ids, + at::PackedTensorAccessor32 + compactedVoxelArray, + at::PackedTensorAccessor32 + numVertsScanned, + const uint activeVoxels, + const at::PackedTensorAccessor32 vol, + const at::PackedTensorAccessor32 faceTable, + // const at::PackedTensorAccessor32 + // numVertsTable, + const float isolevel) { + uint id = blockIdx.x * blockDim.x + threadIdx.x; + uint num_threads = gridDim.x * blockDim.x; + const int faces_size = faces.size(0); + // Table mapping each edge to the corresponding cube vertices offsets + const int edgeToVertsTable[12][2] = { + {0, 1}, + {1, 5}, + {4, 5}, + {0, 4}, + {2, 3}, + {3, 7}, + {6, 7}, + {2, 6}, + {0, 2}, + {1, 3}, + {5, 7}, + {4, 6}, + }; + + // Table mapping from cubeindex to number of vertices in the configuration + const unsigned char numVertsTable[256] = { + 0, 3, 3, 6, 3, 6, 6, 9, 3, 6, 6, 9, 6, 9, 9, 6, 3, 6, + 6, 9, 6, 9, 9, 12, 6, 9, 9, 12, 9, 12, 12, 9, 3, 6, 6, 9, + 6, 9, 9, 12, 6, 9, 9, 12, 9, 12, 12, 9, 6, 9, 9, 6, 9, 12, + 12, 9, 9, 12, 12, 9, 12, 15, 15, 6, 3, 6, 6, 9, 6, 9, 9, 12, + 6, 9, 9, 12, 9, 12, 12, 9, 6, 9, 9, 12, 9, 12, 12, 15, 9, 12, + 12, 15, 12, 15, 15, 12, 6, 9, 9, 12, 9, 12, 6, 9, 9, 12, 12, 15, + 12, 15, 9, 6, 9, 12, 12, 9, 12, 15, 9, 6, 12, 15, 15, 12, 15, 6, + 12, 3, 3, 6, 6, 9, 6, 9, 9, 12, 6, 9, 9, 12, 9, 12, 12, 9, + 6, 9, 9, 12, 9, 12, 12, 15, 9, 6, 12, 9, 12, 9, 15, 6, 6, 9, + 9, 12, 9, 12, 12, 15, 9, 12, 12, 15, 12, 15, 15, 12, 9, 12, 12, 9, + 12, 15, 15, 12, 12, 9, 15, 6, 15, 12, 6, 3, 6, 9, 9, 12, 9, 12, + 12, 15, 9, 12, 12, 15, 6, 9, 9, 6, 9, 12, 12, 15, 12, 15, 15, 6, + 12, 9, 15, 
12, 9, 6, 12, 3, 9, 12, 12, 15, 12, 15, 9, 12, 12, 15, + 15, 6, 9, 12, 6, 3, 6, 9, 9, 6, 9, 12, 6, 3, 9, 6, 12, 3, + 6, 3, 3, 0, + }; + + for (uint tid = id; tid < activeVoxels; tid += num_threads) { + uint voxel = compactedVoxelArray[tid]; // maps from accumulated id to + // original 3d voxel id + // mapping from offsets to vi index + int indexTable[8]{0, 1, 4, 5, 3, 2, 7, 6}; + // field value for each vertex + float val[8]; + // position for each vertex + float3 p[8]; + // 3d address + const uint D = vol.size(0) - 1; + const uint H = vol.size(1) - 1; + const uint W = vol.size(2) - 1; + + const int gx = voxel % W; + const int gy = voxel / W % H; + const int gz = voxel / (W * H); + + // recalculate cubeindex; + uint cubeindex = 0; + for (int i = 0; i < 8; i++) { + const int dx = i & 1; + const int dy = i >> 1 & 1; + const int dz = i >> 2 & 1; + + const int x = gx + dx; + const int y = gy + dy; + const int z = gz + dz; + + if (vol[z][y][x] < isolevel) { + cubeindex |= 1 << indexTable[i]; + } + val[indexTable[i]] = vol[z][y][x]; // maps from vi to volume + p[indexTable[i]] = make_float3(x, y, z); // maps from vi to position + } + + // Interpolate vertices where the surface intersects the cube + float3 vertlist[12]; + vertlist[0] = vertexInterp(isolevel, p[0], p[1], val[0], val[1]); + vertlist[1] = vertexInterp(isolevel, p[1], p[2], val[1], val[2]); + vertlist[2] = vertexInterp(isolevel, p[3], p[2], val[3], val[2]); + vertlist[3] = vertexInterp(isolevel, p[0], p[3], val[0], val[3]); + + vertlist[4] = vertexInterp(isolevel, p[4], p[5], val[4], val[5]); + vertlist[5] = vertexInterp(isolevel, p[5], p[6], val[5], val[6]); + vertlist[6] = vertexInterp(isolevel, p[7], p[6], val[7], val[6]); + vertlist[7] = vertexInterp(isolevel, p[4], p[7], val[4], val[7]); + + vertlist[8] = vertexInterp(isolevel, p[0], p[4], val[0], val[4]); + vertlist[9] = vertexInterp(isolevel, p[1], p[5], val[1], val[5]); + vertlist[10] = vertexInterp(isolevel, p[2], p[6], val[2], val[6]); + 
vertlist[11] = vertexInterp(isolevel, p[3], p[7], val[3], val[7]); + + // output triangle faces + uint numVerts = numVertsTable[cubeindex]; + + for (int i = 0; i < numVerts; i++) { + int index = numVertsScanned[voxel] + i; + unsigned char edge = faceTable[cubeindex][i]; + + uint v1 = edgeToVertsTable[edge][0]; + uint v2 = edgeToVertsTable[edge][1]; + uint v1_id = localToGlobal(v1, gx, gy, gz, W + 1, H + 1); + uint v2_id = localToGlobal(v2, gx, gy, gz, W + 1, H + 1); + int64_t edge_id = hashVpair(v1_id, v2_id, W + 1, H + 1, D + 1); + + verts[index][0] = vertlist[edge].x; + verts[index][1] = vertlist[edge].y; + verts[index][2] = vertlist[edge].z; + + if (index < faces_size) { + faces[index][0] = index * 3 + 0; + faces[index][1] = index * 3 + 1; + faces[index][2] = index * 3 + 2; + } + + ids[index] = edge_id; + } + } // end for grid-strided kernel +} + +// ATen/Torch does not have an exclusive-scan operator. Additionally, in the +// code below we need to get the "total number of items to work on" after +// a scan, which with an inclusive-scan would simply be the value of the last +// element in the tensor. +// +// This utility function hits two birds with one stone, by running +// an inclusive-scan into a right-shifted view of a tensor that's +// allocated to be one element bigger than the input tensor. +// +// Note; return tensor is `int64_t` per element, even if the input +// tensor is only 32-bit. Also, the return tensor is one element bigger +// than the input one. +// +// Secondary optional argument is an output argument that gets the +// value of the last element of the return tensor (because you almost +// always need this CPU-side right after this function anyway). 
+static at::Tensor ExclusiveScanAndTotal( + const at::Tensor& inTensor, + int64_t* optTotal = nullptr) { + const auto inSize = inTensor.sizes()[0]; + auto retTensor = at::zeros({inSize + 1}, at::kLong).to(inTensor.device()); + + using at::indexing::None; + using at::indexing::Slice; + auto rightShiftedView = retTensor.index({Slice(1, None)}); + + // Do an (inclusive-scan) cumulative sum in to the view that's + // shifted one element to the right... + at::cumsum_out(rightShiftedView, inTensor, 0, at::kLong); + + if (optTotal) { + *optTotal = retTensor[inSize].cpu().item(); + } + + // ...so that the not-shifted tensor holds the exclusive-scan + return retTensor; +} + +// Entrance for marching cubes cuda extension. Marching Cubes is an algorithm to +// create triangle meshes from an implicit function (one of the form f(x, y, z) +// = 0). It works by iteratively checking a grid of cubes superimposed over a +// region of the function. The number of faces and positions of the vertices in +// each cube are determined by the the isolevel as well as the volume values +// from the eight vertices of the cube. +// +// We implement this algorithm with two forward passes where the first pass +// checks the occupancy and collects number of vertices for each cube. The +// second pass will skip empty voxels and generate vertices as well as faces for +// each cube through table lookup. The vertex positions, faces and identifiers +// for each vertex will be returned. +// +// +// Args: +// vol: torch tensor of shape (D, H, W) for volume scalar field +// isolevel: threshold to determine isosurface intesection +// +// Returns: +// tuple of : which stores vertex positions, face +// indices and integer identifiers for each vertex. +// verts: (N_verts, 3) FloatTensor for vertex positions +// faces: (N_faces, 3) LongTensor of face indices +// ids: (N_verts,) LongTensor used to identify each vertex. Vertices from +// adjacent edges can share the same 3d position. 
To reduce memory +// redudancy, we tag each vertex with a unique id for deduplication. In +// contrast to deduping on vertices, this has the benefit to avoid +// floating point precision issues. +// +std::tuple MarchingCubesCuda( + const at::Tensor& vol, + const float isolevel) { + // Set the device for the kernel launch based on the device of vol + at::cuda::CUDAGuard device_guard(vol.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // transfer _FACE_TABLE data to device + at::Tensor face_table_tensor = at::zeros( + {256, 16}, at::TensorOptions().dtype(at::kInt).device(at::kCPU)); + auto face_table_a = face_table_tensor.accessor(); + for (int i = 0; i < 256; i++) { + for (int j = 0; j < 16; j++) { + face_table_a[i][j] = _FACE_TABLE[i][j]; + } + } + at::Tensor faceTable = face_table_tensor.to(vol.device()); + + // get numVoxels + int threads = 128; + const uint D = vol.size(0); + const uint H = vol.size(1); + const uint W = vol.size(2); + const int numVoxels = (D - 1) * (H - 1) * (W - 1); + dim3 grid((numVoxels + threads - 1) / threads, 1, 1); + if (grid.x > 65535) { + grid.x = 65535; + } + + using at::indexing::None; + using at::indexing::Slice; + + auto d_voxelVerts = + at::zeros({numVoxels}, at::TensorOptions().dtype(at::kInt)) + .to(vol.device()); + auto d_voxelOccupied = + at::zeros({numVoxels}, at::TensorOptions().dtype(at::kInt)) + .to(vol.device()); + + // Execute "ClassifyVoxelKernel" kernel to precompute + // two arrays - d_voxelOccupied and d_voxelVertices to global memory, + // which stores the occupancy state and number of voxel vertices per voxel. + ClassifyVoxelKernel<<>>( + d_voxelVerts.packed_accessor32(), + d_voxelOccupied.packed_accessor32(), + vol.packed_accessor32(), + isolevel); + AT_CUDA_CHECK(cudaGetLastError()); + cudaDeviceSynchronize(); + + // Scan "d_voxelOccupied" array to generate accumulated voxel occupancy + // count for voxels in the grid and compute the number of active voxels. 
+ // If the number of active voxels is 0, return zero tensor for verts and + // faces. + int64_t activeVoxels = 0; + auto d_voxelOccupiedScan = + ExclusiveScanAndTotal(d_voxelOccupied, &activeVoxels); + + const int device_id = vol.device().index(); + auto opt = at::TensorOptions().dtype(at::kInt).device(at::kCUDA, device_id); + auto opt_long = + at::TensorOptions().dtype(at::kLong).device(at::kCUDA, device_id); + + if (activeVoxels == 0) { + int ntris = 0; + at::Tensor verts = at::zeros({ntris * 3, 3}, vol.options()); + at::Tensor faces = at::zeros({ntris, 3}, opt_long); + at::Tensor ids = at::zeros({ntris}, opt_long); + return std::make_tuple(verts, faces, ids); + } + + // Execute "CompactVoxelsKernel" kernel to compress voxels for acceleration. + // This allows us to run triangle generation on only the occupied voxels. + auto d_compVoxelArray = at::zeros({activeVoxels}, opt); + CompactVoxelsKernel<<>>( + d_compVoxelArray.packed_accessor32(), + d_voxelOccupied.packed_accessor32(), + d_voxelOccupiedScan + .packed_accessor32(), + numVoxels); + AT_CUDA_CHECK(cudaGetLastError()); + cudaDeviceSynchronize(); + + // Scan d_voxelVerts array to generate offsets of vertices for each voxel + int64_t totalVerts = 0; + auto d_voxelVertsScan = ExclusiveScanAndTotal(d_voxelVerts, &totalVerts); + + // Execute "GenerateFacesKernel" kernel + // This runs only on the occupied voxels. + // It looks up the field values and generates the triangle data. 
+ at::Tensor verts = at::zeros({totalVerts, 3}, vol.options()); + at::Tensor faces = at::zeros({totalVerts / 3, 3}, opt_long); + + at::Tensor ids = at::zeros({totalVerts}, opt_long); + + dim3 grid2((activeVoxels + threads - 1) / threads, 1, 1); + if (grid2.x > 65535) { + grid2.x = 65535; + } + + GenerateFacesKernel<<>>( + verts.packed_accessor32(), + faces.packed_accessor(), + ids.packed_accessor(), + d_compVoxelArray.packed_accessor32(), + d_voxelVertsScan.packed_accessor32(), + activeVoxels, + vol.packed_accessor32(), + faceTable.packed_accessor32(), + isolevel); + AT_CUDA_CHECK(cudaGetLastError()); + + return std::make_tuple(verts, faces, ids); +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes.h new file mode 100644 index 0000000000000000000000000000000000000000..51c660b18076014fd8717ef57eb93af328785f56 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once +#include +#include +#include +#include "utils/pytorch3d_cutils.h" + +// Run Marching Cubes algorithm over a batch of volume scalar fields +// with a pre-defined threshold and return a mesh composed of vertices +// and faces for the mesh. +// +// Args: +// vol: FloatTensor of shape (D, H, W) giving a volume +// scalar grids. +// isolevel: isosurface value to use as the threshoold to determine whether +// the points are within a volume. 
+// +// Returns: +// vertices: (N_verts, 3) FloatTensor of vertices +// faces: (N_faces, 3) LongTensor of faces +// ids: (N_verts,) LongTensor used to identify each vertex and deduplication +// to avoid floating point precision issues. +// For Cuda, will be used to dedupe redundant vertices. +// For cpp implementation, this tensor is just a placeholder. + +// CPU implementation +std::tuple MarchingCubesCpu( + const at::Tensor& vol, + const float isolevel); + +// CUDA implementation +std::tuple MarchingCubesCuda( + const at::Tensor& vol, + const float isolevel); + +// Implementation which is exposed +inline std::tuple MarchingCubes( + const at::Tensor& vol, + const float isolevel) { + if (vol.is_cuda()) { +#ifdef WITH_CUDA + CHECK_CUDA(vol); + const int D = vol.size(0); + const int H = vol.size(1); + const int W = vol.size(2); + if (D > 1024 || H > 1024 || W > 1024) { + AT_ERROR("Maximum volume size allowed 1K x 1K x 1K"); + } + return MarchingCubesCuda(vol.contiguous(), isolevel); +#else + AT_ERROR("Not compiled with GPU support."); +#endif + } + return MarchingCubesCpu(vol.contiguous(), isolevel); +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes_cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes_cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fa128e714228fd4c4b699ee13071b752dd8fdf4c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes_cpu.cpp @@ -0,0 +1,111 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include +#include +#include +#include +#include +#include +#include "marching_cubes/marching_cubes_utils.h" +#include "marching_cubes/tables.h" + +// Cpu implementation for Marching Cubes +// Args: +// vol: a Tensor of size (D, H, W) corresponding to a 3D scalar field +// isolevel: the isosurface value to use as the threshold to determine +// whether points are within a volume. +// +// Returns: +// vertices: a float tensor of shape (N_verts, 3) for positions of the mesh +// faces: a long tensor of shape (N_faces, 3) for indices of the face +// ids: a long tensor of shape (N_verts) as placeholder +// +std::tuple MarchingCubesCpu( + const at::Tensor& vol, + const float isolevel) { + // volume shapes + const int D = vol.size(0); + const int H = vol.size(1); + const int W = vol.size(2); + + // Create tensor accessors + auto vol_a = vol.accessor(); + // edge_id_to_v maps from an edge id to a vertex position + std::unordered_map edge_id_to_v; + // uniq_edge_id: used to remove redundant edge ids + std::unordered_map uniq_edge_id; + std::vector faces; // store face indices + std::vector verts; // store vertex positions + // enumerate each cell in the 3d grid + for (int z = 0; z < D - 1; z++) { + for (int y = 0; y < H - 1; y++) { + for (int x = 0; x < W - 1; x++) { + Cube cube(x, y, z, vol_a, isolevel); + // Cube is entirely in/out of the surface + if (_FACE_TABLE[cube.cubeindex][0] == 255) { + continue; + } + // store all boundary vertices that intersect with the edges + std::array interp_points; + // triangle vertex IDs and positions + std::vector tri; + std::vector ps; + + // Interpolate the vertices where the surface intersects with the cube + for (int j = 0; _FACE_TABLE[cube.cubeindex][j] != 255; j++) { + const int e = _FACE_TABLE[cube.cubeindex][j]; + interp_points[e] = cube.VertexInterp(isolevel, e, vol_a); + + int64_t edge = cube.HashVpair(e, W, H, D); + tri.push_back(edge); + ps.push_back(interp_points[e]); + + // Check if the triangle face is degenerate. 
A triangle face + // is degenerate if any of the two verices share the same 3D position + if ((j + 1) % 3 == 0 && ps[0] != ps[1] && ps[1] != ps[2] && + ps[2] != ps[0]) { + for (int k = 0; k < 3; k++) { + int64_t v = tri.at(k); + edge_id_to_v[v] = ps.at(k); + if (!uniq_edge_id.count(v)) { + uniq_edge_id[v] = verts.size(); + verts.push_back(edge_id_to_v[v]); + } + faces.push_back(uniq_edge_id[v]); + } + tri.clear(); + ps.clear(); + } // endif + } // endfor edge enumeration + } // endfor x + } // endfor y + } // endfor z + // Collect returning tensor + const int n_vertices = verts.size(); + const int64_t n_faces = (int64_t)faces.size() / 3; + auto vert_tensor = torch::zeros({n_vertices, 3}, torch::kFloat); + auto id_tensor = torch::zeros({n_vertices}, torch::kInt64); // placeholder + auto face_tensor = torch::zeros({n_faces, 3}, torch::kInt64); + + auto vert_a = vert_tensor.accessor(); + for (int i = 0; i < n_vertices; i++) { + vert_a[i][0] = verts.at(i).x; + vert_a[i][1] = verts.at(i).y; + vert_a[i][2] = verts.at(i).z; + } + + auto face_a = face_tensor.accessor(); + for (int64_t i = 0; i < n_faces; i++) { + face_a[i][0] = faces.at(i * 3 + 0); + face_a[i][1] = faces.at(i * 3 + 1); + face_a[i][2] = faces.at(i * 3 + 2); + } + + return std::make_tuple(vert_tensor, face_tensor, id_tensor); +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes_utils.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..486e0339eda613f2886bb3165a0fde1d0a5d6bf7 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes_utils.h @@ -0,0 +1,145 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. 
+ * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once +#include +#include +#include +#include +#include "ATen/core/TensorAccessor.h" +#include "marching_cubes/tables.h" + +// EPS: Used to assess whether two float values are close +const float EPS = 1e-5; + +// Data structures for the marching cubes +struct Vertex { + // Constructor used when performing marching cube in each cell + explicit Vertex(float x = 0.0f, float y = 0.0f, float z = 0.0f) + : x(x), y(y), z(z) {} + + // The */+ operator overrides are used for vertex interpolation + Vertex operator*(float s) const { + return Vertex(x * s, y * s, z * s); + } + Vertex operator+(const Vertex& xyz) const { + return Vertex(x + xyz.x, y + xyz.y, z + xyz.z); + } + // The =/!= operator overrides is used for checking degenerate triangles + bool operator==(const Vertex& xyz) const { + return ( + std::abs(x - xyz.x) < EPS && std::abs(y - xyz.y) < EPS && + std::abs(z - xyz.z) < EPS); + } + bool operator!=(const Vertex& xyz) const { + return ( + std::abs(x - xyz.x) >= EPS || std::abs(y - xyz.y) >= EPS || + std::abs(z - xyz.z) >= EPS); + } + // vertex position + float x, y, z; +}; + +struct Cube { + // Edge and vertex convention: + // v4_______e4____________v5 + // /| /| + // / | / | + // e7/ | e5/ | + // /___|______e6_________/ | + // v7| | |v6 |e9 + // | | | | + // | |e8 |e10| + // e11| | | | + // | |_________________|___| + // | / v0 e0 | /v1 + // | / | / + // | /e3 | /e1 + // |/_____________________|/ + // v3 e2 v2 + + Vertex p[8]; + int x, y, z; + int cubeindex = 0; + Cube( + int x, + int y, + int z, + const at::TensorAccessor& vol_a, + const float isolevel) + : x(x), y(y), z(z) { + // vertex position (x, y, z) for v0-v1-v4-v5-v3-v2-v7-v6 + for (int v = 0; v < 8; v++) { + p[v] = Vertex(x + (v & 1), y + (v >> 1 & 1), z + (v >> 2 & 1)); + } + // Calculates cube configuration index given values of the cube vertices + for 
(int i = 0; i < 8; i++) { + const int idx = _INDEX_TABLE[i]; + Vertex v = p[idx]; + if (vol_a[v.z][v.y][v.x] < isolevel) { + cubeindex |= (1 << i); + } + } + } + + // Linearly interpolate the position where an isosurface cuts an edge + // between two vertices, based on their scalar values + // + // Args: + // isolevel: float value used as threshold + // edge: edge (ID) to interpolate + // cube: current cube vertices + // vol_a: 3D scalar field + // + // Returns: + // point: interpolated vertex + Vertex VertexInterp( + float isolevel, + const int edge, + const at::TensorAccessor& vol_a) { + const int v1 = _EDGE_TO_VERTICES[edge][0]; + const int v2 = _EDGE_TO_VERTICES[edge][1]; + Vertex p1 = p[v1]; + Vertex p2 = p[v2]; + float val1 = vol_a[p1.z][p1.y][p1.x]; + float val2 = vol_a[p2.z][p2.y][p2.x]; + + float ratio = 1.0f; + if (std::abs(isolevel - val1) < EPS) { + return p1; + } else if (std::abs(isolevel - val2) < EPS) { + return p2; + } else if (std::abs(val1 - val2) < EPS) { + return p1; + } + // interpolate vertex p based on two vertices on the edge + ratio = (isolevel - val1) / (val2 - val1); + return p1 * (1 - ratio) + p2 * ratio; + } + + // Hash an edge into a global edge_id. The function binds an + // edge with an integer to address floating point precision issue. 
+ // + // Args: + // v1_id: global id of vertex 1 + // v2_id: global id of vertex 2 + // W: width of the 3d grid + // H: height of the 3d grid + // D: depth of the 3d grid + // + // Returns: + // hashing for a pair of vertex ids + // + int64_t HashVpair(const int edge, int W, int H, int D) { + const int v1 = _EDGE_TO_VERTICES[edge][0]; + const int v2 = _EDGE_TO_VERTICES[edge][1]; + const int v1_id = p[v1].x + p[v1].y * W + p[v1].z * W * H; + const int v2_id = p[v2].x + p[v2].y * W + p[v2].z * W * H; + return (int64_t)v1_id * (W + W * H + W * H * D) + (int64_t)v2_id; + } +}; diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/marching_cubes/tables.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/marching_cubes/tables.h new file mode 100644 index 0000000000000000000000000000000000000000..3aff617c53e5ba963c014cabd7beb1c2cd61a053 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/marching_cubes/tables.h @@ -0,0 +1,294 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once +using uint = unsigned int; + +// A table mapping from cubeindex to a list of face configurations. 
+// Each list contains at most 5 faces, where each face is represented with +// 3 consecutive numbers +// Table adapted from http://paulbourke.net/geometry/polygonise/ +// +#define X 255 +const unsigned char _FACE_TABLE[256][16] = { + {X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {0, 8, 3, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {0, 1, 9, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {1, 8, 3, 9, 8, 1, X, X, X, X, X, X, X, X, X, X}, + {1, 2, 10, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {0, 8, 3, 1, 2, 10, X, X, X, X, X, X, X, X, X, X}, + {9, 2, 10, 0, 2, 9, X, X, X, X, X, X, X, X, X, X}, + {2, 8, 3, 2, 10, 8, 10, 9, 8, X, X, X, X, X, X, X}, + {3, 11, 2, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {0, 11, 2, 8, 11, 0, X, X, X, X, X, X, X, X, X, X}, + {1, 9, 0, 2, 3, 11, X, X, X, X, X, X, X, X, X, X}, + {1, 11, 2, 1, 9, 11, 9, 8, 11, X, X, X, X, X, X, X}, + {3, 10, 1, 11, 10, 3, X, X, X, X, X, X, X, X, X, X}, + {0, 10, 1, 0, 8, 10, 8, 11, 10, X, X, X, X, X, X, X}, + {3, 9, 0, 3, 11, 9, 11, 10, 9, X, X, X, X, X, X, X}, + {9, 8, 10, 10, 8, 11, X, X, X, X, X, X, X, X, X, X}, + {4, 7, 8, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {4, 3, 0, 7, 3, 4, X, X, X, X, X, X, X, X, X, X}, + {0, 1, 9, 8, 4, 7, X, X, X, X, X, X, X, X, X, X}, + {4, 1, 9, 4, 7, 1, 7, 3, 1, X, X, X, X, X, X, X}, + {1, 2, 10, 8, 4, 7, X, X, X, X, X, X, X, X, X, X}, + {3, 4, 7, 3, 0, 4, 1, 2, 10, X, X, X, X, X, X, X}, + {9, 2, 10, 9, 0, 2, 8, 4, 7, X, X, X, X, X, X, X}, + {2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, X, X, X, X}, + {8, 4, 7, 3, 11, 2, X, X, X, X, X, X, X, X, X, X}, + {11, 4, 7, 11, 2, 4, 2, 0, 4, X, X, X, X, X, X, X}, + {9, 0, 1, 8, 4, 7, 2, 3, 11, X, X, X, X, X, X, X}, + {4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, X, X, X, X}, + {3, 10, 1, 3, 11, 10, 7, 8, 4, X, X, X, X, X, X, X}, + {1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, X, X, X, X}, + {4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, X, X, X, X}, + {4, 7, 11, 4, 11, 9, 9, 11, 10, X, X, X, X, X, X, X}, + {9, 5, 4, X, X, X, X, X, X, X, X, X, X, X, X, X}, 
+ {9, 5, 4, 0, 8, 3, X, X, X, X, X, X, X, X, X, X}, + {0, 5, 4, 1, 5, 0, X, X, X, X, X, X, X, X, X, X}, + {8, 5, 4, 8, 3, 5, 3, 1, 5, X, X, X, X, X, X, X}, + {1, 2, 10, 9, 5, 4, X, X, X, X, X, X, X, X, X, X}, + {3, 0, 8, 1, 2, 10, 4, 9, 5, X, X, X, X, X, X, X}, + {5, 2, 10, 5, 4, 2, 4, 0, 2, X, X, X, X, X, X, X}, + {2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, X, X, X, X}, + {9, 5, 4, 2, 3, 11, X, X, X, X, X, X, X, X, X, X}, + {0, 11, 2, 0, 8, 11, 4, 9, 5, X, X, X, X, X, X, X}, + {0, 5, 4, 0, 1, 5, 2, 3, 11, X, X, X, X, X, X, X}, + {2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, X, X, X, X}, + {10, 3, 11, 10, 1, 3, 9, 5, 4, X, X, X, X, X, X, X}, + {4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, X, X, X, X}, + {5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, X, X, X, X}, + {5, 4, 8, 5, 8, 10, 10, 8, 11, X, X, X, X, X, X, X}, + {9, 7, 8, 5, 7, 9, X, X, X, X, X, X, X, X, X, X}, + {9, 3, 0, 9, 5, 3, 5, 7, 3, X, X, X, X, X, X, X}, + {0, 7, 8, 0, 1, 7, 1, 5, 7, X, X, X, X, X, X, X}, + {1, 5, 3, 3, 5, 7, X, X, X, X, X, X, X, X, X, X}, + {9, 7, 8, 9, 5, 7, 10, 1, 2, X, X, X, X, X, X, X}, + {10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, X, X, X, X}, + {8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, X, X, X, X}, + {2, 10, 5, 2, 5, 3, 3, 5, 7, X, X, X, X, X, X, X}, + {7, 9, 5, 7, 8, 9, 3, 11, 2, X, X, X, X, X, X, X}, + {9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, X, X, X, X}, + {2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, X, X, X, X}, + {11, 2, 1, 11, 1, 7, 7, 1, 5, X, X, X, X, X, X, X}, + {9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, X, X, X, X}, + {5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, X}, + {11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, X}, + {11, 10, 5, 7, 11, 5, X, X, X, X, X, X, X, X, X, X}, + {10, 6, 5, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {0, 8, 3, 5, 10, 6, X, X, X, X, X, X, X, X, X, X}, + {9, 0, 1, 5, 10, 6, X, X, X, X, X, X, X, X, X, X}, + {1, 8, 3, 1, 9, 8, 5, 10, 6, X, X, X, X, X, X, X}, + {1, 6, 5, 2, 6, 1, X, X, X, X, X, X, X, X, X, X}, + {1, 6, 5, 1, 2, 6, 3, 0, 8, X, X, X, X, X, X, X}, + {9, 6, 5, 9, 0, 6, 0, 2, 
6, X, X, X, X, X, X, X}, + {5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, X, X, X, X}, + {2, 3, 11, 10, 6, 5, X, X, X, X, X, X, X, X, X, X}, + {11, 0, 8, 11, 2, 0, 10, 6, 5, X, X, X, X, X, X, X}, + {0, 1, 9, 2, 3, 11, 5, 10, 6, X, X, X, X, X, X, X}, + {5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, X, X, X, X}, + {6, 3, 11, 6, 5, 3, 5, 1, 3, X, X, X, X, X, X, X}, + {0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, X, X, X, X}, + {3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, X, X, X, X}, + {6, 5, 9, 6, 9, 11, 11, 9, 8, X, X, X, X, X, X, X}, + {5, 10, 6, 4, 7, 8, X, X, X, X, X, X, X, X, X, X}, + {4, 3, 0, 4, 7, 3, 6, 5, 10, X, X, X, X, X, X, X}, + {1, 9, 0, 5, 10, 6, 8, 4, 7, X, X, X, X, X, X, X}, + {10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, X, X, X, X}, + {6, 1, 2, 6, 5, 1, 4, 7, 8, X, X, X, X, X, X, X}, + {1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, X, X, X, X}, + {8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, X, X, X, X}, + {7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, X}, + {3, 11, 2, 7, 8, 4, 10, 6, 5, X, X, X, X, X, X, X}, + {5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, X, X, X, X}, + {0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, X, X, X, X}, + {9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, X}, + {8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, X, X, X, X}, + {5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, X}, + {0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, X}, + {6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, X, X, X, X}, + {10, 4, 9, 6, 4, 10, X, X, X, X, X, X, X, X, X, X}, + {4, 10, 6, 4, 9, 10, 0, 8, 3, X, X, X, X, X, X, X}, + {10, 0, 1, 10, 6, 0, 6, 4, 0, X, X, X, X, X, X, X}, + {8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, X, X, X, X}, + {1, 4, 9, 1, 2, 4, 2, 6, 4, X, X, X, X, X, X, X}, + {3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, X, X, X, X}, + {0, 2, 4, 4, 2, 6, X, X, X, X, X, X, X, X, X, X}, + {8, 3, 2, 8, 2, 4, 4, 2, 6, X, X, X, X, X, X, X}, + {10, 4, 9, 10, 6, 4, 11, 2, 3, X, X, X, X, X, X, X}, + {0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, X, X, X, X}, + {3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, X, X, X, X}, + {6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 
11, 1, X}, + {9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, X, X, X, X}, + {8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, X}, + {3, 11, 6, 3, 6, 0, 0, 6, 4, X, X, X, X, X, X, X}, + {6, 4, 8, 11, 6, 8, X, X, X, X, X, X, X, X, X, X}, + {7, 10, 6, 7, 8, 10, 8, 9, 10, X, X, X, X, X, X, X}, + {0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, X, X, X, X}, + {10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, X, X, X, X}, + {10, 6, 7, 10, 7, 1, 1, 7, 3, X, X, X, X, X, X, X}, + {1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, X, X, X, X}, + {2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, X}, + {7, 8, 0, 7, 0, 6, 6, 0, 2, X, X, X, X, X, X, X}, + {7, 3, 2, 6, 7, 2, X, X, X, X, X, X, X, X, X, X}, + {2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, X, X, X, X}, + {2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, X}, + {1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, X}, + {11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, X, X, X, X}, + {8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, X}, + {0, 9, 1, 11, 6, 7, X, X, X, X, X, X, X, X, X, X}, + {7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, X, X, X, X}, + {7, 11, 6, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {7, 6, 11, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {3, 0, 8, 11, 7, 6, X, X, X, X, X, X, X, X, X, X}, + {0, 1, 9, 11, 7, 6, X, X, X, X, X, X, X, X, X, X}, + {8, 1, 9, 8, 3, 1, 11, 7, 6, X, X, X, X, X, X, X}, + {10, 1, 2, 6, 11, 7, X, X, X, X, X, X, X, X, X, X}, + {1, 2, 10, 3, 0, 8, 6, 11, 7, X, X, X, X, X, X, X}, + {2, 9, 0, 2, 10, 9, 6, 11, 7, X, X, X, X, X, X, X}, + {6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, X, X, X, X}, + {7, 2, 3, 6, 2, 7, X, X, X, X, X, X, X, X, X, X}, + {7, 0, 8, 7, 6, 0, 6, 2, 0, X, X, X, X, X, X, X}, + {2, 7, 6, 2, 3, 7, 0, 1, 9, X, X, X, X, X, X, X}, + {1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, X, X, X, X}, + {10, 7, 6, 10, 1, 7, 1, 3, 7, X, X, X, X, X, X, X}, + {10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, X, X, X, X}, + {0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, X, X, X, X}, + {7, 6, 10, 7, 10, 8, 8, 10, 9, X, X, X, X, X, X, X}, + {6, 8, 4, 11, 8, 6, X, X, X, X, X, X, X, X, X, X}, + {3, 
6, 11, 3, 0, 6, 0, 4, 6, X, X, X, X, X, X, X}, + {8, 6, 11, 8, 4, 6, 9, 0, 1, X, X, X, X, X, X, X}, + {9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, X, X, X, X}, + {6, 8, 4, 6, 11, 8, 2, 10, 1, X, X, X, X, X, X, X}, + {1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, X, X, X, X}, + {4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, X, X, X, X}, + {10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, X}, + {8, 2, 3, 8, 4, 2, 4, 6, 2, X, X, X, X, X, X, X}, + {0, 4, 2, 4, 6, 2, X, X, X, X, X, X, X, X, X, X}, + {1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, X, X, X, X}, + {1, 9, 4, 1, 4, 2, 2, 4, 6, X, X, X, X, X, X, X}, + {8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, X, X, X, X}, + {10, 1, 0, 10, 0, 6, 6, 0, 4, X, X, X, X, X, X, X}, + {4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, X}, + {10, 9, 4, 6, 10, 4, X, X, X, X, X, X, X, X, X, X}, + {4, 9, 5, 7, 6, 11, X, X, X, X, X, X, X, X, X, X}, + {0, 8, 3, 4, 9, 5, 11, 7, 6, X, X, X, X, X, X, X}, + {5, 0, 1, 5, 4, 0, 7, 6, 11, X, X, X, X, X, X, X}, + {11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, X, X, X, X}, + {9, 5, 4, 10, 1, 2, 7, 6, 11, X, X, X, X, X, X, X}, + {6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, X, X, X, X}, + {7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, X, X, X, X}, + {3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, X}, + {7, 2, 3, 7, 6, 2, 5, 4, 9, X, X, X, X, X, X, X}, + {9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, X, X, X, X}, + {3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, X, X, X, X}, + {6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, X}, + {9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, X, X, X, X}, + {1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, X}, + {4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, X}, + {7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, X, X, X, X}, + {6, 9, 5, 6, 11, 9, 11, 8, 9, X, X, X, X, X, X, X}, + {3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, X, X, X, X}, + {0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, X, X, X, X}, + {6, 11, 3, 6, 3, 5, 5, 3, 1, X, X, X, X, X, X, X}, + {1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, X, X, X, X}, + {0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, X}, + {11, 8, 5, 11, 5, 6, 
8, 0, 5, 10, 5, 2, 0, 2, 5, X}, + {6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, X, X, X, X}, + {5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, X, X, X, X}, + {9, 5, 6, 9, 6, 0, 0, 6, 2, X, X, X, X, X, X, X}, + {1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, X}, + {1, 5, 6, 2, 1, 6, X, X, X, X, X, X, X, X, X, X}, + {1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, X}, + {10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, X, X, X, X}, + {0, 3, 8, 5, 6, 10, X, X, X, X, X, X, X, X, X, X}, + {10, 5, 6, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {11, 5, 10, 7, 5, 11, X, X, X, X, X, X, X, X, X, X}, + {11, 5, 10, 11, 7, 5, 8, 3, 0, X, X, X, X, X, X, X}, + {5, 11, 7, 5, 10, 11, 1, 9, 0, X, X, X, X, X, X, X}, + {10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, X, X, X, X}, + {11, 1, 2, 11, 7, 1, 7, 5, 1, X, X, X, X, X, X, X}, + {0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, X, X, X, X}, + {9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, X, X, X, X}, + {7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, X}, + {2, 5, 10, 2, 3, 5, 3, 7, 5, X, X, X, X, X, X, X}, + {8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, X, X, X, X}, + {9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, X, X, X, X}, + {9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, X}, + {1, 3, 5, 3, 7, 5, X, X, X, X, X, X, X, X, X, X}, + {0, 8, 7, 0, 7, 1, 1, 7, 5, X, X, X, X, X, X, X}, + {9, 0, 3, 9, 3, 5, 5, 3, 7, X, X, X, X, X, X, X}, + {9, 8, 7, 5, 9, 7, X, X, X, X, X, X, X, X, X, X}, + {5, 8, 4, 5, 10, 8, 10, 11, 8, X, X, X, X, X, X, X}, + {5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, X, X, X, X}, + {0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, X, X, X, X}, + {10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, X}, + {2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, X, X, X, X}, + {0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, X}, + {0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, X}, + {9, 4, 5, 2, 11, 3, X, X, X, X, X, X, X, X, X, X}, + {2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, X, X, X, X}, + {5, 10, 2, 5, 2, 4, 4, 2, 0, X, X, X, X, X, X, X}, + {3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, X}, + {5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 
2, X, X, X, X}, + {8, 4, 5, 8, 5, 3, 3, 5, 1, X, X, X, X, X, X, X}, + {0, 4, 5, 1, 0, 5, X, X, X, X, X, X, X, X, X, X}, + {8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, X, X, X, X}, + {9, 4, 5, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {4, 11, 7, 4, 9, 11, 9, 10, 11, X, X, X, X, X, X, X}, + {0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, X, X, X, X}, + {1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, X, X, X, X}, + {3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, X}, + {4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, X, X, X, X}, + {9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, X}, + {11, 7, 4, 11, 4, 2, 2, 4, 0, X, X, X, X, X, X, X}, + {11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, X, X, X, X}, + {2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, X, X, X, X}, + {9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, X}, + {3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, X}, + {1, 10, 2, 8, 7, 4, X, X, X, X, X, X, X, X, X, X}, + {4, 9, 1, 4, 1, 7, 7, 1, 3, X, X, X, X, X, X, X}, + {4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, X, X, X, X}, + {4, 0, 3, 7, 4, 3, X, X, X, X, X, X, X, X, X, X}, + {4, 8, 7, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {9, 10, 8, 10, 11, 8, X, X, X, X, X, X, X, X, X, X}, + {3, 0, 9, 3, 9, 11, 11, 9, 10, X, X, X, X, X, X, X}, + {0, 1, 10, 0, 10, 8, 8, 10, 11, X, X, X, X, X, X, X}, + {3, 1, 10, 11, 3, 10, X, X, X, X, X, X, X, X, X, X}, + {1, 2, 11, 1, 11, 9, 9, 11, 8, X, X, X, X, X, X, X}, + {3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, X, X, X, X}, + {0, 2, 11, 8, 0, 11, X, X, X, X, X, X, X, X, X, X}, + {3, 2, 11, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {2, 3, 8, 2, 8, 10, 10, 8, 9, X, X, X, X, X, X, X}, + {9, 10, 2, 0, 9, 2, X, X, X, X, X, X, X, X, X, X}, + {2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, X, X, X, X}, + {1, 10, 2, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {1, 3, 8, 9, 1, 8, X, X, X, X, X, X, X, X, X, X}, + {0, 9, 1, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {0, 3, 8, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X}}; +#undef X + +// Table mapping each edge to the 
corresponding cube vertices offsets +const uint _EDGE_TO_VERTICES[12][2] = { + {0, 1}, + {1, 5}, + {4, 5}, + {0, 4}, + {2, 3}, + {3, 7}, + {6, 7}, + {2, 6}, + {0, 2}, + {1, 3}, + {5, 7}, + {4, 6}, +}; + +// Table mapping from 0-7 to v0-v7 in cube.vertices +const int _INDEX_TABLE[8] = {0, 1, 5, 4, 2, 3, 7, 6}; diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/mesh_normal_consistency/mesh_normal_consistency.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/mesh_normal_consistency/mesh_normal_consistency.h new file mode 100644 index 0000000000000000000000000000000000000000..17795ae1eb4b6bceb8a9960bc6d7523eb3c2acb6 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/mesh_normal_consistency/mesh_normal_consistency.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once +#include +#include "utils/pytorch3d_cutils.h" + +// For mesh_normal_consistency, find pairs of vertices opposite the same edge. +// +// Args: +// edge_num: int64 Tensor of shape (E,) giving the number of vertices +// corresponding to each edge. +// +// Returns: +// pairs: int64 Tensor of shape (N,2) + +at::Tensor MeshNormalConsistencyFindVerticesCpu(const at::Tensor& edge_num); + +// Exposed implementation. 
+at::Tensor MeshNormalConsistencyFindVertices(const at::Tensor& edge_num) { + if (edge_num.is_cuda()) { + AT_ERROR("This function needs a CPU tensor."); + } + return MeshNormalConsistencyFindVerticesCpu(edge_num); +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/mesh_normal_consistency/mesh_normal_consistency_cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/mesh_normal_consistency/mesh_normal_consistency_cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1b0d5e6ea0a76cb2aac5e3f6ddb8eca77635da97 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/mesh_normal_consistency/mesh_normal_consistency_cpu.cpp @@ -0,0 +1,53 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include + +at::Tensor MeshNormalConsistencyFindVerticesCpu(const at::Tensor& edge_num) { + // We take a LongTensor of shape (E,) giving the number of things intersecting + // each edge. The things are taken to be numbered in order. + // (In fact, the "things" are opposite vertices to edges, renumbered). + // We return a tensor of shape (?, 2) where for every pair of things which + // intersect the same edge there is a row of their numbers in the output. 
+ + // Example possible inputs and outputs (order of output is not specified): + // [1,0,1,1,0] => [[]] + // [3] => [[0,1], [0,2], [1,2]] + // [0,3] => [[0,1], [0,2], [1,2]] + // [1,3] => [[1,2], [1,3], [2,3]] + //[1,0,2,1,0,2] => [[1,2], [4,5]] + + const auto num_edges = edge_num.size(0); + auto edges_a = edge_num.accessor(); + + int64_t vert_idx = 0; + std::vector> pairs; + for (int64_t i_edge = 0; i_edge < num_edges; ++i_edge) { + int64_t e = edges_a[i_edge]; + for (int64_t j = 0; j < e; ++j) { + for (int64_t i = 0; i < j; ++i) { + pairs.emplace_back(vert_idx + i, vert_idx + j); + } + } + vert_idx += e; + } + + // Convert from std::vector by copying over the items to a new empty torch + // tensor. + auto pairs_tensor = at::empty({(int64_t)pairs.size(), 2}, edge_num.options()); + auto pairs_a = pairs_tensor.accessor(); + for (int64_t i_pair = 0; i_pair < pairs.size(); ++i_pair) { + auto accessor = pairs_a[i_pair]; + accessor[0] = pairs[i_pair].first; + accessor[1] = pairs[i_pair].second; + } + + return pairs_tensor; +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor.cu new file mode 100644 index 0000000000000000000000000000000000000000..94f22c18431bb8bc4557584acdd5894155a17e37 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor.cu @@ -0,0 +1,241 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include +#include +#include + +// Kernel for inputs_packed of shape (F, D), where D > 1 +template +__global__ void PackedToPaddedKernel( + const scalar_t* __restrict__ inputs_packed, + const int64_t* __restrict__ first_idxs, + scalar_t* __restrict__ inputs_padded, + const size_t batch_size, + const size_t max_size, + const size_t num_inputs, + const size_t D) { + // Batch elements split evenly across blocks (num blocks = batch_size) and + // values for each element split across threads in the block. Each thread adds + // the values of its respective input elements to the global inputs_padded + // tensor. + const size_t tid = threadIdx.x; + const size_t batch_idx = blockIdx.x; + + const int64_t start = first_idxs[batch_idx]; + const int64_t end = + batch_idx + 1 < batch_size ? first_idxs[batch_idx + 1] : num_inputs; + const int num = end - start; + for (size_t f = tid; f < num; f += blockDim.x) { + for (size_t j = 0; j < D; ++j) { + inputs_padded[batch_idx * max_size * D + f * D + j] = + inputs_packed[(start + f) * D + j]; + } + } +} + +// Kernel for inputs of shape (F, 1) +template +__global__ void PackedToPaddedKernelD1( + const scalar_t* __restrict__ inputs_packed, + const int64_t* __restrict__ first_idxs, + scalar_t* __restrict__ inputs_padded, + const size_t batch_size, + const size_t max_size, + const size_t num_inputs) { + // Batch elements split evenly across blocks (num blocks = batch_size) and + // values for each element split across threads in the block. Each thread adds + // the values of its respective input elements to the global inputs_padded + // tensor. + const size_t tid = threadIdx.x; + const size_t batch_idx = blockIdx.x; + + const int64_t start = first_idxs[batch_idx]; + const int64_t end = + batch_idx + 1 < batch_size ? 
first_idxs[batch_idx + 1] : num_inputs; + const int num = end - start; + for (size_t f = tid; f < num; f += blockDim.x) { + inputs_padded[batch_idx * max_size + f] = inputs_packed[start + f]; + } +} + +// Kernel for inputs_padded of shape (B, F, D), where D > 1 +template +__global__ void PaddedToPackedKernel( + const scalar_t* __restrict__ inputs_padded, + const int64_t* __restrict__ first_idxs, + scalar_t* __restrict__ inputs_packed, + const size_t batch_size, + const size_t max_size, + const size_t num_inputs, + const size_t D) { + // Batch elements split evenly across blocks (num blocks = batch_size) and + // values for each element split across threads in the block. Each thread adds + // the values of its respective input elements to the global inputs_packed + // tensor. + const size_t tid = threadIdx.x; + const size_t batch_idx = blockIdx.x; + + const int64_t start = first_idxs[batch_idx]; + const int64_t end = + batch_idx + 1 < batch_size ? first_idxs[batch_idx + 1] : num_inputs; + const int num = end - start; + for (size_t f = tid; f < num; f += blockDim.x) { + for (size_t j = 0; j < D; ++j) { + inputs_packed[(start + f) * D + j] = + inputs_padded[batch_idx * max_size * D + f * D + j]; + } + } +} + +// Kernel for inputs_padded of shape (B, F, 1) +template +__global__ void PaddedToPackedKernelD1( + const scalar_t* __restrict__ inputs_padded, + const int64_t* __restrict__ first_idxs, + scalar_t* __restrict__ inputs_packed, + const size_t batch_size, + const size_t max_size, + const size_t num_inputs) { + // Batch elements split evenly across blocks (num blocks = batch_size) and + // values for each element split across threads in the block. Each thread adds + // the values of its respective input elements to the global inputs_packed + // tensor. + const size_t tid = threadIdx.x; + const size_t batch_idx = blockIdx.x; + + const int64_t start = first_idxs[batch_idx]; + const int64_t end = + batch_idx + 1 < batch_size ? 
first_idxs[batch_idx + 1] : num_inputs; + const int num = end - start; + for (size_t f = tid; f < num; f += blockDim.x) { + inputs_packed[start + f] = inputs_padded[batch_idx * max_size + f]; + } +} + +at::Tensor PackedToPaddedCuda( + const at::Tensor inputs_packed, + const at::Tensor first_idxs, + const int64_t max_size) { + // Check inputs are on the same device + at::TensorArg inputs_packed_t{inputs_packed, "inputs_packed", 1}, + first_idxs_t{first_idxs, "first_idxs", 2}; + at::CheckedFrom c = "PackedToPaddedCuda"; + at::checkAllSameGPU(c, {inputs_packed_t, first_idxs_t}); + + // Set the device for the kernel launch based on the device of the input + at::cuda::CUDAGuard device_guard(inputs_packed.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + const int64_t num_inputs = inputs_packed.size(0); + const int64_t batch_size = first_idxs.size(0); + + TORCH_CHECK( + inputs_packed.dim() == 2, "inputs_packed must be a 2-dimensional tensor"); + const int64_t D = inputs_packed.size(1); + at::Tensor inputs_padded = + at::zeros({batch_size, max_size, D}, inputs_packed.options()); + + if (inputs_padded.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return inputs_padded; + } + + const int threads = 512; + const int blocks = batch_size; + if (D == 1) { + AT_DISPATCH_FLOATING_TYPES( + inputs_packed.scalar_type(), "packed_to_padded_d1_kernel", ([&] { + PackedToPaddedKernelD1<<>>( + inputs_packed.contiguous().data_ptr(), + first_idxs.contiguous().data_ptr(), + inputs_padded.data_ptr(), + batch_size, + max_size, + num_inputs); + })); + } else { + AT_DISPATCH_FLOATING_TYPES( + inputs_packed.scalar_type(), "packed_to_padded_kernel", ([&] { + PackedToPaddedKernel<<>>( + inputs_packed.contiguous().data_ptr(), + first_idxs.contiguous().data_ptr(), + inputs_padded.data_ptr(), + batch_size, + max_size, + num_inputs, + D); + })); + } + + AT_CUDA_CHECK(cudaGetLastError()); + return inputs_padded; +} + +at::Tensor PaddedToPackedCuda( + const at::Tensor 
inputs_padded, + const at::Tensor first_idxs, + const int64_t num_inputs) { + // Check inputs are on the same device + at::TensorArg inputs_padded_t{inputs_padded, "inputs_padded", 1}, + first_idxs_t{first_idxs, "first_idxs", 2}; + at::CheckedFrom c = "PaddedToPackedCuda"; + at::checkAllSameGPU(c, {inputs_padded_t, first_idxs_t}); + + // Set the device for the kernel launch based on the device of the input + at::cuda::CUDAGuard device_guard(inputs_padded.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + const int64_t batch_size = inputs_padded.size(0); + const int64_t max_size = inputs_padded.size(1); + + TORCH_CHECK(batch_size == first_idxs.size(0), "sizes mismatch"); + TORCH_CHECK( + inputs_padded.dim() == 3, + "inputs_padded must be a 3-dimensional tensor"); + const int64_t D = inputs_padded.size(2); + + at::Tensor inputs_packed = + at::zeros({num_inputs, D}, inputs_padded.options()); + + if (inputs_packed.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return inputs_packed; + } + + const int threads = 512; + const int blocks = batch_size; + + if (D == 1) { + AT_DISPATCH_FLOATING_TYPES( + inputs_padded.scalar_type(), "padded_to_packed_d1_kernel", ([&] { + PaddedToPackedKernelD1<<>>( + inputs_padded.contiguous().data_ptr(), + first_idxs.contiguous().data_ptr(), + inputs_packed.data_ptr(), + batch_size, + max_size, + num_inputs); + })); + } else { + AT_DISPATCH_FLOATING_TYPES( + inputs_padded.scalar_type(), "padded_to_packed_kernel", ([&] { + PaddedToPackedKernel<<>>( + inputs_padded.contiguous().data_ptr(), + first_idxs.contiguous().data_ptr(), + inputs_packed.data_ptr(), + batch_size, + max_size, + num_inputs, + D); + })); + } + + AT_CUDA_CHECK(cudaGetLastError()); + return inputs_packed; +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor_cpu.cpp 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor_cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c9176a1afd5e6736f938dc938dfc1d62c1052ddc --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor_cpu.cpp @@ -0,0 +1,70 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include + +at::Tensor PackedToPaddedCpu( + const at::Tensor inputs_packed, + const at::Tensor first_idxs, + const int64_t max_size) { + const int64_t num_inputs = inputs_packed.size(0); + const int64_t batch_size = first_idxs.size(0); + + AT_ASSERTM( + inputs_packed.dim() == 2, "inputs_packed must be a 2-dimensional tensor"); + const int64_t D = inputs_packed.size(1); + + torch::Tensor inputs_padded = + torch::zeros({batch_size, max_size, D}, inputs_packed.options()); + + auto inputs_packed_a = inputs_packed.accessor(); + auto first_idxs_a = first_idxs.accessor(); + auto inputs_padded_a = inputs_padded.accessor(); + + for (int b = 0; b < batch_size; ++b) { + const int64_t start = first_idxs_a[b]; + const int64_t end = b + 1 < batch_size ? 
first_idxs_a[b + 1] : num_inputs; + const int64_t num = end - start; + for (int i = 0; i < num; ++i) { + for (int j = 0; j < D; ++j) { + inputs_padded_a[b][i][j] = inputs_packed_a[start + i][j]; + } + } + } + return inputs_padded; +} + +at::Tensor PaddedToPackedCpu( + const at::Tensor inputs_padded, + const at::Tensor first_idxs, + const int64_t num_inputs) { + const int64_t batch_size = inputs_padded.size(0); + + AT_ASSERTM( + inputs_padded.dim() == 3, "inputs_padded must be a 3-dimensional tensor"); + const int64_t D = inputs_padded.size(2); + + torch::Tensor inputs_packed = + torch::zeros({num_inputs, D}, inputs_padded.options()); + + auto inputs_padded_a = inputs_padded.accessor(); + auto first_idxs_a = first_idxs.accessor(); + auto inputs_packed_a = inputs_packed.accessor(); + + for (int b = 0; b < batch_size; ++b) { + const int64_t start = first_idxs_a[b]; + const int64_t end = b + 1 < batch_size ? first_idxs_a[b + 1] : num_inputs; + const int64_t num = end - start; + for (int i = 0; i < num; ++i) { + for (int j = 0; j < D; ++j) { + inputs_packed_a[start + i][j] = inputs_padded_a[b][i][j]; + } + } + } + return inputs_packed; +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/constants.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/constants.h new file mode 100644 index 0000000000000000000000000000000000000000..a2eee6217158d3a2e7a3e92a52e5afa4107494ab --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/constants.h @@ -0,0 +1,19 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#ifndef PULSAR_NATIVE_CONSTANTS_H_ +#define PULSAR_NATIVE_CONSTANTS_H_ + +#define EPS 1E-6 +#define FEPS 1E-6f +#define MAX_FLOAT 3.4E38f +#define MAX_INT 2147483647 +#define MAX_UINT 4294967295u +#define MAX_USHORT 65535u + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/README.md b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/README.md new file mode 100644 index 0000000000000000000000000000000000000000..60c5d07cba3b8d403693e9aa3db2a0b74f66c472 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/README.md @@ -0,0 +1,5 @@ +# CUDA device compilation units + +This folder contains `.cu` files to create compilation units +for device-specific functions. See `../include/README.md` for +more information. diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/commands.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/commands.h new file mode 100644 index 0000000000000000000000000000000000000000..00e6f37852169c6dd3ccaaf02d0381039fe2edbc --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/commands.h @@ -0,0 +1,505 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_CUDA_COMMANDS_H_ +#define PULSAR_NATIVE_CUDA_COMMANDS_H_ + +// Definitions for GPU commands. 
+#include +#include +namespace cg = cooperative_groups; + +#ifdef __DRIVER_TYPES_H__ +#ifndef DEVICE_RESET +#define DEVICE_RESET cudaDeviceReset(); +#endif +#else +#ifndef DEVICE_RESET +#define DEVICE_RESET +#endif +#endif + +#define HANDLECUDA(CMD) CMD +// handleCudaError((CMD), __FILE__, __LINE__) +inline void +handleCudaError(const cudaError_t err, const char* file, const int line) { + if (err != cudaSuccess) { +#ifndef __NVCC__ + fprintf( + stderr, + "%s(%i) : getLastCudaError() CUDA error :" + " (%d) %s.\n", + file, + line, + static_cast(err), + cudaGetErrorString(err)); + DEVICE_RESET + exit(1); +#endif + } +} +inline void +getLastCudaError(const char* errorMessage, const char* file, const int line) { + cudaError_t err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "Error: %s.", errorMessage); + handleCudaError(err, file, line); + } +} + +#define ALIGN(VAL) __align__(VAL) +#define SYNC() HANDLECUDE(cudaDeviceSynchronize()) +#define THREADFENCE_B() __threadfence_block() +#define SHFL_SYNC(a, b, c) __shfl_sync((a), (b), (c)) +#define SHARED __shared__ +#define ACTIVEMASK() __activemask() +#define BALLOT(mask, val) __ballot_sync((mask), val) +/** + * Find the cumulative sum within a warp up to the current + * thread lane, with each mask thread contributing base. + */ +template +DEVICE T +WARP_CUMSUM(const cg::coalesced_group& group, const uint& mask, const T& base) { + T ret = base; + T shfl_val; + shfl_val = __shfl_down_sync(mask, ret, 1u); // Deactivate the rightmost lane. 
+ ret += (group.thread_rank() < 31) * shfl_val; + shfl_val = __shfl_down_sync(mask, ret, 2u); + ret += (group.thread_rank() < 30) * shfl_val; + shfl_val = __shfl_down_sync(mask, ret, 4u); // ...4 + ret += (group.thread_rank() < 28) * shfl_val; + shfl_val = __shfl_down_sync(mask, ret, 8u); // ...8 + ret += (group.thread_rank() < 24) * shfl_val; + shfl_val = __shfl_down_sync(mask, ret, 16u); // ...16 + ret += (group.thread_rank() < 16) * shfl_val; + return ret; +} + +template +DEVICE T +WARP_MAX(const cg::coalesced_group& group, const uint& mask, const T& base) { + T ret = base; + ret = max(ret, __shfl_down_sync(mask, ret, 16u)); + ret = max(ret, __shfl_down_sync(mask, ret, 8u)); + ret = max(ret, __shfl_down_sync(mask, ret, 4u)); + ret = max(ret, __shfl_down_sync(mask, ret, 2u)); + ret = max(ret, __shfl_down_sync(mask, ret, 1u)); + return ret; +} + +template +DEVICE T +WARP_SUM(const cg::coalesced_group& group, const uint& mask, const T& base) { + T ret = base; + ret = ret + __shfl_down_sync(mask, ret, 16u); + ret = ret + __shfl_down_sync(mask, ret, 8u); + ret = ret + __shfl_down_sync(mask, ret, 4u); + ret = ret + __shfl_down_sync(mask, ret, 2u); + ret = ret + __shfl_down_sync(mask, ret, 1u); + return ret; +} + +INLINE DEVICE float3 WARP_SUM_FLOAT3( + const cg::coalesced_group& group, + const uint& mask, + const float3& base) { + float3 ret = base; + ret.x = WARP_SUM(group, mask, base.x); + ret.y = WARP_SUM(group, mask, base.y); + ret.z = WARP_SUM(group, mask, base.z); + return ret; +} + +// Floating point. 
+// #define FMUL(a, b) __fmul_rn((a), (b)) +#define FMUL(a, b) ((a) * (b)) +#define FDIV(a, b) __fdiv_rn((a), (b)) +// #define FSUB(a, b) __fsub_rn((a), (b)) +#define FSUB(a, b) ((a) - (b)) +#define FADD(a, b) __fadd_rn((a), (b)) +#define FSQRT(a) __fsqrt_rn(a) +#define FEXP(a) fasterexp(a) +#define FLN(a) fasterlog(a) +#define FPOW(a, b) __powf((a), (b)) +#define FMAX(a, b) fmax((a), (b)) +#define FMIN(a, b) fmin((a), (b)) +#define FCEIL(a) ceilf(a) +#define FFLOOR(a) floorf(a) +#define FROUND(x) nearbyintf(x) +#define FSATURATE(x) __saturatef(x) +#define FABS(a) abs(a) +#define IASF(a, loc) (loc) = __int_as_float(a) +#define FASI(a, loc) (loc) = __float_as_int(a) +#define FABSLEQAS(a, b, c) \ + ((a) <= (b) ? FSUB((b), (a)) <= (c) : FSUB((a), (b)) < (c)) +/** Calculates x*y+z. */ +#define FMA(x, y, z) __fmaf_rn((x), (y), (z)) +#define I2F(a) __int2float_rn(a) +#define FRCP(x) __frcp_rn(x) +__device__ static float atomicMax(float* address, float val) { + int* address_as_i = (int*)address; + int old = *address_as_i, assumed; + do { + assumed = old; + old = ::atomicCAS( + address_as_i, + assumed, + __float_as_int(::fmaxf(val, __int_as_float(assumed)))); + } while (assumed != old); + return __int_as_float(old); +} +__device__ static float atomicMin(float* address, float val) { + int* address_as_i = (int*)address; + int old = *address_as_i, assumed; + do { + assumed = old; + old = ::atomicCAS( + address_as_i, + assumed, + __float_as_int(::fminf(val, __int_as_float(assumed)))); + } while (assumed != old); + return __int_as_float(old); +} +#define DMAX(a, b) FMAX(a, b) +#define DMIN(a, b) FMIN(a, b) +#define DSQRT(a) sqrt(a) +#define DSATURATE(a) DMIN(1., DMAX(0., (a))) +// half +#define HADD(a, b) __hadd((a), (b)) +#define HSUB2(a, b) __hsub2((a), (b)) +#define HMUL2(a, b) __hmul2((a), (b)) +#define HSQRT(a) hsqrt(a) + +// uint. 
+#define CLZ(VAL) __clz(VAL) +#define POPC(a) __popc(a) +// +// +// +// +// +// +// +// +// +#define ATOMICADD(PTR, VAL) atomicAdd((PTR), (VAL)) +#define ATOMICADD_F3(PTR, VAL) \ + ATOMICADD(&((PTR)->x), VAL.x); \ + ATOMICADD(&((PTR)->y), VAL.y); \ + ATOMICADD(&((PTR)->z), VAL.z); +#if (CUDART_VERSION >= 10000) && (__CUDA_ARCH__ >= 600) +#define ATOMICADD_B(PTR, VAL) atomicAdd_block((PTR), (VAL)) +#else +#define ATOMICADD_B(PTR, VAL) ATOMICADD(PTR, VAL) +#endif +// +// +// +// +// int. +#define IMIN(a, b) min((a), (b)) +#define IMAX(a, b) max((a), (b)) +#define IABS(a) abs(a) + +// Checks. +// like TORCH_CHECK_ARG in PyTorch > 1.10 +#define ARGCHECK(cond, argN, ...) \ + TORCH_CHECK(cond, "invalid argument ", argN, ": ", __VA_ARGS__) + +// Math. +#define NORM3DF(x, y, z) norm3df(x, y, z) +#define RNORM3DF(x, y, z) rnorm3df(x, y, z) + +// High level. +#define GET_SORT_WS_SIZE(RES_PTR, KEY_TYPE, VAL_TYPE, NUM_OBJECTS) \ + cub::DeviceRadixSort::SortPairsDescending( \ + (void*)NULL, \ + *(RES_PTR), \ + reinterpret_cast(NULL), \ + reinterpret_cast(NULL), \ + reinterpret_cast(NULL), \ + reinterpret_cast(NULL), \ + (NUM_OBJECTS)); +#define GET_REDUCE_WS_SIZE(RES_PTR, TYPE, REDUCE_OP, NUM_OBJECTS) \ + { \ + TYPE init = TYPE(); \ + cub::DeviceReduce::Reduce( \ + (void*)NULL, \ + *(RES_PTR), \ + (TYPE*)NULL, \ + (TYPE*)NULL, \ + (NUM_OBJECTS), \ + (REDUCE_OP), \ + init); \ + } +#define GET_SELECT_WS_SIZE( \ + RES_PTR, TYPE_SELECTOR, TYPE_SELECTION, NUM_OBJECTS) \ + { \ + cub::DeviceSelect::Flagged( \ + (void*)NULL, \ + *(RES_PTR), \ + (TYPE_SELECTION*)NULL, \ + (TYPE_SELECTOR*)NULL, \ + (TYPE_SELECTION*)NULL, \ + (int*)NULL, \ + (NUM_OBJECTS)); \ + } +#define GET_SUM_WS_SIZE(RES_PTR, TYPE_SUM, NUM_OBJECTS) \ + { \ + cub::DeviceReduce::Sum( \ + (void*)NULL, \ + *(RES_PTR), \ + (TYPE_SUM*)NULL, \ + (TYPE_SUM*)NULL, \ + NUM_OBJECTS); \ + } +#define GET_MM_WS_SIZE(RES_PTR, TYPE, NUM_OBJECTS) \ + { \ + TYPE init = TYPE(); \ + cub::DeviceReduce::Max( \ + (void*)NULL, *(RES_PTR), 
(TYPE*)NULL, (TYPE*)NULL, (NUM_OBJECTS)); \ + } +#define SORT_DESCENDING( \ + TMPN1, SORT_PTR, SORTED_PTR, VAL_PTR, VAL_SORTED_PTR, NUM_OBJECTS) \ + void* TMPN1 = NULL; \ + size_t TMPN1##_bytes = 0; \ + cub::DeviceRadixSort::SortPairsDescending( \ + TMPN1, \ + TMPN1##_bytes, \ + (SORT_PTR), \ + (SORTED_PTR), \ + (VAL_PTR), \ + (VAL_SORTED_PTR), \ + (NUM_OBJECTS)); \ + HANDLECUDA(cudaMalloc(&TMPN1, TMPN1##_bytes)); \ + cub::DeviceRadixSort::SortPairsDescending( \ + TMPN1, \ + TMPN1##_bytes, \ + (SORT_PTR), \ + (SORTED_PTR), \ + (VAL_PTR), \ + (VAL_SORTED_PTR), \ + (NUM_OBJECTS)); \ + HANDLECUDA(cudaFree(TMPN1)); +#define SORT_DESCENDING_WS( \ + TMPN1, \ + SORT_PTR, \ + SORTED_PTR, \ + VAL_PTR, \ + VAL_SORTED_PTR, \ + NUM_OBJECTS, \ + WORKSPACE_PTR, \ + WORKSPACE_BYTES) \ + cub::DeviceRadixSort::SortPairsDescending( \ + (WORKSPACE_PTR), \ + (WORKSPACE_BYTES), \ + (SORT_PTR), \ + (SORTED_PTR), \ + (VAL_PTR), \ + (VAL_SORTED_PTR), \ + (NUM_OBJECTS)); +#define SORT_ASCENDING_WS( \ + SORT_PTR, \ + SORTED_PTR, \ + VAL_PTR, \ + VAL_SORTED_PTR, \ + NUM_OBJECTS, \ + WORKSPACE_PTR, \ + WORKSPACE_BYTES, \ + STREAM) \ + cub::DeviceRadixSort::SortPairs( \ + (WORKSPACE_PTR), \ + (WORKSPACE_BYTES), \ + (SORT_PTR), \ + (SORTED_PTR), \ + (VAL_PTR), \ + (VAL_SORTED_PTR), \ + (NUM_OBJECTS), \ + 0, \ + sizeof(*(SORT_PTR)) * 8, \ + (STREAM)); +#define SUM_WS( \ + SUM_PTR, OUT_PTR, NUM_OBJECTS, WORKSPACE_PTR, WORKSPACE_BYTES, STREAM) \ + cub::DeviceReduce::Sum( \ + (WORKSPACE_PTR), \ + (WORKSPACE_BYTES), \ + (SUM_PTR), \ + (OUT_PTR), \ + (NUM_OBJECTS), \ + (STREAM)); +#define MIN_WS( \ + MIN_PTR, OUT_PTR, NUM_OBJECTS, WORKSPACE_PTR, WORKSPACE_BYTES, STREAM) \ + cub::DeviceReduce::Min( \ + (WORKSPACE_PTR), \ + (WORKSPACE_BYTES), \ + (MIN_PTR), \ + (OUT_PTR), \ + (NUM_OBJECTS), \ + (STREAM)); +#define MAX_WS( \ + MAX_PTR, OUT_PTR, NUM_OBJECTS, WORKSPACE_PTR, WORKSPACE_BYTES, STREAM) \ + cub::DeviceReduce::Min( \ + (WORKSPACE_PTR), \ + (WORKSPACE_BYTES), \ + (MAX_PTR), \ + (OUT_PTR), \ + 
(NUM_OBJECTS), \ + (STREAM)); +// +// +// +// TODO: rewrite using nested contexts instead of temporary names. +#define REDUCE(REDUCE_PTR, RESULT_PTR, NUM_ITEMS, REDUCE_OP, REDUCE_INIT) \ + cub::DeviceReduce::Reduce( \ + TMPN1, \ + TMPN1##_bytes, \ + (REDUCE_PTR), \ + (RESULT_PTR), \ + (NUM_ITEMS), \ + (REDUCE_OP), \ + (REDUCE_INIT)); \ + HANDLECUDA(cudaMalloc(&TMPN1, TMPN1##_bytes)); \ + cub::DeviceReduce::Reduce( \ + TMPN1, \ + TMPN1##_bytes, \ + (REDUCE_PTR), \ + (RESULT_PTR), \ + (NUM_ITEMS), \ + (REDUCE_OP), \ + (REDUCE_INIT)); \ + HANDLECUDA(cudaFree(TMPN1)); +#define REDUCE_WS( \ + REDUCE_PTR, \ + RESULT_PTR, \ + NUM_ITEMS, \ + REDUCE_OP, \ + REDUCE_INIT, \ + WORKSPACE_PTR, \ + WORSPACE_BYTES, \ + STREAM) \ + cub::DeviceReduce::Reduce( \ + (WORKSPACE_PTR), \ + (WORSPACE_BYTES), \ + (REDUCE_PTR), \ + (RESULT_PTR), \ + (NUM_ITEMS), \ + (REDUCE_OP), \ + (REDUCE_INIT), \ + (STREAM)); +#define SELECT_FLAGS_WS( \ + FLAGS_PTR, \ + ITEM_PTR, \ + OUT_PTR, \ + NUM_SELECTED_PTR, \ + NUM_ITEMS, \ + WORKSPACE_PTR, \ + WORSPACE_BYTES, \ + STREAM) \ + cub::DeviceSelect::Flagged( \ + (WORKSPACE_PTR), \ + (WORSPACE_BYTES), \ + (ITEM_PTR), \ + (FLAGS_PTR), \ + (OUT_PTR), \ + (NUM_SELECTED_PTR), \ + (NUM_ITEMS), \ + stream = (STREAM)); + +#define COPY_HOST_DEV(PTR_D, PTR_H, TYPE, SIZE) \ + HANDLECUDA(cudaMemcpy( \ + (PTR_D), (PTR_H), sizeof(TYPE) * (SIZE), cudaMemcpyHostToDevice)) +#define COPY_DEV_HOST(PTR_H, PTR_D, TYPE, SIZE) \ + HANDLECUDA(cudaMemcpy( \ + (PTR_H), (PTR_D), sizeof(TYPE) * (SIZE), cudaMemcpyDeviceToHost)) +#define COPY_DEV_DEV(PTR_T, PTR_S, TYPE, SIZE) \ + HANDLECUDA(cudaMemcpy( \ + (PTR_T), (PTR_S), sizeof(TYPE) * (SIZE), cudaMemcpyDeviceToDevice)) +// +// We *must* use cudaMallocManaged for pointers on device that should +// interact with pytorch. However, this comes at a significant speed penalty. 
+// We're using plain CUDA pointers for the rendering operations and +// explicitly copy results to managed pointers wrapped for pytorch (see +// pytorch/util.h). +#define MALLOC(VAR, TYPE, SIZE) cudaMalloc(&(VAR), sizeof(TYPE) * (SIZE)) +#define FREE(PTR) HANDLECUDA(cudaFree(PTR)) +#define MEMSET(VAR, VAL, TYPE, SIZE, STREAM) \ + HANDLECUDA(cudaMemsetAsync((VAR), (VAL), sizeof(TYPE) * (SIZE), (STREAM))) + +#define LAUNCH_MAX_PARALLEL_1D(FUNC, N, STREAM, ...) \ + { \ + int64_t max_threads = \ + at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; \ + uint num_threads = min((N), max_threads); \ + uint num_blocks = iDivCeil((N), num_threads); \ + FUNC<<>>(__VA_ARGS__); \ + } +#define LAUNCH_PARALLEL_1D(FUNC, N, TN, STREAM, ...) \ + { \ + uint num_threads = min(static_cast(N), static_cast(TN)); \ + uint num_blocks = iDivCeil((N), num_threads); \ + FUNC<<>>(__VA_ARGS__); \ + } +#define LAUNCH_MAX_PARALLEL_2D(FUNC, NX, NY, STREAM, ...) \ + { \ + int64_t max_threads = \ + at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; \ + int64_t max_threads_sqrt = static_cast(sqrt(max_threads)); \ + dim3 num_threads, num_blocks; \ + num_threads.x = min((NX), max_threads_sqrt); \ + num_blocks.x = iDivCeil((NX), num_threads.x); \ + num_threads.y = min((NY), max_threads_sqrt); \ + num_blocks.y = iDivCeil((NY), num_threads.y); \ + num_threads.z = 1; \ + num_blocks.z = 1; \ + FUNC<<>>(__VA_ARGS__); \ + } +#define LAUNCH_PARALLEL_2D(FUNC, NX, NY, TX, TY, STREAM, ...) 
\ + { \ + dim3 num_threads, num_blocks; \ + num_threads.x = min((NX), (TX)); \ + num_blocks.x = iDivCeil((NX), num_threads.x); \ + num_threads.y = min((NY), (TY)); \ + num_blocks.y = iDivCeil((NY), num_threads.y); \ + num_threads.z = 1; \ + num_blocks.z = 1; \ + FUNC<<>>(__VA_ARGS__); \ + } + +#define GET_PARALLEL_IDX_1D(VARNAME, N) \ + const uint VARNAME = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; \ + if (VARNAME >= (N)) { \ + return; \ + } +#define GET_PARALLEL_IDS_2D(VAR_X, VAR_Y, WIDTH, HEIGHT) \ + const uint VAR_X = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; \ + const uint VAR_Y = __mul24(blockIdx.y, blockDim.y) + threadIdx.y; \ + if (VAR_X >= (WIDTH) || VAR_Y >= (HEIGHT)) \ + return; +#define END_PARALLEL() +#define END_PARALLEL_NORET() +#define END_PARALLEL_2D_NORET() +#define END_PARALLEL_2D() +#define RETURN_PARALLEL() return +#define CHECKLAUNCH() C10_CUDA_CHECK(cudaGetLastError()); +#define ISONDEVICE true +#define SYNCDEVICE() HANDLECUDA(cudaDeviceSynchronize()) +#define START_TIME(TN) \ + cudaEvent_t __time_start_##TN, __time_stop_##TN; \ + cudaEventCreate(&__time_start_##TN); \ + cudaEventCreate(&__time_stop_##TN); \ + cudaEventRecord(__time_start_##TN); +#define STOP_TIME(TN) cudaEventRecord(__time_stop_##TN); +#define GET_TIME(TN, TOPTR) \ + cudaEventSynchronize(__time_stop_##TN); \ + cudaEventElapsedTime((TOPTR), __time_start_##TN, __time_stop_##TN); +#define START_TIME_CU(TN) START_TIME(CN) +#define STOP_TIME_CU(TN) STOP_TIME(TN) +#define GET_TIME_CU(TN, TOPTR) GET_TIME(TN, TOPTR) + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.backward.gpu.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.backward.gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..e0da7b7020c0a3f5ae0647030282adf0e0103d39 --- /dev/null +++ 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.backward.gpu.cu @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.backward.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.backward_dbg.gpu.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.backward_dbg.gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..a95bb421d2d9b6bfec1a9286e035b042b0d9842c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.backward_dbg.gpu.cu @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.backward_dbg.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.calc_gradients.gpu.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.calc_gradients.gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..ff38b08e0dfe46e65a94039c8dec7da721d0421a --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.calc_gradients.gpu.cu @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. 
+ * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.calc_gradients.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.calc_signature.gpu.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.calc_signature.gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..81c72192eaa877038d9383cfdd0adf9a91e06f97 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.calc_signature.gpu.cu @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.calc_signature.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.construct.gpu.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.construct.gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..67583511aec2a6bd4dd8670aeb809939a3d2e19c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.construct.gpu.cu @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "../include/renderer.construct.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.create_selector.gpu.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.create_selector.gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..52e265bcb2ab8ca9e4d08d90d1dc4fef75294520 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.create_selector.gpu.cu @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.create_selector.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.destruct.gpu.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.destruct.gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..e61be93fa4c4893e6c4800f71cf49ef81c717ff0 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.destruct.gpu.cu @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "../include/renderer.destruct.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.fill_bg.gpu.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.fill_bg.gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..6c7b1a48b675b1dbe69992c81a8cbb8c8861911e --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.fill_bg.gpu.cu @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.fill_bg.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.forward.gpu.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.forward.gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..bfb42debeeaa7444daec94a88830c39825239170 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.forward.gpu.cu @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "../include/renderer.forward.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.norm_cam_gradients.gpu.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.norm_cam_gradients.gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..93d666324a4973f44ad4becbeecaf34e0c7b96e5 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.norm_cam_gradients.gpu.cu @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.norm_cam_gradients.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.norm_sphere_gradients.gpu.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.norm_sphere_gradients.gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..65339caea11645e4b7ba99a0af77c21b4ae2f738 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.norm_sphere_gradients.gpu.cu @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "../include/renderer.norm_sphere_gradients.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.render.gpu.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.render.gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..eb46adbafbc1c2a60dfb21fa9ce222828e53e31b --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/cuda/renderer.render.gpu.cu @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.render.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/global.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/global.h new file mode 100644 index 0000000000000000000000000000000000000000..3cea957e1f09d32494bc6e644e8929a22534270d --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/global.h @@ -0,0 +1,118 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#ifndef PULSAR_GLOBAL_H +#define PULSAR_GLOBAL_H + +#include "./constants.h" +#ifndef WIN32 +#include +#endif + +#if defined(_WIN64) || defined(_WIN32) +#define uint unsigned int +#define ushort unsigned short +#endif + +#include "./logging.h" // <- include before torch/extension.h + +#define MAX_GRAD_SPHERES 128 + +#ifdef __CUDACC__ +#define INLINE __forceinline__ +#define HOST __host__ +#define DEVICE __device__ +#define GLOBAL __global__ +#define RESTRICT __restrict__ +#define DEBUGBREAK() +#ifdef __NVCC_DIAG_PRAGMA_SUPPORT__ +#pragma nv_diag_suppress 1866 +#pragma nv_diag_suppress 2941 +#pragma nv_diag_suppress 2951 +#pragma nv_diag_suppress 2967 +#else +#pragma diag_suppress = attribute_not_allowed +#pragma diag_suppress = 1866 +#pragma diag_suppress = 2941 +#pragma diag_suppress = 2951 +#pragma diag_suppress = 2967 +#endif +#else // __CUDACC__ +#define INLINE inline +#define HOST +#define DEVICE +#define GLOBAL +#define RESTRICT +#define DEBUGBREAK() std::raise(SIGINT) +// Don't care about pytorch warnings; they shouldn't clutter our warnings. +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Weverything" +#include +#pragma clang diagnostic pop +#ifdef WITH_CUDA +#include +#include +#else +#ifndef cudaStream_t +typedef void* cudaStream_t; +#endif +struct int2 { + int x, y; +}; +struct ushort2 { + unsigned short x, y; +}; +struct float2 { + float x, y; +}; +struct float3 { + float x, y, z; +}; +inline float3 make_float3(const float& x, const float& y, const float& z) { + float3 res; + res.x = x; + res.y = y; + res.z = z; + return res; +} +#endif +namespace py = pybind11; + +inline bool operator==(const float3& a, const float3& b) { + return a.x == b.x && a.y == b.y && a.z == b.z; +} +#endif // __CUDACC__ +#define IHD INLINE HOST DEVICE + +// An assertion command that can be used on host and device. 
+#ifdef PULSAR_ASSERTIONS +#ifdef __CUDACC__ +#define PASSERT(VAL) \ + if (!(VAL)) { \ + printf( \ + "Pulsar assertion failed in %s, line %d: %s.\n", \ + __FILE__, \ + __LINE__, \ + #VAL); \ + } +#else +#define PASSERT(VAL) \ + if (!(VAL)) { \ + printf( \ + "Pulsar assertion failed in %s, line %d: %s.\n", \ + __FILE__, \ + __LINE__, \ + #VAL); \ + std::raise(SIGINT); \ + } +#endif +#else +#define PASSERT(VAL) +#endif + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/README.md b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/README.md new file mode 100644 index 0000000000000000000000000000000000000000..34f1bade9134da24f4038425c4b50fe1fffc45dc --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/README.md @@ -0,0 +1,5 @@ +# Device-specific host compilation units + +This folder contains `.cpp` files to create compilation units +for device specific functions. See `../include/README.md` for +more information. diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/commands.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/commands.h new file mode 100644 index 0000000000000000000000000000000000000000..a48eaaa901d557874ed84e92751da76a5bcbf6c3 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/commands.h @@ -0,0 +1,391 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#ifndef PULSAR_NATIVE_COMMANDS_H_ +#define PULSAR_NATIVE_COMMANDS_H_ + +#ifdef _MSC_VER +#include +#define __builtin_popcount (int)__popcnt +#endif + +// Definitions for CPU commands. +// #include +// #include + +namespace cg { +struct coalesced_group { + INLINE uint thread_rank() const { + return 0u; + } + INLINE uint size() const { + return 1u; + } + INLINE uint ballot(uint val) const { + return static_cast(val > 0); + } +}; + +struct thread_block { + INLINE uint thread_rank() const { + return 0u; + } + INLINE uint size() const { + return 1u; + } + INLINE void sync() const {} +}; + +INLINE coalesced_group coalesced_threads() { + coalesced_group ret; + return ret; +} + +INLINE thread_block this_thread_block() { + thread_block ret; + return ret; +} +} // namespace cg +#define SHFL_SYNC(a, b, c) (b) +template +T WARP_CUMSUM( + const cg::coalesced_group& group, + const uint& mask, + const T& base) { + return base; +} + +template +DEVICE T +WARP_MAX(const cg::coalesced_group& group, const uint& mask, const T& base) { + return base; +} + +template +DEVICE T +WARP_SUM(const cg::coalesced_group& group, const uint& mask, const T& base) { + return base; +} + +INLINE DEVICE float3 WARP_SUM_FLOAT3( + const cg::coalesced_group& group, + const uint& mask, + const float3& base) { + return base; +} + +#define ACTIVEMASK() (1u << 31) +#define ALIGN(VAL) +#define SYNC() +#define THREADFENCE_B() +#define BALLOT(mask, val) (val != 0) +#define SHARED +// Floating point. +#define FMAX(a, b) std::fmax((a), (b)) +#define FMIN(a, b) std::fmin((a), (b)) +INLINE float atomicMax(float* address, float val) { + *address = std::max(*address, val); + return *address; +} +INLINE float atomicMin(float* address, float val) { + *address = std::min(*address, val); + return *address; +} +#define FMUL(a, b) ((a) * (b)) +#define FDIV(a, b) ((a) / (b)) +#define FSUB(a, b) ((a) - (b)) +#define FABSLEQAS(a, b, c) \ + ((a) <= (b) ? 
FSUB((b), (a)) <= (c) : FSUB((a), (b)) < (c)) +#define FADD(a, b) ((a) + (b)) +#define FSQRT(a) sqrtf(a) +#define FEXP(a) fasterexp(a) +#define FLN(a) fasterlog(a) +#define FPOW(a, b) powf((a), (b)) +#define FROUND(x) roundf(x) +#define FCEIL(a) ceilf(a) +#define FFLOOR(a) floorf(a) +#define FSATURATE(x) std::max(0.f, std::min(1.f, x)) +#define FABS(a) abs(a) +#define FMA(x, y, z) ((x) * (y) + (z)) +#define I2F(a) static_cast(a) +#define FRCP(x) (1.f / (x)) +#define IASF(x, loc) memcpy(&(loc), &(x), sizeof(x)) +#define FASI(x, loc) memcpy(&(loc), &(x), sizeof(x)) +#define DMAX(a, b) std::max((a), (b)) +#define DMIN(a, b) std::min((a), (b)) +#define DSATURATE(a) DMIN(1., DMAX(0., (a))) +#define DSQRT(a) sqrt(a) +// +// +// +// +// +// +// +// +// +// +// +// +// uint. +#define CLZ(VAL) _clz(VAL) +template +INLINE T ATOMICADD(T* address, T val) { + T old = *address; + *address += val; + return old; +} +template +INLINE void ATOMICADD_F3(T* address, T val) { + ATOMICADD(&(address->x), val.x); + ATOMICADD(&(address->y), val.y); + ATOMICADD(&(address->z), val.z); +} +#define ATOMICADD_B(a, b) ATOMICADD((a), (b)) +#define POPC(a) __builtin_popcount(a) + +// int. +#define IMIN(a, b) std::min((a), (b)) +#define IMAX(a, b) std::max((a), (b)) +#define IABS(a) abs(a) + +// Checks. +// like TORCH_CHECK_ARG in PyTorch > 1.10 +#define ARGCHECK(cond, argN, ...) \ + TORCH_CHECK(cond, "invalid argument ", argN, ": ", __VA_ARGS__) + +// Math. +#define NORM3DF(x, y, z) sqrtf(x* x + y * y + z * z) +#define RNORM3DF(x, y, z) (1.f / sqrtf(x * x + y * y + z * z)) + +// High level. 
+#define PREFETCH(PTR) +#define GET_SORT_WS_SIZE(RES_PTR, KEY_TYPE, VAL_TYPE, NUM_OBJECTS) \ + *(RES_PTR) = 0; +#define GET_REDUCE_WS_SIZE(RES_PTR, TYPE, REDUCE_OP, NUM_OBJECTS) \ + *(RES_PTR) = 0; +#define GET_SELECT_WS_SIZE( \ + RES_PTR, TYPE_SELECTOR, TYPE_SELECTION, NUM_OBJECTS) \ + *(RES_PTR) = 0; +#define GET_SUM_WS_SIZE(RES_PTR, TYPE_SUM, NUM_OBJECTS) *(RES_PTR) = 0; +#define GET_MM_WS_SIZE(RES_PTR, TYPE, NUM_OBJECTS) *(RES_PTR) = 0; + +#define SORT_DESCENDING( \ + TMPN1, SORT_PTR, SORTED_PTR, VAL_PTR, VAL_SORTED_PTR, NUM_OBJECTS) \ + std::vector TMPN1(NUM_OBJECTS); \ + std::iota(TMPN1.begin(), TMPN1.end(), 0); \ + const auto TMPN1##_val_ptr = (SORT_PTR); \ + std::sort( \ + TMPN1.begin(), TMPN1.end(), [&TMPN1##_val_ptr](size_t i1, size_t i2) { \ + return TMPN1##_val_ptr[i1] > TMPN1##_val_ptr[i2]; \ + }); \ + for (int i = 0; i < (NUM_OBJECTS); ++i) { \ + (SORTED_PTR)[i] = (SORT_PTR)[TMPN1[i]]; \ + } \ + for (int i = 0; i < (NUM_OBJECTS); ++i) { \ + (VAL_SORTED_PTR)[i] = (VAL_PTR)[TMPN1[i]]; \ + } + +#define SORT_ASCENDING( \ + SORT_PTR, SORTED_PTR, VAL_PTR, VAL_SORTED_PTR, NUM_OBJECTS, STREAM) \ + { \ + std::vector TMPN1(NUM_OBJECTS); \ + std::iota(TMPN1.begin(), TMPN1.end(), 0); \ + const auto TMPN1_val_ptr = (SORT_PTR); \ + std::sort( \ + TMPN1.begin(), \ + TMPN1.end(), \ + [&TMPN1_val_ptr](size_t i1, size_t i2) -> bool { \ + return TMPN1_val_ptr[i1] < TMPN1_val_ptr[i2]; \ + }); \ + for (int i = 0; i < (NUM_OBJECTS); ++i) { \ + (SORTED_PTR)[i] = (SORT_PTR)[TMPN1[i]]; \ + } \ + for (int i = 0; i < (NUM_OBJECTS); ++i) { \ + (VAL_SORTED_PTR)[i] = (VAL_PTR)[TMPN1[i]]; \ + } \ + } + +#define SORT_DESCENDING_WS( \ + TMPN1, \ + SORT_PTR, \ + SORTED_PTR, \ + VAL_PTR, \ + VAL_SORTED_PTR, \ + NUM_OBJECTS, \ + WORSPACE_PTR, \ + WORKSPACE_SIZE) \ + SORT_DESCENDING( \ + TMPN1, SORT_PTR, SORTED_PTR, VAL_PTR, VAL_SORTED_PTR, NUM_OBJECTS) + +#define SORT_ASCENDING_WS( \ + SORT_PTR, \ + SORTED_PTR, \ + VAL_PTR, \ + VAL_SORTED_PTR, \ + NUM_OBJECTS, \ + WORSPACE_PTR, \ + 
WORKSPACE_SIZE, \ + STREAM) \ + SORT_ASCENDING( \ + SORT_PTR, SORTED_PTR, VAL_PTR, VAL_SORTED_PTR, NUM_OBJECTS, STREAM) + +#define REDUCE(REDUCE_PTR, RESULT_PTR, NUM_ITEMS, REDUCE_OP, REDUCE_INIT) \ + { \ + *(RESULT_PTR) = (REDUCE_INIT); \ + for (int i = 0; i < (NUM_ITEMS); ++i) { \ + *(RESULT_PTR) = REDUCE_OP(*(RESULT_PTR), (REDUCE_PTR)[i]); \ + } \ + } +#define REDUCE_WS( \ + REDUCE_PTR, \ + RESULT_PTR, \ + NUM_ITEMS, \ + REDUCE_OP, \ + REDUCE_INIT, \ + WORKSPACE_PTR, \ + WORKSPACE_SIZE, \ + STREAM) \ + REDUCE(REDUCE_PTR, RESULT_PTR, NUM_ITEMS, REDUCE_OP, REDUCE_INIT) + +#define SELECT_FLAGS_WS( \ + FLAGS_PTR, \ + ITEM_PTR, \ + OUT_PTR, \ + NUM_SELECTED_PTR, \ + NUM_ITEMS, \ + WORKSPACE_PTR, \ + WORSPACE_BYTES, \ + STREAM) \ + { \ + *NUM_SELECTED_PTR = 0; \ + ptrdiff_t write_pos = 0; \ + for (int i = 0; i < NUM_ITEMS; ++i) { \ + if (FLAGS_PTR[i]) { \ + OUT_PTR[write_pos++] = ITEM_PTR[i]; \ + *NUM_SELECTED_PTR += 1; \ + } \ + } \ + } + +template +void SUM_WS( + T* SUM_PTR, + T* OUT_PTR, + size_t NUM_OBJECTS, + char* WORKSPACE_PTR, + size_t WORKSPACE_BYTES, + cudaStream_t STREAM) { + *(OUT_PTR) = T(); + for (int i = 0; i < (NUM_OBJECTS); ++i) { + *(OUT_PTR) = *(OUT_PTR) + (SUM_PTR)[i]; + } +} + +template +void MIN_WS( + T* MIN_PTR, + T* OUT_PTR, + size_t NUM_OBJECTS, + char* WORKSPACE_PTR, + size_t WORKSPACE_BYTES, + cudaStream_t STREAM) { + *(OUT_PTR) = T(); + for (int i = 0; i < (NUM_OBJECTS); ++i) { + *(OUT_PTR) = std::min(*(OUT_PTR), (MIN_PTR)[i]); + } +} + +template +void MAX_WS( + T* MAX_PTR, + T* OUT_PTR, + size_t NUM_OBJECTS, + char* WORKSPACE_PTR, + size_t WORKSPACE_BYTES, + cudaStream_t STREAM) { + *(OUT_PTR) = T(); + for (int i = 0; i < (NUM_OBJECTS); ++i) { + *(OUT_PTR) = std::max(*(OUT_PTR), (MAX_PTR)[i]); + } +} +// +// +// +// +#define COPY_HOST_DEV(PTR_D, PTR_H, TYPE, SIZE) \ + std::memcpy((PTR_D), (PTR_H), sizeof(TYPE) * (SIZE)) +// +#define COPY_DEV_HOST(PTR_H, PTR_D, TYPE, SIZE) \ + std::memcpy((PTR_H), (PTR_D), sizeof(TYPE) * (SIZE)) +// 
+#define COPY_DEV_DEV(PTR_T, PTR_S, TYPE, SIZE) \ + std::memcpy((PTR_T), (PTR_S), sizeof(TYPE) * SIZE) +// + +#define MALLOC(VAR, TYPE, SIZE) MALLOC_HOST(VAR, TYPE, SIZE) +#define FREE(PTR) FREE_HOST(PTR) +#define MEMSET(VAR, VAL, TYPE, SIZE, STREAM) \ + memset((VAR), (VAL), sizeof(TYPE) * (SIZE)) +// + +#define LAUNCH_MAX_PARALLEL_1D(FUNC, N, STREAM, ...) FUNC(__VA_ARGS__); +#define LAUNCH_PARALLEL_1D(FUNC, N, TN, STREAM, ...) FUNC(__VA_ARGS__); +#define LAUNCH_MAX_PARALLEL_2D(FUNC, NX, NY, STREAM, ...) FUNC(__VA_ARGS__); +#define LAUNCH_PARALLEL_2D(FUNC, NX, NY, TX, TY, STREAM, ...) FUNC(__VA_ARGS__); +// +// +// +// +// +#define GET_PARALLEL_IDX_1D(VARNAME, N) \ + for (uint VARNAME = 0; VARNAME < (N); ++VARNAME) { +#define GET_PARALLEL_IDS_2D(VAR_X, VAR_Y, WIDTH, HEIGHT) \ + int2 blockDim; \ + blockDim.x = 1; \ + blockDim.y = 1; \ + uint __parallel_2d_width = WIDTH; \ + uint __parallel_2d_height = HEIGHT; \ + for (uint VAR_Y = 0; VAR_Y < __parallel_2d_height; ++(VAR_Y)) { \ + for (uint VAR_X = 0; VAR_X < __parallel_2d_width; ++(VAR_X)) { +// +// +// +#define END_PARALLEL() \ + end_parallel :; \ + } +#define END_PARALLEL_NORET() } +#define END_PARALLEL_2D() \ + end_parallel :; \ + } \ + } +#define END_PARALLEL_2D_NORET() \ + } \ + } +#define RETURN_PARALLEL() goto end_parallel; +#define CHECKLAUNCH() +#define ISONDEVICE false +#define SYNCDEVICE() +#define START_TIME(TN) \ + auto __time_start_##TN = std::chrono::steady_clock::now(); +#define STOP_TIME(TN) auto __time_stop_##TN = std::chrono::steady_clock::now(); +#define GET_TIME(TN, TOPTR) \ + *TOPTR = std::chrono::duration_cast( \ + __time_stop_##TN - __time_start_##TN) \ + .count() +#define START_TIME_CU(TN) \ + cudaEvent_t __time_start_##TN, __time_stop_##TN; \ + cudaEventCreate(&__time_start_##TN); \ + cudaEventCreate(&__time_stop_##TN); \ + cudaEventRecord(__time_start_##TN); +#define STOP_TIME_CU(TN) cudaEventRecord(__time_stop_##TN); +#define GET_TIME_CU(TN, TOPTR) \ + 
cudaEventSynchronize(__time_stop_##TN); \ + cudaEventElapsedTime((TOPTR), __time_start_##TN, __time_stop_##TN); + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.backward.cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.backward.cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e0da7b7020c0a3f5ae0647030282adf0e0103d39 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.backward.cpu.cpp @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.backward.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.backward_dbg.cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.backward_dbg.cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a95bb421d2d9b6bfec1a9286e035b042b0d9842c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.backward_dbg.cpu.cpp @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "../include/renderer.backward_dbg.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.calc_gradients.cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.calc_gradients.cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ff38b08e0dfe46e65a94039c8dec7da721d0421a --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.calc_gradients.cpu.cpp @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.calc_gradients.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.calc_signature.cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.calc_signature.cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..81c72192eaa877038d9383cfdd0adf9a91e06f97 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.calc_signature.cpu.cpp @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "../include/renderer.calc_signature.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.construct.cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.construct.cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..67583511aec2a6bd4dd8670aeb809939a3d2e19c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.construct.cpu.cpp @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.construct.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.create_selector.cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.create_selector.cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..52e265bcb2ab8ca9e4d08d90d1dc4fef75294520 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.create_selector.cpu.cpp @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "../include/renderer.create_selector.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.destruct.cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.destruct.cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e61be93fa4c4893e6c4800f71cf49ef81c717ff0 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.destruct.cpu.cpp @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.destruct.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.fill_bg.cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.fill_bg.cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6c7b1a48b675b1dbe69992c81a8cbb8c8861911e --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.fill_bg.cpu.cpp @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "../include/renderer.fill_bg.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.forward.cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.forward.cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..bfb42debeeaa7444daec94a88830c39825239170 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.forward.cpu.cpp @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.forward.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.norm_cam_gradients.cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.norm_cam_gradients.cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..93d666324a4973f44ad4becbeecaf34e0c7b96e5 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.norm_cam_gradients.cpu.cpp @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "../include/renderer.norm_cam_gradients.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.norm_sphere_gradients.cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.norm_sphere_gradients.cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..65339caea11645e4b7ba99a0af77c21b4ae2f738 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.norm_sphere_gradients.cpu.cpp @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.norm_sphere_gradients.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.render.cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.render.cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..eb46adbafbc1c2a60dfb21fa9ce222828e53e31b --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.render.cpu.cpp @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "../include/renderer.render.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/camera.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/camera.device.h new file mode 100644 index 0000000000000000000000000000000000000000..f003db31ba09e177f0119083fc00cb27fb019c0d --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/camera.device.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_INCLUDE_CAMERA_DEVICE_H_ +#define PULSAR_NATIVE_INCLUDE_CAMERA_DEVICE_H_ + +#include "../global.h" +#include "./camera.h" +#include "./commands.h" + +namespace pulsar { +IHD CamGradInfo::CamGradInfo() { + cam_pos = make_float3(0.f, 0.f, 0.f); + pixel_0_0_center = make_float3(0.f, 0.f, 0.f); + pixel_dir_x = make_float3(0.f, 0.f, 0.f); + pixel_dir_y = make_float3(0.f, 0.f, 0.f); +} +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/fastermath.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/fastermath.h new file mode 100644 index 0000000000000000000000000000000000000000..cae598f9c0a7f903b502702dcb62173c8841a3b8 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/fastermath.h @@ -0,0 +1,88 @@ +#ifndef PULSAR_NATIVE_INCLUDE_FASTERMATH_H_ +#define PULSAR_NATIVE_INCLUDE_FASTERMATH_H_ + +// @lint-ignore-every LICENSELINT 
+/*=====================================================================* + * Copyright (C) 2011 Paul Mineiro * + * All rights reserved. * + * * + * Redistribution and use in source and binary forms, with * + * or without modification, are permitted provided that the * + * following conditions are met: * + * * + * * Redistributions of source code must retain the * + * above copyright notice, this list of conditions and * + * the following disclaimer. * + * * + * * Redistributions in binary form must reproduce the * + * above copyright notice, this list of conditions and * + * the following disclaimer in the documentation and/or * + * other materials provided with the distribution. * + * * + * * Neither the name of Paul Mineiro nor the names * + * of other contributors may be used to endorse or promote * + * products derived from this software without specific * + * prior written permission. * + * * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND * + * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, * + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER * + * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE * + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * + * POSSIBILITY OF SUCH DAMAGE. 
* + * * + * Contact: Paul Mineiro * + *=====================================================================*/ + +#include +#include "./commands.h" + +#ifdef __cplusplus +#define cast_uint32_t static_cast +#else +#define cast_uint32_t (uint32_t) +#endif + +IHD float fasterlog2(float x) { + union { + float f; + uint32_t i; + } vx = {x}; + float y = vx.i; + y *= 1.1920928955078125e-7f; + return y - 126.94269504f; +} + +IHD float fasterlog(float x) { + // return 0.69314718f * fasterlog2 (x); + union { + float f; + uint32_t i; + } vx = {x}; + float y = vx.i; + y *= 8.2629582881927490e-8f; + return y - 87.989971088f; +} + +IHD float fasterpow2(float p) { + float clipp = (p < -126) ? -126.0f : p; + union { + uint32_t i; + float f; + } v = {cast_uint32_t((1 << 23) * (clipp + 126.94269504f))}; + return v.f; +} + +IHD float fasterexp(float p) { + return fasterpow2(1.442695040f * p); +} + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/math.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/math.h new file mode 100644 index 0000000000000000000000000000000000000000..d77e2ee1aabb8607c706a7faaee052eb6531b557 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/math.h @@ -0,0 +1,157 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_IMPL_MATH_H_ +#define PULSAR_NATIVE_IMPL_MATH_H_ + +#include "./camera.h" +#include "./commands.h" +#include "./fastermath.h" + +/** + * Get the direction of val. + * + * Returns +1 if val is positive, -1 if val is zero or negative. 
+ */ +IHD int sign_dir(const int& val) { + return -(static_cast((val <= 0)) << 1) + 1; +}; + +/** + * Get the direction of val. + * + * Returns +1 if val is positive, -1 if val is zero or negative. + */ +IHD float sign_dir(const float& val) { + return static_cast(1 - (static_cast((val <= 0)) << 1)); +}; + +/** + * Integer ceil division. + */ +IHD uint iDivCeil(uint a, uint b) { + return (a % b != 0) ? (a / b + 1) : (a / b); +} + +IHD float3 outer_product_sum(const float3& a) { + return make_float3( + a.x * a.x + a.x * a.y + a.x * a.z, + a.x * a.y + a.y * a.y + a.y * a.z, + a.x * a.z + a.y * a.z + a.z * a.z); +} + +// TODO: put intrinsics here. +IHD float3 operator+(const float3& a, const float3& b) { + return make_float3(a.x + b.x, a.y + b.y, a.z + b.z); +} + +IHD void operator+=(float3& a, const float3& b) { + a.x += b.x; + a.y += b.y; + a.z += b.z; +} + +IHD void operator-=(float3& a, const float3& b) { + a.x -= b.x; + a.y -= b.y; + a.z -= b.z; +} + +IHD void operator/=(float3& a, const float& b) { + a.x /= b; + a.y /= b; + a.z /= b; +} + +IHD void operator*=(float3& a, const float& b) { + a.x *= b; + a.y *= b; + a.z *= b; +} + +IHD float3 operator/(const float3& a, const float& b) { + return make_float3(a.x / b, a.y / b, a.z / b); +} + +IHD float3 operator-(const float3& a, const float3& b) { + return make_float3(a.x - b.x, a.y - b.y, a.z - b.z); +} + +IHD float3 operator*(const float3& a, const float& b) { + return make_float3(a.x * b, a.y * b, a.z * b); +} + +IHD float3 operator*(const float3& a, const float3& b) { + return make_float3(a.x * b.x, a.y * b.y, a.z * b.z); +} + +IHD float3 operator*(const float& a, const float3& b) { + return b * a; +} + +INLINE DEVICE float length(const float3& v) { + // TODO: benchmark what's faster. + return NORM3DF(v.x, v.y, v.z); + // return __fsqrt_rn(v.x * v.x + v.y * v.y + v.z * v.z); +} + +/** + * Left-hand multiplication of the constructed rotation matrix with the vector. 
+ */ +IHD float3 rotate( + const float3& v, + const float3& dir_x, + const float3& dir_y, + const float3& dir_z) { + return make_float3( + dir_x.x * v.x + dir_x.y * v.y + dir_x.z * v.z, + dir_y.x * v.x + dir_y.y * v.y + dir_y.z * v.z, + dir_z.x * v.x + dir_z.y * v.y + dir_z.z * v.z); +} + +INLINE DEVICE float3 normalize(const float3& v) { + return v * RNORM3DF(v.x, v.y, v.z); +} + +INLINE DEVICE float dot(const float3& a, const float3& b) { + return FADD(FADD(FMUL(a.x, b.x), FMUL(a.y, b.y)), FMUL(a.z, b.z)); +} + +INLINE DEVICE float3 cross(const float3& a, const float3& b) { + // TODO: faster + return make_float3( + a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x); +} + +namespace pulsar { +IHD CamGradInfo operator+(const CamGradInfo& a, const CamGradInfo& b) { + CamGradInfo res; + res.cam_pos = a.cam_pos + b.cam_pos; + res.pixel_0_0_center = a.pixel_0_0_center + b.pixel_0_0_center; + res.pixel_dir_x = a.pixel_dir_x + b.pixel_dir_x; + res.pixel_dir_y = a.pixel_dir_y + b.pixel_dir_y; + return res; +} + +IHD CamGradInfo operator*(const CamGradInfo& a, const float& b) { + CamGradInfo res; + res.cam_pos = a.cam_pos * b; + res.pixel_0_0_center = a.pixel_0_0_center * b; + res.pixel_dir_x = a.pixel_dir_x * b; + res.pixel_dir_y = a.pixel_dir_y * b; + return res; +} + +IHD IntWrapper operator+(const IntWrapper& a, const IntWrapper& b) { + IntWrapper res; + res.val = a.val + b.val; + return res; +} +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.backward.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.backward.device.h new file mode 100644 index 0000000000000000000000000000000000000000..dcd9dd50fd1e83229073fc8e86b815d4da4c99f9 --- /dev/null +++ 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.backward.device.h @@ -0,0 +1,189 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_RENDERER_BACKWARD_DEVICE_H_ +#define PULSAR_NATIVE_RENDERER_BACKWARD_DEVICE_H_ + +#include "./camera.device.h" +#include "./math.h" +#include "./renderer.h" + +namespace pulsar { +namespace Renderer { + +template +void backward( + Renderer* self, + const float* grad_im, + const float* image, + const float* forw_info, + const float* vert_pos, + const float* vert_col, + const float* vert_rad, + const CamInfo& cam, + const float& gamma, + float percent_allowed_difference, + const uint& max_n_hits, + const float* vert_opy_d, + const size_t& num_balls, + const uint& mode, + const bool& dif_pos, + const bool& dif_col, + const bool& dif_rad, + const bool& dif_cam, + const bool& dif_opy, + cudaStream_t stream) { + ARGCHECK(gamma > 0.f && gamma <= 1.f, 6, "gamma must be in [0., 1.]"); + ARGCHECK( + percent_allowed_difference >= 0.f && percent_allowed_difference <= 1.f, + 7, + "percent_allowed_difference must be in [0., 1.]"); + ARGCHECK(max_n_hits >= 1u, 8, "max_n_hits must be >= 1"); + ARGCHECK( + num_balls > 0 && num_balls <= self->max_num_balls, + 9, + "num_balls must be >0 and less than max num balls!"); + ARGCHECK( + cam.film_width == self->cam.film_width && + cam.film_height == self->cam.film_height, + 5, + "cam film size must agree"); + ARGCHECK(mode <= 1, 10, "mode must be <= 1!"); + if (percent_allowed_difference < EPS) { + LOG(WARNING) << "percent_allowed_difference < " << FEPS << "! 
Clamping to " + << FEPS << "."; + percent_allowed_difference = FEPS; + } + if (percent_allowed_difference > 1.f - FEPS) { + LOG(WARNING) << "percent_allowed_difference > " << (1.f - FEPS) + << "! Clamping to " << (1.f - FEPS) << "."; + percent_allowed_difference = 1.f - FEPS; + } + LOG_IF(INFO, PULSAR_LOG_RENDER) << "Rendering backward pass..."; + // Update camera. + self->cam.eye = cam.eye; + self->cam.pixel_0_0_center = cam.pixel_0_0_center - cam.eye; + self->cam.pixel_dir_x = cam.pixel_dir_x; + self->cam.pixel_dir_y = cam.pixel_dir_y; + self->cam.sensor_dir_z = cam.sensor_dir_z; + self->cam.half_pixel_size = cam.half_pixel_size; + self->cam.focal_length = cam.focal_length; + self->cam.aperture_width = cam.aperture_width; + self->cam.aperture_height = cam.aperture_height; + self->cam.min_dist = cam.min_dist; + self->cam.max_dist = cam.max_dist; + self->cam.norm_fac = cam.norm_fac; + self->cam.principal_point_offset_x = cam.principal_point_offset_x; + self->cam.principal_point_offset_y = cam.principal_point_offset_y; + self->cam.film_border_left = cam.film_border_left; + self->cam.film_border_top = cam.film_border_top; +#ifdef PULSAR_TIMINGS_ENABLED + START_TIME(calc_signature); +#endif + LAUNCH_MAX_PARALLEL_1D( + calc_signature, + num_balls, + stream, + *self, + reinterpret_cast(vert_pos), + vert_col, + vert_rad, + num_balls); + CHECKLAUNCH(); +#ifdef PULSAR_TIMINGS_ENABLED + STOP_TIME(calc_signature); + START_TIME(calc_gradients); +#endif + MEMSET(self->grad_pos_d, 0, float3, num_balls, stream); + MEMSET(self->grad_col_d, 0, float, num_balls * self->cam.n_channels, stream); + MEMSET(self->grad_rad_d, 0, float, num_balls, stream); + MEMSET(self->grad_cam_d, 0, float, 12, stream); + MEMSET(self->grad_cam_buf_d, 0, CamGradInfo, num_balls, stream); + MEMSET(self->grad_opy_d, 0, float, num_balls, stream); + MEMSET(self->ids_sorted_d, 0, int, num_balls, stream); + LAUNCH_PARALLEL_2D( + calc_gradients, + self->cam.film_width, + self->cam.film_height, + GRAD_BLOCK_SIZE, 
+ GRAD_BLOCK_SIZE, + stream, + self->cam, + grad_im, + gamma, + reinterpret_cast(vert_pos), + vert_col, + vert_rad, + vert_opy_d, + num_balls, + image, + forw_info, + self->di_d, + self->ii_d, + dif_pos, + dif_col, + dif_rad, + dif_cam, + dif_opy, + self->grad_rad_d, + self->grad_col_d, + self->grad_pos_d, + self->grad_cam_buf_d, + self->grad_opy_d, + self->ids_sorted_d, + self->n_track); + CHECKLAUNCH(); +#ifdef PULSAR_TIMINGS_ENABLED + STOP_TIME(calc_gradients); + START_TIME(normalize); +#endif + LAUNCH_MAX_PARALLEL_1D( + norm_sphere_gradients, num_balls, stream, *self, num_balls); + CHECKLAUNCH(); + if (dif_cam) { + SUM_WS( + self->grad_cam_buf_d, + reinterpret_cast(self->grad_cam_d), + static_cast(num_balls), + self->workspace_d, + self->workspace_size, + stream); + CHECKLAUNCH(); + SUM_WS( + (IntWrapper*)(self->ids_sorted_d), + (IntWrapper*)(self->n_grad_contributions_d), + static_cast(num_balls), + self->workspace_d, + self->workspace_size, + stream); + CHECKLAUNCH(); + LAUNCH_MAX_PARALLEL_1D( + norm_cam_gradients, static_cast(1), stream, *self); + CHECKLAUNCH(); + } +#ifdef PULSAR_TIMINGS_ENABLED + STOP_TIME(normalize); + float time_ms; + // This blocks the result and prevents batch-processing from parallelizing. 
+ GET_TIME(calc_signature, &time_ms); + std::cout << "Time for signature calculation: " << time_ms << " ms" + << std::endl; + GET_TIME(calc_gradients, &time_ms); + std::cout << "Time for gradient calculation: " << time_ms << " ms" + << std::endl; + GET_TIME(normalize, &time_ms); + std::cout << "Time for aggregation and normalization: " << time_ms << " ms" + << std::endl; +#endif + LOG_IF(INFO, PULSAR_LOG_RENDER) << "Backward pass complete."; +} + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.backward_dbg.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.backward_dbg.device.h new file mode 100644 index 0000000000000000000000000000000000000000..f2bdc7e69027d29a8442a14b08d677cc22dc51c9 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.backward_dbg.device.h @@ -0,0 +1,157 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#ifndef PULSAR_NATIVE_RENDERER_BACKWARD_DBG_DEVICE_H_ +#define PULSAR_NATIVE_RENDERER_BACKWARD_DBG_DEVICE_H_ + +#include "./camera.device.h" +#include "./math.h" +#include "./renderer.h" + +namespace pulsar { +namespace Renderer { + +template +void backward_dbg( + Renderer* self, + const float* grad_im, + const float* image, + const float* forw_info, + const float* vert_pos, + const float* vert_col, + const float* vert_rad, + const CamInfo& cam, + const float& gamma, + float percent_allowed_difference, + const uint& max_n_hits, + const float* vert_opy_d, + const size_t& num_balls, + const uint& mode, + const bool& dif_pos, + const bool& dif_col, + const bool& dif_rad, + const bool& dif_cam, + const bool& dif_opy, + const uint& pos_x, + const uint& pos_y, + cudaStream_t stream) { + ARGCHECK(gamma > 0.f && gamma <= 1.f, 6, "gamma must be in [0., 1.]"); + ARGCHECK( + percent_allowed_difference >= 0.f && percent_allowed_difference <= 1.f, + 7, + "percent_allowed_difference must be in [0., 1.]"); + ARGCHECK(max_n_hits >= 1u, 8, "max_n_hits must be >= 1"); + ARGCHECK( + num_balls > 0 && num_balls <= self->max_num_balls, + 9, + "num_balls must be >0 and less than max num balls!"); + ARGCHECK( + cam.film_width == self->cam.film_width && + cam.film_height == self->cam.film_height, + 5, + "cam film size must agree"); + ARGCHECK(mode <= 1, 10, "mode must be <= 1!"); + if (percent_allowed_difference < EPS) { + LOG(WARNING) << "percent_allowed_difference < " << FEPS << "! Clamping to " + << FEPS << "."; + percent_allowed_difference = FEPS; + } + ARGCHECK( + pos_x < cam.film_width && pos_y < cam.film_height, + 15, + "pos_x must be < width and pos_y < height."); + if (percent_allowed_difference > 1.f - FEPS) { + LOG(WARNING) << "percent_allowed_difference > " << (1.f - FEPS) + << "! 
Clamping to " << (1.f - FEPS) << "."; + percent_allowed_difference = 1.f - FEPS; + } + LOG_IF(INFO, PULSAR_LOG_RENDER) + << "Rendering debug backward pass for x: " << pos_x << ", y: " << pos_y; + // Update camera. + self->cam.eye = cam.eye; + self->cam.pixel_0_0_center = cam.pixel_0_0_center - cam.eye; + self->cam.pixel_dir_x = cam.pixel_dir_x; + self->cam.pixel_dir_y = cam.pixel_dir_y; + self->cam.sensor_dir_z = cam.sensor_dir_z; + self->cam.half_pixel_size = cam.half_pixel_size; + self->cam.focal_length = cam.focal_length; + self->cam.aperture_width = cam.aperture_width; + self->cam.aperture_height = cam.aperture_height; + self->cam.min_dist = cam.min_dist; + self->cam.max_dist = cam.max_dist; + self->cam.norm_fac = cam.norm_fac; + self->cam.principal_point_offset_x = cam.principal_point_offset_x; + self->cam.principal_point_offset_y = cam.principal_point_offset_y; + self->cam.film_border_left = cam.film_border_left; + self->cam.film_border_top = cam.film_border_top; + LAUNCH_MAX_PARALLEL_1D( + calc_signature, + num_balls, + stream, + *self, + reinterpret_cast(vert_pos), + vert_col, + vert_rad, + num_balls); + CHECKLAUNCH(); + MEMSET(self->grad_pos_d, 0, float3, num_balls, stream); + MEMSET(self->grad_col_d, 0, float, num_balls * self->cam.n_channels, stream); + MEMSET(self->grad_rad_d, 0, float, num_balls, stream); + MEMSET(self->grad_cam_d, 0, float, 12, stream); + MEMSET(self->grad_cam_buf_d, 0, CamGradInfo, num_balls, stream); + MEMSET(self->grad_opy_d, 0, float, num_balls, stream); + MEMSET(self->ids_sorted_d, 0, int, num_balls, stream); + LAUNCH_MAX_PARALLEL_2D( + calc_gradients, + (int64_t)1, + (int64_t)1, + stream, + self->cam, + grad_im, + gamma, + reinterpret_cast(vert_pos), + vert_col, + vert_rad, + vert_opy_d, + num_balls, + image, + forw_info, + self->di_d, + self->ii_d, + dif_pos, + dif_col, + dif_rad, + dif_cam, + dif_opy, + self->grad_rad_d, + self->grad_col_d, + self->grad_pos_d, + self->grad_cam_buf_d, + self->grad_opy_d, + self->ids_sorted_d, + 
self->n_track, + pos_x, + pos_y); + CHECKLAUNCH(); + // We're not doing sphere gradient normalization here. + SUM_WS( + self->grad_cam_buf_d, + reinterpret_cast(self->grad_cam_d), + static_cast(1), + self->workspace_d, + self->workspace_size, + stream); + CHECKLAUNCH(); + // We're not doing camera gradient normalization here. + LOG_IF(INFO, PULSAR_LOG_RENDER) << "Debug backward pass complete."; +} + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.backward_dbg.instantiate.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.backward_dbg.instantiate.h new file mode 100644 index 0000000000000000000000000000000000000000..5a7a1ba1f8e56df0a5ff212e7eb769a0564e7f60 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.backward_dbg.instantiate.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "./renderer.backward_dbg.device.h" + +namespace pulsar { +namespace Renderer { + +template void backward_dbg( + Renderer* self, + const float* grad_im, + const float* image, + const float* forw_info, + const float* vert_pos, + const float* vert_col, + const float* vert_rad, + const CamInfo& cam, + const float& gamma, + float percent_allowed_difference, + const uint& max_n_hits, + const float* vert_opy, + const size_t& num_balls, + const uint& mode, + const bool& dif_pos, + const bool& dif_col, + const bool& dif_rad, + const bool& dif_cam, + const bool& dif_opy, + const uint& pos_x, + const uint& pos_y, + cudaStream_t stream); + +} // namespace Renderer +} // namespace pulsar diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.calc_gradients.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.calc_gradients.device.h new file mode 100644 index 0000000000000000000000000000000000000000..90b3872e9606c8830b039f18c4d465c3f8c23c1f --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.calc_gradients.device.h @@ -0,0 +1,198 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_CALC_GRADIENTS_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_CALC_GRADIENTS_H_ + +#include "../global.h" +#include "./commands.h" +#include "./renderer.h" + +#include "./renderer.draw.device.h" + +namespace pulsar { +namespace Renderer { + +template +GLOBAL void calc_gradients( + const CamInfo cam, /** Camera in world coordinates. */ + float const* const RESTRICT grad_im, /** The gradient image. 
*/ + const float + gamma, /** The transparency parameter used in the forward pass. */ + float3 const* const RESTRICT vert_poss, /** Vertex position vector. */ + float const* const RESTRICT vert_cols, /** Vertex color vector. */ + float const* const RESTRICT vert_rads, /** Vertex radius vector. */ + float const* const RESTRICT opacity, /** Vertex opacity. */ + const uint num_balls, /** Number of balls. */ + float const* const RESTRICT result_d, /** Result image. */ + float const* const RESTRICT forw_info_d, /** Forward pass info. */ + DrawInfo const* const RESTRICT di_d, /** Draw information. */ + IntersectInfo const* const RESTRICT ii_d, /** Intersect information. */ + // Mode switches. + const bool calc_grad_pos, + const bool calc_grad_col, + const bool calc_grad_rad, + const bool calc_grad_cam, + const bool calc_grad_opy, + // Out variables. + float* const RESTRICT grad_rad_d, /** Radius gradients. */ + float* const RESTRICT grad_col_d, /** Color gradients. */ + float3* const RESTRICT grad_pos_d, /** Position gradients. */ + CamGradInfo* const RESTRICT grad_cam_buf_d, /** Camera gradient buffer. */ + float* const RESTRICT grad_opy_d, /** Opacity gradient buffer. */ + int* const RESTRICT + grad_contributed_d, /** Gradient contribution counter. */ + // Infrastructure. + const int n_track, + const uint offs_x, + const uint offs_y /** Debug offsets. */ +) { + uint limit_x = cam.film_width, limit_y = cam.film_height; + if (offs_x != 0) { + // We're in debug mode. + limit_x = 1; + limit_y = 1; + } + GET_PARALLEL_IDS_2D(coord_x_base, coord_y_base, limit_x, limit_y); + // coord_x_base and coord_y_base are in the film coordinate system. + // We now need to translate to the aperture coordinate system. If + // the principal point was shifted left/up nothing has to be + // subtracted - only shift needs to be added in case it has been + // shifted down/right. 
+ const uint film_coord_x = coord_x_base + offs_x; + const uint ap_coord_x = film_coord_x + + 2 * static_cast(std::max(0, cam.principal_point_offset_x)); + const uint film_coord_y = coord_y_base + offs_y; + const uint ap_coord_y = film_coord_y + + 2 * static_cast(std::max(0, cam.principal_point_offset_y)); + const float3 ray_dir = /** Ray cast through the pixel, normalized. */ + cam.pixel_0_0_center + ap_coord_x * cam.pixel_dir_x + + ap_coord_y * cam.pixel_dir_y; + const float norm_ray_dir = length(ray_dir); + // ray_dir_norm *must* be calculated here in the same way as in the draw + // function to have the same values withno other numerical instabilities + // (for example, ray_dir * FRCP(norm_ray_dir) does not work)! + float3 ray_dir_norm; /** Ray cast through the pixel, normalized. */ + float2 projected_ray; /** Ray intersection with the sensor. */ + if (cam.orthogonal_projection) { + ray_dir_norm = cam.sensor_dir_z; + projected_ray.x = static_cast(ap_coord_x); + projected_ray.y = static_cast(ap_coord_y); + } else { + ray_dir_norm = normalize( + cam.pixel_0_0_center + ap_coord_x * cam.pixel_dir_x + + ap_coord_y * cam.pixel_dir_y); + // This is a reasonable assumption for normal focal lengths and image sizes. + PASSERT(FABS(ray_dir_norm.z) > FEPS); + projected_ray.x = ray_dir_norm.x / ray_dir_norm.z * cam.focal_length; + projected_ray.y = ray_dir_norm.y / ray_dir_norm.z * cam.focal_length; + } + float* result = const_cast( + result_d + film_coord_y * cam.film_width * cam.n_channels + + film_coord_x * cam.n_channels); + const float* grad_im_l = grad_im + + film_coord_y * cam.film_width * cam.n_channels + + film_coord_x * cam.n_channels; + // For writing... + float3 grad_pos; + float grad_rad, grad_opy; + CamGradInfo grad_cam_local = CamGradInfo(); + // Set up shared infrastructure. 
+ const int fwi_loc = film_coord_y * cam.film_width * (3 + 2 * n_track) + + film_coord_x * (3 + 2 * n_track); + float sm_m = forw_info_d[fwi_loc]; + float sm_d = forw_info_d[fwi_loc + 1]; + PULSAR_LOG_DEV_APIX( + PULSAR_LOG_GRAD, + "grad|sm_m: %f, sm_d: %f, result: " + "%f, %f, %f; grad_im: %f, %f, %f.\n", + sm_m, + sm_d, + result[0], + result[1], + result[2], + grad_im_l[0], + grad_im_l[1], + grad_im_l[2]); + // Start processing. + for (int grad_idx = 0; grad_idx < n_track; ++grad_idx) { + int sphere_idx; + FASI(forw_info_d[fwi_loc + 3 + 2 * grad_idx], sphere_idx); + PASSERT( + sphere_idx == -1 || + sphere_idx >= 0 && static_cast(sphere_idx) < num_balls); + if (sphere_idx >= 0) { + // TODO: make more efficient. + grad_pos = make_float3(0.f, 0.f, 0.f); + grad_rad = 0.f; + grad_cam_local = CamGradInfo(); + const DrawInfo di = di_d[sphere_idx]; + grad_opy = 0.f; + draw( + di, + opacity == NULL ? 1.f : opacity[sphere_idx], + cam, + gamma, + ray_dir_norm, + projected_ray, + // Mode switches. + false, // draw only + calc_grad_pos, + calc_grad_col, + calc_grad_rad, + calc_grad_cam, + calc_grad_opy, + // Position info. + ap_coord_x, + ap_coord_y, + sphere_idx, + // Optional in. + &ii_d[sphere_idx], + &ray_dir, + &norm_ray_dir, + grad_im_l, + NULL, + // In/out + &sm_d, + &sm_m, + result, + // Optional out. + NULL, + NULL, + &grad_pos, + grad_col_d + sphere_idx * cam.n_channels, + &grad_rad, + &grad_cam_local, + &grad_opy); + ATOMICADD(&(grad_rad_d[sphere_idx]), grad_rad); + // Color has been added directly. 
+ ATOMICADD_F3(&(grad_pos_d[sphere_idx]), grad_pos); + ATOMICADD_F3( + &(grad_cam_buf_d[sphere_idx].cam_pos), grad_cam_local.cam_pos); + if (!cam.orthogonal_projection) { + ATOMICADD_F3( + &(grad_cam_buf_d[sphere_idx].pixel_0_0_center), + grad_cam_local.pixel_0_0_center); + } + ATOMICADD_F3( + &(grad_cam_buf_d[sphere_idx].pixel_dir_x), + grad_cam_local.pixel_dir_x); + ATOMICADD_F3( + &(grad_cam_buf_d[sphere_idx].pixel_dir_y), + grad_cam_local.pixel_dir_y); + ATOMICADD(&(grad_opy_d[sphere_idx]), grad_opy); + ATOMICADD(&(grad_contributed_d[sphere_idx]), 1); + } + } + END_PARALLEL_2D_NORET(); +}; + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.calc_gradients.instantiate.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.calc_gradients.instantiate.h new file mode 100644 index 0000000000000000000000000000000000000000..596c322b28eef850d5466037770cef53caf51cff --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.calc_gradients.instantiate.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "./renderer.calc_gradients.device.h" + +namespace pulsar { +namespace Renderer { + +template GLOBAL void calc_gradients( + const CamInfo cam, /** Camera in world coordinates. */ + float const* const RESTRICT grad_im, /** The gradient image. */ + const float + gamma, /** The transparency parameter used in the forward pass. */ + float3 const* const RESTRICT vert_poss, /** Vertex position vector. */ + float const* const RESTRICT vert_cols, /** Vertex color vector. 
*/ + float const* const RESTRICT vert_rads, /** Vertex radius vector. */ + float const* const RESTRICT opacity, /** Vertex opacity. */ + const uint num_balls, /** Number of balls. */ + float const* const RESTRICT result_d, /** Result image. */ + float const* const RESTRICT forw_info_d, /** Forward pass info. */ + DrawInfo const* const RESTRICT di_d, /** Draw information. */ + IntersectInfo const* const RESTRICT ii_d, /** Intersect information. */ + // Mode switches. + const bool calc_grad_pos, + const bool calc_grad_col, + const bool calc_grad_rad, + const bool calc_grad_cam, + const bool calc_grad_opy, + // Out variables. + float* const RESTRICT grad_rad_d, /** Radius gradients. */ + float* const RESTRICT grad_col_d, /** Color gradients. */ + float3* const RESTRICT grad_pos_d, /** Position gradients. */ + CamGradInfo* const RESTRICT grad_cam_buf_d, /** Camera gradient buffer. */ + float* const RESTRICT grad_opy_d, /** Opacity gradient buffer. */ + int* const RESTRICT + grad_contributed_d, /** Gradient contribution counter. */ + // Infrastructure. + const int n_track, + const uint offs_x, + const uint offs_y); + +} // namespace Renderer +} // namespace pulsar diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.calc_signature.instantiate.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.calc_signature.instantiate.h new file mode 100644 index 0000000000000000000000000000000000000000..6afa95b44b161d8881b79b22e119c89aad522cc6 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.calc_signature.instantiate.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. 
+ * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_CALC_SIGNATURE_INSTANTIATE_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_CALC_SIGNATURE_INSTANTIATE_H_ + +#include "./renderer.calc_signature.device.h" + +namespace pulsar { +namespace Renderer { +template GLOBAL void calc_signature( + Renderer renderer, + float3 const* const RESTRICT vert_poss, + float const* const RESTRICT vert_cols, + float const* const RESTRICT vert_rads, + const uint num_balls); +} +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.construct.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.construct.device.h new file mode 100644 index 0000000000000000000000000000000000000000..984f7710ba50e4dbeea8e61a2bac7ab41e608697 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.construct.device.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_CONSTRUCT_DEVICE_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_CONSTRUCT_DEVICE_H_ + +#include "../global.h" +#include "./camera.device.h" +#include "./commands.h" +#include "./math.h" +#include "./renderer.h" + +namespace pulsar { +namespace Renderer { + +template +HOST void construct( + Renderer* self, + const size_t& max_num_balls, + const int& width, + const int& height, + const bool& orthogonal_projection, + const bool& right_handed_system, + const float& background_normalization_depth, + const uint& n_channels, + const uint& n_track) { + ARGCHECK( + (max_num_balls > 0 && max_num_balls < MAX_INT), + 2, + ("the maximum number of balls must be >0 and <" + + std::to_string(MAX_INT) + ". Is " + std::to_string(max_num_balls) + ".") + .c_str()); + ARGCHECK(width > 1, 3, "the image width must be > 1"); + ARGCHECK(height > 1, 4, "the image height must be > 1"); + ARGCHECK( + background_normalization_depth > 0.f && + background_normalization_depth < 1.f, + 6, + "background_normalization_depth must be in ]0., 1.[."); + ARGCHECK(n_channels > 0, 7, "n_channels must be >0!"); + ARGCHECK( + n_track > 0 && n_track <= MAX_GRAD_SPHERES, + 8, + ("n_track must be >0 and <" + std::to_string(MAX_GRAD_SPHERES) + ". 
Is " + + std::to_string(n_track) + ".") + .c_str()); + self->cam.film_width = width; + self->cam.film_height = height; + self->max_num_balls = max_num_balls; + MALLOC(self->result_d, float, width* height* n_channels); + self->cam.orthogonal_projection = orthogonal_projection; + self->cam.right_handed = right_handed_system; + self->cam.background_normalization_depth = background_normalization_depth; + self->cam.n_channels = n_channels; + MALLOC(self->min_depth_d, float, max_num_balls); + MALLOC(self->min_depth_sorted_d, float, max_num_balls); + MALLOC(self->ii_d, IntersectInfo, max_num_balls); + MALLOC(self->ii_sorted_d, IntersectInfo, max_num_balls); + MALLOC(self->ids_d, int, max_num_balls); + MALLOC(self->ids_sorted_d, int, max_num_balls); + size_t sort_id_size = 0; + GET_SORT_WS_SIZE(&sort_id_size, float, int, max_num_balls); + CHECKLAUNCH(); + size_t sort_ii_size = 0; + GET_SORT_WS_SIZE(&sort_ii_size, float, IntersectInfo, max_num_balls); + CHECKLAUNCH(); + size_t sort_di_size = 0; + GET_SORT_WS_SIZE(&sort_di_size, float, DrawInfo, max_num_balls); + CHECKLAUNCH(); + size_t select_ii_size = 0; + GET_SELECT_WS_SIZE(&select_ii_size, char, IntersectInfo, max_num_balls); + size_t select_di_size = 0; + GET_SELECT_WS_SIZE(&select_di_size, char, DrawInfo, max_num_balls); + size_t sum_size = 0; + GET_SUM_WS_SIZE(&sum_size, CamGradInfo, max_num_balls); + size_t sum_cont_size = 0; + GET_SUM_WS_SIZE(&sum_cont_size, int, max_num_balls); + size_t reduce_size = 0; + GET_REDUCE_WS_SIZE( + &reduce_size, IntersectInfo, IntersectInfoMinMax(), max_num_balls); + self->workspace_size = IMAX( + IMAX(IMAX(sort_id_size, sort_ii_size), sort_di_size), + IMAX( + IMAX(select_di_size, select_ii_size), + IMAX(IMAX(sum_size, sum_cont_size), reduce_size))); + MALLOC(self->workspace_d, char, self->workspace_size); + MALLOC(self->di_d, DrawInfo, max_num_balls); + MALLOC(self->di_sorted_d, DrawInfo, max_num_balls); + MALLOC(self->region_flags_d, char, max_num_balls); + 
MALLOC(self->num_selected_d, size_t, 1); + MALLOC(self->forw_info_d, float, width* height * (3 + 2 * n_track)); + MALLOC(self->min_max_pixels_d, IntersectInfo, 1); + MALLOC(self->grad_pos_d, float3, max_num_balls); + MALLOC(self->grad_col_d, float, max_num_balls* n_channels); + MALLOC(self->grad_rad_d, float, max_num_balls); + MALLOC(self->grad_cam_d, float, 12); + MALLOC(self->grad_cam_buf_d, CamGradInfo, max_num_balls); + MALLOC(self->grad_opy_d, float, max_num_balls); + MALLOC(self->n_grad_contributions_d, int, 1); + self->n_track = static_cast(n_track); +} + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.create_selector.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.create_selector.device.h new file mode 100644 index 0000000000000000000000000000000000000000..747ad03cd3a3a49c34d81485a1780d81a332a215 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.create_selector.device.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_CREATE_SELECTOR_DEVICE_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_CREATE_SELECTOR_DEVICE_H_ + +#include "../global.h" +#include "./commands.h" +#include "./renderer.h" + +namespace pulsar { +namespace Renderer { + +template +GLOBAL void create_selector( + IntersectInfo const* const RESTRICT ii_sorted_d, + const uint num_balls, + const int min_x, + const int max_x, + const int min_y, + const int max_y, + /* Out variables. 
*/ + char* RESTRICT region_flags_d) { + GET_PARALLEL_IDX_1D(idx, num_balls); + bool hit = (static_cast(ii_sorted_d[idx].min.x) <= max_x) && + (static_cast(ii_sorted_d[idx].max.x) > min_x) && + (static_cast(ii_sorted_d[idx].min.y) <= max_y) && + (static_cast(ii_sorted_d[idx].max.y) > min_y); + region_flags_d[idx] = hit; + END_PARALLEL_NORET(); +} + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.destruct.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.destruct.device.h new file mode 100644 index 0000000000000000000000000000000000000000..8520233c59be062fa72376158a9935afa50c3950 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.destruct.device.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_DESTRUCT_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_DESTRUCT_H_ + +#include "../global.h" +#include "./commands.h" +#include "./renderer.h" + +namespace pulsar { +namespace Renderer { + +template +HOST void destruct(Renderer* self) { + if (self->result_d != NULL) + FREE(self->result_d); + self->result_d = NULL; + if (self->min_depth_d != NULL) + FREE(self->min_depth_d); + self->min_depth_d = NULL; + if (self->min_depth_sorted_d != NULL) + FREE(self->min_depth_sorted_d); + self->min_depth_sorted_d = NULL; + if (self->ii_d != NULL) + FREE(self->ii_d); + self->ii_d = NULL; + if (self->ii_sorted_d != NULL) + FREE(self->ii_sorted_d); + self->ii_sorted_d = NULL; + if (self->ids_d != NULL) + FREE(self->ids_d); + self->ids_d = NULL; + if (self->ids_sorted_d != NULL) + FREE(self->ids_sorted_d); + self->ids_sorted_d = NULL; + if (self->workspace_d != NULL) + FREE(self->workspace_d); + self->workspace_d = NULL; + if (self->di_d != NULL) + FREE(self->di_d); + self->di_d = NULL; + if (self->di_sorted_d != NULL) + FREE(self->di_sorted_d); + self->di_sorted_d = NULL; + if (self->region_flags_d != NULL) + FREE(self->region_flags_d); + self->region_flags_d = NULL; + if (self->num_selected_d != NULL) + FREE(self->num_selected_d); + self->num_selected_d = NULL; + if (self->forw_info_d != NULL) + FREE(self->forw_info_d); + self->forw_info_d = NULL; + if (self->min_max_pixels_d != NULL) + FREE(self->min_max_pixels_d); + self->min_max_pixels_d = NULL; + if (self->grad_pos_d != NULL) + FREE(self->grad_pos_d); + self->grad_pos_d = NULL; + if (self->grad_col_d != NULL) + FREE(self->grad_col_d); + self->grad_col_d = NULL; + if (self->grad_rad_d != NULL) + FREE(self->grad_rad_d); + self->grad_rad_d = NULL; + if (self->grad_cam_d != NULL) + FREE(self->grad_cam_d); + self->grad_cam_d = NULL; + if (self->grad_cam_buf_d != NULL) + FREE(self->grad_cam_buf_d); + self->grad_cam_buf_d = NULL; + if (self->grad_opy_d != NULL) + FREE(self->grad_opy_d); + 
self->grad_opy_d = NULL; + if (self->n_grad_contributions_d != NULL) + FREE(self->n_grad_contributions_d); + self->n_grad_contributions_d = NULL; +} + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.draw.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.draw.device.h new file mode 100644 index 0000000000000000000000000000000000000000..cb8ecabed3eefce77f7120d234fad15b0bed064c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.draw.device.h @@ -0,0 +1,846 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_CALC_SIGNATURE_DEVICE_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_CALC_SIGNATURE_DEVICE_H_ + +#include "../global.h" +#include "./camera.device.h" +#include "./commands.h" +#include "./math.h" +#include "./renderer.h" + +namespace pulsar { +namespace Renderer { + +/** + * Draw a ball into the `result`. + * + * Returns whether a hit was noticed. See README for an explanation of sphere + * points and variable notation. + */ +INLINE DEVICE bool draw( + /* In variables. */ + const DrawInfo& draw_info, /** The draw information for this ball. */ + const float& opacity, /** The sphere opacity. */ + const CamInfo& + cam, /** Camera information. Doesn't have to be normalized. */ + const float& gamma, /** 'Transparency' indicator (see paper for details). */ + const float3& ray_dir_norm, /** The direction of the ray, normalized. */ + const float2& projected_ray, /** The intersection of the ray with the image + in pixel space. */ + /** Mode switches. 
*/ + const bool& draw_only, /** Whether we are in draw vs. grad mode. */ + const bool& calc_grad_pos, /** Calculate position gradients. */ + const bool& calc_grad_col, /** Calculate color gradients. */ + const bool& calc_grad_rad, /** Calculate radius gradients. */ + const bool& calc_grad_cam, /** Calculate camera gradients. */ + const bool& calc_grad_opy, /** Calculate opacity gradients. */ + /** Position info. */ + const uint& coord_x, /** The pixel position x to draw at. */ + const uint& coord_y, /** The pixel position y to draw at. */ + const uint& idx, /** The id of the sphere to process. */ + /* Optional in variables. */ + IntersectInfo const* const RESTRICT + intersect_info, /** The intersect information for this ball. */ + float3 const* const RESTRICT ray_dir, /** The ray direction (not normalized) + to draw at. Only used for grad computation. */ + float const* const RESTRICT norm_ray_dir, /** The length of the direction + vector. Only used for grad computation. */ + float const* const RESTRICT grad_pix, /** The gradient for this pixel. Only + used for grad computation. */ + float const* const RESTRICT + ln_pad_over_1minuspad, /** Allowed percentage indicator. */ + /* In or out variables, depending on mode. */ + float* const RESTRICT sm_d, /** Normalization denominator. */ + float* const RESTRICT + sm_m, /** Maximum of normalization weight factors observed. */ + float* const RESTRICT + result, /** Result pixel color. Must be zeros initially. */ + /* Optional out variables. */ + float* const RESTRICT depth_threshold, /** The depth threshold to use. Only + used for rendering. */ + float* const RESTRICT intersection_depth_norm_out, /** The intersection + depth. Only set when rendering. */ + float3* const RESTRICT grad_pos, /** Gradient w.r.t. position. */ + float* const RESTRICT grad_col, /** Gradient w.r.t. color. */ + float* const RESTRICT grad_rad, /** Gradient w.r.t. radius. */ + CamGradInfo* const RESTRICT grad_cam, /** Gradient w.r.t. camera. 
*/ + float* const RESTRICT grad_opy /** Gradient w.r.t. opacity. */ +) { + // TODO: variable reuse? + PASSERT( + isfinite(draw_info.ray_center_norm.x) && + isfinite(draw_info.ray_center_norm.y) && + isfinite(draw_info.ray_center_norm.z)); + PASSERT(isfinite(draw_info.t_center) && draw_info.t_center >= 0.f); + PASSERT( + isfinite(draw_info.radius) && draw_info.radius >= 0.f && + draw_info.radius <= draw_info.t_center); + PASSERT(isfinite(ray_dir_norm.x)); + PASSERT(isfinite(ray_dir_norm.y)); + PASSERT(isfinite(ray_dir_norm.z)); + PASSERT(isfinite(*sm_d)); + PASSERT( + cam.orthogonal_projection && cam.focal_length == 0.f || + cam.focal_length > 0.f); + PASSERT(gamma <= 1.f && gamma >= 1e-5f); + /** The ball center in the camera coordinate system. */ + float3 center = draw_info.ray_center_norm * draw_info.t_center; + /** The vector from the reference point to the ball center. */ + float3 raydiff; + if (cam.orthogonal_projection) { + center = rotate( + center, + cam.pixel_dir_x / length(cam.pixel_dir_x), + cam.pixel_dir_y / length(cam.pixel_dir_y), + cam.sensor_dir_z); + raydiff = + make_float3( // TODO: make offset consistent with `get_screen_area`. + center.x - + (projected_ray.x - + static_cast(cam.aperture_width) * .5f) * + (2.f * cam.half_pixel_size), + center.y - + (projected_ray.y - + static_cast(cam.aperture_height) * .5f) * + (2.f * cam.half_pixel_size), + 0.f); + } else { + /** The reference point on the ray; the point in the same distance + * from the camera as the ball center, but along the ray. + */ + const float3 rayref = ray_dir_norm * draw_info.t_center; + raydiff = center - rayref; + } + /** The closeness of the reference point to ball center in world coords. + * + * In [0., radius]. + */ + const float closeness_world = length(raydiff); + /** The reciprocal radius. */ + const float radius_rcp = FRCP(draw_info.radius); + /** The closeness factor normalized with the ball radius. + * + * In [0., 1.]. 
+ */ + float closeness = FSATURATE(FMA(-closeness_world, radius_rcp, 1.f)); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_DRAW_PIX, + "drawprep %u|center: %.9f, %.9f, %.9f. raydiff: %.9f, " + "%.9f, %.9f. closeness_world: %.9f. closeness: %.9f\n", + idx, + center.x, + center.y, + center.z, + raydiff.x, + raydiff.y, + raydiff.z, + closeness_world, + closeness); + /** Whether this is the 'center pixel' for this ball, the pixel that + * is closest to its projected center. This information is used to + * make sure to draw 'tiny' spheres with less than one pixel in + * projected size. + */ + bool ray_through_center_pixel; + float projected_radius, projected_x, projected_y; + if (cam.orthogonal_projection) { + projected_x = center.x / (2.f * cam.half_pixel_size) + + (static_cast(cam.aperture_width) - 1.f) / 2.f; + projected_y = center.y / (2.f * cam.half_pixel_size) + + (static_cast(cam.aperture_height) - 1.f) / 2.f; + projected_radius = draw_info.radius / (2.f * cam.half_pixel_size); + ray_through_center_pixel = + (FABS(FSUB(projected_x, projected_ray.x)) < 0.5f + FEPS && + FABS(FSUB(projected_y, projected_ray.y)) < 0.5f + FEPS); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_DRAW_PIX, + "drawprep %u|closeness_world: %.9f. closeness: %.9f. " + "projected (x, y): %.9f, %.9f. projected_ray (x, y): " + "%.9f, %.9f. ray_through_center_pixel: %d.\n", + idx, + closeness_world, + closeness, + projected_x, + projected_y, + projected_ray.x, + projected_ray.y, + ray_through_center_pixel); + } else { + // Misusing this variable for half pixel size projected to the depth + // at which the sphere resides. Leave some slack for numerical + // inaccuracy (factor 1.5). 
+ projected_x = FMUL(cam.half_pixel_size * 1.5, draw_info.t_center) * + FRCP(cam.focal_length); + projected_radius = FMUL(draw_info.radius, cam.focal_length) * + FRCP(draw_info.t_center) / (2.f * cam.half_pixel_size); + ray_through_center_pixel = projected_x > closeness_world; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_DRAW_PIX, + "drawprep %u|closeness_world: %.9f. closeness: %.9f. " + "projected half pixel size: %.9f. " + "ray_through_center_pixel: %d.\n", + idx, + closeness_world, + closeness, + projected_x, + ray_through_center_pixel); + } + if (draw_only && draw_info.radius < closeness_world && + !ray_through_center_pixel) { + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_DRAW_PIX, + "drawprep %u|Abandoning since no hit has been detected.\n", + idx); + return false; + } else { + // This is always a hit since we are following the forward execution pass. + // p2 is the closest intersection point with the sphere. + } + if (ray_through_center_pixel && projected_radius < 1.f) { + // Make a tiny sphere visible. + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_DRAW_PIX, + "drawprep %u|Setting closeness to 1 (projected radius: %.9f).\n", + idx, + projected_radius); + closeness = 1.; + } + PASSERT(closeness >= 0.f && closeness <= 1.f); + /** Distance between the camera (`o`) and `p1`, the closest point to the + * ball center along the casted ray. + * + * In [t_center - radius, t_center]. + */ + float o__p1_; + /** The distance from ball center to p1. + * + * In [0., sqrt(t_center ^ 2 - (t_center - radius) ^ 2)]. + */ + float c__p1_; + if (cam.orthogonal_projection) { + o__p1_ = FABS(center.z); + c__p1_ = length(raydiff); + } else { + o__p1_ = dot(center, ray_dir_norm); + /** + * This is being calculated as sqrt(t_center^2 - o__p1_^2) = + * sqrt((t_center + o__p1_) * (t_center - o__p1_)) to avoid + * catastrophic cancellation in floating point representations. 
+ */ + c__p1_ = FSQRT( + (draw_info.t_center + o__p1_) * FMAX(draw_info.t_center - o__p1_, 0.f)); + // PASSERT(o__p1_ >= draw_info.t_center - draw_info.radius); + // Numerical errors lead to too large values. + o__p1_ = FMIN(o__p1_, draw_info.t_center); + // PASSERT(o__p1_ <= draw_info.t_center); + } + /** The distance from the closest point to the sphere center (p1) + * to the closest intersection point (p2). + * + * In [0., radius]. + */ + const float p1__p2_ = + FSQRT((draw_info.radius + c__p1_) * FMAX(draw_info.radius - c__p1_, 0.f)); + PASSERT(p1__p2_ >= 0.f && p1__p2_ <= draw_info.radius); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_DRAW_PIX, + "drawprep %u|o__p1_: %.9f, c__p1_: %.9f, p1__p2_: %.9f.\n", + idx, + o__p1_, + c__p1_, + p1__p2_); + /** The intersection depth of the ray with this ball. + * + * In [t_center - radius, t_center]. + */ + const float intersection_depth = (o__p1_ - p1__p2_); + PASSERT( + cam.orthogonal_projection && + (intersection_depth >= center.z - draw_info.radius && + intersection_depth <= center.z) || + intersection_depth >= draw_info.t_center - draw_info.radius && + intersection_depth <= draw_info.t_center); + /** Normalized distance of the closest intersection point; in [0., 1.]. */ + const float norm_dist = + FMUL(FSUB(intersection_depth, cam.min_dist), cam.norm_fac); + PASSERT(norm_dist >= 0.f && norm_dist <= 1.f); + /** Scaled, normalized distance in [1., 0.] (closest, farthest). */ + const float norm_dist_scaled = FSUB(1.f, norm_dist) / gamma * opacity; + PASSERT(norm_dist_scaled >= 0.f && norm_dist_scaled <= 1.f / gamma); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_DRAW_PIX, + "drawprep %u|intersection_depth: %.9f, norm_dist: %.9f, " + "norm_dist_scaled: %.9f.\n", + idx, + intersection_depth, + norm_dist, + norm_dist_scaled); + float const* const col_ptr = + cam.n_channels > 3 ? 
draw_info.color_union.ptr : &draw_info.first_color; + // The implementation for the numerically stable weighted softmax is based + // on https://arxiv.org/pdf/1805.02867.pdf . + if (draw_only) { + /** The old maximum observed value. */ + const float sm_m_old = *sm_m; + *sm_m = FMAX(*sm_m, norm_dist_scaled); + const float coeff_exp = FEXP(norm_dist_scaled - *sm_m); + PASSERT(isfinite(coeff_exp)); + /** The color coefficient for the ball color; in [0., 1.]. */ + const float coeff = closeness * coeff_exp * opacity; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_DRAW_PIX, + "draw %u|coeff: %.9f. closeness: %.9f. coeff_exp: %.9f. " + "opacity: %.9f.\n", + idx, + coeff, + closeness, + coeff_exp, + opacity); + // Rendering. + if (sm_m_old == *sm_m) { + // Use the fact that exp(0) = 1 to avoid the exp calculation for + // the case that the maximum remains the same (which it should + // most of the time). + *sm_d = FADD(*sm_d, coeff); + for (uint c_id = 0; c_id < cam.n_channels; ++c_id) { + PASSERT(isfinite(result[c_id])); + result[c_id] = FMA(coeff, col_ptr[c_id], result[c_id]); + } + } else { + const float exp_correction = FEXP(sm_m_old - *sm_m); + *sm_d = FMA(*sm_d, exp_correction, coeff); + for (uint c_id = 0; c_id < cam.n_channels; ++c_id) { + PASSERT(isfinite(result[c_id])); + result[c_id] = + FMA(coeff, col_ptr[c_id], FMUL(result[c_id], exp_correction)); + } + } + PASSERT(isfinite(*sm_d)); + *intersection_depth_norm_out = intersection_depth; + // Update the depth threshold. + *depth_threshold = + 1.f - (FLN(*sm_d + FEPS) + *ln_pad_over_1minuspad + *sm_m) * gamma; + *depth_threshold = + FMA(*depth_threshold, FSUB(cam.max_dist, cam.min_dist), cam.min_dist); + } else { + // Gradient computation. + const float coeff_exp = FEXP(norm_dist_scaled - *sm_m); + const float gamma_rcp = FRCP(gamma); + const float radius_sq = FMUL(draw_info.radius, draw_info.radius); + const float coeff = FMAX( + FMIN(closeness * coeff_exp * opacity, *sm_d - FEPS), + 0.f); // in [0., sm_d - FEPS]. 
+ PASSERT(coeff >= 0.f && coeff <= *sm_d); + const float otherw = *sm_d - coeff; // in [FEPS, sm_d]. + const float p1__p2_safe = FMAX(p1__p2_, FEPS); // in [eps, t_center]. + const float cam_range = FSUB(cam.max_dist, cam.min_dist); // in ]0, inf[ + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|pos: %.9f, %.9f, %.9f. pixeldirx: %.9f, %.9f, %.9f. " + "pixeldiry: %.9f, %.9f, %.9f. pixel00center: %.9f, %.9f, %.9f.\n", + idx, + draw_info.ray_center_norm.x * draw_info.t_center, + draw_info.ray_center_norm.y * draw_info.t_center, + draw_info.ray_center_norm.z * draw_info.t_center, + cam.pixel_dir_x.x, + cam.pixel_dir_x.y, + cam.pixel_dir_x.z, + cam.pixel_dir_y.x, + cam.pixel_dir_y.y, + cam.pixel_dir_y.z, + cam.pixel_0_0_center.x, + cam.pixel_0_0_center.y, + cam.pixel_0_0_center.z); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|ray_dir: %.9f, %.9f, %.9f. " + "ray_dir_norm: %.9f, %.9f, %.9f. " + "draw_info.ray_center_norm: %.9f, %.9f, %.9f.\n", + idx, + ray_dir->x, + ray_dir->y, + ray_dir->z, + ray_dir_norm.x, + ray_dir_norm.y, + ray_dir_norm.z, + draw_info.ray_center_norm.x, + draw_info.ray_center_norm.y, + draw_info.ray_center_norm.z); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|coeff_exp: %.9f. " + "norm_dist_scaled: %.9f. cam.norm_fac: %f.\n", + idx, + coeff_exp, + norm_dist_scaled, + cam.norm_fac); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|p1__p2_: %.9f. p1__p2_safe: %.9f.\n", + idx, + p1__p2_, + p1__p2_safe); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|o__p1_: %.9f. c__p1_: %.9f.\n", + idx, + o__p1_, + c__p1_); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|intersection_depth: %f. norm_dist: %f. " + "coeff: %.9f. closeness: %f. coeff_exp: %f. opacity: " + "%f. 
color: %f, %f, %f.\n", + idx, + intersection_depth, + norm_dist, + coeff, + closeness, + coeff_exp, + opacity, + draw_info.first_color, + draw_info.color_union.color[0], + draw_info.color_union.color[1]); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|t_center: %.9f. " + "radius: %.9f. max_dist: %f. min_dist: %f. gamma: %f.\n", + idx, + draw_info.t_center, + draw_info.radius, + cam.max_dist, + cam.min_dist, + gamma); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|sm_d: %f. sm_m: %f. grad_pix (first three): %f, %f, %f.\n", + idx, + *sm_d, + *sm_m, + grad_pix[0], + grad_pix[1], + grad_pix[2]); + PULSAR_LOG_DEV_PIX(PULSAR_LOG_GRAD, "grad %u|otherw: %f.\n", idx, otherw); + if (calc_grad_col) { + const float sm_d_norm = FRCP(FMAX(*sm_d, FEPS)); + // First do the multiplication of coeff (in [0., sm_d]) and 1/sm_d. The + // result is a factor in [0., 1.] to be multiplied with the incoming + // gradient. + for (uint c_id = 0; c_id < cam.n_channels; ++c_id) { + ATOMICADD(grad_col + c_id, grad_pix[c_id] * FMUL(coeff, sm_d_norm)); + } + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdcol.x: %f. dresDdcol.x: %f.\n", + idx, + FMUL(coeff, sm_d_norm) * grad_pix[0], + coeff * sm_d_norm); + } + // We disable the computation for too small spheres. + // The comparison is made this way to avoid subtraction of unsigned types. + if (calc_grad_cam || calc_grad_pos || calc_grad_rad || calc_grad_opy) { + //! First find dimDdcoeff. + const float n0 = + otherw * FRCP(FMAX(*sm_d * *sm_d, FEPS)); // in [0., 1. / sm_d]. + PASSERT(isfinite(n0) && n0 >= 0. && n0 <= 1. / *sm_d + 1e2f * FEPS); + // We'll aggergate dimDdcoeff over all the 'color' channels. 
+ float dimDdcoeff = 0.f; + const float otherw_safe_rcp = FRCP(FMAX(otherw, FEPS)); + float othercol; + for (uint c_id = 0; c_id < cam.n_channels; ++c_id) { + othercol = + (result[c_id] * *sm_d - col_ptr[c_id] * coeff) * otherw_safe_rcp; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|othercol[%u]: %.9f.\n", + idx, + c_id, + othercol); + dimDdcoeff += + FMUL(FMUL(grad_pix[c_id], FSUB(col_ptr[c_id], othercol)), n0); + } + PASSERT(isfinite(dimDdcoeff)); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdcoeff: %.9f, n0: %f.\n", + idx, + dimDdcoeff, + n0); + if (calc_grad_opy) { + //! dimDdopacity. + *grad_opy += dimDdcoeff * coeff_exp * closeness * + (1.f + opacity * (1.f - norm_dist) * gamma_rcp); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dcoeffDdopacity: %.9f, dimDdopacity: %.9f.\n", + idx, + coeff_exp * closeness, + dimDdcoeff * coeff_exp * closeness); + } + if (intersect_info->max.x >= intersect_info->min.x + 3 && + intersect_info->max.y >= intersect_info->min.y + 3) { + //! Now find dcoeffDdintersection_depth and dcoeffDdcloseness. + const float dcoeffDdintersection_depth = + -closeness * coeff_exp * opacity * opacity / (gamma * cam_range); + const float dcoeffDdcloseness = coeff_exp * opacity; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dcoeffDdintersection_depth: %.9f. " + "dimDdintersection_depth: %.9f. " + "dcoeffDdcloseness: %.9f. dimDdcloseness: %.9f.\n", + idx, + dcoeffDdintersection_depth, + dimDdcoeff * dcoeffDdintersection_depth, + dcoeffDdcloseness, + dimDdcoeff * dcoeffDdcloseness); + //! Here, the execution paths for orthogonal and pinyhole camera split. + if (cam.orthogonal_projection) { + if (calc_grad_rad) { + //! Find dcoeffDdrad. + float dcoeffDdrad = + dcoeffDdcloseness * (closeness_world / radius_sq) - + dcoeffDdintersection_depth * draw_info.radius / p1__p2_safe; + PASSERT(isfinite(dcoeffDdrad)); + *grad_rad += FMUL(dimDdcoeff, dcoeffDdrad); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdrad: %.9f. 
dcoeffDdrad: %.9f.\n", + idx, + FMUL(dimDdcoeff, dcoeffDdrad), + dcoeffDdrad); + } + if (calc_grad_pos || calc_grad_cam) { + float3 dimDdcenter = raydiff / + p1__p2_safe; /* making it dintersection_depthDdcenter. */ + dimDdcenter.z = sign_dir(center.z); + PASSERT(FABS(center.z) >= cam.min_dist && cam.min_dist >= FEPS); + dimDdcenter *= dcoeffDdintersection_depth; // dcoeffDdcenter + dimDdcenter -= dcoeffDdcloseness * /* dclosenessDdcenter. */ + raydiff * FRCP(FMAX(length(raydiff) * draw_info.radius, FEPS)); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dcoeffDdcenter: %.9f, %.9f, %.9f.\n", + idx, + dimDdcenter.x, + dimDdcenter.y, + dimDdcenter.z); + // Now dcoeffDdcenter is stored in dimDdcenter. + dimDdcenter *= dimDdcoeff; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdcenter: %.9f, %.9f, %.9f.\n", + idx, + dimDdcenter.x, + dimDdcenter.y, + dimDdcenter.z); + // Prepare for posglob and cam pos. + const float pixel_size = length(cam.pixel_dir_x); + // pixel_size is the same as length(pixeldiry)! + const float pixel_size_rcp = FRCP(pixel_size); + float3 dcenterDdposglob = + (cam.pixel_dir_x + cam.pixel_dir_y) * pixel_size_rcp + + cam.sensor_dir_z; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dcenterDdposglob: %.9f, %.9f, %.9f.\n", + idx, + dcenterDdposglob.x, + dcenterDdposglob.y, + dcenterDdposglob.z); + if (calc_grad_pos) { + //! dcenterDdposglob. + *grad_pos += dimDdcenter * dcenterDdposglob; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdpos: %.9f, %.9f, %.9f.\n", + idx, + dimDdcenter.x * dcenterDdposglob.x, + dimDdcenter.y * dcenterDdposglob.y, + dimDdcenter.z * dcenterDdposglob.z); + } + if (calc_grad_cam) { + //! Camera. 
+ grad_cam->cam_pos -= dimDdcenter * dcenterDdposglob; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdeye: %.9f, %.9f, %.9f.\n", + idx, + -dimDdcenter.x * dcenterDdposglob.x, + -dimDdcenter.y * dcenterDdposglob.y, + -dimDdcenter.z * dcenterDdposglob.z); + // coord_world + /* + float3 dclosenessDdcoord_world = + raydiff * FRCP(FMAX(draw_info.radius * length(raydiff), FEPS)); + float3 dintersection_depthDdcoord_world = -2.f * raydiff; + */ + float3 dimDdcoord_world = /* dcoeffDdcoord_world */ + dcoeffDdcloseness * raydiff * + FRCP(FMAX(draw_info.radius * length(raydiff), FEPS)) - + dcoeffDdintersection_depth * raydiff / p1__p2_safe; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dcoeffDdcoord_world: %.9f, %.9f, %.9f.\n", + idx, + dimDdcoord_world.x, + dimDdcoord_world.y, + dimDdcoord_world.z); + dimDdcoord_world *= dimDdcoeff; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdcoord_world: %.9f, %.9f, %.9f.\n", + idx, + dimDdcoord_world.x, + dimDdcoord_world.y, + dimDdcoord_world.z); + // The third component of dimDdcoord_world is 0! + PASSERT(dimDdcoord_world.z == 0.f); + float3 coord_world = center - raydiff; + coord_world.z = 0.f; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|coord_world: %.9f, %.9f, %.9f.\n", + idx, + coord_world.x, + coord_world.y, + coord_world.z); + // Do this component-wise to save unnecessary matmul steps. + grad_cam->pixel_dir_x += dimDdcoord_world.x * cam.pixel_dir_x * + coord_world.x * pixel_size_rcp * pixel_size_rcp; + grad_cam->pixel_dir_x += dimDdcoord_world.y * cam.pixel_dir_x * + coord_world.y * pixel_size_rcp * pixel_size_rcp; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdpixel_dir_x|coord_world: %.9f, %.9f, %.9f.\n", + idx, + grad_cam->pixel_dir_x.x, + grad_cam->pixel_dir_x.y, + grad_cam->pixel_dir_x.z); + // dcenterkDdpixel_dir_k. 
+ float3 center_in_pixels = draw_info.ray_center_norm * + draw_info.t_center * pixel_size_rcp; + grad_cam->pixel_dir_x += dimDdcenter.x * + (center_in_pixels - + outer_product_sum(cam.pixel_dir_x) * center_in_pixels * + pixel_size_rcp * pixel_size_rcp); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dcenter0dpixel_dir_x: %.9f, %.9f, %.9f.\n", + idx, + (center_in_pixels - + outer_product_sum(cam.pixel_dir_x) * center_in_pixels * + pixel_size_rcp * pixel_size_rcp) + .x, + (center_in_pixels - + outer_product_sum(cam.pixel_dir_x) * center_in_pixels * + pixel_size_rcp * pixel_size_rcp) + .y, + (center_in_pixels - + outer_product_sum(cam.pixel_dir_x) * center_in_pixels * + pixel_size_rcp * pixel_size_rcp) + .z); + grad_cam->pixel_dir_y += dimDdcenter.y * + (center_in_pixels - + outer_product_sum(cam.pixel_dir_y) * center_in_pixels * + pixel_size_rcp * pixel_size_rcp); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dcenter1dpixel_dir_y: %.9f, %.9f, %.9f.\n", + idx, + (center_in_pixels - + outer_product_sum(cam.pixel_dir_y) * center_in_pixels * + pixel_size_rcp * pixel_size_rcp) + .x, + (center_in_pixels - + outer_product_sum(cam.pixel_dir_y) * center_in_pixels * + pixel_size_rcp * pixel_size_rcp) + .y, + (center_in_pixels - + outer_product_sum(cam.pixel_dir_y) * center_in_pixels * + pixel_size_rcp * pixel_size_rcp) + .z); + // dcenterzDdpixel_dir_k. 
+ float sensordirz_norm_rcp = FRCP( + FMAX(length(cross(cam.pixel_dir_y, cam.pixel_dir_x)), FEPS)); + grad_cam->pixel_dir_x += dimDdcenter.z * + (dot(center, cam.sensor_dir_z) * + cross(cam.pixel_dir_y, cam.sensor_dir_z) - + cross(cam.pixel_dir_y, center)) * + sensordirz_norm_rcp; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dcenterzDdpixel_dir_x: %.9f, %.9f, %.9f.\n", + idx, + ((dot(center, cam.sensor_dir_z) * + cross(cam.pixel_dir_y, cam.sensor_dir_z) - + cross(cam.pixel_dir_y, center)) * + sensordirz_norm_rcp) + .x, + ((dot(center, cam.sensor_dir_z) * + cross(cam.pixel_dir_y, cam.sensor_dir_z) - + cross(cam.pixel_dir_y, center)) * + sensordirz_norm_rcp) + .y, + ((dot(center, cam.sensor_dir_z) * + cross(cam.pixel_dir_y, cam.sensor_dir_z) - + cross(cam.pixel_dir_y, center)) * + sensordirz_norm_rcp) + .z); + grad_cam->pixel_dir_y += dimDdcenter.z * + (dot(center, cam.sensor_dir_z) * + cross(cam.pixel_dir_x, cam.sensor_dir_z) - + cross(cam.pixel_dir_x, center)) * + sensordirz_norm_rcp; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dcenterzDdpixel_dir_y: %.9f, %.9f, %.9f.\n", + idx, + ((dot(center, cam.sensor_dir_z) * + cross(cam.pixel_dir_x, cam.sensor_dir_z) - + cross(cam.pixel_dir_x, center)) * + sensordirz_norm_rcp) + .x, + ((dot(center, cam.sensor_dir_z) * + cross(cam.pixel_dir_x, cam.sensor_dir_z) - + cross(cam.pixel_dir_x, center)) * + sensordirz_norm_rcp) + .y, + ((dot(center, cam.sensor_dir_z) * + cross(cam.pixel_dir_x, cam.sensor_dir_z) - + cross(cam.pixel_dir_x, center)) * + sensordirz_norm_rcp) + .z); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdpixel_dir_x: %.9f, %.9f, %.9f.\n", + idx, + grad_cam->pixel_dir_x.x, + grad_cam->pixel_dir_x.y, + grad_cam->pixel_dir_x.z); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdpixel_dir_y: %.9f, %.9f, %.9f.\n", + idx, + grad_cam->pixel_dir_y.x, + grad_cam->pixel_dir_y.y, + grad_cam->pixel_dir_y.z); + } + } + } else { + if (calc_grad_rad) { + //! Find dcoeffDdrad. 
+ float dcoeffDdrad = + dcoeffDdcloseness * (closeness_world / radius_sq) - + dcoeffDdintersection_depth * draw_info.radius / p1__p2_safe; + PASSERT(isfinite(dcoeffDdrad)); + *grad_rad += FMUL(dimDdcoeff, dcoeffDdrad); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdrad: %.9f. dcoeffDdrad: %.9f.\n", + idx, + FMUL(dimDdcoeff, dcoeffDdrad), + dcoeffDdrad); + } + if (calc_grad_pos || calc_grad_cam) { + const float3 tmp1 = center - ray_dir_norm * o__p1_; + const float3 tmp1n = tmp1 / p1__p2_safe; + const float ray_dir_normDotRaydiff = dot(ray_dir_norm, raydiff); + const float3 dcoeffDdray = dcoeffDdintersection_depth * + (tmp1 - o__p1_ * tmp1n) / *norm_ray_dir + + dcoeffDdcloseness * + (ray_dir_norm * -ray_dir_normDotRaydiff + raydiff) / + (closeness_world * draw_info.radius) * + (draw_info.t_center / *norm_ray_dir); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dcoeffDdray: %.9f, %.9f, %.9f. dimDdray: " + "%.9f, %.9f, %.9f.\n", + idx, + dcoeffDdray.x, + dcoeffDdray.y, + dcoeffDdray.z, + dimDdcoeff * dcoeffDdray.x, + dimDdcoeff * dcoeffDdray.y, + dimDdcoeff * dcoeffDdray.z); + const float3 dcoeffDdcenter = + dcoeffDdintersection_depth * (ray_dir_norm + tmp1n) + + dcoeffDdcloseness * + (draw_info.ray_center_norm * ray_dir_normDotRaydiff - + raydiff) / + (closeness_world * draw_info.radius); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dcoeffDdcenter: %.9f, %.9f, %.9f. 
" + "dimDdcenter: %.9f, %.9f, %.9f.\n", + idx, + dcoeffDdcenter.x, + dcoeffDdcenter.y, + dcoeffDdcenter.z, + dimDdcoeff * dcoeffDdcenter.x, + dimDdcoeff * dcoeffDdcenter.y, + dimDdcoeff * dcoeffDdcenter.z); + if (calc_grad_pos) { + *grad_pos += dimDdcoeff * dcoeffDdcenter; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdposglob: %.9f, %.9f, %.9f.\n", + idx, + dimDdcoeff * dcoeffDdcenter.x, + dimDdcoeff * dcoeffDdcenter.y, + dimDdcoeff * dcoeffDdcenter.z); + } + if (calc_grad_cam) { + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdeye: %.9f, %.9f, %.9f.\n", + idx, + -dimDdcoeff * (dcoeffDdcenter.x + dcoeffDdray.x), + -dimDdcoeff * (dcoeffDdcenter.y + dcoeffDdray.y), + -dimDdcoeff * (dcoeffDdcenter.z + dcoeffDdray.z)); + grad_cam->cam_pos += -dimDdcoeff * (dcoeffDdcenter + dcoeffDdray); + grad_cam->pixel_0_0_center += dimDdcoeff * dcoeffDdray; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdpixel00centerglob: %.9f, %.9f, %.9f.\n", + idx, + dimDdcoeff * dcoeffDdray.x, + dimDdcoeff * dcoeffDdray.y, + dimDdcoeff * dcoeffDdray.z); + grad_cam->pixel_dir_x += + (dimDdcoeff * static_cast(coord_x)) * dcoeffDdray; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdpixel_dir_x: %.9f, %.9f, %.9f.\n", + idx, + (dimDdcoeff * static_cast(coord_x)) * dcoeffDdray.x, + (dimDdcoeff * static_cast(coord_x)) * dcoeffDdray.y, + (dimDdcoeff * static_cast(coord_x)) * dcoeffDdray.z); + grad_cam->pixel_dir_y += + (dimDdcoeff * static_cast(coord_y)) * dcoeffDdray; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdpixel_dir_y: %.9f, %.9f, %.9f.\n", + idx, + (dimDdcoeff * static_cast(coord_y)) * dcoeffDdray.x, + (dimDdcoeff * static_cast(coord_y)) * dcoeffDdray.y, + (dimDdcoeff * static_cast(coord_y)) * dcoeffDdray.z); + } + } + } + } + } + } + return true; +}; + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git 
a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.fill_bg.instantiate.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.fill_bg.instantiate.h new file mode 100644 index 0000000000000000000000000000000000000000..02830204a6874b8223bde1615fa9ef8ffa4d318c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.fill_bg.instantiate.h @@ -0,0 +1,22 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "./renderer.fill_bg.device.h" + +namespace pulsar { +namespace Renderer { + +template GLOBAL void fill_bg( + Renderer renderer, + const CamInfo norm, + float const* const bg_col_d, + const float gamma, + const uint mode); + +} // namespace Renderer +} // namespace pulsar diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.forward.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.forward.device.h new file mode 100644 index 0000000000000000000000000000000000000000..3f0412f576de4dd77b3f2be6a27ff8ddb144ca74 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.forward.device.h @@ -0,0 +1,302 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_FORWARD_DEVICE_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_FORWARD_DEVICE_H_ + +#include "../global.h" +#include "./camera.device.h" +#include "./commands.h" +#include "./math.h" +#include "./renderer.h" + +namespace pulsar { +namespace Renderer { + +template +void forward( + Renderer* self, + const float* vert_pos, + const float* vert_col, + const float* vert_rad, + const CamInfo& cam, + const float& gamma, + float percent_allowed_difference, + const uint& max_n_hits, + const float* bg_col_d, + const float* opacity_d, + const size_t& num_balls, + const uint& mode, + cudaStream_t stream) { + ARGCHECK(gamma > 0.f && gamma <= 1.f, 6, "gamma must be in [0., 1.]"); + ARGCHECK( + percent_allowed_difference >= 0.f && percent_allowed_difference <= 1.f, + 7, + "percent_allowed_difference must be in [0., 1.]"); + ARGCHECK(max_n_hits >= 1u, 8, "max_n_hits must be >= 1"); + ARGCHECK( + num_balls > 0 && num_balls <= self->max_num_balls, + 9, + ("num_balls must be >0 and <= max num balls! (" + + std::to_string(num_balls) + " vs. " + + std::to_string(self->max_num_balls) + ")") + .c_str()); + ARGCHECK( + cam.film_width == self->cam.film_width && + cam.film_height == self->cam.film_height, + 5, + "cam result width and height must agree"); + ARGCHECK(mode <= 1, 10, "mode must be <= 1!"); + if (percent_allowed_difference > 1.f - FEPS) { + LOG(WARNING) << "percent_allowed_difference > " << (1.f - FEPS) + << "! Clamping to " << (1.f - FEPS) << "."; + percent_allowed_difference = 1.f - FEPS; + } + LOG_IF(INFO, PULSAR_LOG_RENDER) << "Rendering forward pass..."; + // Update camera and transform into a new virtual camera system with + // centered principal point and subsection rendering. 
+ self->cam.eye = cam.eye; + self->cam.pixel_0_0_center = cam.pixel_0_0_center - cam.eye; + self->cam.pixel_dir_x = cam.pixel_dir_x; + self->cam.pixel_dir_y = cam.pixel_dir_y; + self->cam.sensor_dir_z = cam.sensor_dir_z; + self->cam.half_pixel_size = cam.half_pixel_size; + self->cam.focal_length = cam.focal_length; + self->cam.aperture_width = cam.aperture_width; + self->cam.aperture_height = cam.aperture_height; + self->cam.min_dist = cam.min_dist; + self->cam.max_dist = cam.max_dist; + self->cam.norm_fac = cam.norm_fac; + self->cam.principal_point_offset_x = cam.principal_point_offset_x; + self->cam.principal_point_offset_y = cam.principal_point_offset_y; + self->cam.film_border_left = cam.film_border_left; + self->cam.film_border_top = cam.film_border_top; +#ifdef PULSAR_TIMINGS_ENABLED + START_TIME(calc_signature); +#endif + LAUNCH_MAX_PARALLEL_1D( + calc_signature, + num_balls, + stream, + *self, + reinterpret_cast(vert_pos), + vert_col, + vert_rad, + num_balls); + CHECKLAUNCH(); +#ifdef PULSAR_TIMINGS_ENABLED + STOP_TIME(calc_signature); + START_TIME(sort); +#endif + SORT_ASCENDING_WS( + self->min_depth_d, + self->min_depth_sorted_d, + self->ids_d, + self->ids_sorted_d, + num_balls, + self->workspace_d, + self->workspace_size, + stream); + CHECKLAUNCH(); + SORT_ASCENDING_WS( + self->min_depth_d, + self->min_depth_sorted_d, + self->ii_d, + self->ii_sorted_d, + num_balls, + self->workspace_d, + self->workspace_size, + stream); + CHECKLAUNCH(); + SORT_ASCENDING_WS( + self->min_depth_d, + self->min_depth_sorted_d, + self->di_d, + self->di_sorted_d, + num_balls, + self->workspace_d, + self->workspace_size, + stream); + CHECKLAUNCH(); +#ifdef PULSAR_TIMINGS_ENABLED + STOP_TIME(sort); + START_TIME(minmax); +#endif + IntersectInfo pixel_minmax; + pixel_minmax.min.x = MAX_USHORT; + pixel_minmax.min.y = MAX_USHORT; + pixel_minmax.max.x = 0; + pixel_minmax.max.y = 0; + REDUCE_WS( + self->ii_sorted_d, + self->min_max_pixels_d, + num_balls, + IntersectInfoMinMax(), + 
pixel_minmax, + self->workspace_d, + self->workspace_size, + stream); + COPY_DEV_HOST(&pixel_minmax, self->min_max_pixels_d, IntersectInfo, 1); + LOG_IF(INFO, PULSAR_LOG_RENDER) + << "Region with pixels to render: " << pixel_minmax.min.x << ":" + << pixel_minmax.max.x << " (x), " << pixel_minmax.min.y << ":" + << pixel_minmax.max.y << " (y)."; +#ifdef PULSAR_TIMINGS_ENABLED + STOP_TIME(minmax); + START_TIME(render); +#endif + MEMSET( + self->result_d, + 0, + float, + self->cam.film_width * self->cam.film_height * self->cam.n_channels, + stream); + MEMSET( + self->forw_info_d, + 0, + float, + self->cam.film_width * self->cam.film_height * (3 + 2 * self->n_track), + stream); + if (pixel_minmax.max.y > pixel_minmax.min.y && + pixel_minmax.max.x > pixel_minmax.min.x) { + PASSERT( + pixel_minmax.min.x >= static_cast(self->cam.film_border_left) && + pixel_minmax.min.x < + static_cast( + self->cam.film_border_left + self->cam.film_width) && + pixel_minmax.max.x <= + static_cast( + self->cam.film_border_left + self->cam.film_width) && + pixel_minmax.min.y >= static_cast(self->cam.film_border_top) && + pixel_minmax.min.y < + static_cast( + self->cam.film_border_top + self->cam.film_height) && + pixel_minmax.max.y <= + static_cast( + self->cam.film_border_top + self->cam.film_height)); + // Cut the image in 3x3 regions. + int y_step = RENDER_BLOCK_SIZE * + iDivCeil(pixel_minmax.max.y - pixel_minmax.min.y, + 3u * RENDER_BLOCK_SIZE); + int x_step = RENDER_BLOCK_SIZE * + iDivCeil(pixel_minmax.max.x - pixel_minmax.min.x, + 3u * RENDER_BLOCK_SIZE); + LOG_IF(INFO, PULSAR_LOG_RENDER) << "Using image slices of size " << x_step + << ", " << y_step << " (W, H)."; + for (int y_min = pixel_minmax.min.y; y_min < pixel_minmax.max.y; + y_min += y_step) { + for (int x_min = pixel_minmax.min.x; x_min < pixel_minmax.max.x; + x_min += x_step) { + // Create region selection. 
+ LAUNCH_MAX_PARALLEL_1D( + create_selector, + num_balls, + stream, + self->ii_sorted_d, + num_balls, + x_min, + x_min + x_step, + y_min, + y_min + y_step, + self->region_flags_d); + CHECKLAUNCH(); + SELECT_FLAGS_WS( + self->region_flags_d, + self->ii_sorted_d, + self->ii_d, + self->num_selected_d, + num_balls, + self->workspace_d, + self->workspace_size, + stream); + CHECKLAUNCH(); + SELECT_FLAGS_WS( + self->region_flags_d, + self->di_sorted_d, + self->di_d, + self->num_selected_d, + num_balls, + self->workspace_d, + self->workspace_size, + stream); + CHECKLAUNCH(); + SELECT_FLAGS_WS( + self->region_flags_d, + self->ids_sorted_d, + self->ids_d, + self->num_selected_d, + num_balls, + self->workspace_d, + self->workspace_size, + stream); + CHECKLAUNCH(); + LAUNCH_PARALLEL_2D( + render, + x_step, + y_step, + RENDER_BLOCK_SIZE, + RENDER_BLOCK_SIZE, + stream, + self->num_selected_d, + self->ii_d, + self->di_d, + self->min_depth_d, + self->ids_d, + opacity_d, + self->cam, + gamma, + percent_allowed_difference, + max_n_hits, + bg_col_d, + mode, + x_min, + y_min, + x_step, + y_step, + self->result_d, + self->forw_info_d, + self->n_track); + CHECKLAUNCH(); + } + } + } + if (mode == 0) { + LAUNCH_MAX_PARALLEL_2D( + fill_bg, + static_cast(self->cam.film_width), + static_cast(self->cam.film_height), + stream, + *self, + self->cam, + bg_col_d, + gamma, + mode); + CHECKLAUNCH(); + } +#ifdef PULSAR_TIMINGS_ENABLED + STOP_TIME(render); + float time_ms; + // This blocks the result and prevents batch-processing from parallelizing. 
+ GET_TIME(calc_signature, &time_ms); + std::cout << "Time for signature calculation: " << time_ms << " ms" + << std::endl; + GET_TIME(sort, &time_ms); + std::cout << "Time for sorting: " << time_ms << " ms" << std::endl; + GET_TIME(minmax, &time_ms); + std::cout << "Time for minmax pixel calculation: " << time_ms << " ms" + << std::endl; + GET_TIME(render, &time_ms); + std::cout << "Time for rendering: " << time_ms << " ms" << std::endl; +#endif + LOG_IF(INFO, PULSAR_LOG_RENDER) << "Forward pass complete."; +} + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.forward.instantiate.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.forward.instantiate.h new file mode 100644 index 0000000000000000000000000000000000000000..7f57bc8681b7c7f1356f3c3e134595ab2d1955f0 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.forward.instantiate.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "./renderer.forward.device.h" + +namespace pulsar { +namespace Renderer { + +template void forward( + Renderer* self, + const float* vert_pos, + const float* vert_col, + const float* vert_rad, + const CamInfo& cam, + const float& gamma, + float percent_allowed_difference, + const uint& max_n_hits, + const float* bg_col_d, + const float* opacity_d, + const size_t& num_balls, + const uint& mode, + cudaStream_t stream); + +} // namespace Renderer +} // namespace pulsar diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.get_screen_area.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.get_screen_area.device.h new file mode 100644 index 0000000000000000000000000000000000000000..1a85a1bd20cfa0773e395163871ea5a7a8b39347 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.get_screen_area.device.h @@ -0,0 +1,144 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_GET_SCREEN_AREA_DEVICE_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_GET_SCREEN_AREA_DEVICE_H_ + +#include "../global.h" +#include "./camera.device.h" +#include "./commands.h" +#include "./math.h" + +namespace pulsar { +namespace Renderer { + +/** + * Find the closest enclosing screen area rectangle in pixels that encloses a + * ball. + * + * The method returns the two x and the two y values of the boundaries. They + * are not ordered yet and you need to find min and max for the left/right and + * lower/upper boundary. + * + * The return values are floats and need to be rounded appropriately. 
+ */ +INLINE DEVICE bool get_screen_area( + const float3& ball_center_cam, + const float3& ray_center_norm, + const float& vert_rad, + const CamInfo& cam, + const uint& idx, + /* Out variables. */ + float* x_1, + float* x_2, + float* y_1, + float* y_2) { + float cos_alpha = dot(cam.sensor_dir_z, ray_center_norm); + float2 o__c_, alpha, theta; + if (cos_alpha < EPS) { + PULSAR_LOG_DEV( + PULSAR_LOG_CALC_SIGNATURE, + "signature %d|ball not visible. cos_alpha: %.9f.\n", + idx, + cos_alpha); + // No intersection, ball won't be visible. + return false; + } + // Multiply the direction vector with the camera rotation matrix + // to have the optical axis being the canonical z vector (0, 0, 1). + // TODO: optimize. + const float3 ball_center_cam_rot = rotate( + ball_center_cam, + cam.pixel_dir_x / length(cam.pixel_dir_x), + cam.pixel_dir_y / length(cam.pixel_dir_y), + cam.sensor_dir_z); + PULSAR_LOG_DEV( + PULSAR_LOG_CALC_SIGNATURE, + "signature %d|ball_center_cam_rot: %f, %f, %f.\n", + idx, + ball_center_cam.x, + ball_center_cam.y, + ball_center_cam.z); + const float pixel_size_norm_fac = FRCP(2.f * cam.half_pixel_size); + const float optical_offset_x = + (static_cast(cam.aperture_width) - 1.f) * .5f; + const float optical_offset_y = + (static_cast(cam.aperture_height) - 1.f) * .5f; + if (cam.orthogonal_projection) { + *x_1 = + FMA(ball_center_cam_rot.x - vert_rad, + pixel_size_norm_fac, + optical_offset_x); + *x_2 = + FMA(ball_center_cam_rot.x + vert_rad, + pixel_size_norm_fac, + optical_offset_x); + *y_1 = + FMA(ball_center_cam_rot.y - vert_rad, + pixel_size_norm_fac, + optical_offset_y); + *y_2 = + FMA(ball_center_cam_rot.y + vert_rad, + pixel_size_norm_fac, + optical_offset_y); + return true; + } else { + o__c_.x = FMAX( + FSQRT( + ball_center_cam_rot.x * ball_center_cam_rot.x + + ball_center_cam_rot.z * ball_center_cam_rot.z), + FEPS); + o__c_.y = FMAX( + FSQRT( + ball_center_cam_rot.y * ball_center_cam_rot.y + + ball_center_cam_rot.z * ball_center_cam_rot.z), + 
FEPS); + PULSAR_LOG_DEV( + PULSAR_LOG_CALC_SIGNATURE, + "signature %d|o__c_: %f, %f.\n", + idx, + o__c_.x, + o__c_.y); + alpha.x = sign_dir(ball_center_cam_rot.x) * + acos(FMIN(FMAX(ball_center_cam_rot.z / o__c_.x, -1.f), 1.f)); + alpha.y = -sign_dir(ball_center_cam_rot.y) * + acos(FMIN(FMAX(ball_center_cam_rot.z / o__c_.y, -1.f), 1.f)); + theta.x = asin(FMIN(FMAX(vert_rad / o__c_.x, -1.f), 1.f)); + theta.y = asin(FMIN(FMAX(vert_rad / o__c_.y, -1.f), 1.f)); + PULSAR_LOG_DEV( + PULSAR_LOG_CALC_SIGNATURE, + "signature %d|alpha.x: %f, alpha.y: %f, theta.x: %f, theta.y: %f.\n", + idx, + alpha.x, + alpha.y, + theta.x, + theta.y); + *x_1 = tan(alpha.x - theta.x) * cam.focal_length; + *x_2 = tan(alpha.x + theta.x) * cam.focal_length; + *y_1 = tan(alpha.y - theta.y) * cam.focal_length; + *y_2 = tan(alpha.y + theta.y) * cam.focal_length; + PULSAR_LOG_DEV( + PULSAR_LOG_CALC_SIGNATURE, + "signature %d|in sensor plane: x_1: %f, x_2: %f, y_1: %f, y_2: %f.\n", + idx, + *x_1, + *x_2, + *y_1, + *y_2); + *x_1 = FMA(*x_1, pixel_size_norm_fac, optical_offset_x); + *x_2 = FMA(*x_2, pixel_size_norm_fac, optical_offset_x); + *y_1 = FMA(*y_1, -pixel_size_norm_fac, optical_offset_y); + *y_2 = FMA(*y_2, -pixel_size_norm_fac, optical_offset_y); + return true; + } +}; + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.h new file mode 100644 index 0000000000000000000000000000000000000000..d6755ee91887b8f6316563b03cee9c524a6f7315 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.h @@ -0,0 +1,468 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. 
+ * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_H_ + +#include + +#include "../global.h" +#include "./camera.h" + +namespace pulsar { +namespace Renderer { + +//! Remember to order struct members from larger size to smaller size +//! to avoid padding (for more info, see for example here: +//! http://www.catb.org/esr/structure-packing/). + +/** + * This is the information that's needed to do a fast screen point + * intersection with one of the balls. + * + * Aim to keep this below 8 bytes (256 bytes per cache-line / 32 threads in a + * warp = 8 bytes per thread). + */ +struct IntersectInfo { + ushort2 min; /** minimum x, y in pixel coordinates. */ + ushort2 max; /** maximum x, y in pixel coordinates. */ +}; +static_assert( + sizeof(IntersectInfo) == 8, + "The compiled size of `IntersectInfo` is wrong."); + +/** + * Reduction operation to find the limits of multiple IntersectInfo objects. + */ +struct IntersectInfoMinMax { + IHD IntersectInfo + operator()(const IntersectInfo& a, const IntersectInfo& b) const { + // Treat the special case of an invalid intersect info object or one for + // a ball out of bounds. + if (b.max.x == MAX_USHORT && b.min.x == MAX_USHORT && + b.max.y == MAX_USHORT && b.min.y == MAX_USHORT) { + return a; + } + if (a.max.x == MAX_USHORT && a.min.x == MAX_USHORT && + a.max.y == MAX_USHORT && a.min.y == MAX_USHORT) { + return b; + } + IntersectInfo result; + result.min.x = std::min(a.min.x, b.min.x); + result.min.y = std::min(a.min.y, b.min.y); + result.max.x = std::max(a.max.x, b.max.x); + result.max.y = std::max(a.max.y, b.max.y); + return result; + } +}; + +/** + * All information that's needed to draw a ball. + * + * It's necessary to keep this information in float (not half) format, + * because the loss in accuracy would be too high and lead to artifacts. 
+ */ +struct DrawInfo { + float3 ray_center_norm; /** Ray to the ball center, normalized. */ + /** Ball color. + * + * This might be the full color in the case of n_channels <= 3. Otherwise, + * a pointer to the original 'color' data is stored in the following union. + */ + float first_color; + union { + float color[2]; + float* ptr; + } color_union; + float t_center; /** Distance from the camera to the ball center. */ + float radius; /** Ball radius. */ +}; +static_assert( + sizeof(DrawInfo) == 8 * 4, + "The compiled size of `DrawInfo` is wrong."); + +/** + * An object to collect all associated data with the renderer. + * + * The `_d` suffixed pointers point to memory 'on-device', potentially on the + * GPU. All other variables are expected to point to CPU memory. + */ +struct Renderer { + /** Dummy initializer to make sure all pointers are set to NULL to + * be safe for the device-specific 'construct' and 'destruct' methods. + */ + inline Renderer() { + max_num_balls = 0; + result_d = NULL; + min_depth_d = NULL; + min_depth_sorted_d = NULL; + ii_d = NULL; + ii_sorted_d = NULL; + ids_d = NULL; + ids_sorted_d = NULL; + workspace_d = NULL; + di_d = NULL; + di_sorted_d = NULL; + region_flags_d = NULL; + num_selected_d = NULL; + forw_info_d = NULL; + grad_pos_d = NULL; + grad_col_d = NULL; + grad_rad_d = NULL; + grad_cam_d = NULL; + grad_opy_d = NULL; + grad_cam_buf_d = NULL; + n_grad_contributions_d = NULL; + }; + /** The camera for this renderer. In world-coordinates. */ + CamInfo cam; + /** + * The maximum amount of balls the renderer can handle. Resources are + * pre-allocated to account for this size. Less than this amount of balls + * can be rendered, but not more. + */ + int max_num_balls; + /** The result buffer. */ + float* result_d; + /** Closest possible intersection depth per sphere w.r.t. the camera. */ + float* min_depth_d; + /** Closest possible intersection depth per sphere, ordered ascending. 
*/ + float* min_depth_sorted_d; + /** The intersect infos per sphere. */ + IntersectInfo* ii_d; + /** The intersect infos per sphere, ordered by their closest possible + * intersection depth (asc.). */ + IntersectInfo* ii_sorted_d; + /** Original sphere IDs. */ + int* ids_d; + /** Original sphere IDs, ordered by their closest possible intersection depth + * (asc.). */ + int* ids_sorted_d; + /** Workspace for CUB routines. */ + char* workspace_d; + /** Workspace size for CUB routines. */ + size_t workspace_size; + /** The draw information structures for each sphere. */ + DrawInfo* di_d; + /** The draw information structures sorted by closest possible intersection + * depth (asc.). */ + DrawInfo* di_sorted_d; + /** Region association buffer. */ + char* region_flags_d; + /** Num spheres in the current region. */ + size_t* num_selected_d; + /** Pointer to information from the forward pass. */ + float* forw_info_d; + /** Struct containing information about the min max pixels that contain + * rendered information in the image. */ + IntersectInfo* min_max_pixels_d; + /** Gradients w.r.t. position. */ + float3* grad_pos_d; + /** Gradients w.r.t. color. */ + float* grad_col_d; + /** Gradients w.r.t. radius. */ + float* grad_rad_d; + /** Gradients w.r.t. camera parameters. */ + float* grad_cam_d; + /** Gradients w.r.t. opacity. */ + float* grad_opy_d; + /** Camera gradient information by sphere. + * + * Here, every sphere's contribution to the camera gradients is stored. It is + * aggregated and written to grad_cam_d in a separate step. This avoids write + * conflicts when processing the spheres. + */ + CamGradInfo* grad_cam_buf_d; + /** Total of all gradient contributions for this image. */ + int* n_grad_contributions_d; + /** The number of spheres to track for backpropagation. */ + int n_track; +}; + +inline bool operator==(const Renderer& a, const Renderer& b) { + return a.cam == b.cam && a.max_num_balls == b.max_num_balls; +} + +/** + * Construct a renderer. 
+ */ +template +void construct( + Renderer* self, + const size_t& max_num_balls, + const int& width, + const int& height, + const bool& orthogonal_projection, + const bool& right_handed_system, + const float& background_normalization_depth, + const uint& n_channels, + const uint& n_track); + +/** + * Destruct the renderer and free the associated memory. + */ +template +void destruct(Renderer* self); + +/** + * Create a selection of points inside a rectangle. + * + * This write boolen values into `region_flags_d', which can + * for example be used by a CUB function to extract the selection. + */ +template +GLOBAL void create_selector( + IntersectInfo const* const RESTRICT ii_sorted_d, + const uint num_balls, + const int min_x, + const int max_x, + const int min_y, + const int max_y, + /* Out variables. */ + char* RESTRICT region_flags_d); + +/** + * Calculate a signature for a ball. + * + * Populate the `ids_d`, `ii_d`, `di_d` and `min_depth_d` fields of the + * renderer. For spheres not visible in the image, sets the id field to -1, + * min_depth_d to MAX_FLOAT and the ii_d.min.x fields to MAX_USHORT. + */ +template +GLOBAL void calc_signature( + Renderer renderer, + float3 const* const RESTRICT vert_poss, + float const* const RESTRICT vert_cols, + float const* const RESTRICT vert_rads, + const uint num_balls); + +/** + * The block size for rendering. + * + * This should be as large as possible, but is limited due to the amount + * of variables we use and the memory required per thread. + */ +#define RENDER_BLOCK_SIZE 16 +/** + * The buffer size of spheres to be loaded and analyzed for relevance. + * + * This must be at least RENDER_BLOCK_SIZE * RENDER_BLOCK_SIZE so that + * for every iteration through the loading loop every thread could add a + * 'hit' to the buffer. + */ +#define RENDER_BUFFER_SIZE RENDER_BLOCK_SIZE* RENDER_BLOCK_SIZE * 2 +/** + * The threshold after which the spheres that are in the render buffer + * are rendered and the buffer is flushed. 
+ * + * Must be less than RENDER_BUFFER_SIZE. + */ +#define RENDER_BUFFER_LOAD_THRESH 16 * 4 + +/** + * The render function. + * + * Assumptions: + * * the focal length is appropriately chosen, + * * ray_dir_norm.z is > EPS. + * * to be completed... + */ +template +GLOBAL void render( + size_t const* const RESTRICT + num_balls, /** Number of balls relevant for this pass. */ + IntersectInfo const* const RESTRICT ii_d, /** Intersect information. */ + DrawInfo const* const RESTRICT di_d, /** Draw information. */ + float const* const RESTRICT min_depth_d, /** Minimum depth per sphere. */ + int const* const RESTRICT id_d, /** IDs. */ + float const* const RESTRICT op_d, /** Opacity. */ + const CamInfo cam_norm, /** Camera normalized with all vectors to be in the + * camera coordinate system. + */ + const float gamma, /** Transparency parameter. **/ + const float percent_allowed_difference, /** Maximum allowed + error in color. */ + const uint max_n_hits, + const float* bg_col_d, + const uint mode, + const int x_min, + const int y_min, + const int x_step, + const int y_step, + // Out variables. + float* const RESTRICT result_d, /** The result image. */ + float* const RESTRICT forw_info_d, /** Additional information needed for the + grad computation. */ + // Infrastructure. + const int n_track /** The number of spheres to track. */ +); + +/** + * Makes sure to paint background information. + * + * This is required as a separate post-processing step because certain + * pixels may not be processed during the forward pass if there is no + * possibility for a sphere to be present at their location. + */ +template +GLOBAL void fill_bg( + Renderer renderer, + const CamInfo norm, + float const* const bg_col_d, + const float gamma, + const uint mode); + +/** + * Rendering forward pass. + * + * Takes a renderer and sphere data as inputs and creates a rendering. 
+ */ +template +void forward( + Renderer* self, + const float* vert_pos, + const float* vert_col, + const float* vert_rad, + const CamInfo& cam, + const float& gamma, + float percent_allowed_difference, + const uint& max_n_hits, + const float* bg_col_d, + const float* opacity_d, + const size_t& num_balls, + const uint& mode, + cudaStream_t stream); + +/** + * Normalize the camera gradients by the number of spheres that contributed. + */ +template +GLOBAL void norm_cam_gradients(Renderer renderer); + +/** + * Normalize the sphere gradients. + * + * We're assuming that the samples originate from a Monte Carlo + * sampling process and normalize by number and sphere area. + */ +template +GLOBAL void norm_sphere_gradients(Renderer renderer, const int num_balls); + +#define GRAD_BLOCK_SIZE 16 +/** Calculate the gradients. + */ +template +GLOBAL void calc_gradients( + const CamInfo cam, /** Camera in world coordinates. */ + float const* const RESTRICT grad_im, /** The gradient image. */ + const float + gamma, /** The transparency parameter used in the forward pass. */ + float3 const* const RESTRICT vert_poss, /** Vertex position vector. */ + float const* const RESTRICT vert_cols, /** Vertex color vector. */ + float const* const RESTRICT vert_rads, /** Vertex radius vector. */ + float const* const RESTRICT opacity, /** Vertex opacity. */ + const uint num_balls, /** Number of balls. */ + float const* const RESTRICT result_d, /** Result image. */ + float const* const RESTRICT forw_info_d, /** Forward pass info. */ + DrawInfo const* const RESTRICT di_d, /** Draw information. */ + IntersectInfo const* const RESTRICT ii_d, /** Intersect information. */ + // Mode switches. + const bool calc_grad_pos, + const bool calc_grad_col, + const bool calc_grad_rad, + const bool calc_grad_cam, + const bool calc_grad_opy, + // Out variables. + float* const RESTRICT grad_rad_d, /** Radius gradients. */ + float* const RESTRICT grad_col_d, /** Color gradients. 
*/ + float3* const RESTRICT grad_pos_d, /** Position gradients. */ + CamGradInfo* const RESTRICT grad_cam_buf_d, /** Camera gradient buffer. */ + float* const RESTRICT grad_opy_d, /** Opacity gradient buffer. */ + int* const RESTRICT + grad_contributed_d, /** Gradient contribution counter. */ + // Infrastructure. + const int n_track, + const uint offs_x = 0, + const uint offs_y = 0); + +/** + * A full backward pass. + * + * Creates the gradients for the given gradient_image and the spheres. + */ +template +void backward( + Renderer* self, + const float* grad_im, + const float* image, + const float* forw_info, + const float* vert_pos, + const float* vert_col, + const float* vert_rad, + const CamInfo& cam, + const float& gamma, + float percent_allowed_difference, + const uint& max_n_hits, + const float* vert_opy, + const size_t& num_balls, + const uint& mode, + const bool& dif_pos, + const bool& dif_col, + const bool& dif_rad, + const bool& dif_cam, + const bool& dif_opy, + cudaStream_t stream); + +/** + * A debug backward pass. + * + * This is a function to debug the gradient calculation. It calculates the + * gradients for exactly one pixel (set with pos_x and pos_y) without averaging. 
+ * + * *Uses only the first sphere for camera gradient calculation!* + */ +template +void backward_dbg( + Renderer* self, + const float* grad_im, + const float* image, + const float* forw_info, + const float* vert_pos, + const float* vert_col, + const float* vert_rad, + const CamInfo& cam, + const float& gamma, + float percent_allowed_difference, + const uint& max_n_hits, + const float* vert_opy, + const size_t& num_balls, + const uint& mode, + const bool& dif_pos, + const bool& dif_col, + const bool& dif_rad, + const bool& dif_cam, + const bool& dif_opy, + const uint& pos_x, + const uint& pos_y, + cudaStream_t stream); + +template +void nn( + const float* ref_ptr, + const float* tar_ptr, + const uint& k, + const uint& d, + const uint& n, + float* dist_ptr, + int32_t* inds_ptr, + cudaStream_t stream); + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.norm_cam_gradients.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.norm_cam_gradients.device.h new file mode 100644 index 0000000000000000000000000000000000000000..e1dfd55d0b1363c1d8d38709460e00a75efeef5a --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.norm_cam_gradients.device.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_NORM_CAM_GRADIENTS_DEVICE_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_NORM_CAM_GRADIENTS_DEVICE_H_ + +#include "../global.h" +#include "./camera.device.h" +#include "./commands.h" +#include "./math.h" +#include "./renderer.h" + +namespace pulsar { +namespace Renderer { + +/** + * Normalize the camera gradients by the number of spheres that contributed. + */ +template +GLOBAL void norm_cam_gradients(Renderer renderer) { + GET_PARALLEL_IDX_1D(idx, 1); + CamGradInfo* cgi = reinterpret_cast(renderer.grad_cam_d); + *cgi = *cgi * FRCP(static_cast(*renderer.n_grad_contributions_d)); + END_PARALLEL_NORET(); +}; + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.norm_cam_gradients.instantiate.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.norm_cam_gradients.instantiate.h new file mode 100644 index 0000000000000000000000000000000000000000..98e05a67e470237a9328d7a441e7b700a7ce675d --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.norm_cam_gradients.instantiate.h @@ -0,0 +1,17 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "./renderer.norm_cam_gradients.device.h" + +namespace pulsar { +namespace Renderer { + +template GLOBAL void norm_cam_gradients(Renderer renderer); + +} // namespace Renderer +} // namespace pulsar diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.norm_sphere_gradients.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.norm_sphere_gradients.device.h new file mode 100644 index 0000000000000000000000000000000000000000..37e0eb00a5179911216a5d2827feb83ade487755 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.norm_sphere_gradients.device.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_NORM_SPHERE_GRADIENTS_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_NORM_SPHERE_GRADIENTS_H_ + +#include "../global.h" +#include "./commands.h" +#include "./math.h" +#include "./renderer.h" + +namespace pulsar { +namespace Renderer { + +/** + * Normalize the sphere gradients. + * + * We're assuming that the samples originate from a Monte Carlo + * sampling process and normalize by number and sphere area. + */ +template +GLOBAL void norm_sphere_gradients(Renderer renderer, const int num_balls) { + GET_PARALLEL_IDX_1D(idx, num_balls); + float norm_fac = 0.f; + IntersectInfo ii; + if (renderer.ids_sorted_d[idx] > 0) { + ii = renderer.ii_d[idx]; + // Normalize the sphere gradients as averages. 
+ // This avoids the case that there are small spheres in a scene with still + // un-converged colors whereas the big spheres already converged, just + // because their integrated learning rate is 'higher'. + norm_fac = FRCP(static_cast(renderer.ids_sorted_d[idx])); + } + PULSAR_LOG_DEV_NODE( + PULSAR_LOG_NORMALIZE, + "ids_sorted_d[idx]: %d, norm_fac: %.9f.\n", + renderer.ids_sorted_d[idx], + norm_fac); + renderer.grad_rad_d[idx] *= norm_fac; + for (uint c_idx = 0; c_idx < renderer.cam.n_channels; ++c_idx) { + renderer.grad_col_d[idx * renderer.cam.n_channels + c_idx] *= norm_fac; + } + renderer.grad_pos_d[idx] *= norm_fac; + renderer.grad_opy_d[idx] *= norm_fac; + + if (renderer.ids_sorted_d[idx] > 0) { + // For the camera, we need to be more correct and have the gradients + // be proportional to the area they cover in the image. + // This leads to a formulation very much like in monte carlo integration: + norm_fac = FRCP(static_cast(renderer.ids_sorted_d[idx])) * + (static_cast(ii.max.x) - static_cast(ii.min.x)) * + (static_cast(ii.max.y) - static_cast(ii.min.y)) * + 1e-3f; // for better numerics. + } + renderer.grad_cam_buf_d[idx].cam_pos *= norm_fac; + renderer.grad_cam_buf_d[idx].pixel_0_0_center *= norm_fac; + renderer.grad_cam_buf_d[idx].pixel_dir_x *= norm_fac; + renderer.grad_cam_buf_d[idx].pixel_dir_y *= norm_fac; + // The sphere only contributes to the camera gradients if it is + // large enough in screen space. 
+ if (renderer.ids_sorted_d[idx] > 0 && ii.max.x >= ii.min.x + 3 && + ii.max.y >= ii.min.y + 3) + renderer.ids_sorted_d[idx] = 1; + END_PARALLEL_NORET(); +}; + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.norm_sphere_gradients.instantiate.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.norm_sphere_gradients.instantiate.h new file mode 100644 index 0000000000000000000000000000000000000000..bedcf81611cb20b2b404776f477cb3fe174608d2 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.norm_sphere_gradients.instantiate.h @@ -0,0 +1,19 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "./renderer.norm_sphere_gradients.device.h" + +namespace pulsar { +namespace Renderer { + +template GLOBAL void norm_sphere_gradients( + Renderer renderer, + const int num_balls); + +} // namespace Renderer +} // namespace pulsar diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.render.instantiate.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.render.instantiate.h new file mode 100644 index 0000000000000000000000000000000000000000..9c1f326e63b8b4860137d9f0d0f440896adb2a88 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.render.instantiate.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. 
+ * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_RENDER_INSTANTIATE_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_RENDER_INSTANTIATE_H_ + +#include "./renderer.render.device.h" + +namespace pulsar { +namespace Renderer { +template GLOBAL void render( + size_t const* const RESTRICT + num_balls, /** Number of balls relevant for this pass. */ + IntersectInfo const* const RESTRICT ii_d, /** Intersect information. */ + DrawInfo const* const RESTRICT di_d, /** Draw information. */ + float const* const RESTRICT min_depth_d, /** Minimum depth per sphere. */ + int const* const RESTRICT id_d, /** IDs. */ + float const* const RESTRICT op_d, /** Opacity. */ + const CamInfo cam_norm, /** Camera normalized with all vectors to be in the + * camera coordinate system. + */ + const float gamma, /** Transparency parameter. **/ + const float percent_allowed_difference, /** Maximum allowed + error in color. */ + const uint max_n_hits, + const float* bg_col_d, + const uint mode, + const int x_min, + const int y_min, + const int x_step, + const int y_step, + // Out variables. + float* const RESTRICT result_d, /** The result image. */ + float* const RESTRICT forw_info_d, /** Additional information needed for the + grad computation. */ + const int n_track /** The number of spheres to track for backprop. 
*/ +); +} +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/logging.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/logging.h new file mode 100644 index 0000000000000000000000000000000000000000..63d472257671287156ccf77531c6897beff1fcd2 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/logging.h @@ -0,0 +1,115 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_LOGGING_H_ +#define PULSAR_LOGGING_H_ + +// #define PULSAR_LOGGING_ENABLED +/** + * Enable detailed per-operation timings. + * + * This timing scheme is not appropriate to measure batched calculations. + * Use `PULSAR_TIMINGS_BATCHED_ENABLED` for that. + */ +// #define PULSAR_TIMINGS_ENABLED +/** + * Time batched operations. + */ +// #define PULSAR_TIMINGS_BATCHED_ENABLED +#if defined(PULSAR_TIMINGS_BATCHED_ENABLED) && defined(PULSAR_TIMINGS_ENABLED) +#pragma message("Pulsar|batched and unbatched timings enabled. This will not") +#pragma message("Pulsar|create meaningful results.") +#endif + +#ifdef PULSAR_LOGGING_ENABLED + +// Control logging. +// 0: INFO, 1: WARNING, 2: ERROR, 3: FATAL (Abort after logging). 
+#define CAFFE2_LOG_THRESHOLD 0 +#define PULSAR_LOG_INIT false +#define PULSAR_LOG_FORWARD false +#define PULSAR_LOG_CALC_SIGNATURE false +#define PULSAR_LOG_RENDER false +#define PULSAR_LOG_RENDER_PIX false +#define PULSAR_LOG_RENDER_PIX_X 428 +#define PULSAR_LOG_RENDER_PIX_Y 669 +#define PULSAR_LOG_RENDER_PIX_ALL false +#define PULSAR_LOG_TRACKER_PIX false +#define PULSAR_LOG_TRACKER_PIX_X 428 +#define PULSAR_LOG_TRACKER_PIX_Y 669 +#define PULSAR_LOG_TRACKER_PIX_ALL false +#define PULSAR_LOG_DRAW_PIX false +#define PULSAR_LOG_DRAW_PIX_X 428 +#define PULSAR_LOG_DRAW_PIX_Y 669 +#define PULSAR_LOG_DRAW_PIX_ALL false +#define PULSAR_LOG_BACKWARD false +#define PULSAR_LOG_GRAD false +#define PULSAR_LOG_GRAD_X 509 +#define PULSAR_LOG_GRAD_Y 489 +#define PULSAR_LOG_GRAD_ALL false +#define PULSAR_LOG_NORMALIZE false +#define PULSAR_LOG_NORMALIZE_X 0 +#define PULSAR_LOG_NORMALIZE_ALL false + +#define PULSAR_LOG_DEV(ID, ...) \ + if ((ID)) { \ + printf(__VA_ARGS__); \ + } +#define PULSAR_LOG_DEV_APIX(ID, MSG, ...) \ + if ((ID) && (film_coord_x == (ID##_X) && film_coord_y == (ID##_Y)) || \ + ID##_ALL) { \ + printf( \ + "%u %u (ap %u %u)|" MSG, \ + film_coord_x, \ + film_coord_y, \ + ap_coord_x, \ + ap_coord_y, \ + __VA_ARGS__); \ + } +#define PULSAR_LOG_DEV_PIX(ID, MSG, ...) \ + if ((ID) && (coord_x == (ID##_X) && coord_y == (ID##_Y)) || ID##_ALL) { \ + printf("%u %u|" MSG, coord_x, coord_y, __VA_ARGS__); \ + } +#ifdef __CUDACC__ +#define PULSAR_LOG_DEV_PIXB(ID, MSG, ...) \ + if ((ID) && static_cast(block_area.min.x) <= (ID##_X) && \ + static_cast(block_area.max.x) > (ID##_X) && \ + static_cast(block_area.min.y) <= (ID##_Y) && \ + static_cast(block_area.max.y) > (ID##_Y)) { \ + printf("%u %u|" MSG, coord_x, coord_y, __VA_ARGS__); \ + } +#else +#define PULSAR_LOG_DEV_PIXB(ID, MSG, ...) \ + if ((ID) && coord_x == (ID##_X) && coord_y == (ID##_Y)) { \ + printf("%u %u|" MSG, coord_x, coord_y, __VA_ARGS__); \ + } +#endif +#define PULSAR_LOG_DEV_NODE(ID, MSG, ...) 
\ + if ((ID) && idx == (ID##_X) || (ID##_ALL)) { \ + printf("%u|" MSG, idx, __VA_ARGS__); \ + } + +#else + +#define CAFFE2_LOG_THRESHOLD 2 + +#define PULSAR_LOG_RENDER false +#define PULSAR_LOG_INIT false +#define PULSAR_LOG_FORWARD false +#define PULSAR_LOG_BACKWARD false +#define PULSAR_LOG_TRACKER_PIX false + +#define PULSAR_LOG_DEV(...) +#define PULSAR_LOG_DEV_APIX(...) +#define PULSAR_LOG_DEV_PIX(...) +#define PULSAR_LOG_DEV_PIXB(...) +#define PULSAR_LOG_DEV_NODE(...) + +#endif + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/pytorch/camera.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/pytorch/camera.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c3794e7edf90f4af50632ea91bc131bd87fd751f --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/pytorch/camera.cpp @@ -0,0 +1,70 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "./camera.h" +#include "../include/math.h" + +namespace pulsar { +namespace pytorch { + +CamInfo cam_info_from_params( + const torch::Tensor& cam_pos, + const torch::Tensor& pixel_0_0_center, + const torch::Tensor& pixel_vec_x, + const torch::Tensor& pixel_vec_y, + const torch::Tensor& principal_point_offset, + const float& focal_length, + const uint& width, + const uint& height, + const float& min_dist, + const float& max_dist, + const bool& right_handed) { + CamInfo res; + fill_cam_vecs( + cam_pos.detach().cpu(), + pixel_0_0_center.detach().cpu(), + pixel_vec_x.detach().cpu(), + pixel_vec_y.detach().cpu(), + principal_point_offset.detach().cpu(), + right_handed, + &res); + res.half_pixel_size = 0.5f * length(res.pixel_dir_x); + if (length(res.pixel_dir_y) * 0.5f - res.half_pixel_size > EPS) { + throw std::runtime_error("Pixel sizes must agree in x and y direction!"); + } + res.focal_length = focal_length; + res.aperture_width = + width + 2u * static_cast(abs(res.principal_point_offset_x)); + res.aperture_height = + height + 2u * static_cast(abs(res.principal_point_offset_y)); + res.pixel_0_0_center -= + res.pixel_dir_x * static_cast(abs(res.principal_point_offset_x)); + res.pixel_0_0_center -= + res.pixel_dir_y * static_cast(abs(res.principal_point_offset_y)); + res.film_width = width; + res.film_height = height; + res.film_border_left = + static_cast(std::max(0, 2 * res.principal_point_offset_x)); + res.film_border_top = + static_cast(std::max(0, 2 * res.principal_point_offset_y)); + LOG_IF(INFO, PULSAR_LOG_INIT) + << "Aperture width, height: " << res.aperture_width << ", " + << res.aperture_height; + LOG_IF(INFO, PULSAR_LOG_INIT) + << "Film width, height: " << res.film_width << ", " << res.film_height; + LOG_IF(INFO, PULSAR_LOG_INIT) + << "Film border left, top: " << res.film_border_left << ", " + << res.film_border_top; + res.min_dist = min_dist; + res.max_dist = max_dist; + res.norm_fac = 1.f / (max_dist - min_dist); + return res; +}; + +} // 
namespace pytorch +} // namespace pulsar diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/pytorch/camera.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/pytorch/camera.h new file mode 100644 index 0000000000000000000000000000000000000000..9ecd95353ad76efd2760a4a634493917fda7b468 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/pytorch/camera.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_CAMERA_H_ +#define PULSAR_NATIVE_CAMERA_H_ + +#include +#include "../global.h" + +#include "../include/camera.h" + +namespace pulsar { +namespace pytorch { + +inline void fill_cam_vecs( + const torch::Tensor& pos_vec, + const torch::Tensor& pixel_0_0_center, + const torch::Tensor& pixel_dir_x, + const torch::Tensor& pixel_dir_y, + const torch::Tensor& principal_point_offset, + const bool& right_handed, + CamInfo* res) { + res->eye.x = pos_vec.data_ptr()[0]; + res->eye.y = pos_vec.data_ptr()[1]; + res->eye.z = pos_vec.data_ptr()[2]; + res->pixel_0_0_center.x = pixel_0_0_center.data_ptr()[0]; + res->pixel_0_0_center.y = pixel_0_0_center.data_ptr()[1]; + res->pixel_0_0_center.z = pixel_0_0_center.data_ptr()[2]; + res->pixel_dir_x.x = pixel_dir_x.data_ptr()[0]; + res->pixel_dir_x.y = pixel_dir_x.data_ptr()[1]; + res->pixel_dir_x.z = pixel_dir_x.data_ptr()[2]; + res->pixel_dir_y.x = pixel_dir_y.data_ptr()[0]; + res->pixel_dir_y.y = pixel_dir_y.data_ptr()[1]; + res->pixel_dir_y.z = pixel_dir_y.data_ptr()[2]; + auto sensor_dir_z = pixel_dir_y.cross(pixel_dir_x, -1); + sensor_dir_z /= sensor_dir_z.norm(); + if (right_handed) { + sensor_dir_z *= -1.f; + } + 
res->sensor_dir_z.x = sensor_dir_z.data_ptr()[0]; + res->sensor_dir_z.y = sensor_dir_z.data_ptr()[1]; + res->sensor_dir_z.z = sensor_dir_z.data_ptr()[2]; + res->principal_point_offset_x = principal_point_offset.data_ptr()[0]; + res->principal_point_offset_y = principal_point_offset.data_ptr()[1]; +} + +CamInfo cam_info_from_params( + const torch::Tensor& cam_pos, + const torch::Tensor& pixel_0_0_center, + const torch::Tensor& pixel_vec_x, + const torch::Tensor& pixel_vec_y, + const torch::Tensor& principal_point_offset, + const float& focal_length, + const uint& width, + const uint& height, + const float& min_dist, + const float& max_dist, + const bool& right_handed); + +} // namespace pytorch +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/pytorch/renderer.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/pytorch/renderer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4349aea796cfea0a63a5f76f7669816993fe3f2b --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/pytorch/renderer.cpp @@ -0,0 +1,1599 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "./renderer.h" +#include "../include/commands.h" +#include "./camera.h" +#include "./util.h" + +#include +#ifdef WITH_CUDA +#include +#include +#endif + +#ifndef TORCH_CHECK_ARG +// torch <= 1.10 +#define TORCH_CHECK_ARG(cond, argN, ...) 
\ + TORCH_CHECK(cond, "invalid argument ", argN, ": ", __VA_ARGS__) +#endif + +namespace PRE = ::pulsar::Renderer; + +namespace pulsar { +namespace pytorch { + +Renderer::Renderer( + const unsigned int& width, + const unsigned int& height, + const unsigned int& max_n_balls, + const bool& orthogonal_projection, + const bool& right_handed_system, + const float& background_normalization_depth, + const uint& n_channels, + const uint& n_track) { + LOG_IF(INFO, PULSAR_LOG_INIT) << "Initializing renderer."; + TORCH_CHECK_ARG(width > 0, 1, "image width must be > 0!"); + TORCH_CHECK_ARG(height > 0, 2, "image height must be > 0!"); + TORCH_CHECK_ARG(max_n_balls > 0, 3, "max_n_balls must be > 0!"); + TORCH_CHECK_ARG( + background_normalization_depth > 0.f && + background_normalization_depth < 1.f, + 5, + "background_normalization_depth must be in ]0., 1.["); + TORCH_CHECK_ARG(n_channels > 0, 6, "n_channels must be > 0"); + TORCH_CHECK_ARG( + n_track > 0 && n_track <= MAX_GRAD_SPHERES, + 7, + ("n_track must be > 0 and <" + std::to_string(MAX_GRAD_SPHERES) + + ". Is " + std::to_string(n_track) + ".") + .c_str()); + LOG_IF(INFO, PULSAR_LOG_INIT) + << "Image width: " << width << ", height: " << height; + this->renderer_vec.emplace_back(); + this->device_type = c10::DeviceType::CPU; + this->device_index = -1; + PRE::construct( + this->renderer_vec.data(), + max_n_balls, + width, + height, + orthogonal_projection, + right_handed_system, + background_normalization_depth, + n_channels, + n_track); + this->device_tracker = torch::zeros(1); +}; + +Renderer::~Renderer() { + if (this->device_type == c10::DeviceType::CUDA) { +// Can't happen in the case that not compiled with CUDA. 
+#ifdef WITH_CUDA + at::cuda::CUDAGuard device_guard(this->device_tracker.device()); + for (auto nrend : this->renderer_vec) { + PRE::destruct(&nrend); + } +#endif + } else { + for (auto nrend : this->renderer_vec) { + PRE::destruct(&nrend); + } + } +} + +bool Renderer::operator==(const Renderer& rhs) const { + LOG_IF(INFO, PULSAR_LOG_INIT) << "Equality check."; + bool renderer_agrees = (this->renderer_vec[0] == rhs.renderer_vec[0]); + LOG_IF(INFO, PULSAR_LOG_INIT) << " Renderer agrees: " << renderer_agrees; + bool device_agrees = + (this->device_tracker.device() == rhs.device_tracker.device()); + LOG_IF(INFO, PULSAR_LOG_INIT) << " Device agrees: " << device_agrees; + return (renderer_agrees && device_agrees); +}; + +void Renderer::ensure_on_device(torch::Device device, bool /*non_blocking*/) { + TORCH_CHECK_ARG( + device.type() == c10::DeviceType::CUDA || + device.type() == c10::DeviceType::CPU, + 1, + "Only CPU and CUDA device types are supported."); + if (device.type() != this->device_type || + device.index() != this->device_index) { +#ifdef WITH_CUDA + LOG_IF(INFO, PULSAR_LOG_INIT) + << "Transferring render buffers between devices."; + int prev_active; + cudaGetDevice(&prev_active); + if (this->device_type == c10::DeviceType::CUDA) { + LOG_IF(INFO, PULSAR_LOG_INIT) << " Destructing on CUDA."; + cudaSetDevice(this->device_index); + for (auto& nrend : this->renderer_vec) { + PRE::destruct(&nrend); + } + } else { + LOG_IF(INFO, PULSAR_LOG_INIT) << " Destructing on CPU."; + for (auto& nrend : this->renderer_vec) { + PRE::destruct(&nrend); + } + } + if (device.type() == c10::DeviceType::CUDA) { + LOG_IF(INFO, PULSAR_LOG_INIT) << " Constructing on CUDA."; + cudaSetDevice(device.index()); + for (auto& nrend : this->renderer_vec) { + PRE::construct( + &nrend, + this->renderer_vec[0].max_num_balls, + this->renderer_vec[0].cam.film_width, + this->renderer_vec[0].cam.film_height, + this->renderer_vec[0].cam.orthogonal_projection, + this->renderer_vec[0].cam.right_handed, 
+ this->renderer_vec[0].cam.background_normalization_depth, + this->renderer_vec[0].cam.n_channels, + this->n_track()); + } + } else { + LOG_IF(INFO, PULSAR_LOG_INIT) << " Constructing on CPU."; + for (auto& nrend : this->renderer_vec) { + PRE::construct( + &nrend, + this->renderer_vec[0].max_num_balls, + this->renderer_vec[0].cam.film_width, + this->renderer_vec[0].cam.film_height, + this->renderer_vec[0].cam.orthogonal_projection, + this->renderer_vec[0].cam.right_handed, + this->renderer_vec[0].cam.background_normalization_depth, + this->renderer_vec[0].cam.n_channels, + this->n_track()); + } + } + cudaSetDevice(prev_active); + this->device_type = device.type(); + this->device_index = device.index(); +#else + throw std::runtime_error( + "pulsar was built without CUDA " + "but a device move to a CUDA device was initiated."); +#endif + } +}; + +void Renderer::ensure_n_renderers_gte(const size_t& batch_size) { + if (this->renderer_vec.size() < batch_size) { + ptrdiff_t diff = batch_size - this->renderer_vec.size(); + LOG_IF(INFO, PULSAR_LOG_INIT) + << "Increasing render buffers by " << diff + << " to account for batch size " << batch_size; + for (ptrdiff_t i = 0; i < diff; ++i) { + this->renderer_vec.emplace_back(); + if (this->device_type == c10::DeviceType::CUDA) { +#ifdef WITH_CUDA + PRE::construct( + &this->renderer_vec[this->renderer_vec.size() - 1], + this->max_num_balls(), + this->width(), + this->height(), + this->renderer_vec[0].cam.orthogonal_projection, + this->renderer_vec[0].cam.right_handed, + this->renderer_vec[0].cam.background_normalization_depth, + this->renderer_vec[0].cam.n_channels, + this->n_track()); +#endif + } else { + PRE::construct( + &this->renderer_vec[this->renderer_vec.size() - 1], + this->max_num_balls(), + this->width(), + this->height(), + this->renderer_vec[0].cam.orthogonal_projection, + this->renderer_vec[0].cam.right_handed, + this->renderer_vec[0].cam.background_normalization_depth, + this->renderer_vec[0].cam.n_channels, + 
this->n_track()); + } + } + } +} + +std::tuple Renderer::arg_check( + const torch::Tensor& vert_pos, + const torch::Tensor& vert_col, + const torch::Tensor& vert_radii, + const torch::Tensor& cam_pos, + const torch::Tensor& pixel_0_0_center, + const torch::Tensor& pixel_vec_x, + const torch::Tensor& pixel_vec_y, + const torch::Tensor& focal_length, + const torch::Tensor& principal_point_offsets, + const float& gamma, + const float& max_depth, + float& min_depth, + const c10::optional& bg_col, + const c10::optional& opacity, + const float& percent_allowed_difference, + const uint& max_n_hits, + const uint& mode) { + LOG_IF(INFO, PULSAR_LOG_FORWARD || PULSAR_LOG_BACKWARD) << "Arg check."; + size_t batch_size = 1; + size_t n_points; + bool batch_processing = false; + if (vert_pos.ndimension() == 3) { + // Check all parameters adhere batch size. + batch_processing = true; + batch_size = vert_pos.size(0); + TORCH_CHECK_ARG( + vert_col.ndimension() == 3 && + vert_col.size(0) == static_cast(batch_size), + 2, + "vert_col needs to have batch size."); + TORCH_CHECK_ARG( + vert_radii.ndimension() == 2 && + vert_radii.size(0) == static_cast(batch_size), + 3, + "vert_radii must be specified per batch."); + TORCH_CHECK_ARG( + cam_pos.ndimension() == 2 && + cam_pos.size(0) == static_cast(batch_size), + 4, + "cam_pos must be specified per batch and have the correct batch size."); + TORCH_CHECK_ARG( + pixel_0_0_center.ndimension() == 2 && + pixel_0_0_center.size(0) == static_cast(batch_size), + 5, + "pixel_0_0_center must be specified per batch."); + TORCH_CHECK_ARG( + pixel_vec_x.ndimension() == 2 && + pixel_vec_x.size(0) == static_cast(batch_size), + 6, + "pixel_vec_x must be specified per batch."); + TORCH_CHECK_ARG( + pixel_vec_y.ndimension() == 2 && + pixel_vec_y.size(0) == static_cast(batch_size), + 7, + "pixel_vec_y must be specified per batch."); + TORCH_CHECK_ARG( + focal_length.ndimension() == 1 && + focal_length.size(0) == static_cast(batch_size), + 8, + "focal_length 
must be specified per batch."); + TORCH_CHECK_ARG( + principal_point_offsets.ndimension() == 2 && + principal_point_offsets.size(0) == static_cast(batch_size), + 9, + "principal_point_offsets must be specified per batch."); + if (opacity.has_value()) { + TORCH_CHECK_ARG( + opacity.value().ndimension() == 2 && + opacity.value().size(0) == static_cast(batch_size), + 13, + "Opacity needs to be specified batch-wise."); + } + // Check all parameters are for a matching number of points. + n_points = vert_pos.size(1); + TORCH_CHECK_ARG( + vert_col.size(1) == static_cast(n_points), + 2, + ("The number of points for vertex positions (" + + std::to_string(n_points) + ") and vertex colors (" + + std::to_string(vert_col.size(1)) + ") doesn't agree.") + .c_str()); + TORCH_CHECK_ARG( + vert_radii.size(1) == static_cast(n_points), + 3, + ("The number of points for vertex positions (" + + std::to_string(n_points) + ") and vertex radii (" + + std::to_string(vert_col.size(1)) + ") doesn't agree.") + .c_str()); + if (opacity.has_value()) { + TORCH_CHECK_ARG( + opacity.value().size(1) == static_cast(n_points), + 13, + "Opacity needs to be specified per point."); + } + // Check all parameters have the correct last dimension size. 
+ TORCH_CHECK_ARG( + vert_pos.size(2) == 3, + 1, + ("Vertex positions must be 3D (have shape " + + std::to_string(vert_pos.size(2)) + ")!") + .c_str()); + TORCH_CHECK_ARG( + vert_col.size(2) == this->renderer_vec[0].cam.n_channels, + 2, + ("Vertex colors must have the right number of channels (have shape " + + std::to_string(vert_col.size(2)) + ", need " + + std::to_string(this->renderer_vec[0].cam.n_channels) + ")!") + .c_str()); + TORCH_CHECK_ARG( + cam_pos.size(1) == 3, + 4, + ("Camera position must be 3D (has shape " + + std::to_string(cam_pos.size(1)) + ")!") + .c_str()); + TORCH_CHECK_ARG( + pixel_0_0_center.size(1) == 3, + 5, + ("pixel_0_0_center must be 3D (has shape " + + std::to_string(pixel_0_0_center.size(1)) + ")!") + .c_str()); + TORCH_CHECK_ARG( + pixel_vec_x.size(1) == 3, + 6, + ("pixel_vec_x must be 3D (has shape " + + std::to_string(pixel_vec_x.size(1)) + ")!") + .c_str()); + TORCH_CHECK_ARG( + pixel_vec_y.size(1) == 3, + 7, + ("pixel_vec_y must be 3D (has shape " + + std::to_string(pixel_vec_y.size(1)) + ")!") + .c_str()); + TORCH_CHECK_ARG( + principal_point_offsets.size(1) == 2, + 9, + "principal_point_offsets must contain x and y offsets."); + // Ensure enough renderers are available for the batch. + ensure_n_renderers_gte(batch_size); + } else { + // Check all parameters are of correct dimension. 
+ TORCH_CHECK_ARG( + vert_col.ndimension() == 2, 2, "vert_col needs to have dimension 2."); + TORCH_CHECK_ARG( + vert_radii.ndimension() == 1, 3, "vert_radii must have dimension 1."); + TORCH_CHECK_ARG( + cam_pos.ndimension() == 1, 4, "cam_pos must have dimension 1."); + TORCH_CHECK_ARG( + pixel_0_0_center.ndimension() == 1, + 5, + "pixel_0_0_center must have dimension 1."); + TORCH_CHECK_ARG( + pixel_vec_x.ndimension() == 1, 6, "pixel_vec_x must have dimension 1."); + TORCH_CHECK_ARG( + pixel_vec_y.ndimension() == 1, 7, "pixel_vec_y must have dimension 1."); + TORCH_CHECK_ARG( + focal_length.ndimension() == 0, + 8, + "focal_length must have dimension 0."); + TORCH_CHECK_ARG( + principal_point_offsets.ndimension() == 1, + 9, + "principal_point_offsets must have dimension 1."); + if (opacity.has_value()) { + TORCH_CHECK_ARG( + opacity.value().ndimension() == 1, + 13, + "Opacity needs to be specified per sample."); + } + // Check each. + n_points = vert_pos.size(0); + TORCH_CHECK_ARG( + vert_col.size(0) == static_cast(n_points), + 2, + ("The number of points for vertex positions (" + + std::to_string(n_points) + ") and vertex colors (" + + std::to_string(vert_col.size(0)) + ") doesn't agree.") + .c_str()); + TORCH_CHECK_ARG( + vert_radii.size(0) == static_cast(n_points), + 3, + ("The number of points for vertex positions (" + + std::to_string(n_points) + ") and vertex radii (" + + std::to_string(vert_col.size(0)) + ") doesn't agree.") + .c_str()); + if (opacity.has_value()) { + TORCH_CHECK_ARG( + opacity.value().size(0) == static_cast(n_points), + 12, + "Opacity needs to be specified per point."); + } + // Check all parameters have the correct last dimension size. 
+ TORCH_CHECK_ARG( + vert_pos.size(1) == 3, + 1, + ("Vertex positions must be 3D (have shape " + + std::to_string(vert_pos.size(1)) + ")!") + .c_str()); + TORCH_CHECK_ARG( + vert_col.size(1) == this->renderer_vec[0].cam.n_channels, + 2, + ("Vertex colors must have the right number of channels (have shape " + + std::to_string(vert_col.size(1)) + ", need " + + std::to_string(this->renderer_vec[0].cam.n_channels) + ")!") + .c_str()); + TORCH_CHECK_ARG( + cam_pos.size(0) == 3, + 4, + ("Camera position must be 3D (has shape " + + std::to_string(cam_pos.size(0)) + ")!") + .c_str()); + TORCH_CHECK_ARG( + pixel_0_0_center.size(0) == 3, + 5, + ("pixel_0_0_center must be 3D (has shape " + + std::to_string(pixel_0_0_center.size(0)) + ")!") + .c_str()); + TORCH_CHECK_ARG( + pixel_vec_x.size(0) == 3, + 6, + ("pixel_vec_x must be 3D (has shape " + + std::to_string(pixel_vec_x.size(0)) + ")!") + .c_str()); + TORCH_CHECK_ARG( + pixel_vec_y.size(0) == 3, + 7, + ("pixel_vec_y must be 3D (has shape " + + std::to_string(pixel_vec_y.size(0)) + ")!") + .c_str()); + TORCH_CHECK_ARG( + principal_point_offsets.size(0) == 2, + 9, + "principal_point_offsets must have x and y component."); + } + // Check device placement. + auto dev = torch::device_of(vert_pos).value(); + TORCH_CHECK_ARG( + dev.type() == this->device_type && dev.index() == this->device_index, + 1, + ("Vertex positions must be stored on device " + + c10::DeviceTypeName(this->device_type) + ", index " + + std::to_string(this->device_index) + "! Are stored on " + + c10::DeviceTypeName(dev.type()) + ", index " + + std::to_string(dev.index()) + ".") + .c_str()); + dev = torch::device_of(vert_col).value(); + TORCH_CHECK_ARG( + dev.type() == this->device_type && dev.index() == this->device_index, + 2, + ("Vertex colors must be stored on device " + + c10::DeviceTypeName(this->device_type) + ", index " + + std::to_string(this->device_index) + "! 
Are stored on " + + c10::DeviceTypeName(dev.type()) + ", index " + + std::to_string(dev.index()) + ".") + .c_str()); + dev = torch::device_of(vert_radii).value(); + TORCH_CHECK_ARG( + dev.type() == this->device_type && dev.index() == this->device_index, + 3, + ("Vertex radii must be stored on device " + + c10::DeviceTypeName(this->device_type) + ", index " + + std::to_string(this->device_index) + "! Are stored on " + + c10::DeviceTypeName(dev.type()) + ", index " + + std::to_string(dev.index()) + ".") + .c_str()); + dev = torch::device_of(cam_pos).value(); + TORCH_CHECK_ARG( + dev.type() == this->device_type && dev.index() == this->device_index, + 4, + ("Camera position must be stored on device " + + c10::DeviceTypeName(this->device_type) + ", index " + + std::to_string(this->device_index) + "! Are stored on " + + c10::DeviceTypeName(dev.type()) + ", index " + + std::to_string(dev.index()) + ".") + .c_str()); + dev = torch::device_of(pixel_0_0_center).value(); + TORCH_CHECK_ARG( + dev.type() == this->device_type && dev.index() == this->device_index, + 5, + ("pixel_0_0_center must be stored on device " + + c10::DeviceTypeName(this->device_type) + ", index " + + std::to_string(this->device_index) + "! Are stored on " + + c10::DeviceTypeName(dev.type()) + ", index " + + std::to_string(dev.index()) + ".") + .c_str()); + dev = torch::device_of(pixel_vec_x).value(); + TORCH_CHECK_ARG( + dev.type() == this->device_type && dev.index() == this->device_index, + 6, + ("pixel_vec_x must be stored on device " + + c10::DeviceTypeName(this->device_type) + ", index " + + std::to_string(this->device_index) + "! 
Are stored on " + + c10::DeviceTypeName(dev.type()) + ", index " + + std::to_string(dev.index()) + ".") + .c_str()); + dev = torch::device_of(pixel_vec_y).value(); + TORCH_CHECK_ARG( + dev.type() == this->device_type && dev.index() == this->device_index, + 7, + ("pixel_vec_y must be stored on device " + + c10::DeviceTypeName(this->device_type) + ", index " + + std::to_string(this->device_index) + "! Are stored on " + + c10::DeviceTypeName(dev.type()) + ", index " + + std::to_string(dev.index()) + ".") + .c_str()); + dev = torch::device_of(principal_point_offsets).value(); + TORCH_CHECK_ARG( + dev.type() == this->device_type && dev.index() == this->device_index, + 9, + ("principal_point_offsets must be stored on device " + + c10::DeviceTypeName(this->device_type) + ", index " + + std::to_string(this->device_index) + "! Are stored on " + + c10::DeviceTypeName(dev.type()) + ", index " + + std::to_string(dev.index()) + ".") + .c_str()); + if (opacity.has_value()) { + dev = torch::device_of(opacity.value()).value(); + TORCH_CHECK_ARG( + dev.type() == this->device_type && dev.index() == this->device_index, + 13, + ("opacity must be stored on device " + + c10::DeviceTypeName(this->device_type) + ", index " + + std::to_string(this->device_index) + "! Is stored on " + + c10::DeviceTypeName(dev.type()) + ", index " + + std::to_string(dev.index()) + ".") + .c_str()); + } + // Type checks. 
+ TORCH_CHECK_ARG( + vert_pos.scalar_type() == c10::kFloat, 1, "pulsar requires float types."); + TORCH_CHECK_ARG( + vert_col.scalar_type() == c10::kFloat, 2, "pulsar requires float types."); + TORCH_CHECK_ARG( + vert_radii.scalar_type() == c10::kFloat, + 3, + "pulsar requires float types."); + TORCH_CHECK_ARG( + cam_pos.scalar_type() == c10::kFloat, 4, "pulsar requires float types."); + TORCH_CHECK_ARG( + pixel_0_0_center.scalar_type() == c10::kFloat, + 5, + "pulsar requires float types."); + TORCH_CHECK_ARG( + pixel_vec_x.scalar_type() == c10::kFloat, + 6, + "pulsar requires float types."); + TORCH_CHECK_ARG( + pixel_vec_y.scalar_type() == c10::kFloat, + 7, + "pulsar requires float types."); + TORCH_CHECK_ARG( + focal_length.scalar_type() == c10::kFloat, + 8, + "pulsar requires float types."); + TORCH_CHECK_ARG( + // Unfortunately, the PyTorch interface is inconsistent for + // Int32: in Python, there exists an explicit int32 type, in + // C++ this is currently `c10::kInt`. + principal_point_offsets.scalar_type() == c10::kInt, + 9, + "principal_point_offsets must be provided as int32."); + if (opacity.has_value()) { + TORCH_CHECK_ARG( + opacity.value().scalar_type() == c10::kFloat, + 13, + "opacity must be a float type."); + } + // Content checks. 
+ TORCH_CHECK_ARG( + (vert_radii > FEPS).all().item(), + 3, + ("Vertex radii must be > FEPS (min is " + + std::to_string(vert_radii.min().item()) + ").") + .c_str()); + if (this->orthogonal()) { + TORCH_CHECK_ARG( + (focal_length == 0.f).all().item(), + 8, + ("for an orthogonal projection focal length must be zero (abs max: " + + std::to_string(focal_length.abs().max().item()) + ").") + .c_str()); + } else { + TORCH_CHECK_ARG( + (focal_length > FEPS).all().item(), + 8, + ("for a perspective projection focal length must be > FEPS (min " + + std::to_string(focal_length.min().item()) + ").") + .c_str()); + } + TORCH_CHECK_ARG( + gamma <= 1.f && gamma >= 1E-5f, + 10, + ("gamma must be in [1E-5, 1] (" + std::to_string(gamma) + ").").c_str()); + if (min_depth == 0.f) { + min_depth = focal_length.max().item() + 2.f * FEPS; + } + TORCH_CHECK_ARG( + min_depth > focal_length.max().item(), + 12, + ("min_depth must be > focal_length (" + std::to_string(min_depth) + + " vs. " + std::to_string(focal_length.max().item()) + ").") + .c_str()); + TORCH_CHECK_ARG( + max_depth > min_depth + FEPS, + 11, + ("max_depth must be > min_depth + FEPS (" + std::to_string(max_depth) + + " vs. 
" + std::to_string(min_depth + FEPS) + ").") + .c_str()); + TORCH_CHECK_ARG( + percent_allowed_difference >= 0.f && percent_allowed_difference < 1.f, + 14, + ("percent_allowed_difference must be in [0., 1.[ (" + + std::to_string(percent_allowed_difference) + ").") + .c_str()); + TORCH_CHECK_ARG(max_n_hits > 0, 14, "max_n_hits must be > 0!"); + TORCH_CHECK_ARG(mode < 2, 15, "mode must be in {0, 1}."); + torch::Tensor real_bg_col; + if (bg_col.has_value()) { + TORCH_CHECK_ARG( + bg_col.value().device().type() == this->device_type && + bg_col.value().device().index() == this->device_index, + 13, + "bg_col must be stored on the renderer device!"); + TORCH_CHECK_ARG( + bg_col.value().ndimension() == 1 && + bg_col.value().size(0) == renderer_vec[0].cam.n_channels, + 13, + "bg_col must have the same number of channels as the image,)."); + real_bg_col = bg_col.value(); + } else { + real_bg_col = torch::ones( + {renderer_vec[0].cam.n_channels}, + c10::Device(this->device_type, this->device_index)) + .to(c10::kFloat); + } + if (opacity.has_value()) { + TORCH_CHECK_ARG( + (opacity.value() >= 0.f).all().item(), + 13, + "opacity must be >= 0."); + TORCH_CHECK_ARG( + (opacity.value() <= 1.f).all().item(), + 13, + "opacity must be <= 1."); + } + LOG_IF(INFO, PULSAR_LOG_FORWARD || PULSAR_LOG_BACKWARD) + << " batch_size: " << batch_size; + LOG_IF(INFO, PULSAR_LOG_FORWARD || PULSAR_LOG_BACKWARD) + << " n_points: " << n_points; + LOG_IF(INFO, PULSAR_LOG_FORWARD || PULSAR_LOG_BACKWARD) + << " batch_processing: " << batch_processing; + return std::tuple( + batch_size, n_points, batch_processing, real_bg_col); +} + +std::tuple Renderer::forward( + const torch::Tensor& vert_pos, + const torch::Tensor& vert_col, + const torch::Tensor& vert_radii, + const torch::Tensor& cam_pos, + const torch::Tensor& pixel_0_0_center, + const torch::Tensor& pixel_vec_x, + const torch::Tensor& pixel_vec_y, + const torch::Tensor& focal_length, + const torch::Tensor& principal_point_offsets, + const float& 
gamma, + const float& max_depth, + float min_depth, + const c10::optional& bg_col, + const c10::optional& opacity, + const float& percent_allowed_difference, + const uint& max_n_hits, + const uint& mode) { + // Parameter checks. + this->ensure_on_device(this->device_tracker.device()); + size_t batch_size; + size_t n_points; + bool batch_processing; + torch::Tensor real_bg_col; + std::tie(batch_size, n_points, batch_processing, real_bg_col) = + this->arg_check( + vert_pos, + vert_col, + vert_radii, + cam_pos, + pixel_0_0_center, + pixel_vec_x, + pixel_vec_y, + focal_length, + principal_point_offsets, + gamma, + max_depth, + min_depth, + bg_col, + opacity, + percent_allowed_difference, + max_n_hits, + mode); + LOG_IF(INFO, PULSAR_LOG_FORWARD) << "Extracting camera objects..."; + // Create the camera information. + std::vector cam_infos(batch_size); + if (batch_processing) { + for (size_t batch_i = 0; batch_i < batch_size; ++batch_i) { + cam_infos[batch_i] = cam_info_from_params( + cam_pos[batch_i], + pixel_0_0_center[batch_i], + pixel_vec_x[batch_i], + pixel_vec_y[batch_i], + principal_point_offsets[batch_i], + focal_length[batch_i].item(), + this->renderer_vec[0].cam.film_width, + this->renderer_vec[0].cam.film_height, + min_depth, + max_depth, + this->renderer_vec[0].cam.right_handed); + } + } else { + cam_infos[0] = cam_info_from_params( + cam_pos, + pixel_0_0_center, + pixel_vec_x, + pixel_vec_y, + principal_point_offsets, + focal_length.item(), + this->renderer_vec[0].cam.film_width, + this->renderer_vec[0].cam.film_height, + min_depth, + max_depth, + this->renderer_vec[0].cam.right_handed); + } + LOG_IF(INFO, PULSAR_LOG_FORWARD) << "Processing..."; + // Let's go! + // Contiguous version of opacity, if available. We need to create this object + // in scope to keep it alive. 
+ torch::Tensor opacity_contiguous; + float const* opacity_ptr = nullptr; + if (opacity.has_value()) { + opacity_contiguous = opacity.value().contiguous(); + opacity_ptr = opacity_contiguous.data_ptr(); + } + if (this->device_type == c10::DeviceType::CUDA) { +// No else check necessary - if not compiled with CUDA +// we can't even reach this code (the renderer can't be +// moved to a CUDA device). +#ifdef WITH_CUDA + int prev_active; + cudaGetDevice(&prev_active); + cudaSetDevice(this->device_index); +#ifdef PULSAR_TIMINGS_BATCHED_ENABLED + START_TIME_CU(batch_forward); +#endif + if (batch_processing) { + for (size_t batch_i = 0; batch_i < batch_size; ++batch_i) { + // These calls are non-blocking and just kick off the computations. + PRE::forward( + &this->renderer_vec[batch_i], + vert_pos[batch_i].contiguous().data_ptr(), + vert_col[batch_i].contiguous().data_ptr(), + vert_radii[batch_i].contiguous().data_ptr(), + cam_infos[batch_i], + gamma, + percent_allowed_difference, + max_n_hits, + real_bg_col.contiguous().data_ptr(), + opacity_ptr, + n_points, + mode, + at::cuda::getCurrentCUDAStream()); + } + } else { + PRE::forward( + this->renderer_vec.data(), + vert_pos.contiguous().data_ptr(), + vert_col.contiguous().data_ptr(), + vert_radii.contiguous().data_ptr(), + cam_infos[0], + gamma, + percent_allowed_difference, + max_n_hits, + real_bg_col.contiguous().data_ptr(), + opacity_ptr, + n_points, + mode, + at::cuda::getCurrentCUDAStream()); + } +#ifdef PULSAR_TIMINGS_BATCHED_ENABLED + STOP_TIME_CU(batch_forward); + float time_ms; + GET_TIME_CU(batch_forward, &time_ms); + std::cout << "Forward render batched time per example: " + << time_ms / static_cast(batch_size) << "ms" << std::endl; +#endif + cudaSetDevice(prev_active); +#endif + } else { +#ifdef PULSAR_TIMINGS_BATCHED_ENABLED + START_TIME(batch_forward); +#endif + if (batch_processing) { + for (size_t batch_i = 0; batch_i < batch_size; ++batch_i) { + // These calls are non-blocking and just kick off the 
computations. + PRE::forward( + &this->renderer_vec[batch_i], + vert_pos[batch_i].contiguous().data_ptr(), + vert_col[batch_i].contiguous().data_ptr(), + vert_radii[batch_i].contiguous().data_ptr(), + cam_infos[batch_i], + gamma, + percent_allowed_difference, + max_n_hits, + real_bg_col.contiguous().data_ptr(), + opacity_ptr, + n_points, + mode, + nullptr); + } + } else { + PRE::forward( + this->renderer_vec.data(), + vert_pos.contiguous().data_ptr(), + vert_col.contiguous().data_ptr(), + vert_radii.contiguous().data_ptr(), + cam_infos[0], + gamma, + percent_allowed_difference, + max_n_hits, + real_bg_col.contiguous().data_ptr(), + opacity_ptr, + n_points, + mode, + nullptr); + } +#ifdef PULSAR_TIMINGS_BATCHED_ENABLED + STOP_TIME(batch_forward); + float time_ms; + GET_TIME(batch_forward, &time_ms); + std::cout << "Forward render batched time per example: " + << time_ms / static_cast(batch_size) << "ms" << std::endl; +#endif + } + LOG_IF(INFO, PULSAR_LOG_FORWARD) << "Extracting results..."; + // Create the results. + std::vector results(batch_size); + std::vector forw_infos(batch_size); + for (size_t batch_i = 0; batch_i < batch_size; ++batch_i) { + results[batch_i] = from_blob( + this->renderer_vec[batch_i].result_d, + {this->renderer_vec[0].cam.film_height, + this->renderer_vec[0].cam.film_width, + this->renderer_vec[0].cam.n_channels}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + if (mode == 1) + results[batch_i] = results[batch_i].slice(2, 0, 1, 1); + forw_infos[batch_i] = from_blob( + this->renderer_vec[batch_i].forw_info_d, + {this->renderer_vec[0].cam.film_height, + this->renderer_vec[0].cam.film_width, + 3 + 2 * this->n_track()}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? 
at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + } + LOG_IF(INFO, PULSAR_LOG_FORWARD) << "Forward render complete."; + if (batch_processing) { + return std::tuple( + torch::stack(results), torch::stack(forw_infos)); + } else { + return std::tuple(results[0], forw_infos[0]); + } +}; + +std::tuple< + at::optional, + at::optional, + at::optional, + at::optional, + at::optional, + at::optional, + at::optional, + at::optional> +Renderer::backward( + const torch::Tensor& grad_im, + const torch::Tensor& image, + const torch::Tensor& forw_info, + const torch::Tensor& vert_pos, + const torch::Tensor& vert_col, + const torch::Tensor& vert_radii, + const torch::Tensor& cam_pos, + const torch::Tensor& pixel_0_0_center, + const torch::Tensor& pixel_vec_x, + const torch::Tensor& pixel_vec_y, + const torch::Tensor& focal_length, + const torch::Tensor& principal_point_offsets, + const float& gamma, + const float& max_depth, + float min_depth, + const c10::optional& bg_col, + const c10::optional& opacity, + const float& percent_allowed_difference, + const uint& max_n_hits, + const uint& mode, + const bool& dif_pos, + const bool& dif_col, + const bool& dif_rad, + const bool& dif_cam, + const bool& dif_opy, + const at::optional>& dbg_pos) { + this->ensure_on_device(this->device_tracker.device()); + size_t batch_size; + size_t n_points; + bool batch_processing; + torch::Tensor real_bg_col; + std::tie(batch_size, n_points, batch_processing, real_bg_col) = + this->arg_check( + vert_pos, + vert_col, + vert_radii, + cam_pos, + pixel_0_0_center, + pixel_vec_x, + pixel_vec_y, + focal_length, + principal_point_offsets, + gamma, + max_depth, + min_depth, + bg_col, + opacity, + percent_allowed_difference, + max_n_hits, + mode); + // Additional checks for the gradient computation. 
+ TORCH_CHECK_ARG( + (grad_im.ndimension() == 3 + batch_processing && + static_cast(grad_im.size(0 + batch_processing)) == + this->height() && + static_cast(grad_im.size(1 + batch_processing)) == this->width() && + static_cast(grad_im.size(2 + batch_processing)) == + this->renderer_vec[0].cam.n_channels), + 1, + "The gradient image size is not correct."); + TORCH_CHECK_ARG( + (image.ndimension() == 3 + batch_processing && + static_cast(image.size(0 + batch_processing)) == this->height() && + static_cast(image.size(1 + batch_processing)) == this->width() && + static_cast(image.size(2 + batch_processing)) == + this->renderer_vec[0].cam.n_channels), + 2, + "The result image size is not correct."); + TORCH_CHECK_ARG( + grad_im.scalar_type() == c10::kFloat, + 1, + "The gradient image must be of float type."); + TORCH_CHECK_ARG( + image.scalar_type() == c10::kFloat, + 2, + "The image must be of float type."); + if (dif_opy) { + TORCH_CHECK_ARG( + opacity.has_value(), 13, "dif_opy set requires opacity values."); + } + if (batch_processing) { + TORCH_CHECK_ARG( + grad_im.size(0) == static_cast(batch_size), + 1, + "Gradient image batch size must agree."); + TORCH_CHECK_ARG( + image.size(0) == static_cast(batch_size), + 2, + "Image batch size must agree."); + TORCH_CHECK_ARG( + forw_info.size(0) == static_cast(batch_size), + 3, + "forward info must have batch size."); + } + TORCH_CHECK_ARG( + (forw_info.ndimension() == 3 + batch_processing && + static_cast(forw_info.size(0 + batch_processing)) == + this->height() && + static_cast(forw_info.size(1 + batch_processing)) == + this->width() && + static_cast(forw_info.size(2 + batch_processing)) == + 3 + 2 * this->n_track()), + 3, + "The forward info image size is not correct."); + TORCH_CHECK_ARG( + forw_info.scalar_type() == c10::kFloat, + 3, + "The forward info must be of float type."); + // Check device. 
+ auto dev = torch::device_of(grad_im).value(); + TORCH_CHECK_ARG( + dev.type() == this->device_type && dev.index() == this->device_index, + 1, + ("grad_im must be stored on device " + + c10::DeviceTypeName(this->device_type) + ", index " + + std::to_string(this->device_index) + "! Are stored on " + + c10::DeviceTypeName(dev.type()) + ", index " + + std::to_string(dev.index()) + ".") + .c_str()); + dev = torch::device_of(image).value(); + TORCH_CHECK_ARG( + dev.type() == this->device_type && dev.index() == this->device_index, + 2, + ("image must be stored on device " + + c10::DeviceTypeName(this->device_type) + ", index " + + std::to_string(this->device_index) + "! Are stored on " + + c10::DeviceTypeName(dev.type()) + ", index " + + std::to_string(dev.index()) + ".") + .c_str()); + dev = torch::device_of(forw_info).value(); + TORCH_CHECK_ARG( + dev.type() == this->device_type && dev.index() == this->device_index, + 3, + ("forw_info must be stored on device " + + c10::DeviceTypeName(this->device_type) + ", index " + + std::to_string(this->device_index) + "! Are stored on " + + c10::DeviceTypeName(dev.type()) + ", index " + + std::to_string(dev.index()) + ".") + .c_str()); + if (dbg_pos.has_value()) { + TORCH_CHECK_ARG( + dbg_pos.value().first < this->width() && + dbg_pos.value().second < this->height(), + 23, + "The debug position must be within image bounds."); + } + // Prepare the return value. + std::tuple< + at::optional, + at::optional, + at::optional, + at::optional, + at::optional, + at::optional, + at::optional, + at::optional> + ret; + if (mode == 1 || (!dif_pos && !dif_col && !dif_rad && !dif_cam && !dif_opy)) { + return ret; + } + // Create the camera information. 
+ std::vector cam_infos(batch_size); + if (batch_processing) { + for (size_t batch_i = 0; batch_i < batch_size; ++batch_i) { + cam_infos[batch_i] = cam_info_from_params( + cam_pos[batch_i], + pixel_0_0_center[batch_i], + pixel_vec_x[batch_i], + pixel_vec_y[batch_i], + principal_point_offsets[batch_i], + focal_length[batch_i].item(), + this->renderer_vec[0].cam.film_width, + this->renderer_vec[0].cam.film_height, + min_depth, + max_depth, + this->renderer_vec[0].cam.right_handed); + } + } else { + cam_infos[0] = cam_info_from_params( + cam_pos, + pixel_0_0_center, + pixel_vec_x, + pixel_vec_y, + principal_point_offsets, + focal_length.item(), + this->renderer_vec[0].cam.film_width, + this->renderer_vec[0].cam.film_height, + min_depth, + max_depth, + this->renderer_vec[0].cam.right_handed); + } + // Let's go! + // Contiguous version of opacity, if available. We need to create this object + // in scope to keep it alive. + torch::Tensor opacity_contiguous; + float const* opacity_ptr = nullptr; + if (opacity.has_value()) { + opacity_contiguous = opacity.value().contiguous(); + opacity_ptr = opacity_contiguous.data_ptr(); + } + if (this->device_type == c10::DeviceType::CUDA) { +// No else check necessary - it's not possible to move +// the renderer to a CUDA device if not built with CUDA. +#ifdef WITH_CUDA + int prev_active; + cudaGetDevice(&prev_active); + cudaSetDevice(this->device_index); +#ifdef PULSAR_TIMINGS_BATCHED_ENABLED + START_TIME_CU(batch_backward); +#endif + if (batch_processing) { + for (size_t batch_i = 0; batch_i < batch_size; ++batch_i) { + // These calls are non-blocking and just kick off the computations. 
+ if (dbg_pos.has_value()) { + PRE::backward_dbg( + &this->renderer_vec[batch_i], + grad_im[batch_i].contiguous().data_ptr(), + image[batch_i].contiguous().data_ptr(), + forw_info[batch_i].contiguous().data_ptr(), + vert_pos[batch_i].contiguous().data_ptr(), + vert_col[batch_i].contiguous().data_ptr(), + vert_radii[batch_i].contiguous().data_ptr(), + cam_infos[batch_i], + gamma, + percent_allowed_difference, + max_n_hits, + opacity_ptr, + n_points, + mode, + dif_pos, + dif_col, + dif_rad, + dif_cam, + dif_opy, + dbg_pos.value().first, + dbg_pos.value().second, + at::cuda::getCurrentCUDAStream()); + } else { + PRE::backward( + &this->renderer_vec[batch_i], + grad_im[batch_i].contiguous().data_ptr(), + image[batch_i].contiguous().data_ptr(), + forw_info[batch_i].contiguous().data_ptr(), + vert_pos[batch_i].contiguous().data_ptr(), + vert_col[batch_i].contiguous().data_ptr(), + vert_radii[batch_i].contiguous().data_ptr(), + cam_infos[batch_i], + gamma, + percent_allowed_difference, + max_n_hits, + opacity_ptr, + n_points, + mode, + dif_pos, + dif_col, + dif_rad, + dif_cam, + dif_opy, + at::cuda::getCurrentCUDAStream()); + } + } + } else { + if (dbg_pos.has_value()) { + PRE::backward_dbg( + this->renderer_vec.data(), + grad_im.contiguous().data_ptr(), + image.contiguous().data_ptr(), + forw_info.contiguous().data_ptr(), + vert_pos.contiguous().data_ptr(), + vert_col.contiguous().data_ptr(), + vert_radii.contiguous().data_ptr(), + cam_infos[0], + gamma, + percent_allowed_difference, + max_n_hits, + opacity_ptr, + n_points, + mode, + dif_pos, + dif_col, + dif_rad, + dif_cam, + dif_opy, + dbg_pos.value().first, + dbg_pos.value().second, + at::cuda::getCurrentCUDAStream()); + } else { + PRE::backward( + this->renderer_vec.data(), + grad_im.contiguous().data_ptr(), + image.contiguous().data_ptr(), + forw_info.contiguous().data_ptr(), + vert_pos.contiguous().data_ptr(), + vert_col.contiguous().data_ptr(), + vert_radii.contiguous().data_ptr(), + cam_infos[0], + gamma, + 
percent_allowed_difference, + max_n_hits, + opacity_ptr, + n_points, + mode, + dif_pos, + dif_col, + dif_rad, + dif_cam, + dif_opy, + at::cuda::getCurrentCUDAStream()); + } + } + cudaSetDevice(prev_active); +#ifdef PULSAR_TIMINGS_BATCHED_ENABLED + STOP_TIME_CU(batch_backward); + float time_ms; + GET_TIME_CU(batch_backward, &time_ms); + std::cout << "Backward render batched time per example: " + << time_ms / static_cast(batch_size) << "ms" << std::endl; +#endif +#endif // WITH_CUDA + } else { +#ifdef PULSAR_TIMINGS_BATCHED_ENABLED + START_TIME(batch_backward); +#endif + if (batch_processing) { + for (size_t batch_i = 0; batch_i < batch_size; ++batch_i) { + // These calls are non-blocking and just kick off the computations. + if (dbg_pos.has_value()) { + PRE::backward_dbg( + &this->renderer_vec[batch_i], + grad_im[batch_i].contiguous().data_ptr(), + image[batch_i].contiguous().data_ptr(), + forw_info[batch_i].contiguous().data_ptr(), + vert_pos[batch_i].contiguous().data_ptr(), + vert_col[batch_i].contiguous().data_ptr(), + vert_radii[batch_i].contiguous().data_ptr(), + cam_infos[batch_i], + gamma, + percent_allowed_difference, + max_n_hits, + opacity_ptr, + n_points, + mode, + dif_pos, + dif_col, + dif_rad, + dif_cam, + dif_opy, + dbg_pos.value().first, + dbg_pos.value().second, + nullptr); + } else { + PRE::backward( + &this->renderer_vec[batch_i], + grad_im[batch_i].contiguous().data_ptr(), + image[batch_i].contiguous().data_ptr(), + forw_info[batch_i].contiguous().data_ptr(), + vert_pos[batch_i].contiguous().data_ptr(), + vert_col[batch_i].contiguous().data_ptr(), + vert_radii[batch_i].contiguous().data_ptr(), + cam_infos[batch_i], + gamma, + percent_allowed_difference, + max_n_hits, + opacity_ptr, + n_points, + mode, + dif_pos, + dif_col, + dif_rad, + dif_cam, + dif_opy, + nullptr); + } + } + } else { + if (dbg_pos.has_value()) { + PRE::backward_dbg( + this->renderer_vec.data(), + grad_im.contiguous().data_ptr(), + image.contiguous().data_ptr(), + 
forw_info.contiguous().data_ptr(), + vert_pos.contiguous().data_ptr(), + vert_col.contiguous().data_ptr(), + vert_radii.contiguous().data_ptr(), + cam_infos[0], + gamma, + percent_allowed_difference, + max_n_hits, + opacity_ptr, + n_points, + mode, + dif_pos, + dif_col, + dif_rad, + dif_cam, + dif_opy, + dbg_pos.value().first, + dbg_pos.value().second, + nullptr); + } else { + PRE::backward( + this->renderer_vec.data(), + grad_im.contiguous().data_ptr(), + image.contiguous().data_ptr(), + forw_info.contiguous().data_ptr(), + vert_pos.contiguous().data_ptr(), + vert_col.contiguous().data_ptr(), + vert_radii.contiguous().data_ptr(), + cam_infos[0], + gamma, + percent_allowed_difference, + max_n_hits, + opacity_ptr, + n_points, + mode, + dif_pos, + dif_col, + dif_rad, + dif_cam, + dif_opy, + nullptr); + } + } +#ifdef PULSAR_TIMINGS_BATCHED_ENABLED + STOP_TIME(batch_backward); + float time_ms; + GET_TIME(batch_backward, &time_ms); + std::cout << "Backward render batched time per example: " + << time_ms / static_cast(batch_size) << "ms" << std::endl; +#endif + } + if (dif_pos) { + if (batch_processing) { + std::vector results(batch_size); + for (size_t batch_i = 0; batch_i < batch_size; ++batch_i) { + results[batch_i] = from_blob( + reinterpret_cast(this->renderer_vec[batch_i].grad_pos_d), + {static_cast(n_points), 3}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + } + std::get<0>(ret) = torch::stack(results); + } else { + std::get<0>(ret) = from_blob( + reinterpret_cast(this->renderer_vec[0].grad_pos_d), + {static_cast(n_points), 3}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? 
(cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + } + } + if (dif_col) { + if (batch_processing) { + std::vector results(batch_size); + for (size_t batch_i = 0; batch_i < batch_size; ++batch_i) { + results[batch_i] = from_blob( + reinterpret_cast(this->renderer_vec[batch_i].grad_col_d), + {static_cast(n_points), + this->renderer_vec[0].cam.n_channels}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + } + std::get<1>(ret) = torch::stack(results); + } else { + std::get<1>(ret) = from_blob( + reinterpret_cast(this->renderer_vec[0].grad_col_d), + {static_cast(n_points), + this->renderer_vec[0].cam.n_channels}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + } + } + if (dif_rad) { + if (batch_processing) { + std::vector results(batch_size); + for (size_t batch_i = 0; batch_i < batch_size; ++batch_i) { + results[batch_i] = from_blob( + reinterpret_cast(this->renderer_vec[batch_i].grad_rad_d), + {static_cast(n_points)}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + } + std::get<2>(ret) = torch::stack(results); + } else { + std::get<2>(ret) = from_blob( + reinterpret_cast(this->renderer_vec[0].grad_rad_d), + {static_cast(n_points)}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? 
(cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + } + } + if (dif_cam) { + if (batch_processing) { + std::vector res_p1(batch_size); + std::vector res_p2(batch_size); + std::vector res_p3(batch_size); + std::vector res_p4(batch_size); + for (size_t batch_i = 0; batch_i < batch_size; ++batch_i) { + res_p1[batch_i] = from_blob( + reinterpret_cast(this->renderer_vec[batch_i].grad_cam_d), + {3}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + res_p2[batch_i] = from_blob( + reinterpret_cast( + this->renderer_vec[batch_i].grad_cam_d + 3), + {3}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + res_p3[batch_i] = from_blob( + reinterpret_cast( + this->renderer_vec[batch_i].grad_cam_d + 6), + {3}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + res_p4[batch_i] = from_blob( + reinterpret_cast( + this->renderer_vec[batch_i].grad_cam_d + 9), + {3}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? 
(cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + } + std::get<3>(ret) = torch::stack(res_p1); + std::get<4>(ret) = torch::stack(res_p2); + std::get<5>(ret) = torch::stack(res_p3); + std::get<6>(ret) = torch::stack(res_p4); + } else { + std::get<3>(ret) = from_blob( + reinterpret_cast(this->renderer_vec[0].grad_cam_d), + {3}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + std::get<4>(ret) = from_blob( + reinterpret_cast(this->renderer_vec[0].grad_cam_d + 3), + {3}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + std::get<5>(ret) = from_blob( + reinterpret_cast(this->renderer_vec[0].grad_cam_d + 6), + {3}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + std::get<6>(ret) = from_blob( + reinterpret_cast(this->renderer_vec[0].grad_cam_d + 9), + {3}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + } + } + if (dif_opy) { + if (batch_processing) { + std::vector results(batch_size); + for (size_t batch_i = 0; batch_i < batch_size; ++batch_i) { + results[batch_i] = from_blob( + reinterpret_cast(this->renderer_vec[batch_i].grad_opy_d), + {static_cast(n_points)}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? 
(cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + } + std::get<7>(ret) = torch::stack(results); + } else { + std::get<7>(ret) = from_blob( + reinterpret_cast(this->renderer_vec[0].grad_opy_d), + {static_cast(n_points)}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + } + } + return ret; +}; + +} // namespace pytorch +} // namespace pulsar diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/pytorch/renderer.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/pytorch/renderer.h new file mode 100644 index 0000000000000000000000000000000000000000..2525ca3f3dd9036320401b2a0059a2d5b6b864a4 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/pulsar/pytorch/renderer.h @@ -0,0 +1,174 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_PYTORCH_RENDERER_H_ +#define PULSAR_NATIVE_PYTORCH_RENDERER_H_ + +#include "../global.h" +#include "../include/renderer.h" + +namespace pulsar { +namespace pytorch { + +struct Renderer { + public: + /** + * Pytorch Pulsar differentiable rendering module. 
   */
  explicit Renderer(
      const unsigned int& width,
      const unsigned int& height,
      const uint& max_n_balls,
      const bool& orthogonal_projection,
      const bool& right_handed_system,
      const float& background_normalization_depth,
      const uint& n_channels,
      const uint& n_track);
  ~Renderer();

  // Differentiable forward pass: rasterize the spheres into an image.
  // NOTE(review): the tuple template arguments were lost in extraction;
  // reconstructed as (image, forw_info) — confirm against upstream pytorch3d.
  std::tuple<torch::Tensor, torch::Tensor> forward(
      const torch::Tensor& vert_pos,
      const torch::Tensor& vert_col,
      const torch::Tensor& vert_radii,
      const torch::Tensor& cam_pos,
      const torch::Tensor& pixel_0_0_center,
      const torch::Tensor& pixel_vec_x,
      const torch::Tensor& pixel_vec_y,
      const torch::Tensor& focal_length,
      const torch::Tensor& principal_point_offsets,
      const float& gamma,
      const float& max_depth,
      float min_depth,
      const c10::optional<torch::Tensor>& bg_col,
      const c10::optional<torch::Tensor>& opacity,
      const float& percent_allowed_difference,
      const uint& max_n_hits,
      const uint& mode);

  // Backward pass. Each of the eight optional results is only populated
  // when the corresponding dif_* flag is set (pos, col, rad, cam x4, opy).
  std::tuple<
      at::optional<torch::Tensor>,
      at::optional<torch::Tensor>,
      at::optional<torch::Tensor>,
      at::optional<torch::Tensor>,
      at::optional<torch::Tensor>,
      at::optional<torch::Tensor>,
      at::optional<torch::Tensor>,
      at::optional<torch::Tensor>>
  backward(
      const torch::Tensor& grad_im,
      const torch::Tensor& image,
      const torch::Tensor& forw_info,
      const torch::Tensor& vert_pos,
      const torch::Tensor& vert_col,
      const torch::Tensor& vert_radii,
      const torch::Tensor& cam_pos,
      const torch::Tensor& pixel_0_0_center,
      const torch::Tensor& pixel_vec_x,
      const torch::Tensor& pixel_vec_y,
      const torch::Tensor& focal_length,
      const torch::Tensor& principal_point_offsets,
      const float& gamma,
      const float& max_depth,
      float min_depth,
      const c10::optional<torch::Tensor>& bg_col,
      const c10::optional<torch::Tensor>& opacity,
      const float& percent_allowed_difference,
      const uint& max_n_hits,
      const uint& mode,
      const bool& dif_pos,
      const bool& dif_col,
      const bool& dif_rad,
      const bool& dif_cam,
      const bool& dif_opy,
      // NOTE(review): optional's inner type lost in extraction; presumably
      // a (x, y) pixel position pair — confirm against upstream.
      const at::optional<std::pair<uint, uint>>& dbg_pos);

  // Infrastructure.
  /**
   * Ensure that the renderer is placed on this device.
   * Is nearly a no-op if the device is correct.
   */
  void ensure_on_device(torch::Device device, bool non_blocking = false);

  /**
   * Ensure that at least n renderers are available.
   */
  void ensure_n_renderers_gte(const size_t& batch_size);

  /**
   * Check the parameters.
   * NOTE(review): return-tuple template arguments were lost in extraction;
   * reconstructed as (batch_processing, batch_size, n_points) based on the
   * values consumed in renderer.cpp — confirm against upstream.
   */
  std::tuple<bool, size_t, size_t> arg_check(
      const torch::Tensor& vert_pos,
      const torch::Tensor& vert_col,
      const torch::Tensor& vert_radii,
      const torch::Tensor& cam_pos,
      const torch::Tensor& pixel_0_0_center,
      const torch::Tensor& pixel_vec_x,
      const torch::Tensor& pixel_vec_y,
      const torch::Tensor& focal_length,
      const torch::Tensor& principal_point_offsets,
      const float& gamma,
      const float& max_depth,
      float& min_depth,
      const c10::optional<torch::Tensor>& bg_col,
      const c10::optional<torch::Tensor>& opacity,
      const float& percent_allowed_difference,
      const uint& max_n_hits,
      const uint& mode);

  bool operator==(const Renderer& rhs) const;
  inline friend std::ostream& operator<<(
      std::ostream& stream,
      const Renderer& self) {
    stream << "pulsar::Renderer[";
    // Device info.
    stream << self.device_type;
    if (self.device_index != -1)
      stream << ", ID " << self.device_index;
    stream << "]";
    return stream;
  }

  // Accessors delegating to the first underlying native renderer.
  inline uint width() const {
    return this->renderer_vec[0].cam.film_width;
  }
  inline uint height() const {
    return this->renderer_vec[0].cam.film_height;
  }
  inline int max_num_balls() const {
    return this->renderer_vec[0].max_num_balls;
  }
  inline bool orthogonal() const {
    return this->renderer_vec[0].cam.orthogonal_projection;
  }
  inline bool right_handed() const {
    return this->renderer_vec[0].cam.right_handed;
  }
  inline uint n_track() const {
    return static_cast<uint>(this->renderer_vec[0].n_track);
  }

  /** A tensor that is registered as a buffer with this Module to track its
   * device placement. Unfortunately, pytorch doesn't offer tracking Module
   * device placement in a better way as of now.
   */
  torch::Tensor device_tracker;

 protected:
  /** The device type for this renderer. */
  c10::DeviceType device_type;
  /** The device index for this renderer. */
  c10::DeviceIndex device_index;
  /** Pointer to the underlying pulsar renderers (one per batch element).
   * NOTE(review): element type lost in extraction; presumably the native
   * ::pulsar::Renderer struct — confirm against upstream. */
  std::vector<::pulsar::Renderer> renderer_vec;
};

} // namespace pytorch
} // namespace pulsar

#endif

// --- file: pulsar/pytorch/tensor_util.cpp ---
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

// NOTE(review): include targets were lost in extraction; reconstructed from
// the symbols used below (getCurrentCUDAStream, cudaMemcpyAsync, memcpy).
#ifdef WITH_CUDA
#include <ATen/cuda/CUDAContext.h>
#include <cuda_runtime.h>
#endif
#include <cstring>

#include "./tensor_util.h"

namespace pulsar {
namespace pytorch {

// Extract the sphere ids from the packed per-pixel forward-pass info tensor
// (no gradients flow through this operation). The ids occupy every second
// channel starting at offset 3 of the last dimension.
torch::Tensor sphere_ids_from_result_info_nograd(
    const torch::Tensor& forw_info) {
  torch::Tensor result = torch::zeros(
      {forw_info.size(0),
       forw_info.size(1),
       forw_info.size(2),
       (forw_info.size(3) - 3) / 2},
      torch::TensorOptions().device(forw_info.device()).dtype(torch::kInt32));
  // Get the relevant slice, contiguous.
  torch::Tensor tmp =
      forw_info
          .slice(
              /*dim=*/3, /*start=*/3, /*end=*/forw_info.size(3), /*step=*/2)
          .contiguous();
  if (forw_info.device().type() == c10::DeviceType::CUDA) {
#ifdef WITH_CUDA
    // Raw async copy reinterprets the float storage bits as uint32 ids.
    cudaMemcpyAsync(
        result.data_ptr(),
        tmp.data_ptr(),
        sizeof(uint32_t) * tmp.size(0) * tmp.size(1) * tmp.size(2) *
            tmp.size(3),
        cudaMemcpyDeviceToDevice,
        at::cuda::getCurrentCUDAStream());
#else
    throw std::runtime_error(
        "Copy on CUDA device initiated but built "
        "without CUDA support.");
#endif
  } else {
    memcpy(
        result.data_ptr(),
        tmp.data_ptr(),
        sizeof(uint32_t) * tmp.size(0) * tmp.size(1) * tmp.size(2) *
            tmp.size(3));
  }
  // `tmp` is freed after this, the memory might get reallocated. However,
  // only kernels in the same stream should ever be able to write to this
  // memory, which are executed only after the memcpy is complete. That's
  // why we can just continue.
  return result;
}

} // namespace pytorch
} // namespace pulsar

// --- file: pulsar/pytorch/tensor_util.h ---
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#ifndef PULSAR_NATIVE_PYTORCH_TENSOR_UTIL_H_
#define PULSAR_NATIVE_PYTORCH_TENSOR_UTIL_H_

// NOTE(review): include target lost in extraction; presumably
// <torch/extension.h> — confirm against upstream.
#include <torch/extension.h>

namespace pulsar {
namespace pytorch {

torch::Tensor sphere_ids_from_result_info_nograd(
    const torch::Tensor& forw_info);

} // namespace pytorch
} // namespace pulsar

#endif

// --- file: pulsar/pytorch/util.cpp ---
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#ifdef WITH_CUDA
// NOTE(review): include target lost in extraction; presumably "./util.h".
#include "./util.h"

namespace pulsar {
namespace pytorch {

// Thin wrappers around cudaMemcpyAsync, kept in a .cpp so that util.h users
// do not need the CUDA toolchain at their own compile site.
void cudaDevToDev(
    void* trg,
    const void* src,
    const int& size,
    const cudaStream_t& stream) {
  cudaMemcpyAsync(trg, src, size, cudaMemcpyDeviceToDevice, stream);
}

void cudaDevToHost(
    void* trg,
    const void* src,
    const int& size,
    const cudaStream_t& stream) {
  cudaMemcpyAsync(trg, src, size, cudaMemcpyDeviceToHost, stream);
}

} // namespace pytorch
} // namespace pulsar
#endif

// --- file: pulsar/pytorch/util.h ---
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#ifndef PULSAR_NATIVE_PYTORCH_UTIL_H_
#define PULSAR_NATIVE_PYTORCH_UTIL_H_

// NOTE(review): include target lost in extraction; presumably
// <torch/extension.h> — confirm against upstream.
#include <torch/extension.h>
#include "../global.h"

namespace pulsar {
namespace pytorch {

void cudaDevToDev(
    void* trg,
    const void* src,
    const int& size,
    const cudaStream_t& stream);
void cudaDevToHost(
    void* trg,
    const void* src,
    const int& size,
    const cudaStream_t& stream);

/**
 * This method takes a memory pointer and wraps it into a pytorch tensor.
 *
 * This is preferred over `torch::from_blob`, since that requires a CUDA
 * managed pointer. However, working with these for high performance
 * operations is slower. Most of the rendering operations should stay
 * local to the respective GPU anyways, so unmanaged pointers are
 * preferred.
 */
template <typename T>
torch::Tensor from_blob(
    const T* ptr,
    const torch::IntArrayRef& shape,
    const c10::DeviceType& device_type,
    const c10::DeviceIndex& device_index,
    const torch::Dtype& dtype,
    const cudaStream_t& stream) {
  // Allocate the result on the target device, then copy the raw buffer into
  // it: an async device-to-device copy on CUDA, a plain memcpy on CPU.
  torch::Tensor ret = torch::zeros(
      shape, torch::device({device_type, device_index}).dtype(dtype));
  const int num_elements =
      std::accumulate(shape.begin(), shape.end(), 1, std::multiplies<int>());
  if (device_type == c10::DeviceType::CUDA) {
#ifdef WITH_CUDA
    cudaDevToDev(
        ret.data_ptr(),
        static_cast<const void*>(ptr),
        sizeof(T) * num_elements,
        stream);
#else
    throw std::runtime_error(
        "Initiating devToDev copy on a build without CUDA.");
#endif
    // TODO: check for synchronization.
  } else {
    memcpy(ret.data_ptr(), ptr, sizeof(T) * num_elements);
  }
  return ret;
};

} // namespace pytorch
} // namespace pulsar

#endif

// --- file: pulsar/warnings.cpp ---
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include "./global.h"
#include "./logging.h"

/**
 * A compilation unit to provide warnings about the code and avoid
 * repeated messages.
+ */ +#ifdef PULSAR_ASSERTIONS +#pragma message("WARNING: assertions are enabled in Pulsar.") +#endif +#ifdef PULSAR_LOGGING_ENABLED +#pragma message("WARNING: logging is enabled in Pulsar.") +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/rasterize_coarse/bitmask.cuh b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/rasterize_coarse/bitmask.cuh new file mode 100644 index 0000000000000000000000000000000000000000..6ffcac87caa13f37a5ccb12b565d33450bc035c2 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/rasterize_coarse/bitmask.cuh @@ -0,0 +1,79 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once +#define BINMASK_H + +// A BitMask represents a bool array of shape (H, W, N). We pack values into +// the bits of unsigned ints; a single unsigned int has B = 32 bits, so to hold +// all values we use H * W * (N / B) = H * W * D values. We want to store +// BitMasks in shared memory, so we assume that the memory has already been +// allocated for it elsewhere. +class BitMask { + public: + __device__ BitMask(unsigned int* data, int H, int W, int N) + : data(data), H(H), W(W), B(8 * sizeof(unsigned int)), D(N / B) { + // TODO: check if the data is null. 
+ N = ceilf(N % 32); // take ceil incase N % 32 != 0 + block_clear(); // clear the data + } + + // Use all threads in the current block to clear all bits of this BitMask + __device__ void block_clear() { + for (int i = threadIdx.x; i < H * W * D; i += blockDim.x) { + data[i] = 0; + } + __syncthreads(); + } + + __device__ int _get_elem_idx(int y, int x, int d) { + return y * W * D + x * D + d / B; + } + + __device__ int _get_bit_idx(int d) { + return d % B; + } + + // Turn on a single bit (y, x, d) + __device__ void set(int y, int x, int d) { + int elem_idx = _get_elem_idx(y, x, d); + int bit_idx = _get_bit_idx(d); + const unsigned int mask = 1U << bit_idx; + atomicOr(data + elem_idx, mask); + } + + // Turn off a single bit (y, x, d) + __device__ void unset(int y, int x, int d) { + int elem_idx = _get_elem_idx(y, x, d); + int bit_idx = _get_bit_idx(d); + const unsigned int mask = ~(1U << bit_idx); + atomicAnd(data + elem_idx, mask); + } + + // Check whether the bit (y, x, d) is on or off + __device__ bool get(int y, int x, int d) { + int elem_idx = _get_elem_idx(y, x, d); + int bit_idx = _get_bit_idx(d); + return (data[elem_idx] >> bit_idx) & 1U; + } + + // Compute the number of bits set in the row (y, x, :) + __device__ int count(int y, int x) { + int total = 0; + for (int i = 0; i < D; ++i) { + int elem_idx = y * W * D + x * D + i; + unsigned int elem = data[elem_idx]; + total += __popc(elem); + } + return total; + } + + private: + unsigned int* data; + int H, W, B, D; +}; diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/rasterize_coarse/rasterize_coarse.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/rasterize_coarse/rasterize_coarse.cu new file mode 100644 index 0000000000000000000000000000000000000000..aed57d21bf1e9c70a7bde543223960894a49ada2 --- /dev/null +++ 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/rasterize_coarse/rasterize_coarse.cu @@ -0,0 +1,388 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include +#include +#include +#include "rasterize_coarse/bitmask.cuh" +#include "rasterize_points/rasterization_utils.cuh" +#include "utils/float_math.cuh" +#include "utils/geometry_utils.cuh" // For kEpsilon -- gross + +__global__ void TriangleBoundingBoxKernel( + const float* face_verts, // (F, 3, 3) + const int F, + const float blur_radius, + float* bboxes, // (4, F) + bool* skip_face) { // (F,) + const int tid = blockIdx.x * blockDim.x + threadIdx.x; + const int num_threads = blockDim.x * gridDim.x; + const float sqrt_radius = sqrt(blur_radius); + for (int f = tid; f < F; f += num_threads) { + const float v0x = face_verts[f * 9 + 0 * 3 + 0]; + const float v0y = face_verts[f * 9 + 0 * 3 + 1]; + const float v0z = face_verts[f * 9 + 0 * 3 + 2]; + const float v1x = face_verts[f * 9 + 1 * 3 + 0]; + const float v1y = face_verts[f * 9 + 1 * 3 + 1]; + const float v1z = face_verts[f * 9 + 1 * 3 + 2]; + const float v2x = face_verts[f * 9 + 2 * 3 + 0]; + const float v2y = face_verts[f * 9 + 2 * 3 + 1]; + const float v2z = face_verts[f * 9 + 2 * 3 + 2]; + const float xmin = FloatMin3(v0x, v1x, v2x) - sqrt_radius; + const float xmax = FloatMax3(v0x, v1x, v2x) + sqrt_radius; + const float ymin = FloatMin3(v0y, v1y, v2y) - sqrt_radius; + const float ymax = FloatMax3(v0y, v1y, v2y) + sqrt_radius; + const float zmin = FloatMin3(v0z, v1z, v2z); + const bool skip = zmin < kEpsilon; + bboxes[0 * F + f] = xmin; + bboxes[1 * F + f] = xmax; + bboxes[2 * F + f] = ymin; + bboxes[3 * F + f] = ymax; + skip_face[f] = skip; + } +} + +__global__ void PointBoundingBoxKernel( + const 
float* points, // (P, 3) + const float* radius, // (P,) + const int P, + float* bboxes, // (4, P) + bool* skip_points) { + const int tid = blockIdx.x * blockDim.x + threadIdx.x; + const int num_threads = blockDim.x * gridDim.x; + for (int p = tid; p < P; p += num_threads) { + const float x = points[p * 3 + 0]; + const float y = points[p * 3 + 1]; + const float z = points[p * 3 + 2]; + const float r = radius[p]; + // TODO: change to kEpsilon to match triangles? + const bool skip = z < 0; + bboxes[0 * P + p] = x - r; + bboxes[1 * P + p] = x + r; + bboxes[2 * P + p] = y - r; + bboxes[3 * P + p] = y + r; + skip_points[p] = skip; + } +} + +__global__ void RasterizeCoarseCudaKernel( + const float* bboxes, // (4, E) (xmin, xmax, ymin, ymax) + const bool* should_skip, // (E,) + const int64_t* elem_first_idxs, + const int64_t* elems_per_batch, + const int N, + const int E, + const int H, + const int W, + const int bin_size, + const int chunk_size, + const int max_elem_per_bin, + int* elems_per_bin, + int* bin_elems) { + extern __shared__ char sbuf[]; + const int M = max_elem_per_bin; + // Integer divide round up + const int num_bins_x = 1 + (W - 1) / bin_size; + const int num_bins_y = 1 + (H - 1) / bin_size; + + // NDC range depends on the ratio of W/H + // The shorter side from (H, W) is given an NDC range of 2.0 and + // the other side is scaled by the ratio of H:W. + const float NDC_x_half_range = NonSquareNdcRange(W, H) / 2.0f; + const float NDC_y_half_range = NonSquareNdcRange(H, W) / 2.0f; + + // Size of half a pixel in NDC units is the NDC half range + // divided by the corresponding image dimension + const float half_pix_x = NDC_x_half_range / W; + const float half_pix_y = NDC_y_half_range / H; + + // This is a boolean array of shape (num_bins_y, num_bins_x, chunk_size) + // stored in shared memory that will track whether each elem in the chunk + // falls into each bin of the image. 
+ BitMask binmask((unsigned int*)sbuf, num_bins_y, num_bins_x, chunk_size); + + // Have each block handle a chunk of elements + const int chunks_per_batch = 1 + (E - 1) / chunk_size; + const int num_chunks = N * chunks_per_batch; + + for (int chunk = blockIdx.x; chunk < num_chunks; chunk += gridDim.x) { + const int batch_idx = chunk / chunks_per_batch; // batch index + const int chunk_idx = chunk % chunks_per_batch; + const int elem_chunk_start_idx = chunk_idx * chunk_size; + + binmask.block_clear(); + const int64_t elem_start_idx = elem_first_idxs[batch_idx]; + const int64_t elem_stop_idx = elem_start_idx + elems_per_batch[batch_idx]; + + // Have each thread handle a different face within the chunk + for (int e = threadIdx.x; e < chunk_size; e += blockDim.x) { + const int e_idx = elem_chunk_start_idx + e; + + // Check that we are still within the same element of the batch + if (e_idx >= elem_stop_idx || e_idx < elem_start_idx) { + continue; + } + + if (should_skip[e_idx]) { + continue; + } + const float xmin = bboxes[0 * E + e_idx]; + const float xmax = bboxes[1 * E + e_idx]; + const float ymin = bboxes[2 * E + e_idx]; + const float ymax = bboxes[3 * E + e_idx]; + + // Brute-force search over all bins; TODO(T54294966) something smarter. + for (int by = 0; by < num_bins_y; ++by) { + // Y coordinate of the top and bottom of the bin. + // PixToNdc gives the location of the center of each pixel, so we + // need to add/subtract a half pixel to get the true extent of the bin. + // Reverse ordering of Y axis so that +Y is upwards in the image. + const float bin_y_min = + PixToNonSquareNdc(by * bin_size, H, W) - half_pix_y; + const float bin_y_max = + PixToNonSquareNdc((by + 1) * bin_size - 1, H, W) + half_pix_y; + const bool y_overlap = (ymin <= bin_y_max) && (bin_y_min < ymax); + + for (int bx = 0; bx < num_bins_x; ++bx) { + // X coordinate of the left and right of the bin. + // Reverse ordering of x axis so that +X is left. 
+ const float bin_x_max = + PixToNonSquareNdc((bx + 1) * bin_size - 1, W, H) + half_pix_x; + const float bin_x_min = + PixToNonSquareNdc(bx * bin_size, W, H) - half_pix_x; + + const bool x_overlap = (xmin <= bin_x_max) && (bin_x_min < xmax); + if (y_overlap && x_overlap) { + binmask.set(by, bx, e); + } + } + } + } + __syncthreads(); + // Now we have processed every elem in the current chunk. We need to + // count the number of elems in each bin so we can write the indices + // out to global memory. We have each thread handle a different bin. + for (int byx = threadIdx.x; byx < num_bins_y * num_bins_x; + byx += blockDim.x) { + const int by = byx / num_bins_x; + const int bx = byx % num_bins_x; + const int count = binmask.count(by, bx); + const int elems_per_bin_idx = + batch_idx * num_bins_y * num_bins_x + by * num_bins_x + bx; + + // This atomically increments the (global) number of elems found + // in the current bin, and gets the previous value of the counter; + // this effectively allocates space in the bin_faces array for the + // elems in the current chunk that fall into this bin. + const int start = atomicAdd(elems_per_bin + elems_per_bin_idx, count); + if (start + count > M) { + // The number of elems in this bin is so big that they won't fit. + // We print a warning using CUDA's printf. This may be invisible + // to notebook users, but apparent to others. It would be nice to + // also have a Python-friendly warning, but it is not obvious + // how to do this without slowing down the normal case. + const char* warning = + "Bin size was too small in the coarse rasterization phase. " + "This caused an overflow, meaning output may be incomplete. " + "To solve, " + "try increasing max_faces_per_bin / max_points_per_bin, " + "decreasing bin_size, " + "or setting bin_size to 0 to use the naive rasterization."; + printf(warning); + continue; + } + + // Now loop over the binmask and write the active bits for this bin + // out to bin_faces. 
+ int next_idx = batch_idx * num_bins_y * num_bins_x * M + + by * num_bins_x * M + bx * M + start; + for (int e = 0; e < chunk_size; ++e) { + if (binmask.get(by, bx, e)) { + // TODO(T54296346) find the correct method for handling errors in + // CUDA. Throw an error if num_faces_per_bin > max_faces_per_bin. + // Either decrease bin size or increase max_faces_per_bin + bin_elems[next_idx] = elem_chunk_start_idx + e; + next_idx++; + } + } + } + __syncthreads(); + } +} + +at::Tensor RasterizeCoarseCuda( + const at::Tensor& bboxes, + const at::Tensor& should_skip, + const at::Tensor& elem_first_idxs, + const at::Tensor& elems_per_batch, + const std::tuple image_size, + const int bin_size, + const int max_elems_per_bin) { + // Set the device for the kernel launch based on the device of the input + at::cuda::CUDAGuard device_guard(bboxes.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + const int H = std::get<0>(image_size); + const int W = std::get<1>(image_size); + + const int E = bboxes.size(1); + const int N = elems_per_batch.size(0); + const int M = max_elems_per_bin; + + // Integer divide round up + const int num_bins_y = 1 + (H - 1) / bin_size; + const int num_bins_x = 1 + (W - 1) / bin_size; + + if (num_bins_y >= kMaxItemsPerBin || num_bins_x >= kMaxItemsPerBin) { + std::stringstream ss; + ss << "In RasterizeCoarseCuda got num_bins_y: " << num_bins_y + << ", num_bins_x: " << num_bins_x << ", " << "; that's too many!"; + AT_ERROR(ss.str()); + } + auto opts = elems_per_batch.options().dtype(at::kInt); + at::Tensor elems_per_bin = at::zeros({N, num_bins_y, num_bins_x}, opts); + at::Tensor bin_elems = at::full({N, num_bins_y, num_bins_x, M}, -1, opts); + + if (bin_elems.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return bin_elems; + } + + const int chunk_size = 512; + const size_t shared_size = num_bins_y * num_bins_x * chunk_size / 8; + const size_t blocks = 64; + const size_t threads = 512; + + RasterizeCoarseCudaKernel<<>>( + 
bboxes.contiguous().data_ptr(), + should_skip.contiguous().data_ptr(), + elem_first_idxs.contiguous().data_ptr(), + elems_per_batch.contiguous().data_ptr(), + N, + E, + H, + W, + bin_size, + chunk_size, + M, + elems_per_bin.data_ptr(), + bin_elems.data_ptr()); + + AT_CUDA_CHECK(cudaGetLastError()); + return bin_elems; +} + +at::Tensor RasterizeMeshesCoarseCuda( + const at::Tensor& face_verts, + const at::Tensor& mesh_to_face_first_idx, + const at::Tensor& num_faces_per_mesh, + const std::tuple image_size, + const float blur_radius, + const int bin_size, + const int max_faces_per_bin) { + TORCH_CHECK( + face_verts.ndimension() == 3 && face_verts.size(1) == 3 && + face_verts.size(2) == 3, + "face_verts must have dimensions (num_faces, 3, 3)"); + + // Check inputs are on the same device + at::TensorArg face_verts_t{face_verts, "face_verts", 1}, + mesh_to_face_first_idx_t{ + mesh_to_face_first_idx, "mesh_to_face_first_idx", 2}, + num_faces_per_mesh_t{num_faces_per_mesh, "num_faces_per_mesh", 3}; + at::CheckedFrom c = "RasterizeMeshesCoarseCuda"; + at::checkAllSameGPU( + c, {face_verts_t, mesh_to_face_first_idx_t, num_faces_per_mesh_t}); + + // Set the device for the kernel launch based on the device of the input + at::cuda::CUDAGuard device_guard(face_verts.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // Allocate tensors for bboxes and should_skip + const int F = face_verts.size(0); + auto float_opts = face_verts.options().dtype(at::kFloat); + auto bool_opts = face_verts.options().dtype(at::kBool); + at::Tensor bboxes = at::empty({4, F}, float_opts); + at::Tensor should_skip = at::empty({F}, bool_opts); + + // Launch kernel to compute triangle bboxes + const size_t blocks = 128; + const size_t threads = 256; + TriangleBoundingBoxKernel<<>>( + face_verts.contiguous().data_ptr(), + F, + blur_radius, + bboxes.contiguous().data_ptr(), + should_skip.contiguous().data_ptr()); + AT_CUDA_CHECK(cudaGetLastError()); + + return RasterizeCoarseCuda( + 
bboxes, + should_skip, + mesh_to_face_first_idx, + num_faces_per_mesh, + image_size, + bin_size, + max_faces_per_bin); +} + +at::Tensor RasterizePointsCoarseCuda( + const at::Tensor& points, // (P, 3) + const at::Tensor& cloud_to_packed_first_idx, // (N,) + const at::Tensor& num_points_per_cloud, // (N,) + const std::tuple image_size, + const at::Tensor& radius, + const int bin_size, + const int max_points_per_bin) { + TORCH_CHECK( + points.ndimension() == 2 && points.size(1) == 3, + "points must have dimensions (num_points, 3)"); + + // Check inputs are on the same device + at::TensorArg points_t{points, "points", 1}, + cloud_to_packed_first_idx_t{ + cloud_to_packed_first_idx, "cloud_to_packed_first_idx", 2}, + num_points_per_cloud_t{num_points_per_cloud, "num_points_per_cloud", 3}; + at::CheckedFrom c = "RasterizePointsCoarseCuda"; + at::checkAllSameGPU( + c, {points_t, cloud_to_packed_first_idx_t, num_points_per_cloud_t}); + + // Set the device for the kernel launch based on the device of the input + at::cuda::CUDAGuard device_guard(points.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // Allocate tensors for bboxes and should_skip + const int P = points.size(0); + auto float_opts = points.options().dtype(at::kFloat); + auto bool_opts = points.options().dtype(at::kBool); + at::Tensor bboxes = at::empty({4, P}, float_opts); + at::Tensor should_skip = at::empty({P}, bool_opts); + + // Launch kernel to compute point bboxes + const size_t blocks = 128; + const size_t threads = 256; + PointBoundingBoxKernel<<>>( + points.contiguous().data_ptr(), + radius.contiguous().data_ptr(), + P, + bboxes.contiguous().data_ptr(), + should_skip.contiguous().data_ptr()); + AT_CUDA_CHECK(cudaGetLastError()); + + return RasterizeCoarseCuda( + bboxes, + should_skip, + cloud_to_packed_first_idx, + num_points_per_cloud, + image_size, + bin_size, + max_points_per_bin); +} diff --git 
// --- file: rasterize_coarse/rasterize_coarse.h ---
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#pragma once

// NOTE(review): include targets were lost in extraction; presumably
// <torch/extension.h> and <tuple> — confirm against upstream.
#include <torch/extension.h>
#include <tuple>

// Arguments are the same as RasterizeMeshesCoarse from
// rasterize_meshes/rasterize_meshes.h
#ifdef WITH_CUDA
torch::Tensor RasterizeMeshesCoarseCuda(
    const torch::Tensor& face_verts,
    const torch::Tensor& mesh_to_face_first_idx,
    const torch::Tensor& num_faces_per_mesh,
    const std::tuple<int, int> image_size,
    const float blur_radius,
    const int bin_size,
    const int max_faces_per_bin);
#endif

// Arguments are the same as RasterizePointsCoarse from
// rasterize_points/rasterize_points.h
#ifdef WITH_CUDA
torch::Tensor RasterizePointsCoarseCuda(
    const torch::Tensor& points,
    const torch::Tensor& cloud_to_packed_first_idx,
    const torch::Tensor& num_points_per_cloud,
    const std::tuple<int, int> image_size,
    const torch::Tensor& radius,
    const int bin_size,
    const int max_points_per_bin);
#endif
0000000000000000000000000000000000000000..9dd3e266ccf05e17f9ca6427dd4c414d7a1e1057 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/rasterize_meshes/rasterize_meshes.cu @@ -0,0 +1,823 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "rasterize_points/rasterization_utils.cuh" +#include "utils/float_math.cuh" +#include "utils/geometry_utils.cuh" + +namespace { +// A structure for holding details about a pixel. +struct Pixel { + float z; + int64_t idx; // idx of face + float dist; // abs distance of pixel to face + float3 bary; +}; + +__device__ bool operator<(const Pixel& a, const Pixel& b) { + return a.z < b.z || (a.z == b.z && a.idx < b.idx); +} + +// Get the xyz coordinates of the three vertices for the face given by the +// index face_idx into face_verts. +__device__ thrust::tuple GetSingleFaceVerts( + const float* face_verts, + int face_idx) { + const float x0 = face_verts[face_idx * 9 + 0]; + const float y0 = face_verts[face_idx * 9 + 1]; + const float z0 = face_verts[face_idx * 9 + 2]; + const float x1 = face_verts[face_idx * 9 + 3]; + const float y1 = face_verts[face_idx * 9 + 4]; + const float z1 = face_verts[face_idx * 9 + 5]; + const float x2 = face_verts[face_idx * 9 + 6]; + const float y2 = face_verts[face_idx * 9 + 7]; + const float z2 = face_verts[face_idx * 9 + 8]; + + const float3 v0xyz = make_float3(x0, y0, z0); + const float3 v1xyz = make_float3(x1, y1, z1); + const float3 v2xyz = make_float3(x2, y2, z2); + + return thrust::make_tuple(v0xyz, v1xyz, v2xyz); +} + +// Get the min/max x/y/z values for the face given by vertices v0, v1, v2. 
+__device__ thrust::tuple +GetFaceBoundingBox(float3 v0, float3 v1, float3 v2) { + const float xmin = FloatMin3(v0.x, v1.x, v2.x); + const float ymin = FloatMin3(v0.y, v1.y, v2.y); + const float zmin = FloatMin3(v0.z, v1.z, v2.z); + const float xmax = FloatMax3(v0.x, v1.x, v2.x); + const float ymax = FloatMax3(v0.y, v1.y, v2.y); + const float zmax = FloatMax3(v0.z, v1.z, v2.z); + + return thrust::make_tuple( + make_float2(xmin, xmax), + make_float2(ymin, ymax), + make_float2(zmin, zmax)); +} + +// Check if the point (px, py) lies outside the face bounding box face_bbox. +// Return true if the point is outside. +__device__ bool CheckPointOutsideBoundingBox( + float3 v0, + float3 v1, + float3 v2, + float blur_radius, + float2 pxy) { + const auto bbox = GetFaceBoundingBox(v0, v1, v2); + const float2 xlims = thrust::get<0>(bbox); + const float2 ylims = thrust::get<1>(bbox); + const float2 zlims = thrust::get<2>(bbox); + + const float x_min = xlims.x - blur_radius; + const float y_min = ylims.x - blur_radius; + const float x_max = xlims.y + blur_radius; + const float y_max = ylims.y + blur_radius; + + // Faces with at least one vertex behind the camera won't render correctly + // and should be removed or clipped before calling the rasterizer + const bool z_invalid = zlims.x < kEpsilon; + + // Check if the current point is oustside the triangle bounding box. + return ( + pxy.x > x_max || pxy.x < x_min || pxy.y > y_max || pxy.y < y_min || + z_invalid); +} + +// This function checks if a pixel given by xy location pxy lies within the +// face with index face_idx in face_verts. One of the inputs is a list (q) +// which contains Pixel structs with the indices of the faces which intersect +// with this pixel sorted by closest z distance. If the point pxy lies in the +// face, the list (q) is updated and re-orderered in place. In addition +// the auxiliary variables q_size, q_max_z and q_max_idx are also modified. 
+// This code is shared between RasterizeMeshesNaiveCudaKernel and +// RasterizeMeshesFineCudaKernel. +template +__device__ void CheckPixelInsideFace( + const float* face_verts, // (F, 3, 3) + const int64_t* clipped_faces_neighbor_idx, // (F,) + const int face_idx, + int& q_size, + float& q_max_z, + int& q_max_idx, + FaceQ& q, + const float blur_radius, + const float2 pxy, // Coordinates of the pixel + const int K, + const bool perspective_correct, + const bool clip_barycentric_coords, + const bool cull_backfaces) { + const auto v012 = GetSingleFaceVerts(face_verts, face_idx); + const float3 v0 = thrust::get<0>(v012); + const float3 v1 = thrust::get<1>(v012); + const float3 v2 = thrust::get<2>(v012); + + // Only need xy for barycentric coordinates and distance calculations. + const float2 v0xy = make_float2(v0.x, v0.y); + const float2 v1xy = make_float2(v1.x, v1.y); + const float2 v2xy = make_float2(v2.x, v2.y); + + // Perform checks and skip if: + // 1. the face is behind the camera + // 2. the face is facing away from the camera + // 3. the face has very small face area + // 4. the pixel is outside the face bbox + const float zmax = FloatMax3(v0.z, v1.z, v2.z); + const bool outside_bbox = CheckPointOutsideBoundingBox( + v0, v1, v2, sqrt(blur_radius), pxy); // use sqrt of blur for bbox + const float face_area = EdgeFunctionForward(v0xy, v1xy, v2xy); + // Check if the face is visible to the camera. + const bool back_face = face_area < 0.0; + const bool zero_face_area = + (face_area <= kEpsilon && face_area >= -1.0f * kEpsilon); + + if (zmax < 0 || (cull_backfaces && back_face) || outside_bbox || + zero_face_area) { + return; + } + + // Calculate barycentric coords and euclidean dist to triangle. + const float3 p_bary0 = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy); + const float3 p_bary = !perspective_correct + ? p_bary0 + : BarycentricPerspectiveCorrectionForward(p_bary0, v0.z, v1.z, v2.z); + const float3 p_bary_clip = + !clip_barycentric_coords ? 
p_bary : BarycentricClipForward(p_bary); + + const float pz = + p_bary_clip.x * v0.z + p_bary_clip.y * v1.z + p_bary_clip.z * v2.z; + + if (pz < 0) { + return; // Face is behind the image plane. + } + + // Get abs squared distance + const float dist = PointTriangleDistanceForward(pxy, v0xy, v1xy, v2xy); + + // Use the unclipped bary coordinates to determine if the point is inside the + // face. + const bool inside = p_bary.x > 0.0f && p_bary.y > 0.0f && p_bary.z > 0.0f; + const float signed_dist = inside ? -dist : dist; + // Check if pixel is outside blur region + if (!inside && dist >= blur_radius) { + return; + } + + // Handle the case where a face (f) partially behind the image plane is + // clipped to a quadrilateral and then split into two faces (t1, t2). In this + // case we: + // 1. Find the index of the neighboring face (e.g. for t1 need index of t2) + // 2. Check if the neighboring face (t2) is already in the top K faces + // 3. If yes, compare the distance of the pixel to t1 with the distance to t2. + // 4. If dist_t1 < dist_t2, overwrite the values for t2 in the top K faces. + const int neighbor_idx = clipped_faces_neighbor_idx[face_idx]; + int neighbor_idx_top_k = -1; + + // Check if neighboring face is already in the top K. + // -1 is the fill value in clipped_faces_neighbor_idx + if (neighbor_idx != -1) { + // Only need to loop until q_size. + for (int i = 0; i < q_size; i++) { + if (q[i].idx == neighbor_idx) { + neighbor_idx_top_k = i; + break; + } + } + } + // If neighbor idx is not -1 then it is in the top K struct. + if (neighbor_idx_top_k != -1) { + // If dist of current face is less than neighbor then overwrite the + // neighbor face values in the top K struct. + float neighbor_dist = abs(q[neighbor_idx_top_k].dist); + if (dist < neighbor_dist) { + // Overwrite the neighbor face values + q[neighbor_idx_top_k] = {pz, face_idx, signed_dist, p_bary_clip}; + + // If pz > q_max then overwrite the max values and index of the max. 
+ // q_size stays the same. + if (pz > q_max_z) { + q_max_z = pz; + q_max_idx = neighbor_idx_top_k; + } + } + } else { + // Handle as a normal face + if (q_size < K) { + // Just insert it. + q[q_size] = {pz, face_idx, signed_dist, p_bary_clip}; + if (pz > q_max_z) { + q_max_z = pz; + q_max_idx = q_size; + } + q_size++; + } else if (pz < q_max_z) { + // Overwrite the old max, and find the new max. + q[q_max_idx] = {pz, face_idx, signed_dist, p_bary_clip}; + q_max_z = pz; + for (int i = 0; i < K; i++) { + if (q[i].z > q_max_z) { + q_max_z = q[i].z; + q_max_idx = i; + } + } + } + } +} + +} // namespace + +// **************************************************************************** +// * NAIVE RASTERIZATION * +// **************************************************************************** +__global__ void RasterizeMeshesNaiveCudaKernel( + const float* face_verts, + const int64_t* mesh_to_face_first_idx, + const int64_t* num_faces_per_mesh, + const int64_t* clipped_faces_neighbor_idx, + const float blur_radius, + const bool perspective_correct, + const bool clip_barycentric_coords, + const bool cull_backfaces, + const int N, + const int H, + const int W, + const int K, + int64_t* face_idxs, + float* zbuf, + float* pix_dists, + float* bary) { + // Simple version: One thread per output pixel + int num_threads = gridDim.x * blockDim.x; + int tid = blockDim.x * blockIdx.x + threadIdx.x; + + for (int i = tid; i < N * H * W; i += num_threads) { + // Convert linear index to 3D index + const int n = i / (H * W); // batch index. + const int pix_idx = i % (H * W); + + // Reverse ordering of X and Y axes + const int yi = H - 1 - pix_idx / W; + const int xi = W - 1 - pix_idx % W; + + // screen coordinates to ndc coordinates of pixel. 
+ const float xf = PixToNonSquareNdc(xi, W, H); + const float yf = PixToNonSquareNdc(yi, H, W); + const float2 pxy = make_float2(xf, yf); + + // For keeping track of the K closest points we want a data structure + // that (1) gives O(1) access to the closest point for easy comparisons, + // and (2) allows insertion of new elements. In the CPU version we use + // std::priority_queue; then (2) is O(log K). We can't use STL + // containers in CUDA; we could roll our own max heap in an array, but + // that would likely have a lot of warp divergence so we do something + // simpler instead: keep the elements in an unsorted array, but keep + // track of the max value and the index of the max value. Then (1) is + // still O(1) time, while (2) is O(K) with a clean loop. Since K <= 8 + // this should be fast enough for our purposes. + Pixel q[kMaxPointsPerPixel]; + int q_size = 0; + float q_max_z = -1000; + int q_max_idx = -1; + + // Using the batch index of the thread get the start and stop + // indices for the faces. + const int64_t face_start_idx = mesh_to_face_first_idx[n]; + const int64_t face_stop_idx = face_start_idx + num_faces_per_mesh[n]; + + // Loop through the faces in the mesh. + for (int f = face_start_idx; f < face_stop_idx; ++f) { + // Check if the pixel pxy is inside the face bounding box and if it is, + // update q, q_size, q_max_z and q_max_idx in place. + + CheckPixelInsideFace( + face_verts, + clipped_faces_neighbor_idx, + f, + q_size, + q_max_z, + q_max_idx, + q, + blur_radius, + pxy, + K, + perspective_correct, + clip_barycentric_coords, + cull_backfaces); + } + + // TODO: make sorting an option as only top k is needed, not sorted values. 
+ BubbleSort(q, q_size); + int idx = n * H * W * K + pix_idx * K; + + for (int k = 0; k < q_size; ++k) { + face_idxs[idx + k] = q[k].idx; + zbuf[idx + k] = q[k].z; + pix_dists[idx + k] = q[k].dist; + bary[(idx + k) * 3 + 0] = q[k].bary.x; + bary[(idx + k) * 3 + 1] = q[k].bary.y; + bary[(idx + k) * 3 + 2] = q[k].bary.z; + } + } +} + +std::tuple +RasterizeMeshesNaiveCuda( + const at::Tensor& face_verts, + const at::Tensor& mesh_to_faces_packed_first_idx, + const at::Tensor& num_faces_per_mesh, + const at::Tensor& clipped_faces_neighbor_idx, + const std::tuple image_size, + const float blur_radius, + const int num_closest, + const bool perspective_correct, + const bool clip_barycentric_coords, + const bool cull_backfaces) { + TORCH_CHECK( + face_verts.ndimension() == 3 && face_verts.size(1) == 3 && + face_verts.size(2) == 3, + "face_verts must have dimensions (num_faces, 3, 3)"); + + TORCH_CHECK( + num_faces_per_mesh.size(0) == mesh_to_faces_packed_first_idx.size(0), + "num_faces_per_mesh must have save size first dimension as mesh_to_faces_packed_first_idx"); + + TORCH_CHECK( + clipped_faces_neighbor_idx.size(0) == face_verts.size(0), + "clipped_faces_neighbor_idx must have save size first dimension as face_verts"); + + if (num_closest > kMaxPointsPerPixel) { + std::stringstream ss; + ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel; + AT_ERROR(ss.str()); + } + + // Check inputs are on the same device + at::TensorArg face_verts_t{face_verts, "face_verts", 1}, + mesh_to_faces_packed_first_idx_t{ + mesh_to_faces_packed_first_idx, "mesh_to_faces_packed_first_idx", 2}, + num_faces_per_mesh_t{num_faces_per_mesh, "num_faces_per_mesh", 3}, + clipped_faces_neighbor_idx_t{ + clipped_faces_neighbor_idx, "clipped_faces_neighbor_idx", 4}; + at::CheckedFrom c = "RasterizeMeshesNaiveCuda"; + at::checkAllSameGPU( + c, + {face_verts_t, + mesh_to_faces_packed_first_idx_t, + num_faces_per_mesh_t, + clipped_faces_neighbor_idx_t}); + + // Set the device for the kernel launch 
based on the device of the input + at::cuda::CUDAGuard device_guard(face_verts.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + const int N = num_faces_per_mesh.size(0); // batch size. + const int H = std::get<0>(image_size); + const int W = std::get<1>(image_size); + const int K = num_closest; + + auto long_opts = num_faces_per_mesh.options().dtype(at::kLong); + auto float_opts = face_verts.options().dtype(at::kFloat); + + at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts); + at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts); + at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts); + at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts); + + if (face_idxs.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return std::make_tuple(face_idxs, zbuf, bary, pix_dists); + } + + const size_t blocks = 1024; + const size_t threads = 64; + + RasterizeMeshesNaiveCudaKernel<<>>( + face_verts.contiguous().data_ptr(), + mesh_to_faces_packed_first_idx.contiguous().data_ptr(), + num_faces_per_mesh.contiguous().data_ptr(), + clipped_faces_neighbor_idx.contiguous().data_ptr(), + blur_radius, + perspective_correct, + clip_barycentric_coords, + cull_backfaces, + N, + H, + W, + K, + face_idxs.data_ptr(), + zbuf.data_ptr(), + pix_dists.data_ptr(), + bary.data_ptr()); + + AT_CUDA_CHECK(cudaGetLastError()); + return std::make_tuple(face_idxs, zbuf, bary, pix_dists); +} + +// **************************************************************************** +// * BACKWARD PASS * +// **************************************************************************** +// TODO: benchmark parallelizing over faces_verts instead of over pixels. 
+__global__ void RasterizeMeshesBackwardCudaKernel( + const float* face_verts, // (F, 3, 3) + const int64_t* pix_to_face, // (N, H, W, K) + const bool perspective_correct, + const bool clip_barycentric_coords, + const int N, + const int H, + const int W, + const int K, + const float* grad_zbuf, // (N, H, W, K) + const float* grad_bary, // (N, H, W, K, 3) + const float* grad_dists, // (N, H, W, K) + float* grad_face_verts) { // (F, 3, 3) + + // Parallelize over each pixel in images of + // size H * W, for each image in the batch of size N. + const int num_threads = gridDim.x * blockDim.x; + const int tid = blockIdx.x * blockDim.x + threadIdx.x; + + for (int t_i = tid; t_i < N * H * W; t_i += num_threads) { + // Convert linear index to 3D index + const int n = t_i / (H * W); // batch index. + const int pix_idx = t_i % (H * W); + + // Reverse ordering of X and Y axes. + const int yi = H - 1 - pix_idx / W; + const int xi = W - 1 - pix_idx % W; + + const float xf = PixToNonSquareNdc(xi, W, H); + const float yf = PixToNonSquareNdc(yi, H, W); + const float2 pxy = make_float2(xf, yf); + + // Loop over all the faces for this pixel. + for (int k = 0; k < K; k++) { + // Index into (N, H, W, K, :) grad tensors + // pixel index + top k index + int i = n * H * W * K + pix_idx * K + k; + + const int f = pix_to_face[i]; + if (f < 0) { + continue; // padded face. + } + // Get xyz coordinates of the three face vertices. + const auto v012 = GetSingleFaceVerts(face_verts, f); + const float3 v0 = thrust::get<0>(v012); + const float3 v1 = thrust::get<1>(v012); + const float3 v2 = thrust::get<2>(v012); + + // Only neex xy for barycentric coordinate and distance calculations. + const float2 v0xy = make_float2(v0.x, v0.y); + const float2 v1xy = make_float2(v1.x, v1.y); + const float2 v2xy = make_float2(v2.x, v2.y); + + // Get upstream gradients for the face. 
+ const float grad_dist_upstream = grad_dists[i]; + const float grad_zbuf_upstream = grad_zbuf[i]; + const float grad_bary_upstream_w0 = grad_bary[i * 3 + 0]; + const float grad_bary_upstream_w1 = grad_bary[i * 3 + 1]; + const float grad_bary_upstream_w2 = grad_bary[i * 3 + 2]; + const float3 grad_bary_upstream = make_float3( + grad_bary_upstream_w0, grad_bary_upstream_w1, grad_bary_upstream_w2); + + const float3 b_w = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy); + const float3 b_pp = !perspective_correct + ? b_w + : BarycentricPerspectiveCorrectionForward(b_w, v0.z, v1.z, v2.z); + + const float3 b_w_clip = + !clip_barycentric_coords ? b_pp : BarycentricClipForward(b_pp); + + const bool inside = b_pp.x > 0.0f && b_pp.y > 0.0f && b_pp.z > 0.0f; + const float sign = inside ? -1.0f : 1.0f; + + auto grad_dist_f = PointTriangleDistanceBackward( + pxy, v0xy, v1xy, v2xy, sign * grad_dist_upstream); + const float2 ddist_d_v0 = thrust::get<1>(grad_dist_f); + const float2 ddist_d_v1 = thrust::get<2>(grad_dist_f); + const float2 ddist_d_v2 = thrust::get<3>(grad_dist_f); + + // Upstream gradient for barycentric coords from zbuf calculation: + // zbuf = bary_w0 * z0 + bary_w1 * z1 + bary_w2 * z2 + // Therefore + // d_zbuf/d_bary_w0 = z0 + // d_zbuf/d_bary_w1 = z1 + // d_zbuf/d_bary_w2 = z2 + const float3 d_zbuf_d_bwclip = make_float3(v0.z, v1.z, v2.z); + + // Total upstream barycentric gradients are the sum of + // external upstream gradients and contribution from zbuf. 
+ const float3 grad_bary_f_sum = + (grad_bary_upstream + grad_zbuf_upstream * d_zbuf_d_bwclip); + + float3 grad_bary0 = grad_bary_f_sum; + + if (clip_barycentric_coords) { + grad_bary0 = BarycentricClipBackward(b_w, grad_bary_f_sum); + } + + float dz0_persp = 0.0f, dz1_persp = 0.0f, dz2_persp = 0.0f; + if (perspective_correct) { + auto perspective_grads = BarycentricPerspectiveCorrectionBackward( + b_w, v0.z, v1.z, v2.z, grad_bary0); + grad_bary0 = thrust::get<0>(perspective_grads); + dz0_persp = thrust::get<1>(perspective_grads); + dz1_persp = thrust::get<2>(perspective_grads); + dz2_persp = thrust::get<3>(perspective_grads); + } + + auto grad_bary_f = + BarycentricCoordsBackward(pxy, v0xy, v1xy, v2xy, grad_bary0); + const float2 dbary_d_v0 = thrust::get<1>(grad_bary_f); + const float2 dbary_d_v1 = thrust::get<2>(grad_bary_f); + const float2 dbary_d_v2 = thrust::get<3>(grad_bary_f); + + atomicAdd(grad_face_verts + f * 9 + 0, dbary_d_v0.x + ddist_d_v0.x); + atomicAdd(grad_face_verts + f * 9 + 1, dbary_d_v0.y + ddist_d_v0.y); + atomicAdd( + grad_face_verts + f * 9 + 2, + grad_zbuf_upstream * b_w_clip.x + dz0_persp); + atomicAdd(grad_face_verts + f * 9 + 3, dbary_d_v1.x + ddist_d_v1.x); + atomicAdd(grad_face_verts + f * 9 + 4, dbary_d_v1.y + ddist_d_v1.y); + atomicAdd( + grad_face_verts + f * 9 + 5, + grad_zbuf_upstream * b_w_clip.y + dz1_persp); + atomicAdd(grad_face_verts + f * 9 + 6, dbary_d_v2.x + ddist_d_v2.x); + atomicAdd(grad_face_verts + f * 9 + 7, dbary_d_v2.y + ddist_d_v2.y); + atomicAdd( + grad_face_verts + f * 9 + 8, + grad_zbuf_upstream * b_w_clip.z + dz2_persp); + } + } +} + +at::Tensor RasterizeMeshesBackwardCuda( + const at::Tensor& face_verts, // (F, 3, 3) + const at::Tensor& pix_to_face, // (N, H, W, K) + const at::Tensor& grad_zbuf, // (N, H, W, K) + const at::Tensor& grad_bary, // (N, H, W, K, 3) + const at::Tensor& grad_dists, // (N, H, W, K) + const bool perspective_correct, + const bool clip_barycentric_coords) { + // Check inputs are on the 
same device + at::TensorArg face_verts_t{face_verts, "face_verts", 1}, + pix_to_face_t{pix_to_face, "pix_to_face", 2}, + grad_zbuf_t{grad_zbuf, "grad_zbuf", 3}, + grad_bary_t{grad_bary, "grad_bary", 4}, + grad_dists_t{grad_dists, "grad_dists", 5}; + at::CheckedFrom c = "RasterizeMeshesBackwardCuda"; + at::checkAllSameGPU( + c, {face_verts_t, pix_to_face_t, grad_zbuf_t, grad_bary_t, grad_dists_t}); + at::checkAllSameType( + c, {face_verts_t, grad_zbuf_t, grad_bary_t, grad_dists_t}); + + // This is nondeterministic because atomicAdd + at::globalContext().alertNotDeterministic("RasterizeMeshesBackwardCuda"); + + // Set the device for the kernel launch based on the device of the input + at::cuda::CUDAGuard device_guard(face_verts.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + const int F = face_verts.size(0); + const int N = pix_to_face.size(0); + const int H = pix_to_face.size(1); + const int W = pix_to_face.size(2); + const int K = pix_to_face.size(3); + + at::Tensor grad_face_verts = at::zeros({F, 3, 3}, face_verts.options()); + + if (grad_face_verts.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return grad_face_verts; + } + + const size_t blocks = 1024; + const size_t threads = 64; + + RasterizeMeshesBackwardCudaKernel<<>>( + face_verts.contiguous().data_ptr(), + pix_to_face.contiguous().data_ptr(), + perspective_correct, + clip_barycentric_coords, + N, + H, + W, + K, + grad_zbuf.contiguous().data_ptr(), + grad_bary.contiguous().data_ptr(), + grad_dists.contiguous().data_ptr(), + grad_face_verts.data_ptr()); + + AT_CUDA_CHECK(cudaGetLastError()); + return grad_face_verts; +} + +// **************************************************************************** +// * FINE RASTERIZATION * +// **************************************************************************** +__global__ void RasterizeMeshesFineCudaKernel( + const float* face_verts, // (F, 3, 3) + const int32_t* bin_faces, // (N, BH, BW, T) + const int64_t* 
clipped_faces_neighbor_idx, // (F,) + const float blur_radius, + const int bin_size, + const bool perspective_correct, + const bool clip_barycentric_coords, + const bool cull_backfaces, + const int N, + const int BH, + const int BW, + const int M, + const int H, + const int W, + const int K, + int64_t* face_idxs, // (N, H, W, K) + float* zbuf, // (N, H, W, K) + float* pix_dists, // (N, H, W, K) + float* bary // (N, H, W, K, 3) +) { + // This can be more than H * W if H or W are not divisible by bin_size. + int num_pixels = N * BH * BW * bin_size * bin_size; + int num_threads = gridDim.x * blockDim.x; + int tid = blockIdx.x * blockDim.x + threadIdx.x; + + for (int pid = tid; pid < num_pixels; pid += num_threads) { + // Convert linear index into bin and pixel indices. We make the within + // block pixel ids move the fastest, so that adjacent threads will fall + // into the same bin; this should give them coalesced memory reads when + // they read from faces and bin_faces. + int i = pid; + const int n = i / (BH * BW * bin_size * bin_size); + i %= BH * BW * bin_size * bin_size; + // bin index y + const int by = i / (BW * bin_size * bin_size); + i %= BW * bin_size * bin_size; + // bin index y + const int bx = i / (bin_size * bin_size); + // pixel within the bin + i %= bin_size * bin_size; + + // Pixel x, y indices + const int yi = i / bin_size + by * bin_size; + const int xi = i % bin_size + bx * bin_size; + + if (yi >= H || xi >= W) + continue; + + const float xf = PixToNonSquareNdc(xi, W, H); + const float yf = PixToNonSquareNdc(yi, H, W); + + const float2 pxy = make_float2(xf, yf); + + // This part looks like the naive rasterization kernel, except we use + // bin_faces to only look at a subset of faces already known to fall + // in this bin. TODO abstract out this logic into some data structure + // that is shared by both kernels? 
+ Pixel q[kMaxPointsPerPixel]; + int q_size = 0; + float q_max_z = -1000; + int q_max_idx = -1; + + for (int m = 0; m < M; m++) { + const int f = bin_faces[n * BH * BW * M + by * BW * M + bx * M + m]; + if (f < 0) { + continue; // bin_faces uses -1 as a sentinal value. + } + // Check if the pixel pxy is inside the face bounding box and if it is, + // update q, q_size, q_max_z and q_max_idx in place. + CheckPixelInsideFace( + face_verts, + clipped_faces_neighbor_idx, + f, + q_size, + q_max_z, + q_max_idx, + q, + blur_radius, + pxy, + K, + perspective_correct, + clip_barycentric_coords, + cull_backfaces); + } + + // Now we've looked at all the faces for this bin, so we can write + // output for the current pixel. + // TODO: make sorting an option as only top k is needed, not sorted values. + BubbleSort(q, q_size); + + // Reverse ordering of the X and Y axis so that + // in the image +Y is pointing up and +X is pointing left. + const int yidx = H - 1 - yi; + const int xidx = W - 1 - xi; + + const int pix_idx = n * H * W * K + yidx * W * K + xidx * K; + for (int k = 0; k < q_size; k++) { + face_idxs[pix_idx + k] = q[k].idx; + zbuf[pix_idx + k] = q[k].z; + pix_dists[pix_idx + k] = q[k].dist; + bary[(pix_idx + k) * 3 + 0] = q[k].bary.x; + bary[(pix_idx + k) * 3 + 1] = q[k].bary.y; + bary[(pix_idx + k) * 3 + 2] = q[k].bary.z; + } + } +} + +std::tuple +RasterizeMeshesFineCuda( + const at::Tensor& face_verts, + const at::Tensor& bin_faces, + const at::Tensor& clipped_faces_neighbor_idx, + const std::tuple image_size, + const float blur_radius, + const int bin_size, + const int faces_per_pixel, + const bool perspective_correct, + const bool clip_barycentric_coords, + const bool cull_backfaces) { + TORCH_CHECK( + face_verts.ndimension() == 3 && face_verts.size(1) == 3 && + face_verts.size(2) == 3, + "face_verts must have dimensions (num_faces, 3, 3)"); + TORCH_CHECK(bin_faces.ndimension() == 4, "bin_faces must have 4 dimensions"); + TORCH_CHECK( + 
clipped_faces_neighbor_idx.size(0) == face_verts.size(0), + "clipped_faces_neighbor_idx must have the same first dimension as face_verts"); + + // Check inputs are on the same device + at::TensorArg face_verts_t{face_verts, "face_verts", 1}, + bin_faces_t{bin_faces, "bin_faces", 2}, + clipped_faces_neighbor_idx_t{ + clipped_faces_neighbor_idx, "clipped_faces_neighbor_idx", 3}; + at::CheckedFrom c = "RasterizeMeshesFineCuda"; + at::checkAllSameGPU( + c, {face_verts_t, bin_faces_t, clipped_faces_neighbor_idx_t}); + + // Set the device for the kernel launch based on the device of the input + at::cuda::CUDAGuard device_guard(face_verts.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // bin_faces shape (N, BH, BW, M) + const int N = bin_faces.size(0); + const int BH = bin_faces.size(1); + const int BW = bin_faces.size(2); + const int M = bin_faces.size(3); + const int K = faces_per_pixel; + + const int H = std::get<0>(image_size); + const int W = std::get<1>(image_size); + + if (K > kMaxPointsPerPixel) { + AT_ERROR("Must have num_closest <= 150"); + } + auto long_opts = bin_faces.options().dtype(at::kLong); + auto float_opts = face_verts.options().dtype(at::kFloat); + + at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts); + at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts); + at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts); + at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts); + + if (face_idxs.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return std::make_tuple(face_idxs, zbuf, bary, pix_dists); + } + + const size_t blocks = 1024; + const size_t threads = 64; + + RasterizeMeshesFineCudaKernel<<>>( + face_verts.contiguous().data_ptr(), + bin_faces.contiguous().data_ptr(), + clipped_faces_neighbor_idx.contiguous().data_ptr(), + blur_radius, + bin_size, + perspective_correct, + clip_barycentric_coords, + cull_backfaces, + N, + BH, + BW, + M, + H, + W, + K, + face_idxs.data_ptr(), + zbuf.data_ptr(), + 
pix_dists.data_ptr(), + bary.data_ptr()); + + return std::make_tuple(face_idxs, zbuf, bary, pix_dists); +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/rasterize_meshes/rasterize_meshes.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/rasterize_meshes/rasterize_meshes.h new file mode 100644 index 0000000000000000000000000000000000000000..584aa0238ad86434567716d5c77f212a394b1d84 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/rasterize_meshes/rasterize_meshes.h @@ -0,0 +1,549 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once +#include +#include +#include +#include "rasterize_coarse/rasterize_coarse.h" +#include "utils/pytorch3d_cutils.h" + +// **************************************************************************** +// * FORWARD PASS * +// **************************************************************************** + +std::tuple +RasterizeMeshesNaiveCpu( + const torch::Tensor& face_verts, + const torch::Tensor& mesh_to_face_first_idx, + const torch::Tensor& num_faces_per_mesh, + const torch::Tensor& clipped_faces_neighbor_idx, + const std::tuple image_size, + const float blur_radius, + const int faces_per_pixel, + const bool perspective_correct, + const bool clip_barycentric_coords, + const bool cull_backfaces); + +#ifdef WITH_CUDA +std::tuple +RasterizeMeshesNaiveCuda( + const at::Tensor& face_verts, + const at::Tensor& mesh_to_face_first_idx, + const at::Tensor& num_faces_per_mesh, + const torch::Tensor& clipped_faces_neighbor_idx, + const std::tuple image_size, + const float blur_radius, + const int num_closest, + const bool perspective_correct, + const bool clip_barycentric_coords, + 
const bool cull_backfaces); +#endif +// Forward pass for rasterizing a batch of meshes. +// +// Args: +// face_verts: Tensor of shape (F, 3, 3) giving (packed) vertex positions for +// faces in all the meshes in the batch. Concretely, +// face_verts[f, i] = [x, y, z] gives the coordinates for the +// ith vertex of the fth face. These vertices are expected to be +// in NDC coordinates in the range [-1, 1]. +// mesh_to_face_first_idx: LongTensor of shape (N) giving the index in +// faces_verts of the first face in each mesh in +// the batch where N is the batch size. +// num_faces_per_mesh: LongTensor of shape (N) giving the number of faces +// for each mesh in the batch. +// clipped_faces_neighbor_idx: LongTensor of shape (F,) giving the +// index of the neighboring face for each face which was clipped to a +// quadrilateral and then divided into two triangles. +// e.g. for a face f partially behind the image plane which is split into +// two triangles (t1, t2): clipped_faces_neighbor_idx[t1_idx] = t2_idx +// Faces which are not clipped and subdivided are set to -1. +// image_size: Tuple (H, W) giving the size in pixels of the output +// image to be rasterized. +// blur_radius: float distance in NDC coordinates uses to expand the face +// bounding boxes for the rasterization. Set to 0.0 if no blur +// is required. +// faces_per_pixel: the number of closeset faces to rasterize per pixel. +// perspective_correct: Whether to apply perspective correction when +// computing barycentric coordinates. If this is True, +// then this function returns world-space barycentric +// coordinates for each pixel; if this is False then +// this function instead returns screen-space +// barycentric coordinates for each pixel. +// clip_barycentric_coords: Whether, after any perspective correction +// is applied but before the depth is calculated (e.g. for +// z clipping), to "correct" a location outside the face (i.e. 
with +// a negative barycentric coordinate) to a position on the edge of the +// face. +// cull_backfaces: Bool, Whether to only rasterize mesh faces which are +// visible to the camera. This assumes that vertices of +// front-facing triangles are ordered in an anti-clockwise +// fashion, and triangles that face away from the camera are +// in a clockwise order relative to the current view +// direction. NOTE: This will only work if the mesh faces are +// consistently defined with counter-clockwise ordering when +// viewed from the outside. +// +// Returns: +// A 4 element tuple of: +// pix_to_face: int64 tensor of shape (N, H, W, K) giving the face index of +// each of the closest faces to the pixel in the rasterized +// image, or -1 for pixels that are not covered by any face. +// zbuf: float32 Tensor of shape (N, H, W, K) giving the depth of each of +// the closest faces for each pixel. +// barycentric_coords: float tensor of shape (N, H, W, K, 3) giving +// barycentric coordinates of the pixel with respect to +// each of the closest faces along the z axis, padded +// with -1 for pixels hit by fewer than +// faces_per_pixel faces. +// dists: float tensor of shape (N, H, W, K) giving the euclidean distance +// in the (NDC) x/y plane between each pixel and its K closest +// faces along the z axis padded with -1 for pixels hit by fewer than +// faces_per_pixel faces. +inline std::tuple +RasterizeMeshesNaive( + const torch::Tensor& face_verts, + const torch::Tensor& mesh_to_face_first_idx, + const torch::Tensor& num_faces_per_mesh, + const torch::Tensor& clipped_faces_neighbor_idx, + const std::tuple image_size, + const float blur_radius, + const int faces_per_pixel, + const bool perspective_correct, + const bool clip_barycentric_coords, + const bool cull_backfaces) { + // TODO: Better type checking. 
+ if (face_verts.is_cuda()) { +#ifdef WITH_CUDA + CHECK_CUDA(face_verts); + CHECK_CUDA(mesh_to_face_first_idx); + CHECK_CUDA(num_faces_per_mesh); + return RasterizeMeshesNaiveCuda( + face_verts, + mesh_to_face_first_idx, + num_faces_per_mesh, + clipped_faces_neighbor_idx, + image_size, + blur_radius, + faces_per_pixel, + perspective_correct, + clip_barycentric_coords, + cull_backfaces); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } else { + return RasterizeMeshesNaiveCpu( + face_verts, + mesh_to_face_first_idx, + num_faces_per_mesh, + clipped_faces_neighbor_idx, + image_size, + blur_radius, + faces_per_pixel, + perspective_correct, + clip_barycentric_coords, + cull_backfaces); + } +} + +// **************************************************************************** +// * BACKWARD PASS * +// **************************************************************************** + +torch::Tensor RasterizeMeshesBackwardCpu( + const torch::Tensor& face_verts, + const torch::Tensor& pix_to_face, + const torch::Tensor& grad_zbuf, + const torch::Tensor& grad_bary, + const torch::Tensor& grad_dists, + const bool perspective_correct, + const bool clip_barycentric_coords); + +#ifdef WITH_CUDA +torch::Tensor RasterizeMeshesBackwardCuda( + const torch::Tensor& face_verts, + const torch::Tensor& pix_to_face, + const torch::Tensor& grad_zbuf, + const torch::Tensor& grad_bary, + const torch::Tensor& grad_dists, + const bool perspective_correct, + const bool clip_barycentric_coords); +#endif + +// Args: +// face_verts: float32 Tensor of shape (F, 3, 3) (from forward pass) giving +// (packed) vertex positions for faces in all the meshes in +// the batch. +// pix_to_face: int64 tensor of shape (N, H, W, K) giving the face index of +// each of the closest faces to the pixel in the rasterized +// image, or -1 for pixels that are not covered by any face. 
+// grad_zbuf: Tensor of shape (N, H, W, K) giving upstream gradients +// d(loss)/d(zbuf) of the zbuf tensor from the forward pass. +// grad_bary: Tensor of shape (N, H, W, K, 3) giving upstream gradients +// d(loss)/d(bary) of the barycentric_coords tensor returned by +// the forward pass. +// grad_dists: Tensor of shape (N, H, W, K) giving upstream gradients +// d(loss)/d(dists) of the dists tensor from the forward pass. +// perspective_correct: Whether to apply perspective correction when +// computing barycentric coordinates. If this is True, +// then this function returns world-space barycentric +// coordinates for each pixel; if this is False then +// this function instead returns screen-space +// barycentric coordinates for each pixel. +// clip_barycentric_coords: Whether, after any perspective correction +// is applied but before the depth is calculated (e.g. for +// z clipping), to "correct" a location outside the face (i.e. with +// a negative barycentric coordinate) to a position on the edge of the +// face. +// +// Returns: +// grad_face_verts: float32 Tensor of shape (F, 3, 3) giving downstream +// gradients for the face vertices. 
+torch::Tensor RasterizeMeshesBackward( + const torch::Tensor& face_verts, + const torch::Tensor& pix_to_face, + const torch::Tensor& grad_zbuf, + const torch::Tensor& grad_bary, + const torch::Tensor& grad_dists, + const bool perspective_correct, + const bool clip_barycentric_coords) { + if (face_verts.is_cuda()) { +#ifdef WITH_CUDA + CHECK_CUDA(face_verts); + CHECK_CUDA(pix_to_face); + CHECK_CUDA(grad_zbuf); + CHECK_CUDA(grad_bary); + CHECK_CUDA(grad_dists); + return RasterizeMeshesBackwardCuda( + face_verts, + pix_to_face, + grad_zbuf, + grad_bary, + grad_dists, + perspective_correct, + clip_barycentric_coords); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } else { + return RasterizeMeshesBackwardCpu( + face_verts, + pix_to_face, + grad_zbuf, + grad_bary, + grad_dists, + perspective_correct, + clip_barycentric_coords); + } +} + +// **************************************************************************** +// * COARSE RASTERIZATION * +// **************************************************************************** + +// RasterizeMeshesCoarseCuda in rasterize_coarse/rasterize_coarse.h + +torch::Tensor RasterizeMeshesCoarseCpu( + const torch::Tensor& face_verts, + const at::Tensor& mesh_to_face_first_idx, + const at::Tensor& num_faces_per_mesh, + const std::tuple image_size, + const float blur_radius, + const int bin_size, + const int max_faces_per_bin); + +// Args: +// face_verts: Tensor of shape (F, 3, 3) giving (packed) vertex positions for +// faces in all the meshes in the batch. Concretely, +// face_verts[f, i] = [x, y, z] gives the coordinates for the +// ith vertex of the fth face. These vertices are expected to be +// in NDC coordinates in the range [-1, 1]. +// mesh_to_face_first_idx: LongTensor of shape (N) giving the index in +// faces_verts of the first face in each mesh in +// the batch where N is the batch size. +// num_faces_per_mesh: LongTensor of shape (N) giving the number of faces +// for each mesh in the batch. 
+// image_size: Tuple (H, W) giving the size in pixels of the output +// image to be rasterized. +// blur_radius: float distance in NDC coordinates uses to expand the face +// bounding boxes for the rasterization. Set to 0.0 if no blur +// is required. +// bin_size: Size of each bin within the image (in pixels) +// max_faces_per_bin: Maximum number of faces to count in each bin. +// +// Returns: +// bin_face_idxs: Tensor of shape (N, num_bins, num_bins, K) giving the +// indices of faces that fall into each bin. + +torch::Tensor RasterizeMeshesCoarse( + const torch::Tensor& face_verts, + const torch::Tensor& mesh_to_face_first_idx, + const torch::Tensor& num_faces_per_mesh, + const std::tuple image_size, + const float blur_radius, + const int bin_size, + const int max_faces_per_bin) { + if (face_verts.is_cuda()) { +#ifdef WITH_CUDA + CHECK_CUDA(face_verts); + CHECK_CUDA(mesh_to_face_first_idx); + CHECK_CUDA(num_faces_per_mesh); + return RasterizeMeshesCoarseCuda( + face_verts, + mesh_to_face_first_idx, + num_faces_per_mesh, + image_size, + blur_radius, + bin_size, + max_faces_per_bin); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } else { + return RasterizeMeshesCoarseCpu( + face_verts, + mesh_to_face_first_idx, + num_faces_per_mesh, + image_size, + blur_radius, + bin_size, + max_faces_per_bin); + } +} + +// **************************************************************************** +// * FINE RASTERIZATION * +// **************************************************************************** + +#ifdef WITH_CUDA +std::tuple +RasterizeMeshesFineCuda( + const torch::Tensor& face_verts, + const torch::Tensor& bin_faces, + const torch::Tensor& clipped_faces_neighbor_idx, + const std::tuple image_size, + const float blur_radius, + const int bin_size, + const int faces_per_pixel, + const bool perspective_correct, + const bool clip_barycentric_coords, + const bool cull_backfaces); +#endif +// Args: +// face_verts: Tensor of shape (F, 3, 3) giving (packed) 
vertex positions for +// faces in all the meshes in the batch. Concretely, +// face_verts[f, i] = [x, y, z] gives the coordinates for the +// ith vertex of the fth face. These vertices are expected to be +// in NDC coordinates in the range [-1, 1]. +// bin_faces: int32 Tensor of shape (N, B, B, M) giving the indices of faces +// that fall into each bin (output from coarse rasterization). +// clipped_faces_neighbor_idx: LongTensor of shape (F,) giving the +// index of the neighboring face for each face which was clipped to a +// quadrilateral and then divided into two triangles. +// e.g. for a face f partially behind the image plane which is split into +// two triangles (t1, t2): clipped_faces_neighbor_idx[t1_idx] = t2_idx +// Faces which are not clipped and subdivided are set to -1. +// image_size: Tuple (H, W) giving the size in pixels of the output +// image to be rasterized. +// blur_radius: float distance in NDC coordinates uses to expand the face +// bounding boxes for the rasterization. Set to 0.0 if no blur +// is required. +// bin_size: Size of each bin within the image (in pixels) +// faces_per_pixel: the number of closeset faces to rasterize per pixel. +// perspective_correct: Whether to apply perspective correction when +// computing barycentric coordinates. If this is True, +// then this function returns world-space barycentric +// coordinates for each pixel; if this is False then +// this function instead returns screen-space +// barycentric coordinates for each pixel. +// clip_barycentric_coords: Whether, after any perspective correction +// is applied but before the depth is calculated (e.g. for +// z clipping), to "correct" a location outside the face (i.e. with +// a negative barycentric coordinate) to a position on the edge of the +// face. +// cull_backfaces: Bool, Whether to only rasterize mesh faces which are +// visible to the camera. 
This assumes that vertices of +// front-facing triangles are ordered in an anti-clockwise +// fashion, and triangles that face away from the camera are +// in a clockwise order relative to the current view +// direction. NOTE: This will only work if the mesh faces are +// consistently defined with counter-clockwise ordering when +// viewed from the outside. +// +// Returns (same as rasterize_meshes): +// A 4 element tuple of: +// pix_to_face: int64 tensor of shape (N, H, W, K) giving the face index of +// each of the closest faces to the pixel in the rasterized +// image, or -1 for pixels that are not covered by any face. +// zbuf: float32 Tensor of shape (N, H, W, K) giving the depth of each of +// the closest faces for each pixel. +// barycentric_coords: float tensor of shape (N, H, W, K, 3) giving +// barycentric coordinates of the pixel with respect to +// each of the closest faces along the z axis, padded +// with -1 for pixels hit by fewer than +// faces_per_pixel faces. +// dists: float tensor of shape (N, H, W, K) giving the euclidean distance +// in the (NDC) x/y plane between each pixel and its K closest +// faces along the z axis padded with -1 for pixels hit by fewer than +// faces_per_pixel faces. 
+std::tuple +RasterizeMeshesFine( + const torch::Tensor& face_verts, + const torch::Tensor& bin_faces, + const torch::Tensor& clipped_faces_neighbor_idx, + const std::tuple image_size, + const float blur_radius, + const int bin_size, + const int faces_per_pixel, + const bool perspective_correct, + const bool clip_barycentric_coords, + const bool cull_backfaces) { + if (face_verts.is_cuda()) { +#ifdef WITH_CUDA + CHECK_CUDA(face_verts); + CHECK_CUDA(bin_faces); + return RasterizeMeshesFineCuda( + face_verts, + bin_faces, + clipped_faces_neighbor_idx, + image_size, + blur_radius, + bin_size, + faces_per_pixel, + perspective_correct, + clip_barycentric_coords, + cull_backfaces); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } else { + AT_ERROR("NOT IMPLEMENTED"); + } +} + +// **************************************************************************** +// * MAIN ENTRY POINT * +// **************************************************************************** + +// This is the main entry point for the forward pass of the mesh rasterizer; +// it uses either naive or coarse-to-fine rasterization based on bin_size. +// +// Args: +// face_verts: Tensor of shape (F, 3, 3) giving (packed) vertex positions for +// faces in all the meshes in the batch. Concretely, +// face_verts[f, i] = [x, y, z] gives the coordinates for the +// ith vertex of the fth face. These vertices are expected to be +// in NDC coordinates in the range [-1, 1]. +// mesh_to_face_first_idx: LongTensor of shape (N) giving the index in +// faces_verts of the first face in each mesh in +// the batch where N is the batch size. +// num_faces_per_mesh: LongTensor of shape (N) giving the number of faces +// for each mesh in the batch. +// clipped_faces_neighbor_idx: LongTensor of shape (F,) giving the +// index of the neighboring face for each face which was clipped to a +// quadrilateral and then divided into two triangles. +// e.g. 
for a face f partially behind the image plane which is split into +// two triangles (t1, t2): clipped_faces_neighbor_idx[t1_idx] = t2_idx +// Faces which are not clipped and subdivided are set to -1. +// image_size: Tuple (H, W) giving the size in pixels of the output +// image to be rasterized. +// blur_radius: float distance in NDC coordinates uses to expand the face +// bounding boxes for the rasterization. Set to 0.0 if no blur +// is required. +// faces_per_pixel: the number of closeset faces to rasterize per pixel. +// bin_size: Bin size (in pixels) for coarse-to-fine rasterization. Setting +// bin_size=0 uses naive rasterization instead. +// max_faces_per_bin: The maximum number of faces allowed to fall into each +// bin when using coarse-to-fine rasterization. +// perspective_correct: Whether to apply perspective correction when +// computing barycentric coordinates. If this is True, +// then this function returns world-space barycentric +// coordinates for each pixel; if this is False then +// this function instead returns screen-space +// barycentric coordinates for each pixel. +// clip_barycentric_coords: Whether, after any perspective correction +// is applied but before the depth is calculated (e.g. for +// z clipping), to "correct" a location outside the face (i.e. with +// a negative barycentric coordinate) to a position on the edge of the +// face. +// cull_backfaces: Bool, Whether to only rasterize mesh faces which are +// visible to the camera. This assumes that vertices of +// front-facing triangles are ordered in an anti-clockwise +// fashion, and triangles that face away from the camera are +// in a clockwise order relative to the current view +// direction. NOTE: This will only work if the mesh faces are +// consistently defined with counter-clockwise ordering when +// viewed from the outside. 
+// +// Returns: +// A 4 element tuple of: +// pix_to_face: int64 tensor of shape (N, H, W, K) giving the face index of +// each of the closest faces to the pixel in the rasterized +// image, or -1 for pixels that are not covered by any face. +// zbuf: float32 Tensor of shape (N, H, W, K) giving the depth of each of +// the closest faces for each pixel. +// barycentric_coords: float tensor of shape (N, H, W, K, 3) giving +// barycentric coordinates of the pixel with respect to +// each of the closest faces along the z axis, padded +// with -1 for pixels hit by fewer than +// faces_per_pixel faces. +// dists: float tensor of shape (N, H, W, K) giving the euclidean distance +// in the (NDC) x/y plane between each pixel and its K closest +// faces along the z axis padded with -1 for pixels hit by fewer than +// faces_per_pixel faces. +std::tuple +RasterizeMeshes( + const torch::Tensor& face_verts, + const torch::Tensor& mesh_to_face_first_idx, + const torch::Tensor& num_faces_per_mesh, + const torch::Tensor& clipped_faces_neighbor_idx, + const std::tuple image_size, + const float blur_radius, + const int faces_per_pixel, + const int bin_size, + const int max_faces_per_bin, + const bool perspective_correct, + const bool clip_barycentric_coords, + const bool cull_backfaces) { + if (bin_size > 0 && max_faces_per_bin > 0) { + // Use coarse-to-fine rasterization + at::Tensor bin_faces = RasterizeMeshesCoarse( + face_verts, + mesh_to_face_first_idx, + num_faces_per_mesh, + image_size, + blur_radius, + bin_size, + max_faces_per_bin); + return RasterizeMeshesFine( + face_verts, + bin_faces, + clipped_faces_neighbor_idx, + image_size, + blur_radius, + bin_size, + faces_per_pixel, + perspective_correct, + clip_barycentric_coords, + cull_backfaces); + } else { + // Use the naive per-pixel implementation + return RasterizeMeshesNaive( + face_verts, + mesh_to_face_first_idx, + num_faces_per_mesh, + clipped_faces_neighbor_idx, + image_size, + blur_radius, + faces_per_pixel, + 
perspective_correct, + clip_barycentric_coords, + cull_backfaces); + } +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/rasterize_meshes/rasterize_meshes_cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/rasterize_meshes/rasterize_meshes_cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..210df55e43de5602c3c80b05e0ff8b9d7e59253a --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/rasterize_meshes/rasterize_meshes_cpu.cpp @@ -0,0 +1,640 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include +#include +#include +#include "ATen/core/TensorAccessor.h" +#include "rasterize_points/rasterization_utils.h" +#include "utils/geometry_utils.h" +#include "utils/vec2.h" +#include "utils/vec3.h" + +// Get (x, y, z) values for vertex from (3, 3) tensor face. +template +auto ExtractVerts(const Face& face, const int vertex_index) { + return std::make_tuple( + face[vertex_index][0], face[vertex_index][1], face[vertex_index][2]); +} + +// Compute min/max x/y for each face. 
+auto ComputeFaceBoundingBoxes(const torch::Tensor& face_verts) { + const int total_F = face_verts.size(0); + auto float_opts = face_verts.options().dtype(torch::kFloat32); + auto face_verts_a = face_verts.accessor(); + torch::Tensor face_bboxes = torch::full({total_F, 6}, -2.0, float_opts); + + // Loop through all the faces + for (int f = 0; f < total_F; ++f) { + const auto& face = face_verts_a[f]; + float x0, x1, x2, y0, y1, y2, z0, z1, z2; + std::tie(x0, y0, z0) = ExtractVerts(face, 0); + std::tie(x1, y1, z1) = ExtractVerts(face, 1); + std::tie(x2, y2, z2) = ExtractVerts(face, 2); + + const float x_min = std::min(x0, std::min(x1, x2)); + const float y_min = std::min(y0, std::min(y1, y2)); + const float x_max = std::max(x0, std::max(x1, x2)); + const float y_max = std::max(y0, std::max(y1, y2)); + const float z_min = std::min(z0, std::min(z1, z2)); + const float z_max = std::max(z0, std::max(z1, z2)); + + face_bboxes[f][0] = x_min; + face_bboxes[f][1] = y_min; + face_bboxes[f][2] = x_max; + face_bboxes[f][3] = y_max; + face_bboxes[f][4] = z_min; + face_bboxes[f][5] = z_max; + } + + return face_bboxes; +} + +// Check if the point (px, py) lies inside the face bounding box face_bbox. +// Return true if the point is outside. +template +bool CheckPointOutsideBoundingBox( + const Face& face_bbox, + float blur_radius, + float px, + float py) { + // Read triangle bbox coordinates and expand by blur radius. + float x_min = face_bbox[0] - blur_radius; + float y_min = face_bbox[1] - blur_radius; + float x_max = face_bbox[2] + blur_radius; + float y_max = face_bbox[3] + blur_radius; + + // Faces with at least one vertex behind the camera won't render correctly + // and should be removed or clipped before calling the rasterizer + const bool z_invalid = face_bbox[4] < kEpsilon; + + // Check if the current point is within the triangle bounding box. + return (px > x_max || px < x_min || py > y_max || py < y_min || z_invalid); +} + +// Calculate areas of all faces. 
Returns a tensor of shape (total_faces, 1) +// where faces with zero area have value -1. +auto ComputeFaceAreas(const torch::Tensor& face_verts) { + const int total_F = face_verts.size(0); + auto float_opts = face_verts.options().dtype(torch::kFloat32); + auto face_verts_a = face_verts.accessor(); + torch::Tensor face_areas = torch::full({total_F}, -1, float_opts); + + // Loop through all the faces + for (int f = 0; f < total_F; ++f) { + const auto& face = face_verts_a[f]; + float x0, x1, x2, y0, y1, y2, z0, z1, z2; + std::tie(x0, y0, z0) = ExtractVerts(face, 0); + std::tie(x1, y1, z1) = ExtractVerts(face, 1); + std::tie(x2, y2, z2) = ExtractVerts(face, 2); + + const vec2 v0(x0, y0); + const vec2 v1(x1, y1); + const vec2 v2(x2, y2); + + const float face_area = EdgeFunctionForward(v0, v1, v2); + face_areas[f] = face_area; + } + + return face_areas; +} + +// Helper function to use with std::find_if to find the index of any +// values in the top k struct which match a given idx. +struct IsNeighbor { + IsNeighbor(int neighbor_idx) { + this->neighbor_idx = neighbor_idx; + } + bool operator()(std::tuple elem) { + return (std::get<1>(elem) == neighbor_idx); + } + int neighbor_idx; +}; + +namespace { +void RasterizeMeshesNaiveCpu_worker( + const int start_yi, + const int end_yi, + const torch::Tensor& mesh_to_face_first_idx, + const torch::Tensor& num_faces_per_mesh, + const float blur_radius, + const bool perspective_correct, + const bool clip_barycentric_coords, + const bool cull_backfaces, + const int32_t N, + const int H, + const int W, + const int K, + at::TensorAccessor& face_verts_a, + at::TensorAccessor& face_areas_a, + at::TensorAccessor& face_bboxes_a, + at::TensorAccessor& neighbor_idx_a, + at::TensorAccessor& zbuf_a, + at::TensorAccessor& face_idxs_a, + at::TensorAccessor& pix_dists_a, + at::TensorAccessor& barycentric_coords_a) { + for (int n = 0; n < N; ++n) { + // Loop through each mesh in the batch. 
+ // Get the start index of the faces in faces_packed and the num faces + // in the mesh to avoid having to loop through all the faces. + const int face_start_idx = mesh_to_face_first_idx[n].item().to(); + const int face_stop_idx = + (face_start_idx + num_faces_per_mesh[n].item().to()); + + // Iterate through the horizontal lines of the image from top to bottom. + for (int yi = start_yi; yi < end_yi; ++yi) { + // Reverse the order of yi so that +Y is pointing upwards in the image. + const int yidx = H - 1 - yi; + + // Y coordinate of the top of the pixel. + const float yf = PixToNonSquareNdc(yidx, H, W); + // Iterate through pixels on this horizontal line, left to right. + for (int xi = 0; xi < W; ++xi) { + // Reverse the order of xi so that +X is pointing to the left in the + // image. + const int xidx = W - 1 - xi; + + // X coordinate of the left of the pixel. + const float xf = PixToNonSquareNdc(xidx, W, H); + + // Use a deque to hold values: + // (z, idx, r, bary.x, bary.y. bary.z) + // Sort the deque as needed to mimic a priority queue. + std::deque> q; + + // Loop through the faces in the mesh. + for (int f = face_start_idx; f < face_stop_idx; ++f) { + // Get coordinates of three face vertices. + const auto& face = face_verts_a[f]; + float x0, x1, x2, y0, y1, y2, z0, z1, z2; + std::tie(x0, y0, z0) = ExtractVerts(face, 0); + std::tie(x1, y1, z1) = ExtractVerts(face, 1); + std::tie(x2, y2, z2) = ExtractVerts(face, 2); + + const vec2 v0(x0, y0); + const vec2 v1(x1, y1); + const vec2 v2(x2, y2); + + const float face_area = face_areas_a[f]; + const bool back_face = face_area < 0.0; + // Check if the face is visible to the camera. + if (cull_backfaces && back_face) { + continue; + } + // Skip faces with zero area. + if (face_area <= kEpsilon && face_area >= -1.0f * kEpsilon) { + continue; + } + + // Skip if point is outside the face bounding box. 
+ const auto face_bbox = face_bboxes_a[f]; + const bool outside_bbox = CheckPointOutsideBoundingBox( + face_bbox, std::sqrt(blur_radius), xf, yf); + if (outside_bbox) { + continue; + } + + // Compute barycentric coordinates and use this to get the + // depth of the point on the triangle. + const vec2 pxy(xf, yf); + const vec3 bary0 = + BarycentricCoordinatesForward(pxy, v0, v1, v2); + const vec3 bary = !perspective_correct + ? bary0 + : BarycentricPerspectiveCorrectionForward(bary0, z0, z1, z2); + + const vec3 bary_clip = + !clip_barycentric_coords ? bary : BarycentricClipForward(bary); + + // Use barycentric coordinates to get the depth of the current pixel + const float pz = + (bary_clip.x * z0 + bary_clip.y * z1 + bary_clip.z * z2); + + if (pz < 0) { + continue; // Point is behind the image plane so ignore. + } + + // Compute squared distance of the point to the triangle. + const float dist = PointTriangleDistanceForward(pxy, v0, v1, v2); + + // Use the bary coordinates to determine if the point is + // inside the face. + const bool inside = bary.x > 0.0f && bary.y > 0.0f && bary.z > 0.0f; + + // If the point is inside the triangle then signed_dist + // is negative. + const float signed_dist = inside ? -dist : dist; + + // Check if pixel is outside blur region + if (!inside && dist >= blur_radius) { + continue; + } + + // Handle the case where a face (f) partially behind the image plane + // is clipped to a quadrilateral and then split into two faces (t1, + // t2). In this case we: + // 1. Find the index of the neighbor (e.g. for t1 need index of t2) + // 2. Check if the neighbor (t2) is already in the top K faces + // 3. If yes, compare the distance of the pixel to t1 with the + // distance to t2. + // 4. If dist_t1 < dist_t2, overwrite the values for t2 in the top K + // faces. + const int neighbor_idx = neighbor_idx_a[f]; + int idx_top_k = -1; + + // Check if neighboring face is already in the top K. 
+ if (neighbor_idx != -1) { + const auto it = + std::find_if(q.begin(), q.end(), IsNeighbor(neighbor_idx)); + // Get the index of the element from the iterator + idx_top_k = (it != q.end()) ? it - q.begin() : idx_top_k; + } + + // If idx_top_k idx is not -1 then it is in the top K struct. + if (idx_top_k != -1) { + // If dist of current face is less than neighbor, overwrite + // the neighbor face values in the top K struct. + const auto neighbor = q[idx_top_k]; + const float dist_neighbor = std::abs(std::get<2>(neighbor)); + if (dist < dist_neighbor) { + // Overwrite the neighbor face values. + q[idx_top_k] = std::make_tuple( + pz, f, signed_dist, bary_clip.x, bary_clip.y, bary_clip.z); + } + } else { + // Handle as a normal face. + // The current pixel lies inside the current face. + // Add at the end of the deque. + q.emplace_back( + pz, f, signed_dist, bary_clip.x, bary_clip.y, bary_clip.z); + } + + // Sort the deque inplace based on the z distance + // to mimic using a priority queue. 
+ std::sort(q.begin(), q.end()); + if (static_cast(q.size()) > K) { + // remove the last value + q.pop_back(); + } + } + while (!q.empty()) { + // Loop through and add values to the output tensors + auto t = q.back(); + q.pop_back(); + const int i = q.size(); + zbuf_a[n][yi][xi][i] = std::get<0>(t); + face_idxs_a[n][yi][xi][i] = std::get<1>(t); + pix_dists_a[n][yi][xi][i] = std::get<2>(t); + barycentric_coords_a[n][yi][xi][i][0] = std::get<3>(t); + barycentric_coords_a[n][yi][xi][i][1] = std::get<4>(t); + barycentric_coords_a[n][yi][xi][i][2] = std::get<5>(t); + } + } + } + } +} +} // namespace + +std::tuple +RasterizeMeshesNaiveCpu( + const torch::Tensor& face_verts, + const torch::Tensor& mesh_to_face_first_idx, + const torch::Tensor& num_faces_per_mesh, + const torch::Tensor& clipped_faces_neighbor_idx, + const std::tuple image_size, + const float blur_radius, + const int faces_per_pixel, + const bool perspective_correct, + const bool clip_barycentric_coords, + const bool cull_backfaces) { + if (face_verts.ndimension() != 3 || face_verts.size(1) != 3 || + face_verts.size(2) != 3) { + AT_ERROR("face_verts must have dimensions (num_faces, 3, 3)"); + } + if (num_faces_per_mesh.size(0) != mesh_to_face_first_idx.size(0)) { + AT_ERROR( + "num_faces_per_mesh must have save size first dimension as mesh_to_face_first_idx"); + } + + const int32_t N = mesh_to_face_first_idx.size(0); // batch_size. + const int H = std::get<0>(image_size); + const int W = std::get<1>(image_size); + const int K = faces_per_pixel; + + auto long_opts = num_faces_per_mesh.options().dtype(torch::kInt64); + auto float_opts = face_verts.options().dtype(torch::kFloat32); + + // Initialize output tensors. 
+ torch::Tensor face_idxs = torch::full({N, H, W, K}, -1, long_opts); + torch::Tensor zbuf = torch::full({N, H, W, K}, -1, float_opts); + torch::Tensor pix_dists = torch::full({N, H, W, K}, -1, float_opts); + torch::Tensor barycentric_coords = + torch::full({N, H, W, K, 3}, -1, float_opts); + + auto face_verts_a = face_verts.accessor(); + auto face_idxs_a = face_idxs.accessor(); + auto zbuf_a = zbuf.accessor(); + auto pix_dists_a = pix_dists.accessor(); + auto barycentric_coords_a = barycentric_coords.accessor(); + auto neighbor_idx_a = clipped_faces_neighbor_idx.accessor(); + + auto face_bboxes = ComputeFaceBoundingBoxes(face_verts); + auto face_bboxes_a = face_bboxes.accessor(); + auto face_areas = ComputeFaceAreas(face_verts); + auto face_areas_a = face_areas.accessor(); + + const int64_t n_threads = at::get_num_threads(); + std::vector threads; + threads.reserve(n_threads); + const int chunk_size = 1 + (H - 1) / n_threads; + int start_yi = 0; + for (int iThread = 0; iThread < n_threads; ++iThread) { + const int64_t end_yi = std::min(start_yi + chunk_size, H); + threads.emplace_back( + RasterizeMeshesNaiveCpu_worker, + start_yi, + end_yi, + mesh_to_face_first_idx, + num_faces_per_mesh, + blur_radius, + perspective_correct, + clip_barycentric_coords, + cull_backfaces, + N, + H, + W, + K, + std::ref(face_verts_a), + std::ref(face_areas_a), + std::ref(face_bboxes_a), + std::ref(neighbor_idx_a), + std::ref(zbuf_a), + std::ref(face_idxs_a), + std::ref(pix_dists_a), + std::ref(barycentric_coords_a)); + start_yi += chunk_size; + } + for (auto&& thread : threads) { + thread.join(); + } + + return std::make_tuple(face_idxs, zbuf, barycentric_coords, pix_dists); +} + +torch::Tensor RasterizeMeshesBackwardCpu( + const torch::Tensor& face_verts, // (F, 3, 3) + const torch::Tensor& pix_to_face, // (N, H, W, K) + const torch::Tensor& grad_zbuf, // (N, H, W, K) + const torch::Tensor& grad_bary, // (N, H, W, K, 3) + const torch::Tensor& grad_dists, // (N, H, W, K) + const bool 
perspective_correct, + const bool clip_barycentric_coords) { + const int F = face_verts.size(0); + const int N = pix_to_face.size(0); + const int H = pix_to_face.size(1); + const int W = pix_to_face.size(2); + const int K = pix_to_face.size(3); + + torch::Tensor grad_face_verts = torch::zeros({F, 3, 3}, face_verts.options()); + auto face_verts_a = face_verts.accessor(); + auto pix_to_face_a = pix_to_face.accessor(); + auto grad_dists_a = grad_dists.accessor(); + auto grad_zbuf_a = grad_zbuf.accessor(); + auto grad_bary_a = grad_bary.accessor(); + + for (int n = 0; n < N; ++n) { + // Iterate through the horizontal lines of the image from top to bottom. + for (int y = 0; y < H; ++y) { + // Reverse the order of yi so that +Y is pointing upwards in the image. + const int yidx = H - 1 - y; + + // Y coordinate of the top of the pixel. + const float yf = PixToNonSquareNdc(yidx, H, W); + // Iterate through pixels on this horizontal line, left to right. + for (int x = 0; x < W; ++x) { + // Reverse the order of xi so that +X is pointing to the left in the + // image. + const int xidx = W - 1 - x; + + // X coordinate of the left of the pixel. + const float xf = PixToNonSquareNdc(xidx, W, H); + const vec2 pxy(xf, yf); + + // Iterate through the faces that hit this pixel. + for (int k = 0; k < K; ++k) { + // Get face index from forward pass output. + const int f = pix_to_face_a[n][y][x][k]; + if (f < 0) { + continue; // padded face. + } + // Get coordinates of the three face vertices. 
+ const auto face_verts_f = face_verts_a[f]; + const float x0 = face_verts_f[0][0]; + const float y0 = face_verts_f[0][1]; + const float z0 = face_verts_f[0][2]; + const float x1 = face_verts_f[1][0]; + const float y1 = face_verts_f[1][1]; + const float z1 = face_verts_f[1][2]; + const float x2 = face_verts_f[2][0]; + const float y2 = face_verts_f[2][1]; + const float z2 = face_verts_f[2][2]; + const vec2 v0xy(x0, y0); + const vec2 v1xy(x1, y1); + const vec2 v2xy(x2, y2); + + // Get upstream gradients for the face. + const float grad_dist_upstream = grad_dists_a[n][y][x][k]; + const float grad_zbuf_upstream = grad_zbuf_a[n][y][x][k]; + const auto grad_bary_upstream_w012 = grad_bary_a[n][y][x][k]; + const float grad_bary_upstream_w0 = grad_bary_upstream_w012[0]; + const float grad_bary_upstream_w1 = grad_bary_upstream_w012[1]; + const float grad_bary_upstream_w2 = grad_bary_upstream_w012[2]; + const vec3 grad_bary_upstream( + grad_bary_upstream_w0, + grad_bary_upstream_w1, + grad_bary_upstream_w2); + + const vec3 bary0 = + BarycentricCoordinatesForward(pxy, v0xy, v1xy, v2xy); + const vec3 bary = !perspective_correct + ? bary0 + : BarycentricPerspectiveCorrectionForward(bary0, z0, z1, z2); + const vec3 bary_clip = + !clip_barycentric_coords ? bary : BarycentricClipForward(bary); + + // Distances inside the face are negative so get the + // correct sign to apply to the upstream gradient. + const bool inside = bary.x > 0.0f && bary.y > 0.0f && bary.z > 0.0f; + const float sign = inside ? 
-1.0f : 1.0f; + + const auto grad_dist_f = PointTriangleDistanceBackward( + pxy, v0xy, v1xy, v2xy, sign * grad_dist_upstream); + const auto ddist_d_v0 = std::get<1>(grad_dist_f); + const auto ddist_d_v1 = std::get<2>(grad_dist_f); + const auto ddist_d_v2 = std::get<3>(grad_dist_f); + + // Upstream gradient for barycentric coords from zbuf calculation: + // zbuf = bary_w0 * z0 + bary_w1 * z1 + bary_w2 * z2 + // Therefore + // d_zbuf/d_bary_w0 = z0 + // d_zbuf/d_bary_w1 = z1 + // d_zbuf/d_bary_w2 = z2 + const vec3 d_zbuf_d_baryclip(z0, z1, z2); + + // Total upstream barycentric gradients are the sum of + // external upstream gradients and contribution from zbuf. + const vec3 grad_bary_f_sum = + (grad_bary_upstream + grad_zbuf_upstream * d_zbuf_d_baryclip); + + vec3 grad_bary0 = grad_bary_f_sum; + + if (clip_barycentric_coords) { + grad_bary0 = BarycentricClipBackward(bary, grad_bary0); + } + + if (perspective_correct) { + auto perspective_grads = BarycentricPerspectiveCorrectionBackward( + bary0, z0, z1, z2, grad_bary0); + grad_bary0 = std::get<0>(perspective_grads); + grad_face_verts[f][0][2] += std::get<1>(perspective_grads); + grad_face_verts[f][1][2] += std::get<2>(perspective_grads); + grad_face_verts[f][2][2] += std::get<3>(perspective_grads); + } + + auto grad_bary_f = + BarycentricCoordsBackward(pxy, v0xy, v1xy, v2xy, grad_bary0); + const vec2 dbary_d_v0 = std::get<1>(grad_bary_f); + const vec2 dbary_d_v1 = std::get<2>(grad_bary_f); + const vec2 dbary_d_v2 = std::get<3>(grad_bary_f); + + // Update output gradient buffer. 
+ grad_face_verts[f][0][0] += dbary_d_v0.x + ddist_d_v0.x; + grad_face_verts[f][0][1] += dbary_d_v0.y + ddist_d_v0.y; + grad_face_verts[f][0][2] += grad_zbuf_upstream * bary_clip.x; + grad_face_verts[f][1][0] += dbary_d_v1.x + ddist_d_v1.x; + grad_face_verts[f][1][1] += dbary_d_v1.y + ddist_d_v1.y; + grad_face_verts[f][1][2] += grad_zbuf_upstream * bary_clip.y; + grad_face_verts[f][2][0] += dbary_d_v2.x + ddist_d_v2.x; + grad_face_verts[f][2][1] += dbary_d_v2.y + ddist_d_v2.y; + grad_face_verts[f][2][2] += grad_zbuf_upstream * bary_clip.z; + } + } + } + } + return grad_face_verts; +} + +torch::Tensor RasterizeMeshesCoarseCpu( + const torch::Tensor& face_verts, + const torch::Tensor& mesh_to_face_first_idx, + const torch::Tensor& num_faces_per_mesh, + const std::tuple image_size, + const float blur_radius, + const int bin_size, + const int max_faces_per_bin) { + if (face_verts.ndimension() != 3 || face_verts.size(1) != 3 || + face_verts.size(2) != 3) { + AT_ERROR("face_verts must have dimensions (num_faces, 3, 3)"); + } + if (num_faces_per_mesh.ndimension() != 1) { + AT_ERROR("num_faces_per_mesh can only have one dimension"); + } + + const int N = num_faces_per_mesh.size(0); // batch size. + const int M = max_faces_per_bin; + + const float H = std::get<0>(image_size); + const float W = std::get<1>(image_size); + + // Integer division round up. + const int BH = 1 + (H - 1) / bin_size; + const int BW = 1 + (W - 1) / bin_size; + + auto opts = num_faces_per_mesh.options().dtype(torch::kInt32); + torch::Tensor faces_per_bin = torch::zeros({N, BH, BW}, opts); + torch::Tensor bin_faces = torch::full({N, BH, BW, M}, -1, opts); + auto bin_faces_a = bin_faces.accessor(); + + // Precompute all face bounding boxes. 
+ auto face_bboxes = ComputeFaceBoundingBoxes(face_verts); + auto face_bboxes_a = face_bboxes.accessor(); + + const float ndc_x_range = NonSquareNdcRange(W, H); + const float pixel_width_x = ndc_x_range / W; + const float bin_width_x = pixel_width_x * bin_size; + + const float ndc_y_range = NonSquareNdcRange(H, W); + const float pixel_width_y = ndc_y_range / H; + const float bin_width_y = pixel_width_y * bin_size; + + // Iterate through the meshes in the batch. + for (int n = 0; n < N; ++n) { + const int face_start_idx = mesh_to_face_first_idx[n].item().to(); + const int face_stop_idx = + (face_start_idx + num_faces_per_mesh[n].item().to()); + + float bin_y_min = -1.0f; + float bin_y_max = bin_y_min + bin_width_y; + + // Iterate through the horizontal bins from top to bottom. + for (int by = 0; by < BH; ++by) { + float bin_x_min = -1.0f; + float bin_x_max = bin_x_min + bin_width_x; + + // Iterate through bins on this horizontal line, left to right. + for (int bx = 0; bx < BW; ++bx) { + int32_t faces_hit = 0; + + for (int32_t f = face_start_idx; f < face_stop_idx; ++f) { + // Get bounding box and expand by blur radius. + float face_x_min = face_bboxes_a[f][0] - std::sqrt(blur_radius); + float face_y_min = face_bboxes_a[f][1] - std::sqrt(blur_radius); + float face_x_max = face_bboxes_a[f][2] + std::sqrt(blur_radius); + float face_y_max = face_bboxes_a[f][3] + std::sqrt(blur_radius); + float face_z_min = face_bboxes_a[f][4]; + + // Faces with at least one vertex behind the camera won't render + // correctly and should be removed or clipped before calling the + // rasterizer + if (face_z_min < kEpsilon) { + continue; + } + + // Use a half-open interval so that faces exactly on the + // boundary between bins will fall into exactly one bin. 
+ bool x_overlap = + (face_x_min <= bin_x_max) && (bin_x_min < face_x_max); + bool y_overlap = + (face_y_min <= bin_y_max) && (bin_y_min < face_y_max); + + if (x_overlap && y_overlap) { + // Got too many faces for this bin, so throw an error. + if (faces_hit >= max_faces_per_bin) { + AT_ERROR("Got too many faces per bin"); + } + // The current point falls in the current bin, so + // record it. + bin_faces_a[n][by][bx][faces_hit] = f; + faces_hit++; + } + } + + // Shift the bin to the right for the next loop iteration + bin_x_min = bin_x_max; + bin_x_max = bin_x_min + bin_width_x; + } + // Shift the bin down for the next loop iteration + bin_y_min = bin_y_max; + bin_y_max = bin_y_min + bin_width_y; + } + } + return bin_faces; +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/sample_pdf/sample_pdf.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/sample_pdf/sample_pdf.cu new file mode 100644 index 0000000000000000000000000000000000000000..885313ac3c8aaba173c4691a1189ea43ddd39f1e --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/sample_pdf/sample_pdf.cu @@ -0,0 +1,153 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include + +// There is no intermediate memory, so no reason not to have blocksize=32. +// 256 is a reasonable number of blocks. + +// DESIGN +// We exploit the fact that n_samples is not tiny. +// A chunk of work is T*blocksize many samples from +// a single batch elememt. +// For each batch element there will be +// chunks_per_batch = 1 + (n_samples-1)/(T*blocksize) of them. +// The number of potential chunks to do is +// n_chunks = chunks_per_batch * n_batches. 
+// These chunks are divided among the gridSize-many blocks. +// In block b, we work on chunks b, b+gridSize, b+2*gridSize etc . +// In chunk i, we work on batch_element i/chunks_per_batch +// on samples starting from (i%chunks_per_batch) * (T*blocksize) + +// BEGIN HYPOTHETICAL +// Another option (not implemented) if batch_size was always large +// would be as follows. + +// A chunk of work is S samples from each of blocksize-many +// batch elements. +// For each batch element there will be +// chunks_per_batch = (1+(n_samples-1)/S) of them. +// The number of potential chunks to do is +// n_chunks = chunks_per_batch * (1+(n_batches-1)/blocksize) +// These chunks are divided among the gridSize-many blocks. +// In block b, we work on chunks b, b+gridSize, b+2*gridSize etc . +// In chunk i, we work on samples starting from S*(i%chunks_per_batch) +// on batch elements starting from blocksize*(i/chunks_per_batch). +// END HYPOTHETICAL + +__global__ void SamplePdfCudaKernel( + const float* __restrict__ bins, + const float* __restrict__ weights, + float* __restrict__ outputs, + float eps, + const int T, + const int64_t batch_size, + const int64_t n_bins, + const int64_t n_samples) { + const int64_t chunks_per_batch = 1 + (n_samples - 1) / (T * blockDim.x); + const int64_t n_chunks = chunks_per_batch * batch_size; + + for (int64_t i_chunk = blockIdx.x; i_chunk < n_chunks; i_chunk += gridDim.x) { + // Loop over the chunks. + int64_t i_batch_element = i_chunk / chunks_per_batch; + int64_t sample_start = (i_chunk % chunks_per_batch) * (T * blockDim.x); + const float* const weight_startp = weights + n_bins * i_batch_element; + const float* const bin_startp = bins + (1 + n_bins) * i_batch_element; + + // Each chunk looks at a single batch element, so we do the preprocessing + // which depends on the batch element, namely finding the total weight. + // Identical work is being done in sync here by every thread of the block. 
+ float total_weight = eps; + for (int64_t i_bin = 0; i_bin < n_bins; ++i_bin) { + total_weight += weight_startp[i_bin]; + } + + float* const output_startp = + outputs + n_samples * i_batch_element + sample_start; + + for (int t = 0; t < T; ++t) { + // Loop over T, which is the number of samples each thread makes within + // the chunk. + const int64_t i_sample_within_chunk = threadIdx.x + t * blockDim.x; + if (sample_start + i_sample_within_chunk >= n_samples) { + // Some threads need to exit early because the sample they would + // make is unwanted. + continue; + } + // output_startp[i_sample_within_chunk] contains the quantile we (i.e. + // this thread) are calculating. + float uniform = total_weight * output_startp[i_sample_within_chunk]; + int64_t i_bin = 0; + // We find the bin containing the quantile by walking along the weights. + // This loop must be thread dependent. I.e. the whole warp will wait until + // every thread has found the bin for its quantile. + // It may be best to write it differently. + while (i_bin + 1 < n_bins && uniform > weight_startp[i_bin]) { + uniform -= weight_startp[i_bin]; + ++i_bin; + } + + // Now we know which bin to look in, we use linear interpolation + // to find the location of the quantile within the bin, and + // write the answer back. 
+ float bin_start = bin_startp[i_bin]; + float bin_end = bin_startp[i_bin + 1]; + float bin_weight = weight_startp[i_bin]; + float output_value = bin_start; + if (uniform > bin_weight) { + output_value = bin_end; + } else if (bin_weight > eps) { + output_value += (uniform / bin_weight) * (bin_end - bin_start); + } + output_startp[i_sample_within_chunk] = output_value; + } + } +} + +void SamplePdfCuda( + const at::Tensor& bins, + const at::Tensor& weights, + const at::Tensor& outputs, + float eps) { + // Check inputs are on the same device + at::TensorArg bins_t{bins, "bins", 1}, weights_t{weights, "weights", 2}, + outputs_t{outputs, "outputs", 3}; + at::CheckedFrom c = "SamplePdfCuda"; + at::checkAllSameGPU(c, {bins_t, weights_t, outputs_t}); + at::checkAllSameType(c, {bins_t, weights_t, outputs_t}); + + // Set the device for the kernel launch based on the device of the input + at::cuda::CUDAGuard device_guard(bins.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + const int64_t batch_size = bins.size(0); + const int64_t n_bins = weights.size(1); + const int64_t n_samples = outputs.size(1); + + const int64_t threads = 32; + const int64_t T = n_samples <= threads ? 1 : 2; + const int64_t chunks_per_batch = 1 + (n_samples - 1) / (T * threads); + const int64_t n_chunks = chunks_per_batch * batch_size; + + const int64_t max_blocks = 1024; + const int64_t blocks = n_chunks < max_blocks ? n_chunks : max_blocks; + + SamplePdfCudaKernel<<>>( + bins.contiguous().data_ptr(), + weights.contiguous().data_ptr(), + outputs.data_ptr(), // Checked contiguous in header file. 
+ eps, + T, + batch_size, + n_bins, + n_samples); + + AT_CUDA_CHECK(cudaGetLastError()); +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/sample_pdf/sample_pdf.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/sample_pdf/sample_pdf.h new file mode 100644 index 0000000000000000000000000000000000000000..899117df797cf03c3c207e6205d1607dd3707f3d --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/sample_pdf/sample_pdf.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once +#include +#include +#include +#include +#include "utils/pytorch3d_cutils.h" + +// **************************************************************************** +// * SamplePdf * +// **************************************************************************** + +// Samples a probability density functions defined by bin edges `bins` and +// the non-negative per-bin probabilities `weights`. + +// Args: +// bins: FloatTensor of shape `(batch_size, n_bins+1)` denoting the edges +// of the sampling bins. + +// weights: FloatTensor of shape `(batch_size, n_bins)` containing +// non-negative numbers representing the probability of sampling the +// corresponding bin. + +// uniforms: The quantiles to draw, FloatTensor of shape +// `(batch_size, n_samples)`. + +// outputs: On call, this contains the quantiles to draw. It is overwritten +// with the drawn samples. FloatTensor of shape +// `(batch_size, n_samples), where `n_samples are drawn from each +// distribution. + +// eps: A constant preventing division by zero in case empty bins are +// present. 
+ +// Not differentiable + +#ifdef WITH_CUDA +void SamplePdfCuda( + const torch::Tensor& bins, + const torch::Tensor& weights, + const torch::Tensor& outputs, + float eps); +#endif + +void SamplePdfCpu( + const torch::Tensor& bins, + const torch::Tensor& weights, + const torch::Tensor& outputs, + float eps); + +inline void SamplePdf( + const torch::Tensor& bins, + const torch::Tensor& weights, + const torch::Tensor& outputs, + float eps) { + if (bins.is_cuda()) { +#ifdef WITH_CUDA + CHECK_CUDA(weights); + CHECK_CONTIGUOUS_CUDA(outputs); + torch::autograd::increment_version(outputs); + SamplePdfCuda(bins, weights, outputs, eps); + return; +#else + AT_ERROR("Not compiled with GPU support."); +#endif + } + CHECK_CONTIGUOUS(outputs); + SamplePdfCpu(bins, weights, outputs, eps); +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/sample_pdf/sample_pdf_cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/sample_pdf/sample_pdf_cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..272197c6e9d5f8ee19153004310fb0fd8e10b94b --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/csrc/sample_pdf/sample_pdf_cpu.cpp @@ -0,0 +1,142 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include +#include + +// If the number of bins is the typical 64, it is +// quicker to use binary search than linear scan. +// With more bins, it is more important. +// There is no equivalent CUDA implementation yet. +#define USE_BINARY_SEARCH + +namespace { +// This worker function does the job of SamplePdf but only on +// batch elements in [start_batch, end_batch). 
+void SamplePdfCpu_worker( + const torch::Tensor& bins, + const torch::Tensor& weights, + const torch::Tensor& outputs, + float eps, + int64_t start_batch, + int64_t end_batch) { + const int64_t n_bins = weights.size(1); + const int64_t n_samples = outputs.size(1); + + auto bins_a = bins.accessor(); + auto weights_a = weights.accessor(); + float* output_p = outputs.data_ptr() + start_batch * n_samples; + +#ifdef USE_BINARY_SEARCH + std::vector partial_sums(n_bins); +#endif + + for (int64_t i_batch_elt = start_batch; i_batch_elt < end_batch; + ++i_batch_elt) { + auto bin_a = bins_a[i_batch_elt]; + auto weight_a = weights_a[i_batch_elt]; + + // Here we do the work which has to be done once per batch element. + // i.e. (1) finding the total weight. (2) If using binary search, + // precompute the partial sums of the weights. + + float total_weight = 0; + for (int64_t i_bin = 0; i_bin < n_bins; ++i_bin) { + total_weight += weight_a[i_bin]; +#ifdef USE_BINARY_SEARCH + partial_sums[i_bin] = total_weight; +#endif + } + total_weight += eps; + + for (int64_t i_sample = 0; i_sample < n_samples; ++i_sample) { + // Here we are taking a single random quantile (which is stored + // in *output_p) and using it to make a single sample, which we + // write back to the same location. First we find which bin + // the quantile lives in, either by binary search in the + // precomputed partial sums, or by scanning through the weights. 
+ + float uniform = total_weight * *output_p; +#ifdef USE_BINARY_SEARCH + int64_t i_bin = std::lower_bound( + partial_sums.begin(), --partial_sums.end(), uniform) - + partial_sums.begin(); + if (i_bin > 0) { + uniform -= partial_sums[i_bin - 1]; + } +#else + int64_t i_bin = 0; + while (i_bin + 1 < n_bins && uniform > weight_a[i_bin]) { + uniform -= weight_a[i_bin]; + ++i_bin; + } +#endif + + // Now i_bin identifies the bin the quantile lives in, we use + // straight line interpolation to find the position of the + // quantile within the bin, and write it to *output_p. + + float bin_start = bin_a[i_bin]; + float bin_end = bin_a[i_bin + 1]; + float bin_weight = weight_a[i_bin]; + float output_value = bin_start; + if (uniform > bin_weight) { + output_value = bin_end; + } else if (bin_weight > eps) { + output_value += (uniform / bin_weight) * (bin_end - bin_start); + } + *output_p = output_value; + ++output_p; + } + } +} + +} // anonymous namespace + +void SamplePdfCpu( + const torch::Tensor& bins, + const torch::Tensor& weights, + const torch::Tensor& outputs, + float eps) { + const int64_t batch_size = bins.size(0); + const int64_t max_threads = std::min(4, at::get_num_threads()); + const int64_t n_threads = std::min(max_threads, batch_size); + if (batch_size == 0) { + return; + } + + // SamplePdfCpu_worker does the work of this function. We send separate ranges + // of batch elements to that function in nThreads-1 separate threads. + + std::vector threads; + threads.reserve(n_threads - 1); + const int64_t batch_elements_per_thread = 1 + (batch_size - 1) / n_threads; + int64_t start_batch = 0; + for (int iThread = 0; iThread < n_threads - 1; ++iThread) { + threads.emplace_back( + SamplePdfCpu_worker, + bins, + weights, + outputs, + eps, + start_batch, + start_batch + batch_elements_per_thread); + start_batch += batch_elements_per_thread; + } + + // The remaining batch elements are calculated in this threads. 
If nThreads is + // 1 then all the work happens in this line. + SamplePdfCpu_worker(bins, weights, outputs, eps, start_batch, batch_size); + for (auto&& thread : threads) { + thread.join(); + } + torch::autograd::increment_version(outputs); +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/data_loader_map_provider.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/data_loader_map_provider.py new file mode 100644 index 0000000000000000000000000000000000000000..1e33887b7b295630adeebb200f75fe2a5db4a6df --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/data_loader_map_provider.py @@ -0,0 +1,526 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from dataclasses import dataclass +from enum import Enum +from typing import Iterator, List, Optional, Tuple + +import torch +from pytorch3d.implicitron.tools.config import registry, ReplaceableBase +from torch.utils.data import ( + BatchSampler, + ConcatDataset, + DataLoader, + RandomSampler, + Sampler, +) + +from .dataset_base import DatasetBase +from .dataset_map_provider import DatasetMap +from .frame_data import FrameData +from .scene_batch_sampler import SceneBatchSampler +from .utils import is_known_frame_scalar + + +@dataclass +class DataLoaderMap: + """ + A collection of data loaders for Implicitron. 
+ + Members: + + train: a data loader for training + val: a data loader for validating during training + test: a data loader for final evaluation + """ + + train: Optional[DataLoader[FrameData]] + val: Optional[DataLoader[FrameData]] + test: Optional[DataLoader[FrameData]] + + def __getitem__(self, split: str) -> Optional[DataLoader[FrameData]]: + """ + Get one of the data loaders by key (name of data split) + """ + if split not in ["train", "val", "test"]: + raise ValueError(f"{split} was not a valid split name (train/val/test)") + return getattr(self, split) + + +class DataLoaderMapProviderBase(ReplaceableBase): + """ + Provider of a collection of data loaders for a given collection of datasets. + """ + + def get_data_loader_map(self, datasets: DatasetMap) -> DataLoaderMap: + """ + Returns a collection of data loaders for a given collection of datasets. + """ + raise NotImplementedError() + + +@registry.register +class SimpleDataLoaderMapProvider(DataLoaderMapProviderBase): + """ + Trivial implementation of DataLoaderMapProviderBase. + + If a dataset returns batches from get_eval_batches(), then + they will be what the corresponding dataloader returns, + independently of any of the fields on this class. + + Otherwise, returns shuffled batches. + """ + + batch_size: int = 1 + num_workers: int = 0 + dataset_length_train: int = 0 + dataset_length_val: int = 0 + dataset_length_test: int = 0 + + def get_data_loader_map(self, datasets: DatasetMap) -> DataLoaderMap: + """ + Returns a collection of data loaders for a given collection of datasets. 
+ """ + return DataLoaderMap( + train=self._make_data_loader( + datasets.train, + self.dataset_length_train, + ), + val=self._make_data_loader( + datasets.val, + self.dataset_length_val, + ), + test=self._make_data_loader( + datasets.test, + self.dataset_length_test, + ), + ) + + def _make_data_loader( + self, + dataset: Optional[DatasetBase], + num_batches: int, + ) -> Optional[DataLoader[FrameData]]: + """ + Returns the dataloader for a dataset. + + Args: + dataset: the dataset + num_batches: possible ceiling on number of batches per epoch + """ + if dataset is None: + return None + + data_loader_kwargs = { + "num_workers": self.num_workers, + "collate_fn": dataset.frame_data_type.collate, + } + + eval_batches = dataset.get_eval_batches() + if eval_batches is not None: + return DataLoader( + dataset, + batch_sampler=eval_batches, + **data_loader_kwargs, + ) + + if num_batches > 0: + num_samples = self.batch_size * num_batches + else: + num_samples = None + + # sample with replacement only if a custom number of samples is specified + sampler = RandomSampler( + dataset, + replacement=num_samples is not None, + num_samples=num_samples, + ) + + batch_sampler = BatchSampler(sampler, self.batch_size, drop_last=True) + return DataLoader( + dataset, + batch_sampler=batch_sampler, + **data_loader_kwargs, + ) + + +class DoublePoolBatchSampler(Sampler[List[int]]): + """ + Batch sampler for making random batches of a single frame + from one list and a number of known frames from another list. + """ + + def __init__( + self, + first_indices: List[int], + rest_indices: List[int], + batch_size: int, + replacement: bool, + num_batches: Optional[int] = None, + ) -> None: + """ + Args: + first_indices: indexes of dataset items to use as the first element + of each batch. + rest_indices: indexes of dataset items to use as the subsequent + elements of each batch. Not used if batch_size==1. + batch_size: The common size of any batch. 
+ replacement: Whether the sampling of first items is with replacement. + num_batches: The number of batches in an epoch. If 0 or None, + one epoch is the length of `first_indices`. + """ + self.first_indices = first_indices + self.rest_indices = rest_indices + self.batch_size = batch_size + self.replacement = replacement + self.num_batches = None if num_batches == 0 else num_batches + + if batch_size - 1 > len(rest_indices): + raise ValueError( + f"Cannot make up ({batch_size})-batches from {len(self.rest_indices)}" + ) + + # copied from RandomSampler + seed = int(torch.empty((), dtype=torch.int64).random_().item()) + self.generator = torch.Generator() + self.generator.manual_seed(seed) + + def __len__(self) -> int: + if self.num_batches is not None: + return self.num_batches + return len(self.first_indices) + + def __iter__(self) -> Iterator[List[int]]: + num_batches = self.num_batches + if self.replacement: + i_first = torch.randint( + len(self.first_indices), + size=(len(self),), + generator=self.generator, + ) + elif num_batches is not None: + n_copies = 1 + (num_batches - 1) // len(self.first_indices) + raw_indices = [ + torch.randperm(len(self.first_indices), generator=self.generator) + for _ in range(n_copies) + ] + i_first = torch.cat(raw_indices)[:num_batches] + else: + i_first = torch.randperm(len(self.first_indices), generator=self.generator) + first_indices = [self.first_indices[i] for i in i_first] + + if self.batch_size == 1: + for first_index in first_indices: + yield [first_index] + return + + for first_index in first_indices: + # Consider using this class in a program which sets the seed. This use + # of randperm means that rerunning with a higher batch_size + # results in batches whose first elements as the first run. 
+ i_rest = torch.randperm( + len(self.rest_indices), + generator=self.generator, + )[: self.batch_size - 1] + yield [first_index] + [self.rest_indices[i] for i in i_rest] + + +class BatchConditioningType(Enum): + """ + Ways to add conditioning frames for the val and test batches. + + SAME: Use the corresponding dataset for all elements of val batches + without regard to frame type. + TRAIN: Use the corresponding dataset for the first element of each + batch, and the training dataset for the extra conditioning + elements. No regard to frame type. + KNOWN: Use frames from the corresponding dataset but separate them + according to their frame_type. Each batch will contain one UNSEEN + frame followed by many KNOWN frames. + """ + + SAME = "same" + TRAIN = "train" + KNOWN = "known" + + +@registry.register +class SequenceDataLoaderMapProvider(DataLoaderMapProviderBase): + """ + Default implementation of DataLoaderMapProviderBase. + + If a dataset returns batches from get_eval_batches(), then + they will be what the corresponding dataloader returns, + independently of any of the fields on this class. + + If conditioning is not required, then the batch size should + be set as 1, and most of the fields do not matter. + + If conditioning is required, each batch will contain one main + frame first to predict and the, rest of the elements are for + conditioning. + + If images_per_seq_options is left empty, the conditioning + frames are picked according to the conditioning type given. + This does not have regard to the order of frames in a + scene, or which frames belong to what scene. + + If images_per_seq_options is given, then the conditioning types + must be SAME and the remaining fields are used. + + Members: + batch_size: The size of the batch of the data loader. + num_workers: Number of data-loading threads in each data loader. + dataset_length_train: The number of batches in a training epoch. Or 0 to mean + an epoch is the length of the training set. 
+ dataset_length_val: The number of batches in a validation epoch. Or 0 to mean + an epoch is the length of the validation set. + dataset_length_test: The number of batches in a testing epoch. Or 0 to mean + an epoch is the length of the test set. + train_conditioning_type: Whether the train data loader should use + only known frames for conditioning. + Only used if batch_size>1 and train dataset is + present and does not return eval_batches. + val_conditioning_type: Whether the val data loader should use + training frames or known frames for conditioning. + Only used if batch_size>1 and val dataset is + present and does not return eval_batches. + test_conditioning_type: Whether the test data loader should use + training frames or known frames for conditioning. + Only used if batch_size>1 and test dataset is + present and does not return eval_batches. + images_per_seq_options: Possible numbers of frames sampled per sequence in a batch. + If a conditioning_type is KNOWN or TRAIN, then this must be left at its initial + value. Empty (the default) means that we are not careful about which frames + come from which scene. + sample_consecutive_frames: if True, will sample a contiguous interval of frames + in the sequence. It first sorts the frames by timestamps when available, + otherwise by frame numbers, finds the connected segments within the sequence + of sufficient length, then samples a random pivot element among them and + ideally uses it as a middle of the temporal window, shifting the borders + where necessary. This strategy mitigates the bias against shorter segments + and their boundaries. + consecutive_frames_max_gap: if a number > 0, then used to define the maximum + difference in frame_number of neighbouring frames when forming connected + segments; if both this and consecutive_frames_max_gap_seconds are 0s, + the whole sequence is considered a segment regardless of frame numbers. 
+ consecutive_frames_max_gap_seconds: if a number > 0.0, then used to define the + maximum difference in frame_timestamp of neighbouring frames when forming + connected segments; if both this and consecutive_frames_max_gap are 0s, + the whole sequence is considered a segment regardless of frame timestamps. + """ + + batch_size: int = 1 + num_workers: int = 0 + dataset_length_train: int = 0 + dataset_length_val: int = 0 + dataset_length_test: int = 0 + train_conditioning_type: BatchConditioningType = BatchConditioningType.SAME + val_conditioning_type: BatchConditioningType = BatchConditioningType.SAME + test_conditioning_type: BatchConditioningType = BatchConditioningType.KNOWN + images_per_seq_options: Tuple[int, ...] = () + sample_consecutive_frames: bool = False + consecutive_frames_max_gap: int = 0 + consecutive_frames_max_gap_seconds: float = 0.1 + + def get_data_loader_map(self, datasets: DatasetMap) -> DataLoaderMap: + """ + Returns a collection of data loaders for a given collection of datasets. + """ + return DataLoaderMap( + train=self._make_data_loader( + datasets.train, + self.dataset_length_train, + datasets.train, + self.train_conditioning_type, + ), + val=self._make_data_loader( + datasets.val, + self.dataset_length_val, + datasets.train, + self.val_conditioning_type, + ), + test=self._make_data_loader( + datasets.test, + self.dataset_length_test, + datasets.train, + self.test_conditioning_type, + ), + ) + + def _make_data_loader( + self, + dataset: Optional[DatasetBase], + num_batches: int, + train_dataset: Optional[DatasetBase], + conditioning_type: BatchConditioningType, + ) -> Optional[DataLoader[FrameData]]: + """ + Returns the dataloader for a dataset. 
+ + Args: + dataset: the dataset + num_batches: possible ceiling on number of batches per epoch + train_dataset: the training dataset, used if conditioning_type==TRAIN + conditioning_type: source for padding of batches + """ + if dataset is None: + return None + + data_loader_kwargs = { + "num_workers": self.num_workers, + "collate_fn": dataset.frame_data_type.collate, + } + + eval_batches = dataset.get_eval_batches() + if eval_batches is not None: + return DataLoader( + dataset, + batch_sampler=eval_batches, + **data_loader_kwargs, + ) + + scenes_matter = len(self.images_per_seq_options) > 0 + if scenes_matter and conditioning_type != BatchConditioningType.SAME: + raise ValueError( + f"{conditioning_type} cannot be used with images_per_seq " + + str(self.images_per_seq_options) + ) + + if self.batch_size == 1 or ( + not scenes_matter and conditioning_type == BatchConditioningType.SAME + ): + return self._simple_loader(dataset, num_batches, data_loader_kwargs) + + if scenes_matter: + assert conditioning_type == BatchConditioningType.SAME + batch_sampler = SceneBatchSampler( + dataset, + self.batch_size, + num_batches=len(dataset) if num_batches <= 0 else num_batches, + images_per_seq_options=self.images_per_seq_options, + sample_consecutive_frames=self.sample_consecutive_frames, + consecutive_frames_max_gap=self.consecutive_frames_max_gap, + consecutive_frames_max_gap_seconds=self.consecutive_frames_max_gap_seconds, + ) + return DataLoader( + dataset, + batch_sampler=batch_sampler, + **data_loader_kwargs, + ) + + if conditioning_type == BatchConditioningType.TRAIN: + return self._train_loader( + dataset, train_dataset, num_batches, data_loader_kwargs + ) + + assert conditioning_type == BatchConditioningType.KNOWN + return self._known_loader(dataset, num_batches, data_loader_kwargs) + + def _simple_loader( + self, + dataset: DatasetBase, + num_batches: int, + data_loader_kwargs: dict, + ) -> DataLoader[FrameData]: + """ + Return a simple loader for frames in the 
dataset. + + This is equivalent to + Dataloader(dataset, batch_size=self.batch_size, **data_loader_kwargs) + except that num_batches is fixed. + + Args: + dataset: the dataset + num_batches: possible ceiling on number of batches per epoch + data_loader_kwargs: common args for dataloader + """ + if num_batches > 0: + num_samples = self.batch_size * num_batches + replacement = True + else: + num_samples = None + replacement = False + sampler = RandomSampler( + dataset, replacement=replacement, num_samples=num_samples + ) + batch_sampler = BatchSampler(sampler, self.batch_size, drop_last=True) + return DataLoader( + dataset, + batch_sampler=batch_sampler, + **data_loader_kwargs, + ) + + def _train_loader( + self, + dataset: DatasetBase, + train_dataset: Optional[DatasetBase], + num_batches: int, + data_loader_kwargs: dict, + ) -> DataLoader[FrameData]: + """ + Return the loader for TRAIN conditioning. + + Args: + dataset: the dataset + train_dataset: the training dataset + num_batches: possible ceiling on number of batches per epoch + data_loader_kwargs: common args for dataloader + """ + if train_dataset is None: + raise ValueError("No training data for conditioning.") + length = len(dataset) + first_indices = list(range(length)) + rest_indices = list(range(length, length + len(train_dataset))) + sampler = DoublePoolBatchSampler( + first_indices=first_indices, + rest_indices=rest_indices, + batch_size=self.batch_size, + replacement=True, + num_batches=num_batches, + ) + return DataLoader( + ConcatDataset([dataset, train_dataset]), + batch_sampler=sampler, + **data_loader_kwargs, + ) + + def _known_loader( + self, + dataset: DatasetBase, + num_batches: int, + data_loader_kwargs: dict, + ) -> DataLoader[FrameData]: + """ + Return the loader for KNOWN conditioning. 
+ + Args: + dataset: the dataset + num_batches: possible ceiling on number of batches per epoch + data_loader_kwargs: common args for dataloader + """ + first_indices, rest_indices = [], [] + for idx in range(len(dataset)): + frame_type = dataset[idx].frame_type + assert isinstance(frame_type, str) + if is_known_frame_scalar(frame_type): + rest_indices.append(idx) + else: + first_indices.append(idx) + sampler = DoublePoolBatchSampler( + first_indices=first_indices, + rest_indices=rest_indices, + batch_size=self.batch_size, + replacement=True, + num_batches=num_batches, + ) + return DataLoader( + dataset, + batch_sampler=sampler, + **data_loader_kwargs, + ) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/data_source.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/data_source.py new file mode 100644 index 0000000000000000000000000000000000000000..d282bd3c90af1c3bb2043238ae6280c6cb12c861 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/data_source.py @@ -0,0 +1,111 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from typing import Optional, Tuple + +from pytorch3d.implicitron.tools.config import ( + registry, + ReplaceableBase, + run_auto_creation, +) +from pytorch3d.renderer.cameras import CamerasBase + +from .data_loader_map_provider import DataLoaderMap, DataLoaderMapProviderBase +from .dataset_map_provider import DatasetMap, DatasetMapProviderBase + + +class DataSourceBase(ReplaceableBase): + """ + Base class for a data source in Implicitron. It encapsulates Dataset + and DataLoader configuration. 
+ """ + + def get_datasets_and_dataloaders(self) -> Tuple[DatasetMap, DataLoaderMap]: + raise NotImplementedError() + + @property + def all_train_cameras(self) -> Optional[CamerasBase]: + """ + DEPRECATED! The property will be removed in future versions. + If the data is all for a single scene, a list + of the known training cameras for that scene, which is + used for evaluating the viewpoint difficulty of the + unseen cameras. + """ + raise NotImplementedError() + + +@registry.register +class ImplicitronDataSource(DataSourceBase): + """ + Represents the data used in Implicitron. This is the only implementation + of DataSourceBase provided. + + Members: + dataset_map_provider_class_type: identifies type for dataset_map_provider. + e.g. JsonIndexDatasetMapProvider for Co3D. + data_loader_map_provider_class_type: identifies type for data_loader_map_provider. + """ + + # pyre-fixme[13]: Attribute `dataset_map_provider` is never initialized. + dataset_map_provider: DatasetMapProviderBase + # pyre-fixme[13]: Attribute `dataset_map_provider_class_type` is never initialized. + dataset_map_provider_class_type: str + # pyre-fixme[13]: Attribute `data_loader_map_provider` is never initialized. 
+ data_loader_map_provider: DataLoaderMapProviderBase + data_loader_map_provider_class_type: str = "SequenceDataLoaderMapProvider" + + @classmethod + def pre_expand(cls) -> None: + # use try/finally to bypass cinder's lazy imports + try: + from .blender_dataset_map_provider import ( # noqa: F401 + BlenderDatasetMapProvider, + ) + from .json_index_dataset_map_provider import ( # noqa: F401 + JsonIndexDatasetMapProvider, + ) + from .json_index_dataset_map_provider_v2 import ( # noqa: F401 + JsonIndexDatasetMapProviderV2, + ) + from .llff_dataset_map_provider import LlffDatasetMapProvider # noqa: F401 + from .rendered_mesh_dataset_map_provider import ( # noqa: F401 + RenderedMeshDatasetMapProvider, + ) + from .train_eval_data_loader_provider import ( # noqa: F401 + TrainEvalDataLoaderMapProvider, + ) + + try: + from .sql_dataset_provider import ( # noqa: F401 # pyre-ignore + SqlIndexDatasetMapProvider, + ) + except ModuleNotFoundError: + pass # environment without SQL dataset + finally: + pass + + def __post_init__(self): + run_auto_creation(self) + self._all_train_cameras_cache: Optional[Tuple[Optional[CamerasBase]]] = None + + def get_datasets_and_dataloaders(self) -> Tuple[DatasetMap, DataLoaderMap]: + datasets = self.dataset_map_provider.get_dataset_map() + dataloaders = self.data_loader_map_provider.get_data_loader_map(datasets) + return datasets, dataloaders + + @property + def all_train_cameras(self) -> Optional[CamerasBase]: + """ + DEPRECATED! The property will be removed in future versions. 
+ """ + if self._all_train_cameras_cache is None: # pyre-ignore[16] + all_train_cameras = self.dataset_map_provider.get_all_train_cameras() + self._all_train_cameras_cache = (all_train_cameras,) + + return self._all_train_cameras_cache[0] diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/dataset_map_provider.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/dataset_map_provider.py new file mode 100644 index 0000000000000000000000000000000000000000..7133b36421efce0051cec6ecf9a9c5d34cacdaa8 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/dataset_map_provider.py @@ -0,0 +1,142 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +import logging +import os +from dataclasses import dataclass +from typing import Iterable, Iterator, Optional + +from iopath.common.file_io import PathManager +from pytorch3d.implicitron.tools.config import registry, ReplaceableBase +from pytorch3d.renderer.cameras import CamerasBase + +from .dataset_base import DatasetBase + + +@dataclass +class DatasetMap: + """ + A collection of datasets for implicitron. 
+ + Members: + + train: a dataset for training + val: a dataset for validating during training + test: a dataset for final evaluation + """ + + train: Optional[DatasetBase] + val: Optional[DatasetBase] + test: Optional[DatasetBase] + + def __getitem__(self, split: str) -> Optional[DatasetBase]: + """ + Get one of the datasets by key (name of data split) + """ + if split not in ["train", "val", "test"]: + raise ValueError(f"{split} was not a valid split name (train/val/test)") + return getattr(self, split) + + def iter_datasets(self) -> Iterator[DatasetBase]: + """ + Iterator over all datasets. + """ + if self.train is not None: + yield self.train + if self.val is not None: + yield self.val + if self.test is not None: + yield self.test + + def join(self, other_dataset_maps: Iterable["DatasetMap"]) -> None: + """ + Joins the current DatasetMap with other dataset maps from the input list. + + For each subset of each dataset map (train/val/test), the function + omits joining the subsets that are None. + + Note the train/val/test datasets of the current dataset map will be + modified in-place. + + Args: + other_dataset_maps: The list of dataset maps to be joined into the + current dataset map. + """ + for set_ in ["train", "val", "test"]: + dataset_list = [ + getattr(self, set_), + *[getattr(dmap, set_) for dmap in other_dataset_maps], + ] + dataset_list = [d for d in dataset_list if d is not None] + if len(dataset_list) == 0: + setattr(self, set_, None) + continue + d0 = dataset_list[0] + if len(dataset_list) > 1: + d0.join(dataset_list[1:]) + setattr(self, set_, d0) + + +class DatasetMapProviderBase(ReplaceableBase): + """ + Base class for a provider of training / validation and testing + dataset objects. + """ + + def get_dataset_map(self) -> DatasetMap: + """ + Returns: + An object containing the torch.Dataset objects in train/val/test fields. + """ + raise NotImplementedError() + + def get_all_train_cameras(self) -> Optional[CamerasBase]: + """ + DEPRECATED! 
The function will be removed in future versions. + If the data is all for a single scene, returns a list + of the known training cameras for that scene, which is + used for evaluating the difficulty of the unknown + cameras. Otherwise return None. + """ + raise NotImplementedError() + + +@registry.register +class PathManagerFactory(ReplaceableBase): + """ + Base class and default implementation of a tool which dataset_map_provider implementations + may use to construct a path manager if needed. + + Args: + silence_logs: Whether to reduce log output from iopath library. + """ + + silence_logs: bool = True + + def get(self) -> Optional[PathManager]: + """ + Makes a PathManager if needed. + For open source users, this function should always return None. + Internally, this allows manifold access. + """ + if os.environ.get("INSIDE_RE_WORKER", False): + return None + + try: + from iopath.fb.manifold import ManifoldPathHandler + except ImportError: + return None + + if self.silence_logs: + logging.getLogger("iopath.fb.manifold").setLevel(logging.CRITICAL) + logging.getLogger("iopath.common.file_io").setLevel(logging.CRITICAL) + + path_manager = PathManager() + path_manager.register_handler(ManifoldPathHandler()) + + return path_manager diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/json_index_dataset_map_provider.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/json_index_dataset_map_provider.py new file mode 100644 index 0000000000000000000000000000000000000000..0d4894d6be62c50c768d0b72669a038507be8d73 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/json_index_dataset_map_provider.py @@ -0,0 +1,323 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
+# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + + +import json +import os +from typing import Dict, List, Optional, Tuple, Type + +from omegaconf import DictConfig +from pytorch3d.implicitron.tools.config import ( + expand_args_fields, + registry, + run_auto_creation, +) +from pytorch3d.renderer.cameras import CamerasBase + +from .dataset_map_provider import DatasetMap, DatasetMapProviderBase, PathManagerFactory +from .json_index_dataset import JsonIndexDataset + +from .utils import ( + DATASET_TYPE_KNOWN, + DATASET_TYPE_TEST, + DATASET_TYPE_TRAIN, + DATASET_TYPE_UNKNOWN, +) + + +# fmt: off +CO3D_CATEGORIES: List[str] = list(reversed([ + "baseballbat", "banana", "bicycle", "microwave", "tv", + "cellphone", "toilet", "hairdryer", "couch", "kite", "pizza", + "umbrella", "wineglass", "laptop", + "hotdog", "stopsign", "frisbee", "baseballglove", + "cup", "parkingmeter", "backpack", "toyplane", "toybus", + "handbag", "chair", "keyboard", "car", "motorcycle", + "carrot", "bottle", "sandwich", "remote", "bowl", "skateboard", + "toaster", "mouse", "toytrain", "book", "toytruck", + "orange", "broccoli", "plant", "teddybear", + "suitcase", "bench", "ball", "cake", + "vase", "hydrant", "apple", "donut", +])) +# fmt: on + +_CO3D_DATASET_ROOT: str = os.getenv("CO3D_DATASET_ROOT", "") + +# _NEED_CONTROL is a list of those elements of JsonIndexDataset which +# are not directly specified for it in the config but come from the +# DatasetMapProvider. +_NEED_CONTROL: Tuple[str, ...] 
= ( + "dataset_root", + "eval_batches", + "eval_batch_index", + "n_frames_per_sequence", + "path_manager", + "pick_sequence", + "subsets", + "frame_annotations_file", + "sequence_annotations_file", + "subset_lists_file", +) + + +@registry.register +class JsonIndexDatasetMapProvider(DatasetMapProviderBase): + """ + Generates the training / validation and testing dataset objects for + a dataset laid out on disk like Co3D, with annotations in json files. + + Args: + category: The object category of the dataset. + task_str: "multisequence" or "singlesequence". + dataset_root: The root folder of the dataset. + n_frames_per_sequence: Randomly sample #n_frames_per_sequence frames + in each sequence. + test_on_train: Construct validation and test datasets from + the training subset. + restrict_sequence_name: Restrict the dataset sequences to the ones + present in the given list of names. + test_restrict_sequence_id: The ID of the loaded sequence. + Active for task_str='singlesequence'. + assert_single_seq: Assert that only frames from a single sequence + are present in all generated datasets. + only_test_set: Load only the test set. + dataset_class_type: name of class (JsonIndexDataset or a subclass) + to use for the dataset. + dataset_X_args (e.g. dataset_JsonIndexDataset_args): arguments passed + to all the dataset constructors. + path_manager_factory: (Optional) An object that generates an instance of + PathManager that can translate provided file paths. + path_manager_factory_class_type: The class type of `path_manager_factory`. + """ + + # pyre-fixme[13]: Attribute `category` is never initialized. + category: str + task_str: str = "singlesequence" + dataset_root: str = _CO3D_DATASET_ROOT + n_frames_per_sequence: int = -1 + test_on_train: bool = False + restrict_sequence_name: Tuple[str, ...] = () + test_restrict_sequence_id: int = -1 + assert_single_seq: bool = False + only_test_set: bool = False + # pyre-fixme[13]: Attribute `dataset` is never initialized. 
+ dataset: JsonIndexDataset + dataset_class_type: str = "JsonIndexDataset" + # pyre-fixme[13]: Attribute `path_manager_factory` is never initialized. + path_manager_factory: PathManagerFactory + path_manager_factory_class_type: str = "PathManagerFactory" + + @classmethod + def dataset_tweak_args(cls, type, args: DictConfig) -> None: + """ + Called by get_default_args(JsonIndexDatasetMapProvider) to + not expose certain fields of each dataset class. + """ + for key in _NEED_CONTROL: + del args[key] + + def create_dataset(self): + """ + Prevent the member named dataset from being created. + """ + return + + def __post_init__(self): + super().__init__() + run_auto_creation(self) + if self.only_test_set and self.test_on_train: + raise ValueError("Cannot have only_test_set and test_on_train") + + path_manager = self.path_manager_factory.get() + + # TODO: + # - implement loading multiple categories + + frame_file = os.path.join( + self.dataset_root, self.category, "frame_annotations.jgz" + ) + sequence_file = os.path.join( + self.dataset_root, self.category, "sequence_annotations.jgz" + ) + subset_lists_file = os.path.join( + self.dataset_root, self.category, "set_lists.json" + ) + common_kwargs = { + "dataset_root": self.dataset_root, + "path_manager": path_manager, + "frame_annotations_file": frame_file, + "sequence_annotations_file": sequence_file, + "subset_lists_file": subset_lists_file, + **getattr(self, f"dataset_{self.dataset_class_type}_args"), + } + + # This maps the common names of the dataset subsets ("train"/"val"/"test") + # to the names of the subsets in the CO3D dataset. 
+ set_names_mapping = _get_co3d_set_names_mapping( + self.task_str, + self.test_on_train, + self.only_test_set, + ) + + # load the evaluation batches + batch_indices_path = os.path.join( + self.dataset_root, + self.category, + f"eval_batches_{self.task_str}.json", + ) + if path_manager is not None: + batch_indices_path = path_manager.get_local_path(batch_indices_path) + if not os.path.isfile(batch_indices_path): + # The batch indices file does not exist. + # Most probably the user has not specified the root folder. + raise ValueError( + f"Looking for batch indices in {batch_indices_path}. " + + "Please specify a correct dataset_root folder." + ) + + with open(batch_indices_path, "r") as f: + eval_batch_index = json.load(f) + restrict_sequence_name = self.restrict_sequence_name + + if self.task_str == "singlesequence": + if ( + self.test_restrict_sequence_id is None + or self.test_restrict_sequence_id < 0 + ): + raise ValueError( + "Please specify an integer id 'test_restrict_sequence_id'" + + " of the sequence considered for 'singlesequence'" + + " training and evaluation." + ) + if len(self.restrict_sequence_name) > 0: + raise ValueError( + "For the 'singlesequence' task, the restrict_sequence_name has" + " to be unset while test_restrict_sequence_id has to be set to an" + " integer defining the order of the evaluation sequence." 
+ ) + # a sort-stable set() equivalent: + eval_batches_sequence_names = list( + {b[0][0]: None for b in eval_batch_index}.keys() + ) + eval_sequence_name = eval_batches_sequence_names[ + self.test_restrict_sequence_id + ] + eval_batch_index = [ + b for b in eval_batch_index if b[0][0] == eval_sequence_name + ] + # overwrite the restrict_sequence_name + restrict_sequence_name = [eval_sequence_name] + if len(restrict_sequence_name) > 0: + eval_batch_index = [ + b for b in eval_batch_index if b[0][0] in restrict_sequence_name + ] + + dataset_type: Type[JsonIndexDataset] = registry.get( + JsonIndexDataset, self.dataset_class_type + ) + expand_args_fields(dataset_type) + train_dataset = None + if not self.only_test_set: + train_dataset = dataset_type( + n_frames_per_sequence=self.n_frames_per_sequence, + subsets=set_names_mapping["train"], + pick_sequence=restrict_sequence_name, + **common_kwargs, + ) + if self.test_on_train: + assert train_dataset is not None + val_dataset = test_dataset = train_dataset + else: + val_dataset = dataset_type( + n_frames_per_sequence=-1, + subsets=set_names_mapping["val"], + pick_sequence=restrict_sequence_name, + **common_kwargs, + ) + test_dataset = dataset_type( + n_frames_per_sequence=-1, + subsets=set_names_mapping["test"], + pick_sequence=restrict_sequence_name, + eval_batch_index=eval_batch_index, + **common_kwargs, + ) + dataset_map = DatasetMap( + train=train_dataset, val=val_dataset, test=test_dataset + ) + + if self.assert_single_seq: + # check there's only one sequence in all datasets + sequence_names = { + sequence_name + for dset in dataset_map.iter_datasets() + for sequence_name in dset.sequence_names() + } + if len(sequence_names) > 1: + raise ValueError("Multiple sequences loaded but expected one") + + self.dataset_map = dataset_map + + def get_dataset_map(self) -> DatasetMap: + # pyre-ignore[16] + return self.dataset_map + + def get_all_train_cameras(self) -> Optional[CamerasBase]: + if self.task_str == "multisequence": 
+ return None + + assert self.task_str == "singlesequence" + + # pyre-ignore[16] + train_dataset = self.dataset_map.train + assert isinstance(train_dataset, JsonIndexDataset) + return train_dataset.get_all_train_cameras() + + +def _get_co3d_set_names_mapping( + task_str: str, + test_on_train: bool, + only_test: bool, +) -> Dict[str, List[str]]: + """ + Returns the mapping of the common dataset subset names ("train"/"val"/"test") + to the names of the corresponding subsets in the CO3D dataset + ("test_known"/"test_unseen"/"train_known"/"train_unseen"). + + The keys returned will be + - train (if not only_test) + - val (if not test_on_train) + - test (if not test_on_train) + """ + single_seq = task_str == "singlesequence" + + if only_test: + set_names_mapping = {} + else: + set_names_mapping = { + "train": [ + (DATASET_TYPE_TEST if single_seq else DATASET_TYPE_TRAIN) + + "_" + + DATASET_TYPE_KNOWN + ] + } + if not test_on_train: + prefixes = [DATASET_TYPE_TEST] + if not single_seq: + prefixes.append(DATASET_TYPE_TRAIN) + set_names_mapping.update( + { + dset: [ + p + "_" + t + for p in prefixes + for t in [DATASET_TYPE_KNOWN, DATASET_TYPE_UNKNOWN] + ] + for dset in ["val", "test"] + } + ) + + return set_names_mapping diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/json_index_dataset_map_provider_v2.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/json_index_dataset_map_provider_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..765de1097db259ac7c8fe3bea531a881c6621fe3 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/json_index_dataset_map_provider_v2.py @@ -0,0 +1,483 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
+# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + + +import copy +import json +import logging +import multiprocessing +import os +import warnings +from collections import defaultdict +from typing import Dict, List, Optional, Tuple, Type, Union + +import numpy as np +from iopath.common.file_io import PathManager + +from omegaconf import DictConfig +from pytorch3d.implicitron.dataset.dataset_map_provider import ( + DatasetMap, + DatasetMapProviderBase, + PathManagerFactory, +) +from pytorch3d.implicitron.dataset.json_index_dataset import JsonIndexDataset +from pytorch3d.implicitron.tools.config import ( + expand_args_fields, + registry, + run_auto_creation, +) + +from pytorch3d.renderer.cameras import CamerasBase +from tqdm import tqdm + + +_CO3DV2_DATASET_ROOT: str = os.getenv("CO3DV2_DATASET_ROOT", "") + +# _NEED_CONTROL is a list of those elements of JsonIndexDataset which +# are not directly specified for it in the config but come from the +# DatasetMapProvider. +_NEED_CONTROL: Tuple[str, ...] = ( + "dataset_root", + "eval_batches", + "eval_batch_index", + "path_manager", + "subsets", + "frame_annotations_file", + "sequence_annotations_file", + "subset_lists_file", +) + +logger = logging.getLogger(__name__) + + +@registry.register +class JsonIndexDatasetMapProviderV2(DatasetMapProviderBase): + """ + Generates the training, validation, and testing dataset objects for + a dataset laid out on disk like CO3Dv2, with annotations in gzipped json files. + + The dataset is organized in the filesystem as follows:: + + self.dataset_root + ├── + │ ├── + │ │ ├── depth_masks + │ │ ├── depths + │ │ ├── images + │ │ ├── masks + │ │ └── pointcloud.ply + │ ├── + │ │ ├── depth_masks + │ │ ├── depths + │ │ ├── images + │ │ ├── masks + │ │ └── pointcloud.ply + │ ├── ... + │ ├── + │ ├── set_lists + │ ├── set_lists_.json + │ ├── set_lists_.json + │ ├── ... 
+ │ ├── set_lists_.json + │ ├── eval_batches + │ │ ├── eval_batches_.json + │ │ ├── eval_batches_.json + │ │ ├── ... + │ │ ├── eval_batches_.json + │ ├── frame_annotations.jgz + │ ├── sequence_annotations.jgz + ├── + ├── ... + ├── + + The dataset contains sequences named `` from `K` categories with + names ``. Each category comprises sequence folders + `/` containing the list of sequence images, depth maps, + foreground masks, and valid-depth masks `images`, `depths`, `masks`, and `depth_masks` + respectively. Furthermore, `//set_lists/` stores `M` + json files `set_lists_.json`, each describing a certain sequence subset. + + Users specify the loaded dataset subset by setting `self.subset_name` to one of the + available subset names ``. + + `frame_annotations.jgz` and `sequence_annotations.jgz` are gzipped json files containing + the list of all frames and sequences of the given category stored as lists of + `FrameAnnotation` and `SequenceAnnotation` objects respectivelly. + + Each `set_lists_.json` file contains the following dictionary:: + + { + "train": [ + (sequence_name: str, frame_number: int, image_path: str), + ... + ], + "val": [ + (sequence_name: str, frame_number: int, image_path: str), + ... + ], + "test": [ + (sequence_name: str, frame_number: int, image_path: str), + ... + ], + ] + + defining the list of frames (identified with their `sequence_name` and `frame_number`) + in the "train", "val", and "test" subsets of the dataset. + Note that `frame_number` can be obtained only from `frame_annotations.jgz` and + does not necesarrily correspond to the numeric suffix of the corresponding image + file name (e.g. a file `//images/frame00005.jpg` can + have its frame number set to `20`, not 5). + + Each `eval_batches_.json` file contains a list of evaluation examples + in the following form:: + + [ + [ # batch 1 + (sequence_name: str, frame_number: int, image_path: str), + ... 
+ ], + [ # batch 1 + (sequence_name: str, frame_number: int, image_path: str), + ... + ], + ] + + Note that the evaluation examples always come from the `"test"` subset of the dataset. + (test frames can repeat across batches). + + Args: + category: Dataset categories to load expressed as a string of comma-separated + category names (e.g. `"apple,car,orange"`). + subset_name: The name of the dataset subset. For CO3Dv2, these include + e.g. "manyview_dev_0", "fewview_test", ... + dataset_root: The root folder of the dataset. + test_on_train: Construct validation and test datasets from + the training subset. + only_test_set: Load only the test set. Incompatible with `test_on_train`. + load_eval_batches: Load the file containing eval batches pointing to the + test dataset. + n_known_frames_for_test: Add a certain number of known frames to each + eval batch. Useful for evaluating models that require + source views as input (e.g. NeRF-WCE / PixelNeRF). + dataset_args: Specifies additional arguments to the + JsonIndexDataset constructor call. + path_manager_factory: (Optional) An object that generates an instance of + PathManager that can translate provided file paths. + path_manager_factory_class_type: The class type of `path_manager_factory`. + """ + + # pyre-fixme[13]: Attribute `category` is never initialized. + category: str + # pyre-fixme[13]: Attribute `subset_name` is never initialized. + subset_name: str + dataset_root: str = _CO3DV2_DATASET_ROOT + + test_on_train: bool = False + only_test_set: bool = False + load_eval_batches: bool = True + num_load_workers: int = 4 + + n_known_frames_for_test: int = 0 + + dataset_class_type: str = "JsonIndexDataset" + # pyre-fixme[13]: Attribute `dataset` is never initialized. + dataset: JsonIndexDataset + + # pyre-fixme[13]: Attribute `path_manager_factory` is never initialized. 
+ path_manager_factory: PathManagerFactory + path_manager_factory_class_type: str = "PathManagerFactory" + + def __post_init__(self): + super().__init__() + run_auto_creation(self) + + if self.only_test_set and self.test_on_train: + raise ValueError("Cannot have only_test_set and test_on_train") + + if "," in self.category: + # a comma-separated list of categories to load + categories = [c.strip() for c in self.category.split(",")] + logger.info(f"Loading a list of categories: {str(categories)}.") + with multiprocessing.Pool( + processes=min(self.num_load_workers, len(categories)) + ) as pool: + category_dataset_maps = list( + tqdm( + pool.imap(self._load_category, categories), + total=len(categories), + ) + ) + dataset_map = category_dataset_maps[0] + dataset_map.join(category_dataset_maps[1:]) + + else: + # one category to load + dataset_map = self._load_category(self.category) + + self.dataset_map = dataset_map + + def _load_category(self, category: str) -> DatasetMap: + + frame_file = os.path.join(self.dataset_root, category, "frame_annotations.jgz") + sequence_file = os.path.join( + self.dataset_root, category, "sequence_annotations.jgz" + ) + + path_manager = self.path_manager_factory.get() + + if path_manager is not None: + path_managed_frame_file = path_manager.get_local_path(frame_file) + else: + path_managed_frame_file = frame_file + if not os.path.isfile(path_managed_frame_file): + # The frame_file does not exist. + # Most probably the user has not specified the root folder. + raise ValueError( + f"Looking for frame annotations in {path_managed_frame_file}." + + " Please specify a correct dataset_root folder." + + " Note: By default the root folder is taken from the" + + " CO3DV2_DATASET_ROOT environment variable." 
+ ) + + # setup the common dataset arguments + common_dataset_kwargs = getattr(self, f"dataset_{self.dataset_class_type}_args") + common_dataset_kwargs = { + **common_dataset_kwargs, + "dataset_root": self.dataset_root, + "frame_annotations_file": frame_file, + "sequence_annotations_file": sequence_file, + "subsets": None, + "subset_lists_file": "", + "path_manager": path_manager, + } + + # get the used dataset type + dataset_type: Type[JsonIndexDataset] = registry.get( + JsonIndexDataset, self.dataset_class_type + ) + expand_args_fields(dataset_type) + + dataset = dataset_type(**common_dataset_kwargs) + + available_subset_names = self._get_available_subset_names(category) + logger.debug(f"Available subset names: {str(available_subset_names)}.") + if self.subset_name not in available_subset_names: + raise ValueError( + f"Unknown subset name {self.subset_name}." + + f" Choose one of available subsets: {str(available_subset_names)}." + ) + + # load the list of train/val/test frames + subset_mapping = self._load_annotation_json( + os.path.join(category, "set_lists", f"set_lists_{self.subset_name}.json") + ) + + # load the evaluation batches + if self.load_eval_batches: + eval_batch_index = self._load_annotation_json( + os.path.join( + category, + "eval_batches", + f"eval_batches_{self.subset_name}.json", + ) + ) + else: + eval_batch_index = None + + train_dataset = None + if not self.only_test_set: + # load the training set + logger.debug("Extracting train dataset.") + train_dataset = dataset.subset_from_frame_index(subset_mapping["train"]) + logger.info(f"Train dataset: {str(train_dataset)}") + + if self.test_on_train: + assert train_dataset is not None + val_dataset = test_dataset = train_dataset + else: + # load the val and test sets + logger.debug("Extracting val dataset.") + val_dataset = dataset.subset_from_frame_index(subset_mapping["val"]) + logger.info(f"Val dataset: {str(val_dataset)}") + logger.debug("Extracting test dataset.") + + if 
(self.n_known_frames_for_test > 0) and self.load_eval_batches: + # extend the test subset mapping and the dataset with additional + # known views from the train dataset + ( + eval_batch_index, + subset_mapping["test"], + ) = self._extend_test_data_with_known_views( + subset_mapping, + eval_batch_index, + ) + + test_dataset = dataset.subset_from_frame_index(subset_mapping["test"]) + logger.info(f"Test dataset: {str(test_dataset)}") + if self.load_eval_batches: + # load the eval batches + logger.debug("Extracting eval batches.") + try: + test_dataset.eval_batches = ( + test_dataset.seq_frame_index_to_dataset_index( + eval_batch_index, + ) + ) + except IndexError: + warnings.warn( + "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@\n" + + "Some eval batches are missing from the test dataset.\n" + + "The evaluation results will be incomparable to the\n" + + "evaluation results calculated on the original dataset.\n" + + "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@" + ) + test_dataset.eval_batches = ( + test_dataset.seq_frame_index_to_dataset_index( + eval_batch_index, + allow_missing_indices=True, + remove_missing_indices=True, + ) + ) + logger.info(f"# eval batches: {len(test_dataset.eval_batches)}") + + return DatasetMap(train=train_dataset, val=val_dataset, test=test_dataset) + + @classmethod + def dataset_tweak_args(cls, type, args: DictConfig) -> None: + """ + Called by get_default_args(JsonIndexDatasetMapProviderV2) to + not expose certain fields of each dataset class. + """ + for key in _NEED_CONTROL: + del args[key] + + def create_dataset(self): + # The dataset object is created inside `self.get_dataset_map` + pass + + def get_dataset_map(self) -> DatasetMap: + return self.dataset_map # pyre-ignore [16] + + def get_category_to_subset_name_list(self) -> Dict[str, List[str]]: + """ + Returns a global dataset index containing the available subset names per category + as a dictionary. 
+ + Returns: + category_to_subset_name_list: A dictionary containing subset names available + per category of the following form:: + + { + category_0: [category_0_subset_name_0, category_0_subset_name_1, ...], + category_1: [category_1_subset_name_0, category_1_subset_name_1, ...], + ... + } + + """ + category_to_subset_name_list_json = "category_to_subset_name_list.json" + category_to_subset_name_list = self._load_annotation_json( + category_to_subset_name_list_json + ) + return category_to_subset_name_list + + def get_all_train_cameras(self) -> Optional[CamerasBase]: + # pyre-ignore[16] + train_dataset = self.dataset_map.train + assert isinstance(train_dataset, JsonIndexDataset) + return train_dataset.get_all_train_cameras() + + def _load_annotation_json(self, json_filename: str): + full_path = os.path.join( + self.dataset_root, + json_filename, + ) + logger.info(f"Loading frame index json from {full_path}.") + path_manager = self.path_manager_factory.get() + if path_manager is not None: + full_path = path_manager.get_local_path(full_path) + if not os.path.isfile(full_path): + # The batch indices file does not exist. + # Most probably the user has not specified the root folder. + raise ValueError( + f"Looking for dataset json file in {full_path}. " + + "Please specify a correct dataset_root folder." 
+ ) + with open(full_path, "r") as f: + data = json.load(f) + return data + + def _get_available_subset_names(self, category: str): + return get_available_subset_names( + self.dataset_root, + category, + path_manager=self.path_manager_factory.get(), + ) + + def _extend_test_data_with_known_views( + self, + subset_mapping: Dict[str, List[Union[Tuple[str, int], Tuple[str, int, str]]]], + eval_batch_index: List[List[Union[Tuple[str, int, str], Tuple[str, int]]]], + ): + # convert the train subset mapping to a dict: + # sequence_to_train_frames: {sequence_name: frame_index} + sequence_to_train_frames = defaultdict(list) + for frame_entry in subset_mapping["train"]: + sequence_name = frame_entry[0] + sequence_to_train_frames[sequence_name].append(frame_entry) + sequence_to_train_frames = dict(sequence_to_train_frames) + test_subset_mapping_set = {tuple(s) for s in subset_mapping["test"]} + + # extend the eval batches / subset mapping with the additional examples + eval_batch_index_out = copy.deepcopy(eval_batch_index) + generator = np.random.default_rng(seed=0) + for batch in eval_batch_index_out: + sequence_name = batch[0][0] + sequence_known_entries = sequence_to_train_frames[sequence_name] + idx_to_add = generator.permutation(len(sequence_known_entries))[ + : self.n_known_frames_for_test + ] + entries_to_add = [sequence_known_entries[a] for a in idx_to_add] + assert all(e in subset_mapping["train"] for e in entries_to_add) + + # extend the eval batch with the known views + batch.extend(entries_to_add) + + # also add these new entries to the test subset mapping + test_subset_mapping_set.update(tuple(e) for e in entries_to_add) + + return eval_batch_index_out, list(test_subset_mapping_set) + + +def get_available_subset_names( + dataset_root: str, + category: str, + path_manager: Optional[PathManager] = None, +) -> List[str]: + """ + Get the available subset names for a given category folder inside a root dataset + folder `dataset_root`. 
+ """ + category_dir = os.path.join(dataset_root, category) + category_dir_exists = ( + (path_manager is not None) and path_manager.isdir(category_dir) + ) or os.path.isdir(category_dir) + if not category_dir_exists: + raise ValueError( + f"Looking for dataset files in {category_dir}. " + + "Please specify a correct dataset_root folder." + ) + + set_list_dir = os.path.join(category_dir, "set_lists") + set_list_jsons = (os.listdir if path_manager is None else path_manager.ls)( + set_list_dir + ) + + return [ + json_file.replace("set_lists_", "").replace(".json", "") + for json_file in set_list_jsons + ] diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/llff_dataset_map_provider.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/llff_dataset_map_provider.py new file mode 100644 index 0000000000000000000000000000000000000000..0c8af9abd14c9f216fe77c9d2727ed193423c936 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/llff_dataset_map_provider.py @@ -0,0 +1,69 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + + +import numpy as np +import torch +from pytorch3d.implicitron.tools.config import registry + +from .load_llff import load_llff_data + +from .single_sequence_dataset import ( + _interpret_blender_cameras, + SingleSceneDatasetMapProviderBase, +) + + +@registry.register +class LlffDatasetMapProvider(SingleSceneDatasetMapProviderBase): + """ + Provides data for one scene from the LLFF dataset. + + Members: + base_dir: directory holding the data for the scene. + object_name: The name of the scene (e.g. "fern"). This is just used as a label. 
        It will typically be equal to the name of the directory self.base_dir.
        path_manager_factory: Creates path manager which may be used for
            interpreting paths.
        n_known_frames_for_test: If set, training frames are included in the val
            and test datasets, and this many random training frames are added to
            each test batch. If not set, test batches each contain just a single
            testing frame.
        downscale_factor: determines image sizes.
    """

    # Factor by which LLFF images are downscaled on load (forwarded to
    # load_llff_data below).
    downscale_factor: int = 4

    def _load_data(self) -> None:
        # Load one LLFF scene and populate self.poses / self.images /
        # self.fg_probabilities / self.i_split for the base class to consume.
        path_manager = self.path_manager_factory.get()
        images, poses, _ = load_llff_data(
            self.base_dir, factor=self.downscale_factor, path_manager=path_manager
        )
        # The last column of each 3x5 pose carries (H, W, focal); split it off
        # and keep only the 3x4 [R|t] part.
        hwf = poses[0, :3, -1]
        poses = poses[:, :3, :4]

        # Standard LLFF protocol: hold out every 8th frame for testing;
        # the val split reuses the test split.
        llffhold = 8
        i_test = np.arange(images.shape[0])[::llffhold]
        i_test_index = set(i_test.tolist())
        i_train = np.array(
            [i for i in np.arange(images.shape[0]) if i not in i_test_index]
        )
        i_split = (i_train, i_test, i_test)
        H, W, focal = hwf
        # Convert the focal length to NDC units (assumes `focal` from hwf is in
        # pixels, as is standard for LLFF data — confirm against load_llff_data).
        focal_ndc = 2 * focal / min(H, W)
        # NHWC -> NCHW for implicitron consumption.
        images = torch.from_numpy(images).permute(0, 3, 1, 2)
        poses = torch.from_numpy(poses)

        # pyre-ignore[16]
        self.poses = _interpret_blender_cameras(poses, focal_ndc)
        # pyre-ignore[16]
        self.images = images
        # pyre-ignore[16]
        self.fg_probabilities = None
        # pyre-ignore[16]
        self.i_split = i_split
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/load_blender.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/load_blender.py
new file mode 100644
index 0000000000000000000000000000000000000000..84f08312d5079c3079f33f30cf16a7ea469721ee
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/load_blender.py
@@ -0,0 +1,143 @@
+# @lint-ignore-every LICENSELINT
+# Adapted from https://github.com/bmild/nerf/blob/master/load_blender.py
+# Copyright (c)
# 2020 bmild

# pyre-unsafe
import json
import os

import numpy as np
import torch


def translate_by_t_along_z(t):
    # 4x4 homogeneous transform: translation by `t` along the +z axis.
    tform = np.eye(4).astype(np.float32)
    tform[2][3] = t
    return tform


def rotate_by_phi_along_x(phi):
    # 4x4 homogeneous transform: rotation by `phi` radians about the x axis.
    tform = np.eye(4).astype(np.float32)
    tform[1, 1] = tform[2, 2] = np.cos(phi)
    tform[1, 2] = -np.sin(phi)
    tform[2, 1] = -tform[1, 2]
    return tform


def rotate_by_theta_along_y(theta):
    # 4x4 homogeneous transform: rotation by `theta` radians about the y axis.
    tform = np.eye(4).astype(np.float32)
    tform[0, 0] = tform[2, 2] = np.cos(theta)
    tform[0, 2] = -np.sin(theta)
    tform[2, 0] = -tform[0, 2]
    return tform


def pose_spherical(theta, phi, radius):
    """
    Build a camera-to-world matrix for a camera on a sphere of the given
    `radius`, at azimuth `theta` and elevation `phi` (both in degrees).
    """
    c2w = translate_by_t_along_z(radius)
    c2w = rotate_by_phi_along_x(phi / 180.0 * np.pi) @ c2w
    c2w = rotate_by_theta_along_y(theta / 180 * np.pi) @ c2w
    # Final fixed axis flip/permutation to match the NeRF blender convention.
    c2w = np.array([[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]]) @ c2w
    return c2w


def _local_path(path_manager, path):
    # Resolve `path` through the optional PathManager facade, if provided.
    if path_manager is None:
        return path
    return path_manager.get_local_path(path)


def load_blender_data(
    basedir,
    half_res=False,
    testskip=1,
    debug=False,
    path_manager=None,
    focal_length_in_screen_space=False,
):
    """
    Load a NeRF-synthetic ("blender") scene.

    Args:
        basedir: scene directory containing transforms_{train,val,test}.json
            and the referenced .png frames.
        half_res: if True, downscale images by a factor of 2 (and the focal
            length too, when it is expressed in screen space).
        testskip: keep every `testskip`-th frame of the val/test splits
            (0 or 1 keeps all frames); the train split always keeps all.
        debug: if True, return extremely tiny images for fast smoke tests.
        path_manager: optional facade for non-POSIX filesystems.
        focal_length_in_screen_space: if True, return the focal length in
            pixels; otherwise in normalized (NDC-style) units.

    Returns:
        imgs, poses, render_poses, [H, W, focal], i_split
    """
    # Lazy import: PIL is only needed here, mirroring the lazy cv2 imports
    # below, so importing this module does not require PIL.
    from PIL import Image

    splits = ["train", "val", "test"]
    metas = {}
    for s in splits:
        path = os.path.join(basedir, f"transforms_{s}.json")
        with open(_local_path(path_manager, path)) as fp:
            metas[s] = json.load(fp)

    all_imgs = []
    all_poses = []
    counts = [0]
    for s in splits:
        meta = metas[s]
        imgs = []
        poses = []
        if s == "train" or testskip == 0:
            skip = 1
        else:
            skip = testskip

        for frame in meta["frames"][::skip]:
            fname = os.path.join(basedir, frame["file_path"] + ".png")
            imgs.append(np.array(Image.open(_local_path(path_manager, fname))))
            poses.append(np.array(frame["transform_matrix"]))
        imgs = (np.array(imgs) / 255.0).astype(np.float32)
        poses = np.array(poses).astype(np.float32)
        counts.append(counts[-1] + imgs.shape[0])
        all_imgs.append(imgs)
        all_poses.append(poses)

    # Per-split index ranges into the concatenated arrays below.
    i_split = [np.arange(counts[i], counts[i + 1]) for i in range(3)]

    imgs = np.concatenate(all_imgs, 0)
    poses = np.concatenate(all_poses, 0)

    H, W = imgs[0].shape[:2]
    # NOTE: `meta` is the last split's metadata here; this assumes all splits
    # share camera_angle_x, as in the original NeRF loader.
    camera_angle_x = float(meta["camera_angle_x"])
    if focal_length_in_screen_space:
        focal = 0.5 * W / np.tan(0.5 * camera_angle_x)
    else:
        focal = 1 / np.tan(0.5 * camera_angle_x)

    # 40 novel poses on a circle at elevation -30, for turntable rendering.
    render_poses = torch.stack(
        [
            torch.from_numpy(pose_spherical(angle, -30.0, 4.0))
            for angle in np.linspace(-180, 180, 40 + 1)[:-1]
        ],
        0,
    )

    # In debug mode, return extremely tiny images
    if debug:
        import cv2

        H = H // 32
        W = W // 32
        if focal_length_in_screen_space:
            focal = focal / 32.0
        imgs = [
            torch.from_numpy(
                # Fix: resize to the computed (W, H) rather than the previous
                # hard-coded (25, 25), which was only correct for 800x800
                # inputs. cv2's dsize is (width, height).
                cv2.resize(imgs[i], dsize=(W, H), interpolation=cv2.INTER_AREA)
            )
            for i in range(imgs.shape[0])
        ]
        imgs = torch.stack(imgs, 0)
        poses = torch.from_numpy(poses)
        return imgs, poses, render_poses, [H, W, focal], i_split

    if half_res:
        import cv2

        H = H // 2
        W = W // 2
        if focal_length_in_screen_space:
            focal = focal / 2.0
        imgs = [
            torch.from_numpy(
                # Fix: resize to the computed (W, H) rather than the previous
                # hard-coded (400, 400), which was only correct for 800x800
                # inputs. cv2's dsize is (width, height).
                cv2.resize(imgs[i], dsize=(W, H), interpolation=cv2.INTER_AREA)
            )
            for i in range(imgs.shape[0])
        ]
        imgs = torch.stack(imgs, 0)

    poses = torch.from_numpy(poses)

    return imgs, poses, render_poses, [H, W, focal], i_split
# --- original patch continues (next file header, commented to keep this unit
# --- syntactically valid Python):
# diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/orm_types.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/orm_types.py
# new file mode 100644
# index 0000000000000000000000000000000000000000..2e916021a9a80d48ae8a9741694ca8f5bce38c56
# --- /dev/null
# +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/orm_types.py
# @@ -0,0 +1,189 @@
# +# Copyright (c) Meta Platforms, Inc. and affiliates.
# +# All rights reserved.
+# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# This functionality requires SQLAlchemy 2.0 or later. + +import math +import struct +from typing import Optional, Tuple + +import numpy as np + +from pytorch3d.implicitron.dataset.types import ( + DepthAnnotation, + ImageAnnotation, + MaskAnnotation, + PointCloudAnnotation, + VideoAnnotation, + ViewpointAnnotation, +) + +from sqlalchemy import LargeBinary +from sqlalchemy.orm import ( + composite, + DeclarativeBase, + Mapped, + mapped_column, + MappedAsDataclass, +) +from sqlalchemy.types import TypeDecorator + + +# these produce policies to serialize structured types to blobs +def ArrayTypeFactory(shape=None): + if shape is None: + + class VariableShapeNumpyArrayType(TypeDecorator): + impl = LargeBinary + + def process_bind_param(self, value, dialect): + if value is None: + return None + + ndim_bytes = np.int32(value.ndim).tobytes() + shape_bytes = np.array(value.shape, dtype=np.int64).tobytes() + value_bytes = value.astype(np.float32).tobytes() + return ndim_bytes + shape_bytes + value_bytes + + def process_result_value(self, value, dialect): + if value is None: + return None + + ndim = np.frombuffer(value[:4], dtype=np.int32)[0] + value_start = 4 + 8 * ndim + shape = np.frombuffer(value[4:value_start], dtype=np.int64) + assert shape.shape == (ndim,) + return np.frombuffer(value[value_start:], dtype=np.float32).reshape( + shape + ) + + return VariableShapeNumpyArrayType + + class NumpyArrayType(TypeDecorator): + impl = LargeBinary + + def process_bind_param(self, value, dialect): + if value is not None: + if value.shape != shape: + raise ValueError(f"Passed an array of wrong shape: {value.shape}") + return value.astype(np.float32).tobytes() + return None + + def process_result_value(self, value, dialect): + if value is not None: + return np.frombuffer(value, dtype=np.float32).reshape(shape) + return None + + return 
NumpyArrayType + + +def TupleTypeFactory(dtype=float, shape: Tuple[int, ...] = (2,)): + format_symbol = { + float: "f", # float32 + int: "i", # int32 + }[dtype] + + class TupleType(TypeDecorator): + impl = LargeBinary + _format = format_symbol * math.prod(shape) + + def process_bind_param(self, value, _): + if value is None: + return None + + if len(shape) > 1: + value = np.array(value, dtype=dtype).reshape(-1) + + return struct.pack(TupleType._format, *value) + + def process_result_value(self, value, _): + if value is None: + return None + + loaded = struct.unpack(TupleType._format, value) + if len(shape) > 1: + loaded = _rec_totuple( + np.array(loaded, dtype=dtype).reshape(shape).tolist() + ) + + return loaded + + return TupleType + + +def _rec_totuple(t): + if isinstance(t, list): + return tuple(_rec_totuple(x) for x in t) + + return t + + +class Base(MappedAsDataclass, DeclarativeBase): + """subclasses will be converted to dataclasses""" + + +class SqlFrameAnnotation(Base): + __tablename__ = "frame_annots" + + sequence_name: Mapped[str] = mapped_column(primary_key=True) + frame_number: Mapped[int] = mapped_column(primary_key=True) + frame_timestamp: Mapped[float] = mapped_column(index=True) + + image: Mapped[ImageAnnotation] = composite( + mapped_column("_image_path"), + mapped_column("_image_size", TupleTypeFactory(int)), + ) + + depth: Mapped[DepthAnnotation] = composite( + mapped_column("_depth_path", nullable=True), + mapped_column("_depth_scale_adjustment", nullable=True), + mapped_column("_depth_mask_path", nullable=True), + ) + + mask: Mapped[MaskAnnotation] = composite( + mapped_column("_mask_path", nullable=True), + mapped_column("_mask_mass", index=True, nullable=True), + mapped_column( + "_mask_bounding_box_xywh", + TupleTypeFactory(float, shape=(4,)), + nullable=True, + ), + ) + + viewpoint: Mapped[ViewpointAnnotation] = composite( + mapped_column( + "_viewpoint_R", TupleTypeFactory(float, shape=(3, 3)), nullable=True + ), + mapped_column( + 
"_viewpoint_T", TupleTypeFactory(float, shape=(3,)), nullable=True + ), + mapped_column( + "_viewpoint_focal_length", TupleTypeFactory(float), nullable=True + ), + mapped_column( + "_viewpoint_principal_point", TupleTypeFactory(float), nullable=True + ), + mapped_column("_viewpoint_intrinsics_format", nullable=True), + ) + + +class SqlSequenceAnnotation(Base): + __tablename__ = "sequence_annots" + + sequence_name: Mapped[str] = mapped_column(primary_key=True) + category: Mapped[str] = mapped_column(index=True) + + video: Mapped[VideoAnnotation] = composite( + mapped_column("_video_path", nullable=True), + mapped_column("_video_length", nullable=True), + ) + point_cloud: Mapped[PointCloudAnnotation] = composite( + mapped_column("_point_cloud_path", nullable=True), + mapped_column("_point_cloud_quality_score", nullable=True), + mapped_column("_point_cloud_n_points", nullable=True), + ) + # the bigger the better + viewpoint_quality_score: Mapped[Optional[float]] = mapped_column() diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/rendered_mesh_dataset_map_provider.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/rendered_mesh_dataset_map_provider.py new file mode 100644 index 0000000000000000000000000000000000000000..29013ad053d53c5e1fea2b3b98913090cf6788ba --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/rendered_mesh_dataset_map_provider.py @@ -0,0 +1,218 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + +from os.path import dirname, join, realpath +from typing import Optional, Tuple + +import torch +from pytorch3d.implicitron.tools.config import registry, run_auto_creation +from pytorch3d.io import IO +from pytorch3d.renderer import ( + AmbientLights, + BlendParams, + CamerasBase, + FoVPerspectiveCameras, + HardPhongShader, + look_at_view_transform, + MeshRasterizer, + MeshRendererWithFragments, + PointLights, + RasterizationSettings, +) +from pytorch3d.structures.meshes import Meshes + +from .dataset_map_provider import DatasetMap, DatasetMapProviderBase, PathManagerFactory +from .single_sequence_dataset import SingleSceneDataset +from .utils import DATASET_TYPE_KNOWN + + +@registry.register +class RenderedMeshDatasetMapProvider(DatasetMapProviderBase): + """ + A simple single-scene dataset based on PyTorch3D renders of a mesh. + Provides `num_views` renders of the mesh as train, with no val + and test. The renders are generated from viewpoints sampled at uniformly + distributed azimuth intervals. The elevation is kept constant so that the + camera's vertical position coincides with the equator. + + By default, uses Keenan Crane's cow model, and the camera locations are + set to make sense for that. + + Although the rendering used to generate this dataset will use a GPU + if one is available, the data it produces is on the CPU just like + the data returned by implicitron's other dataset map providers. + This is because both datasets and models can be large, so implicitron's + training loop expects data on the CPU and only moves + what it needs to the device. + + For a more detailed explanation of this code, please refer to the + docs/tutorials/fit_textured_mesh.ipynb notebook. + + Members: + num_views: The number of generated renders. + data_file: The folder that contains the mesh file. By default, finds + the cow mesh in the same repo as this code. 
+ azimuth_range: number of degrees on each side of the start position to + take samples + distance: distance from camera centres to the origin. + resolution: the common height and width of the output images. + use_point_light: whether to use a particular point light as opposed + to ambient white. + gpu_idx: which gpu to use for rendering the mesh. + path_manager_factory: (Optional) An object that generates an instance of + PathManager that can translate provided file paths. + path_manager_factory_class_type: The class type of `path_manager_factory`. + """ + + num_views: int = 40 + data_file: Optional[str] = None + azimuth_range: float = 180 + distance: float = 2.7 + resolution: int = 128 + use_point_light: bool = True + gpu_idx: Optional[int] = 0 + # pyre-fixme[13]: Attribute `path_manager_factory` is never initialized. + path_manager_factory: PathManagerFactory + path_manager_factory_class_type: str = "PathManagerFactory" + + def get_dataset_map(self) -> DatasetMap: + # pyre-ignore[16] + return DatasetMap(train=self.train_dataset, val=None, test=None) + + def get_all_train_cameras(self) -> CamerasBase: + # pyre-ignore[16] + return self.poses + + def __post_init__(self) -> None: + super().__init__() + run_auto_creation(self) + if torch.cuda.is_available() and self.gpu_idx is not None: + device = torch.device(f"cuda:{self.gpu_idx}") + else: + device = torch.device("cpu") + if self.data_file is None: + data_file = join( + dirname(dirname(dirname(dirname(realpath(__file__))))), + "docs", + "tutorials", + "data", + "cow_mesh", + "cow.obj", + ) + else: + data_file = self.data_file + io = IO(path_manager=self.path_manager_factory.get()) + mesh = io.load_mesh(data_file, device=device) + poses, images, masks = _generate_cow_renders( + num_views=self.num_views, + mesh=mesh, + azimuth_range=self.azimuth_range, + distance=self.distance, + resolution=self.resolution, + device=device, + use_point_light=self.use_point_light, + ) + # pyre-ignore[16] + self.poses = poses.cpu() + # 
pyre-ignore[16] + self.train_dataset = SingleSceneDataset( # pyre-ignore[28] + object_name="cow", + images=list(images.permute(0, 3, 1, 2).cpu()), + fg_probabilities=list(masks[:, None].cpu()), + poses=[self.poses[i] for i in range(len(poses))], + frame_types=[DATASET_TYPE_KNOWN] * len(poses), + eval_batches=None, + ) + + +@torch.no_grad() +def _generate_cow_renders( + *, + num_views: int, + mesh: Meshes, + azimuth_range: float, + distance: float, + resolution: int, + device: torch.device, + use_point_light: bool, +) -> Tuple[CamerasBase, torch.Tensor, torch.Tensor]: + """ + Returns: + cameras: A batch of `num_views` `FoVPerspectiveCameras` from which the + images are rendered. + images: A tensor of shape `(num_views, height, width, 3)` containing + the rendered images. + silhouettes: A tensor of shape `(num_views, height, width)` containing + the rendered silhouettes. + """ + + # Load obj file + + # We scale normalize and center the target mesh to fit in a sphere of radius 1 + # centered at (0,0,0). (scale, center) will be used to bring the predicted mesh + # to its original center and scale. Note that normalizing the target mesh, + # speeds up the optimization but is not necessary! + verts = mesh.verts_packed() + N = verts.shape[0] + center = verts.mean(0) + scale = max((verts - center).abs().max(0)[0]) + mesh.offset_verts_(-(center.expand(N, 3))) + mesh.scale_verts_((1.0 / float(scale))) + + # Get a batch of viewing angles. + elev = torch.linspace(0, 0, num_views) # keep constant + azim = torch.linspace(-azimuth_range, azimuth_range, num_views) + 180.0 + + # Place a point light in front of the object. As mentioned above, the front of + # the cow is facing the -z direction. + if use_point_light: + lights = PointLights(device=device, location=[[0.0, 0.0, -3.0]]) + else: + lights = AmbientLights(device=device) + + # Initialize a perspective camera that represents a batch of different + # viewing angles. 
All the cameras helper methods support mixed type inputs and + # broadcasting. So we can view the camera from a fixed distance, and + # then specify elevation and azimuth angles for each viewpoint as tensors. + R, T = look_at_view_transform(dist=distance, elev=elev, azim=azim) + cameras = FoVPerspectiveCameras(device=device, R=R, T=T) + + # Define the settings for rasterization and shading. + # As we are rendering images for visualization + # purposes only we will set faces_per_pixel=1 and blur_radius=0.0. Refer to + # rasterize_meshes.py for explanations of these parameters. We also leave + # bin_size and max_faces_per_bin to their default values of None, which sets + # their values using heuristics and ensures that the faster coarse-to-fine + # rasterization method is used. Refer to docs/notes/renderer.md for an + # explanation of the difference between naive and coarse-to-fine rasterization. + raster_settings = RasterizationSettings( + image_size=resolution, blur_radius=0.0, faces_per_pixel=1 + ) + + # Create a Phong renderer by composing a rasterizer and a shader. The textured + # Phong shader will interpolate the texture uv coordinates for each vertex, + # sample from a texture image and apply the Phong lighting model + blend_params = BlendParams(sigma=1e-4, gamma=1e-4, background_color=(0.0, 0.0, 0.0)) + rasterizer_type = MeshRasterizer + renderer = MeshRendererWithFragments( + rasterizer=rasterizer_type(cameras=cameras, raster_settings=raster_settings), + shader=HardPhongShader( + device=device, cameras=cameras, lights=lights, blend_params=blend_params + ), + ) + + # Create a batch of meshes by repeating the cow mesh and associated textures. + # Meshes has a useful `extend` method which allows us do this very easily. + # This also extends the textures. 
+ meshes = mesh.extend(num_views) + + # Render the cow mesh from each viewing angle + target_images, fragments = renderer(meshes, cameras=cameras, lights=lights) + silhouette_binary = (fragments.pix_to_face[..., 0] >= 0).float() + + return cameras, target_images[..., :3], silhouette_binary diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/sql_dataset.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/sql_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..470f5a95bf100595659918ee979d8e350aa71480 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/sql_dataset.py @@ -0,0 +1,768 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +import hashlib +import json +import logging +import os +from dataclasses import dataclass +from typing import ( + Any, + ClassVar, + Dict, + Iterable, + Iterator, + List, + Optional, + Sequence, + Tuple, + Type, + Union, +) + +import numpy as np +import pandas as pd +import sqlalchemy as sa +import torch +from pytorch3d.implicitron.dataset.dataset_base import DatasetBase + +from pytorch3d.implicitron.dataset.frame_data import ( # noqa + FrameData, + FrameDataBuilder, + FrameDataBuilderBase, +) +from pytorch3d.implicitron.tools.config import ( + registry, + ReplaceableBase, + run_auto_creation, +) +from sqlalchemy.orm import Session + +from .orm_types import SqlFrameAnnotation, SqlSequenceAnnotation + + +logger = logging.getLogger(__name__) + + +_SET_LISTS_TABLE: str = "set_lists" + + +@registry.register +class SqlIndexDataset(DatasetBase, ReplaceableBase): # pyre-ignore + """ + A dataset with annotations stored as SQLite tables. 
This is an index-based dataset. + The length is returned after all sequence and frame filters are applied (see param + definitions below). Indices can either be ordinal in [0, len), or pairs of + (sequence_name, frame_number); with the performance of `dataset[i]` and + `dataset[sequence_name, frame_number]` being same. A faster way to get metadata only + (without blobs) is `dataset.meta[idx]` indexing; it requires box_crop==False. + With ordinal indexing, the sequences are NOT guaranteed to span contiguous index + ranges, and frame numbers are NOT guaranteed to be increasing within a sequence. + Sequence-aware batch samplers have to use `sequence_[frames|indices]_in_order` + iterators, which are efficient. + + This functionality requires SQLAlchemy 2.0 or later. + + Metadata-related args: + sqlite_metadata_file: A SQLite file containing frame and sequence annotation + tables (mapping to SqlFrameAnnotation and SqlSequenceAnnotation, + respectively). + dataset_root: A root directory to look for images, masks, etc. It can be + alternatively set in `frame_data_builder` args, but this takes precedence. + subset_lists_file: A JSON/sqlite file containing the lists of frames + corresponding to different subsets (e.g. train/val/test) of the dataset; + format: {subset: [(sequence_name, frame_id, file_path)]}. All entries + must be present in frame_annotation metadata table. + path_manager: a facade for non-POSIX filesystems. + subsets: Restrict frames/sequences only to the given list of subsets + as defined in subset_lists_file (see above). Applied before all other + filters. + remove_empty_masks: Removes the frames with no active foreground pixels + in the segmentation mask (needs frame_annotation.mask.mass to be set; + null values are retained). + pick_frames_sql_clause: SQL WHERE clause to constrain frame annotations + NOTE: This is a potential security risk! The string is passed to the SQL + engine verbatim. Don’t expose it to end users of your application! 
+ pick_categories: Restrict the dataset to the given list of categories. + pick_sequences: A Sequence of sequence names to restrict the dataset to. + exclude_sequences: A Sequence of the names of the sequences to exclude. + limit_sequences_per_category_to: Limit the dataset to the first up to N + sequences within each category (applies after all other sequence filters + but before `limit_sequences_to`). + limit_sequences_to: Limit the dataset to the first `limit_sequences_to` + sequences (after other sequence filters have been applied but before + frame-based filters). + limit_to: Limit the dataset to the first #limit_to frames (after other + filters have been applied, except n_frames_per_sequence). + n_frames_per_sequence: If > 0, randomly samples `n_frames_per_sequence` + frames in each sequences uniformly without replacement if it has + more frames than that; applied after other frame-level filters. + seed: The seed of the random generator sampling `n_frames_per_sequence` + random frames per sequence. + """ + + frame_annotations_type: ClassVar[Type[SqlFrameAnnotation]] = SqlFrameAnnotation + + sqlite_metadata_file: str = "" + dataset_root: Optional[str] = None + subset_lists_file: str = "" + eval_batches_file: Optional[str] = None + path_manager: Any = None + subsets: Optional[List[str]] = None + remove_empty_masks: bool = True + pick_frames_sql_clause: Optional[str] = None + pick_categories: Tuple[str, ...] = () + + pick_sequences: Tuple[str, ...] = () + exclude_sequences: Tuple[str, ...] 
= () + limit_sequences_per_category_to: int = 0 + limit_sequences_to: int = 0 + limit_to: int = 0 + n_frames_per_sequence: int = -1 + seed: int = 0 + remove_empty_masks_poll_whole_table_threshold: int = 300_000 + # we set it manually in the constructor + # _index: pd.DataFrame = field(init=False) + + frame_data_builder: FrameDataBuilderBase + frame_data_builder_class_type: str = "FrameDataBuilder" + + def __post_init__(self) -> None: + if sa.__version__ < "2.0": + raise ImportError("This class requires SQL Alchemy 2.0 or later") + + if not self.sqlite_metadata_file: + raise ValueError("sqlite_metadata_file must be set") + + if self.dataset_root: + frame_builder_type = self.frame_data_builder_class_type + getattr(self, f"frame_data_builder_{frame_builder_type}_args")[ + "dataset_root" + ] = self.dataset_root + + run_auto_creation(self) + self.frame_data_builder.path_manager = self.path_manager + + # pyre-ignore # NOTE: sqlite-specific args (read-only mode). + self._sql_engine = sa.create_engine( + f"sqlite:///file:{self.sqlite_metadata_file}?mode=ro&uri=true" + ) + + sequences = self._get_filtered_sequences_if_any() + + if self.subsets: + index = self._build_index_from_subset_lists(sequences) + else: + # TODO: if self.subset_lists_file and not self.subsets, it might be faster to + # still use the concatenated lists, assuming they cover the whole dataset + index = self._build_index_from_db(sequences) + + if self.n_frames_per_sequence >= 0: + index = self._stratified_sample_index(index) + + if len(index) == 0: + raise ValueError(f"There are no frames in the subsets: {self.subsets}!") + + self._index = index.set_index(["sequence_name", "frame_number"]) # pyre-ignore + + self.eval_batches = None # pyre-ignore + if self.eval_batches_file: + self.eval_batches = self._load_filter_eval_batches() + + logger.info(str(self)) + + def __len__(self) -> int: + # pyre-ignore[16] + return len(self._index) + + def __getitem__(self, frame_idx: Union[int, Tuple[str, int]]) -> 
FrameData: + """ + Fetches FrameData by either iloc in the index or by (sequence, frame_no) pair + """ + return self._get_item(frame_idx, True) + + @property + def meta(self): + """ + Allows accessing metadata only without loading blobs using `dataset.meta[idx]`. + Requires box_crop==False, since in that case, cameras cannot be adjusted + without loading masks. + + Returns: + FrameData objects with blob fields like `image_rgb` set to None. + + Raises: + ValueError if dataset.box_crop is set. + """ + return SqlIndexDataset._MetadataAccessor(self) + + @dataclass + class _MetadataAccessor: + dataset: "SqlIndexDataset" + + def __getitem__(self, frame_idx: Union[int, Tuple[str, int]]) -> FrameData: + return self.dataset._get_item(frame_idx, False) + + def _get_item( + self, frame_idx: Union[int, Tuple[str, int]], load_blobs: bool = True + ) -> FrameData: + if isinstance(frame_idx, int): + if frame_idx >= len(self._index): + raise IndexError(f"index {frame_idx} out of range {len(self._index)}") + + seq, frame = self._index.index[frame_idx] + else: + seq, frame, *rest = frame_idx + if isinstance(frame, torch.LongTensor): + frame = frame.item() + + if (seq, frame) not in self._index.index: + raise IndexError( + f"Sequence-frame index {frame_idx} not found; was it filtered out?" 
+ ) + + if rest and rest[0] != self._index.loc[(seq, frame), "_image_path"]: + raise IndexError(f"Non-matching image path in {frame_idx}.") + + stmt = sa.select(self.frame_annotations_type).where( + self.frame_annotations_type.sequence_name == seq, + self.frame_annotations_type.frame_number + == int(frame), # cast from np.int64 + ) + seq_stmt = sa.select(SqlSequenceAnnotation).where( + SqlSequenceAnnotation.sequence_name == seq + ) + with Session(self._sql_engine) as session: + entry = session.scalars(stmt).one() + seq_metadata = session.scalars(seq_stmt).one() + + assert entry.image.path == self._index.loc[(seq, frame), "_image_path"] + + frame_data = self.frame_data_builder.build( + entry, seq_metadata, load_blobs=load_blobs + ) + + # The rest of the fields are optional + frame_data.frame_type = self._get_frame_type(entry) + return frame_data + + def __str__(self) -> str: + # pyre-ignore[16] + return f"SqlIndexDataset #frames={len(self._index)}" + + def sequence_names(self) -> Iterable[str]: + """Returns an iterator over sequence names in the dataset.""" + return self._index.index.unique("sequence_name") + + # override + def category_to_sequence_names(self) -> Dict[str, List[str]]: + stmt = sa.select( + SqlSequenceAnnotation.category, SqlSequenceAnnotation.sequence_name + ).where( # we limit results to sequences that have frames after all filters + SqlSequenceAnnotation.sequence_name.in_(self.sequence_names()) + ) + with self._sql_engine.connect() as connection: + cat_to_seqs = pd.read_sql(stmt, connection) + + return cat_to_seqs.groupby("category")["sequence_name"].apply(list).to_dict() + + # override + def get_frame_numbers_and_timestamps( + self, idxs: Sequence[int], subset_filter: Optional[Sequence[str]] = None + ) -> List[Tuple[int, float]]: + """ + Implements the DatasetBase method. 
+ + NOTE: Avoid this function as there are more efficient alternatives such as + querying `dataset[idx]` directly or getting all sequence frames with + `sequence_[frames|indices]_in_order`. + + Return the index and timestamp in their videos of the frames whose + indices are given in `idxs`. They need to belong to the same sequence! + If timestamps are absent, they are replaced with zeros. + This is used for letting SceneBatchSampler identify consecutive + frames. + + Args: + idxs: a sequence int frame index in the dataset (it can be a slice) + subset_filter: must remain None + + Returns: + list of tuples of + - frame index in video + - timestamp of frame in video, coalesced with 0s + + Raises: + ValueError if idxs belong to more than one sequence. + """ + + if subset_filter is not None: + raise NotImplementedError( + "Subset filters are not supported in SQL Dataset. " + "We encourage creating a dataset per subset." + ) + + index_slice, _ = self._get_frame_no_coalesced_ts_by_row_indices(idxs) + # alternatively, we can use `.values.tolist()`, which may be faster + # but returns a list of lists + return list(index_slice.itertuples()) + + # override + def sequence_frames_in_order( + self, seq_name: str, subset_filter: Optional[Sequence[str]] = None + ) -> Iterator[Tuple[float, int, int]]: + """ + Overrides the default DatasetBase implementation (we don’t use `_seq_to_idx`). + Returns an iterator over the frame indices in a given sequence. + We attempt to first sort by timestamp (if they are available), + then by frame number. + + Args: + seq_name: the name of the sequence. + subset_filter: subset names to filter to + + Returns: + an iterator over triplets `(timestamp, frame_no, dataset_idx)`, + where `frame_no` is the index within the sequence, and + `dataset_idx` is the index within the dataset. + `None` timestamps are replaced with 0s. + """ + # TODO: implement sort_timestamp_first? 
(which would matter if the orders + # of frame numbers and timestamps are different) + rows = self._index.index.get_loc(seq_name) + if isinstance(rows, slice): + assert rows.stop is not None, "Unexpected result from pandas" + rows = range(rows.start or 0, rows.stop, rows.step or 1) + else: + rows = np.where(rows)[0] + + index_slice, idx = self._get_frame_no_coalesced_ts_by_row_indices( + rows, seq_name, subset_filter + ) + index_slice["idx"] = idx + + yield from index_slice.itertuples(index=False) + + # override + def get_eval_batches(self) -> Optional[List[Any]]: + """ + This class does not support eval batches with ordinal indices. You can pass + eval_batches as a batch_sampler to a data_loader since the dataset supports + `dataset[seq_name, frame_no]` indexing. + """ + return self.eval_batches + + # override + def join(self, other_datasets: Iterable[DatasetBase]) -> None: + raise ValueError("Not supported! Preprocess the data by merging them instead.") + + # override + @property + def frame_data_type(self) -> Type[FrameData]: + return self.frame_data_builder.frame_data_type + + def is_filtered(self) -> bool: + """ + Returns `True` in case the dataset has been filtered and thus some frame + annotations stored on the disk might be missing in the dataset object. + Does not account for subsets. + + Returns: + is_filtered: `True` if the dataset has been filtered, else `False`. 
+ """ + return ( + self.remove_empty_masks + or self.limit_to > 0 + or self.limit_sequences_to > 0 + or self.limit_sequences_per_category_to > 0 + or len(self.pick_sequences) > 0 + or len(self.exclude_sequences) > 0 + or len(self.pick_categories) > 0 + or self.n_frames_per_sequence > 0 + ) + + def _get_filtered_sequences_if_any(self) -> Optional[pd.Series]: + # maximum possible filter (if limit_sequences_per_category_to == 0): + # WHERE category IN 'self.pick_categories' + # AND sequence_name IN 'self.pick_sequences' + # AND sequence_name NOT IN 'self.exclude_sequences' + # LIMIT 'self.limit_sequence_to' + + where_conditions = [ + *self._get_category_filters(), + *self._get_pick_filters(), + *self._get_exclude_filters(), + ] + + def add_where(stmt): + return stmt.where(*where_conditions) if where_conditions else stmt + + if self.limit_sequences_per_category_to <= 0: + stmt = add_where(sa.select(SqlSequenceAnnotation.sequence_name)) + else: + subquery = sa.select( + SqlSequenceAnnotation.sequence_name, + sa.func.row_number() + .over( + order_by=sa.text("ROWID"), # NOTE: ROWID is SQLite-specific + partition_by=SqlSequenceAnnotation.category, + ) + .label("row_number"), + ) + + subquery = add_where(subquery).subquery() + stmt = sa.select(subquery.c.sequence_name).where( + subquery.c.row_number <= self.limit_sequences_per_category_to + ) + + if self.limit_sequences_to > 0: + logger.info( + f"Limiting dataset to first {self.limit_sequences_to} sequences" + ) + # NOTE: ROWID is SQLite-specific + stmt = stmt.order_by(sa.text("ROWID")).limit(self.limit_sequences_to) + + if ( + not where_conditions + and self.limit_sequences_to <= 0 + and self.limit_sequences_per_category_to <= 0 + ): + # we will not need to filter by sequences + return None + + with self._sql_engine.connect() as connection: + sequences = pd.read_sql_query(stmt, connection)["sequence_name"] + logger.info("... 
retained %d sequences" % len(sequences)) + + return sequences + + def _get_category_filters(self) -> List[sa.ColumnElement]: + if not self.pick_categories: + return [] + + logger.info(f"Limiting dataset to categories: {self.pick_categories}") + return [SqlSequenceAnnotation.category.in_(self.pick_categories)] + + def _get_pick_filters(self) -> List[sa.ColumnElement]: + if not self.pick_sequences: + return [] + + logger.info(f"Limiting dataset to sequences: {self.pick_sequences}") + return [SqlSequenceAnnotation.sequence_name.in_(self.pick_sequences)] + + def _get_exclude_filters(self) -> List[sa.ColumnOperators]: + if not self.exclude_sequences: + return [] + + logger.info(f"Removing sequences from the dataset: {self.exclude_sequences}") + return [SqlSequenceAnnotation.sequence_name.notin_(self.exclude_sequences)] + + def _load_subsets_from_json(self, subset_lists_path: str) -> pd.DataFrame: + assert self.subsets is not None + with open(subset_lists_path, "r") as f: + subset_to_seq_frame = json.load(f) + + seq_frame_list = sum( + ( + [(*row, subset) for row in subset_to_seq_frame[subset]] + for subset in self.subsets + ), + [], + ) + index = pd.DataFrame( + seq_frame_list, + columns=["sequence_name", "frame_number", "_image_path", "subset"], + ) + return index + + def _load_subsets_from_sql(self, subset_lists_path: str) -> pd.DataFrame: + subsets = self.subsets + assert subsets is not None + # we need a new engine since we store the subsets in a separate DB + engine = sa.create_engine(f"sqlite:///{subset_lists_path}") + table = sa.Table(_SET_LISTS_TABLE, sa.MetaData(), autoload_with=engine) + stmt = sa.select(table).where(table.c.subset.in_(subsets)) + with engine.connect() as connection: + index = pd.read_sql(stmt, connection) + + return index + + def _build_index_from_subset_lists( + self, sequences: Optional[pd.Series] + ) -> pd.DataFrame: + if not self.subset_lists_file: + raise ValueError("Requested subsets but subset_lists_file not given") + + 
logger.info(f"Loading subset lists from {self.subset_lists_file}.") + + subset_lists_path = self._local_path(self.subset_lists_file) + if subset_lists_path.lower().endswith(".json"): + index = self._load_subsets_from_json(subset_lists_path) + else: + index = self._load_subsets_from_sql(subset_lists_path) + index = index.set_index(["sequence_name", "frame_number"]) + logger.info(f" -> loaded {len(index)} samples of {self.subsets}.") + + if sequences is not None: + logger.info("Applying filtered sequences.") + sequence_values = index.index.get_level_values("sequence_name") + index = index.loc[sequence_values.isin(sequences)] + logger.info(f" -> retained {len(index)} samples.") + + pick_frames_criteria = [] + if self.remove_empty_masks: + logger.info("Culling samples with empty masks.") + + if len(index) > self.remove_empty_masks_poll_whole_table_threshold: + # APPROACH 1: find empty masks and drop indices. + # dev load: 17s / 15 s (3.1M / 500K) + stmt = sa.select( + self.frame_annotations_type.sequence_name, + self.frame_annotations_type.frame_number, + ).where(self.frame_annotations_type._mask_mass == 0) + with Session(self._sql_engine) as session: + to_remove = session.execute(stmt).all() + + # Pandas uses np.int64 for integer types, so we have to case + # we might want to read it to pandas DataFrame directly to avoid the loop + to_remove = [(seq, np.int64(fr)) for seq, fr in to_remove] + index.drop(to_remove, errors="ignore", inplace=True) + else: + # APPROACH 3: load index into a temp table and join with annotations + # dev load: 94 s / 23 s (3.1M / 500K) + pick_frames_criteria.append( + sa.or_( + self.frame_annotations_type._mask_mass.is_(None), + self.frame_annotations_type._mask_mass != 0, + ) + ) + + if self.pick_frames_sql_clause: + logger.info("Applying the custom SQL clause.") + pick_frames_criteria.append(sa.text(self.pick_frames_sql_clause)) + + if pick_frames_criteria: + index = self._pick_frames_by_criteria(index, pick_frames_criteria) + + 
logger.info(f" -> retained {len(index)} samples.") + + if self.limit_to > 0: + logger.info(f"Limiting dataset to first {self.limit_to} frames") + index = index.sort_index().iloc[: self.limit_to] + + return index.reset_index() + + def _pick_frames_by_criteria(self, index: pd.DataFrame, criteria) -> pd.DataFrame: + IndexTable = self._get_temp_index_table_instance() + with self._sql_engine.connect() as connection: + IndexTable.create(connection) + # we don’t let pandas’s `to_sql` create the table automatically as + # the table would be permanent, so we create it and append with pandas + n_rows = index.to_sql(IndexTable.name, connection, if_exists="append") + assert n_rows == len(index) + sa_type = self.frame_annotations_type + stmt = ( + sa.select(IndexTable) + .select_from( + IndexTable.join( + self.frame_annotations_type, + sa.and_( + sa_type.sequence_name == IndexTable.c.sequence_name, + sa_type.frame_number == IndexTable.c.frame_number, + ), + ) + ) + .where(*criteria) + ) + return pd.read_sql_query(stmt, connection).set_index( + ["sequence_name", "frame_number"] + ) + + def _build_index_from_db(self, sequences: Optional[pd.Series]): + logger.info("Loading sequcence-frame index from the database") + stmt = sa.select( + self.frame_annotations_type.sequence_name, + self.frame_annotations_type.frame_number, + self.frame_annotations_type._image_path, + sa.null().label("subset"), + ) + where_conditions = [] + if sequences is not None: + logger.info(" applying filtered sequences") + where_conditions.append( + self.frame_annotations_type.sequence_name.in_(sequences.tolist()) + ) + + if self.remove_empty_masks: + logger.info(" excluding samples with empty masks") + where_conditions.append( + sa.or_( + self.frame_annotations_type._mask_mass.is_(None), + self.frame_annotations_type._mask_mass != 0, + ) + ) + + if self.pick_frames_sql_clause: + logger.info(" applying custom SQL clause") + where_conditions.append(sa.text(self.pick_frames_sql_clause)) + + if where_conditions: 
+ stmt = stmt.where(*where_conditions) + + if self.limit_to > 0: + logger.info(f"Limiting dataset to first {self.limit_to} frames") + stmt = stmt.order_by( + self.frame_annotations_type.sequence_name, + self.frame_annotations_type.frame_number, + ).limit(self.limit_to) + + with self._sql_engine.connect() as connection: + index = pd.read_sql_query(stmt, connection) + + logger.info(f" -> loaded {len(index)} samples.") + return index + + def _sort_index_(self, index): + logger.info("Sorting the index by sequence and frame number.") + index.sort_values(["sequence_name", "frame_number"], inplace=True) + logger.info(" -> Done.") + + def _load_filter_eval_batches(self): + assert self.eval_batches_file + logger.info(f"Loading eval batches from {self.eval_batches_file}") + + if not os.path.isfile(self.eval_batches_file): + # The batch indices file does not exist. + # Most probably the user has not specified the root folder. + raise ValueError( + f"Looking for dataset json file in {self.eval_batches_file}. " + + "Please specify a correct dataset_root folder." 
+ ) + + with open(self.eval_batches_file, "r") as f: + eval_batches = json.load(f) + + # limit the dataset to sequences to allow multiple evaluations in one file + pick_sequences = set(self.pick_sequences) + if self.pick_categories: + cat_to_seq = self.category_to_sequence_names() + pick_sequences.update( + seq for cat in self.pick_categories for seq in cat_to_seq[cat] + ) + + if pick_sequences: + old_len = len(eval_batches) + eval_batches = [b for b in eval_batches if b[0][0] in pick_sequences] + logger.warn( + f"Picked eval batches by sequence/cat: {old_len} -> {len(eval_batches)}" + ) + + if self.exclude_sequences: + old_len = len(eval_batches) + exclude_sequences = set(self.exclude_sequences) + eval_batches = [b for b in eval_batches if b[0][0] not in exclude_sequences] + logger.warn( + f"Excluded eval batches by sequence: {old_len} -> {len(eval_batches)}" + ) + + return eval_batches + + def _stratified_sample_index(self, index): + # NOTE this stratified sampling can be done more efficiently in + # the no-subset case above if it is added to the SQL query. + # We keep this generic implementation since no-subset case is uncommon + index = index.groupby("sequence_name", group_keys=False).apply( + lambda seq_frames: seq_frames.sample( + min(len(seq_frames), self.n_frames_per_sequence), + random_state=( + _seq_name_to_seed(seq_frames.iloc[0]["sequence_name"]) + self.seed + ), + ) + ) + logger.info(f" -> retained {len(index)} samples aster stratified sampling.") + return index + + def _get_frame_type(self, entry: SqlFrameAnnotation) -> Optional[str]: + return self._index.loc[(entry.sequence_name, entry.frame_number), "subset"] + + def _get_frame_no_coalesced_ts_by_row_indices( + self, + idxs: Sequence[int], + seq_name: Optional[str] = None, + subset_filter: Union[Sequence[str], str, None] = None, + ) -> Tuple[pd.DataFrame, Sequence[int]]: + """ + Loads timestamps for given index rows belonging to the same sequence. 
+ If seq_name is known, it speeds up the computation. + Raises ValueError if `idxs` do not all belong to a single sequences . + """ + index_slice = self._index.iloc[idxs] + if subset_filter is not None: + if isinstance(subset_filter, str): + subset_filter = [subset_filter] + indicator = index_slice["subset"].isin(subset_filter) + index_slice = index_slice.loc[indicator] + idxs = [i for i, isin in zip(idxs, indicator) if isin] + + frames = index_slice.index.get_level_values("frame_number").tolist() + if seq_name is None: + seq_name_list = index_slice.index.get_level_values("sequence_name").tolist() + seq_name_set = set(seq_name_list) + if len(seq_name_set) > 1: + raise ValueError("Given indices belong to more than one sequence.") + elif len(seq_name_set) == 1: + seq_name = seq_name_list[0] + + coalesced_ts = sa.sql.functions.coalesce( + self.frame_annotations_type.frame_timestamp, 0 + ) + stmt = sa.select( + coalesced_ts.label("frame_timestamp"), + self.frame_annotations_type.frame_number, + ).where( + self.frame_annotations_type.sequence_name == seq_name, + self.frame_annotations_type.frame_number.in_(frames), + ) + + with self._sql_engine.connect() as connection: + frame_no_ts = pd.read_sql_query(stmt, connection) + + if len(frame_no_ts) != len(index_slice): + raise ValueError( + "Not all indices are found in the database; " + "do they belong to more than one sequence?" 
+ ) + + return frame_no_ts, idxs + + def _local_path(self, path: str) -> str: + if self.path_manager is None: + return path + return self.path_manager.get_local_path(path) + + def _get_temp_index_table_instance(self, table_name: str = "__index"): + CachedTable = self.frame_annotations_type.metadata.tables.get(table_name) + if CachedTable is not None: # table definition is not idempotent + return CachedTable + + return sa.Table( + table_name, + self.frame_annotations_type.metadata, + sa.Column("sequence_name", sa.String, primary_key=True), + sa.Column("frame_number", sa.Integer, primary_key=True), + sa.Column("_image_path", sa.String), + sa.Column("subset", sa.String), + prefixes=["TEMP"], # NOTE SQLite specific! + ) + + +def _seq_name_to_seed(seq_name) -> int: + """Generates numbers in [0, 2 ** 28)""" + return int(hashlib.sha1(seq_name.encode("utf-8")).hexdigest()[:7], 16) + + +def _safe_as_tensor(data, dtype): + return torch.tensor(data, dtype=dtype) if data is not None else None diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/sql_dataset_provider.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/sql_dataset_provider.py new file mode 100644 index 0000000000000000000000000000000000000000..ab161e8d73b3c1e7f0f00195fd6fdf94bf9b7af1 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/sql_dataset_provider.py @@ -0,0 +1,424 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ + +import logging +import os +from typing import List, Optional, Tuple, Type + +import numpy as np + +from omegaconf import DictConfig, OmegaConf + +from pytorch3d.implicitron.dataset.dataset_map_provider import ( + DatasetMap, + DatasetMapProviderBase, + PathManagerFactory, +) +from pytorch3d.implicitron.tools.config import ( + expand_args_fields, + registry, + run_auto_creation, +) + +from .sql_dataset import SqlIndexDataset + + +_CO3D_SQL_DATASET_ROOT: str = os.getenv("CO3D_SQL_DATASET_ROOT", "") + +# _NEED_CONTROL is a list of those elements of SqlIndexDataset which +# are not directly specified for it in the config but come from the +# DatasetMapProvider. +_NEED_CONTROL: Tuple[str, ...] = ( + "path_manager", + "subsets", + "sqlite_metadata_file", + "subset_lists_file", +) + +logger = logging.getLogger(__name__) + + +@registry.register +class SqlIndexDatasetMapProvider(DatasetMapProviderBase): # pyre-ignore [13] + """ + Generates the training, validation, and testing dataset objects for + a dataset laid out on disk like SQL-CO3D, with annotations in an SQLite data base. + + The dataset is organized in the filesystem as follows:: + + self.dataset_root + ├── + │ ├── + │ │ ├── depth_masks + │ │ ├── depths + │ │ ├── images + │ │ ├── masks + │ │ └── pointcloud.ply + │ ├── + │ │ ├── depth_masks + │ │ ├── depths + │ │ ├── images + │ │ ├── masks + │ │ └── pointcloud.ply + │ ├── ... + │ ├── + │ ├── set_lists + │ ├── .json + │ ├── .json + │ ├── ... + │ ├── .json + │ ├── eval_batches + │ │ ├── .json + │ │ ├── .json + │ │ ├── ... + │ │ ├── .json + │ ├── frame_annotations.jgz + │ ├── sequence_annotations.jgz + ├── + ├── ... + ├── + ├── set_lists + ├── .sqlite + ├── .sqlite + ├── ... + ├── .sqlite + ├── eval_batches + │ ├── .json + │ ├── .json + │ ├── ... + │ ├── .json + + The dataset contains sequences named `` that may be partitioned by + directories such as `` e.g. representing categories but they + can also be stored in a flat structure. 
Each sequence folder contains the list of + sequence images, depth maps, foreground masks, and valid-depth masks + `images`, `depths`, `masks`, and `depth_masks` respectively. Furthermore, + `set_lists/` dirtectories (with partitions or global) store json or sqlite files + `.`, each describing a certain sequence subset. + These subset path conventions are not hard-coded and arbitrary relative path can be + specified by setting `self.subset_lists_path` to the relative path w.r.t. + dataset root. + + Each `.json` file contains the following dictionary:: + + { + "train": [ + (sequence_name: str, frame_number: int, image_path: str), + ... + ], + "val": [ + (sequence_name: str, frame_number: int, image_path: str), + ... + ], + "test": [ + (sequence_name: str, frame_number: int, image_path: str), + ... + ], + ] + + defining the list of frames (identified with their `sequence_name` and + `frame_number`) in the "train", "val", and "test" subsets of the dataset. In case of + SQLite format, `.sqlite` contains a table with the header:: + + | sequence_name | frame_number | image_path | subset | + + Note that `frame_number` can be obtained only from the metadata and + does not necesarrily correspond to the numeric suffix of the corresponding image + file name (e.g. a file `//images/frame00005.jpg` can + have its frame number set to `20`, not 5). + + Each `.json` file contains a list of evaluation examples + in the following form:: + + [ + [ # batch 1 + (sequence_name: str, frame_number: int, image_path: str), + ... + ], + [ # batch 2 + (sequence_name: str, frame_number: int, image_path: str), + ... + ], + ] + + Note that the evaluation examples always come from the `"test"` subset of the dataset. + (test frames can repeat across batches). The batches can contain single element, + which is typical in case of regular radiance field fitting. + + Args: + subset_lists_path: The relative path to the dataset subset definition. + For CO3D, these include e.g. 
"skateboard/set_lists/set_lists_manyview_dev_0.json". + By default (None), dataset is not partitioned to subsets (in that case, setting + `ignore_subsets` will speed up construction) + dataset_root: The root folder of the dataset. + metadata_basename: name of the SQL metadata file in dataset_root; + not expected to be changed by users + test_on_train: Construct validation and test datasets from + the training subset; note that in practice, in this + case all subset dataset objects will be same + only_test_set: Load only the test set. Incompatible with `test_on_train`. + ignore_subsets: Don’t filter by subsets in the dataset; note that in this + case all subset datasets will be same + eval_batch_num_training_frames: Add a certain number of training frames to each + eval batch. Useful for evaluating models that require + source views as input (e.g. NeRF-WCE / PixelNeRF). + dataset_args: Specifies additional arguments to the + JsonIndexDataset constructor call. + path_manager_factory: (Optional) An object that generates an instance of + PathManager that can translate provided file paths. + path_manager_factory_class_type: The class type of `path_manager_factory`. + """ + + category: Optional[str] = None + subset_list_name: Optional[str] = None # TODO: docs + # OR + subset_lists_path: Optional[str] = None + eval_batches_path: Optional[str] = None + + dataset_root: str = _CO3D_SQL_DATASET_ROOT + metadata_basename: str = "metadata.sqlite" + + test_on_train: bool = False + only_test_set: bool = False + ignore_subsets: bool = False + train_subsets: Tuple[str, ...] = ("train",) + val_subsets: Tuple[str, ...] = ("val",) + test_subsets: Tuple[str, ...] 
= ("test",) + + eval_batch_num_training_frames: int = 0 + + # this is a mould that is never constructed, used to build self._dataset_map values + dataset_class_type: str = "SqlIndexDataset" + dataset: SqlIndexDataset + + path_manager_factory: PathManagerFactory + path_manager_factory_class_type: str = "PathManagerFactory" + + def __post_init__(self): + super().__init__() + run_auto_creation(self) + + if self.only_test_set and self.test_on_train: + raise ValueError("Cannot have only_test_set and test_on_train") + + if self.ignore_subsets and not self.only_test_set: + self.test_on_train = True # no point in loading same data 3 times + + path_manager = self.path_manager_factory.get() + + sqlite_metadata_file = os.path.join(self.dataset_root, self.metadata_basename) + sqlite_metadata_file = _local_path(path_manager, sqlite_metadata_file) + + if not os.path.isfile(sqlite_metadata_file): + # The sqlite_metadata_file does not exist. + # Most probably the user has not specified the root folder. + raise ValueError( + f"Looking for frame annotations in {sqlite_metadata_file}." + + " Please specify a correct dataset_root folder." + + " Note: By default the root folder is taken from the" + + " CO3D_SQL_DATASET_ROOT environment variable." 
+ ) + + if self.subset_lists_path and self.subset_list_name: + raise ValueError( + "subset_lists_path and subset_list_name cannot be both set" + ) + + subset_lists_file = self._get_lists_file("set_lists") + + # setup the common dataset arguments + common_dataset_kwargs = { + **getattr(self, f"dataset_{self.dataset_class_type}_args"), + "sqlite_metadata_file": sqlite_metadata_file, + "dataset_root": self.dataset_root, + "subset_lists_file": subset_lists_file, + "path_manager": path_manager, + } + + if self.category: + logger.info(f"Forcing category filter in the datasets to {self.category}") + common_dataset_kwargs["pick_categories"] = self.category.split(",") + + # get the used dataset type + dataset_type: Type[SqlIndexDataset] = registry.get( + SqlIndexDataset, self.dataset_class_type + ) + expand_args_fields(dataset_type) + + if subset_lists_file is not None and not os.path.isfile(subset_lists_file): + available_subsets = self._get_available_subsets( + OmegaConf.to_object(common_dataset_kwargs["pick_categories"]) + ) + msg = f"Cannot find subset list file {self.subset_lists_path}." + if available_subsets: + msg += f" Some of the available subsets: {str(available_subsets)}." 
+ raise ValueError(msg) + + train_dataset = None + val_dataset = None + if not self.only_test_set: + # load the training set + logger.debug("Constructing train dataset.") + train_dataset = dataset_type( + **common_dataset_kwargs, subsets=self._get_subsets(self.train_subsets) + ) + logger.info(f"Train dataset: {str(train_dataset)}") + + if self.test_on_train: + assert train_dataset is not None + val_dataset = test_dataset = train_dataset + else: + # load the val and test sets + if not self.only_test_set: + # NOTE: this is always loaded in JsonProviderV2 + logger.debug("Extracting val dataset.") + val_dataset = dataset_type( + **common_dataset_kwargs, subsets=self._get_subsets(self.val_subsets) + ) + logger.info(f"Val dataset: {str(val_dataset)}") + + logger.debug("Extracting test dataset.") + eval_batches_file = self._get_lists_file("eval_batches") + del common_dataset_kwargs["eval_batches_file"] + test_dataset = dataset_type( + **common_dataset_kwargs, + subsets=self._get_subsets(self.test_subsets, True), + eval_batches_file=eval_batches_file, + ) + logger.info(f"Test dataset: {str(test_dataset)}") + + if ( + eval_batches_file is not None + and self.eval_batch_num_training_frames > 0 + ): + self._extend_eval_batches(test_dataset) + + self._dataset_map = DatasetMap( + train=train_dataset, val=val_dataset, test=test_dataset + ) + + def _get_subsets(self, subsets, is_eval: bool = False): + if self.ignore_subsets: + return None + + if is_eval and self.eval_batch_num_training_frames > 0: + # we will need to have training frames for extended batches + return list(subsets) + list(self.train_subsets) + + return subsets + + def _extend_eval_batches(self, test_dataset: SqlIndexDataset) -> None: + rng = np.random.default_rng(seed=0) + eval_batches = test_dataset.get_eval_batches() + if eval_batches is None: + raise ValueError("Eval batches were not loaded!") + + for batch in eval_batches: + sequence = batch[0][0] + seq_frames = list( + 
test_dataset.sequence_frames_in_order(sequence, self.train_subsets) + ) + idx_to_add = rng.permutation(len(seq_frames))[ + : self.eval_batch_num_training_frames + ] + batch.extend((sequence, seq_frames[a][1]) for a in idx_to_add) + + @classmethod + def dataset_tweak_args(cls, type, args: DictConfig) -> None: + """ + Called by get_default_args. + Certain fields are not exposed on each dataset class + but rather are controlled by this provider class. + """ + for key in _NEED_CONTROL: + del args[key] + + def create_dataset(self): + # No `dataset` member of this class is created. + # The dataset(s) live in `self.get_dataset_map`. + pass + + def get_dataset_map(self) -> DatasetMap: + return self._dataset_map # pyre-ignore [16] + + def _get_available_subsets(self, categories: List[str]): + """ + Get the available subset names for a given category folder (if given) inside + a root dataset folder `dataset_root`. + """ + path_manager = self.path_manager_factory.get() + + subsets: List[str] = [] + for prefix in [""] + categories: + set_list_dir = os.path.join(self.dataset_root, prefix, "set_lists") + if not ( + (path_manager is not None) and path_manager.isdir(set_list_dir) + ) and not os.path.isdir(set_list_dir): + continue + + set_list_files = (os.listdir if path_manager is None else path_manager.ls)( + set_list_dir + ) + subsets.extend(os.path.join(prefix, "set_lists", f) for f in set_list_files) + + return subsets + + def _get_lists_file(self, flavor: str) -> Optional[str]: + if flavor == "eval_batches": + subset_lists_path = self.eval_batches_path + else: + subset_lists_path = self.subset_lists_path + + if not subset_lists_path and not self.subset_list_name: + return None + + category_elem = "" + if self.category and "," not in self.category: + # if multiple categories are given, looking for global set lists + category_elem = self.category + + subset_lists_path = subset_lists_path or ( + os.path.join( + category_elem, f"{flavor}", f"{flavor}_{self.subset_list_name}" + ) 
+ ) + + assert subset_lists_path + path_manager = self.path_manager_factory.get() + # try absolute path first + subset_lists_file = _get_local_path_check_extensions( + subset_lists_path, path_manager + ) + if subset_lists_file: + return subset_lists_file + + full_path = os.path.join(self.dataset_root, subset_lists_path) + subset_lists_file = _get_local_path_check_extensions(full_path, path_manager) + + if not subset_lists_file: + raise FileNotFoundError( + f"Subset lists path given but not found: {full_path}" + ) + + return subset_lists_file + + +def _get_local_path_check_extensions( + path, path_manager, extensions=("", ".sqlite", ".json") +) -> Optional[str]: + for ext in extensions: + local = _local_path(path_manager, path + ext) + if os.path.isfile(local): + return local + + return None + + +def _local_path(path_manager, path: str) -> str: + if path_manager is None: + return path + return path_manager.get_local_path(path) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/types.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/types.py new file mode 100644 index 0000000000000000000000000000000000000000..faf8c790a7d2e46eb7a585b745b711f830ea172c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/types.py @@ -0,0 +1,357 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + + +import dataclasses +import gzip +import json +from dataclasses import dataclass, Field, MISSING +from typing import ( + Any, + cast, + Dict, + get_args, + get_origin, + IO, + Optional, + Tuple, + Type, + TypeVar, + Union, +) + +import numpy as np + + +_X = TypeVar("_X") + +TF3 = Tuple[float, float, float] + + +@dataclass +class ImageAnnotation: + # path to jpg file, relative w.r.t. dataset_root + path: str + # H x W + size: Tuple[int, int] # TODO: rename size_hw? + + +@dataclass +class DepthAnnotation: + # path to png file, relative w.r.t. dataset_root, storing `depth / scale_adjustment` + path: str + # a factor to convert png values to actual depth: `depth = png * scale_adjustment` + scale_adjustment: float + # path to png file, relative w.r.t. dataset_root, storing binary `depth` mask + mask_path: Optional[str] + + +@dataclass +class MaskAnnotation: + # path to png file storing (Prob(fg | pixel) * 255) + path: str + # (soft) number of pixels in the mask; sum(Prob(fg | pixel)) + mass: Optional[float] = None + # tight bounding box around the foreground mask + bounding_box_xywh: Optional[Tuple[float, float, float, float]] = None + + +@dataclass +class ViewpointAnnotation: + # In right-multiply (PyTorch3D) format. X_cam = X_world @ R + T + R: Tuple[TF3, TF3, TF3] + T: TF3 + + focal_length: Tuple[float, float] + principal_point: Tuple[float, float] + + intrinsics_format: str = "ndc_norm_image_bounds" + # Defines the co-ordinate system where focal_length and principal_point live. + # Possible values: ndc_isotropic | ndc_norm_image_bounds (default) + # ndc_norm_image_bounds: legacy PyTorch3D NDC format, where image boundaries + # correspond to [-1, 1] x [-1, 1], and the scale along x and y may differ + # ndc_isotropic: PyTorch3D 0.5+ NDC convention where the shorter side has + # the range [-1, 1], and the longer one has the range [-s, s]; s >= 1, + # where s is the aspect ratio. The scale is same along x and y. 
+ + +@dataclass +class FrameAnnotation: + """A dataclass used to load annotations from json.""" + + # can be used to join with `SequenceAnnotation` + sequence_name: str + # 0-based, continuous frame number within sequence + frame_number: int + # timestamp in seconds from the video start + frame_timestamp: float + + image: ImageAnnotation + depth: Optional[DepthAnnotation] = None + mask: Optional[MaskAnnotation] = None + viewpoint: Optional[ViewpointAnnotation] = None + meta: Optional[Dict[str, Any]] = None + + +@dataclass +class PointCloudAnnotation: + # path to ply file with points only, relative w.r.t. dataset_root + path: str + # the bigger the better + quality_score: float + n_points: Optional[int] + + +@dataclass +class VideoAnnotation: + # path to the original video file, relative w.r.t. dataset_root + path: str + # length of the video in seconds + length: float + + +@dataclass +class SequenceAnnotation: + sequence_name: str + category: str + video: Optional[VideoAnnotation] = None + point_cloud: Optional[PointCloudAnnotation] = None + # the bigger the better + viewpoint_quality_score: Optional[float] = None + + +def dump_dataclass(obj: Any, f: IO, binary: bool = False) -> None: + """ + Args: + f: Either a path to a file, or a file opened for writing. + obj: A @dataclass or collection hierarchy including dataclasses. + binary: Set to True if `f` is a file handle, else False. + """ + if binary: + f.write(json.dumps(_asdict_rec(obj)).encode("utf8")) + else: + json.dump(_asdict_rec(obj), f) + + +def load_dataclass(f: IO, cls: Type[_X], binary: bool = False) -> _X: + """ + Loads to a @dataclass or collection hierarchy including dataclasses + from a json recursively. + Call it like load_dataclass(f, typing.List[FrameAnnotationAnnotation]). + raises KeyError if json has keys not mapping to the dataclass fields. + + Args: + f: Either a path to a file, or a file opened for writing. + cls: The class of the loaded dataclass. 
+ binary: Set to True if `f` is a file handle, else False. + """ + if binary: + asdict = json.loads(f.read().decode("utf8")) + else: + asdict = json.load(f) + + if isinstance(asdict, list): + # in the list case, run a faster "vectorized" version + cls = get_args(cls)[0] + res = list(_dataclass_list_from_dict_list(asdict, cls)) + else: + res = _dataclass_from_dict(asdict, cls) + + return res + + +def _dataclass_list_from_dict_list(dlist, typeannot): + """ + Vectorised version of `_dataclass_from_dict`. + The output should be equivalent to + `[_dataclass_from_dict(d, typeannot) for d in dlist]`. + + Args: + dlist: list of objects to convert. + typeannot: type of each of those objects. + Returns: + iterator or list over converted objects of the same length as `dlist`. + + Raises: + ValueError: it assumes the objects have None's in consistent places across + objects, otherwise it would ignore some values. This generally holds for + auto-generated annotations, but otherwise use `_dataclass_from_dict`. 
+ """ + + cls = get_origin(typeannot) or typeannot + + if typeannot is Any: + return dlist + if all(obj is None for obj in dlist): # 1st recursion base: all None nodes + return dlist + if any(obj is None for obj in dlist): + # filter out Nones and recurse on the resulting list + idx_notnone = [(i, obj) for i, obj in enumerate(dlist) if obj is not None] + idx, notnone = zip(*idx_notnone) + converted = _dataclass_list_from_dict_list(notnone, typeannot) + res = [None] * len(dlist) + for i, obj in zip(idx, converted): + res[i] = obj + return res + + is_optional, contained_type = _resolve_optional(typeannot) + if is_optional: + return _dataclass_list_from_dict_list(dlist, contained_type) + + # otherwise, we dispatch by the type of the provided annotation to convert to + if issubclass(cls, tuple) and hasattr(cls, "_fields"): # namedtuple + # For namedtuple, call the function recursively on the lists of corresponding keys + types = cls.__annotations__.values() + dlist_T = zip(*dlist) + res_T = [ + _dataclass_list_from_dict_list(key_list, tp) + for key_list, tp in zip(dlist_T, types) + ] + return [cls(*converted_as_tuple) for converted_as_tuple in zip(*res_T)] + elif issubclass(cls, (list, tuple)): + # For list/tuple, call the function recursively on the lists of corresponding positions + types = get_args(typeannot) + if len(types) == 1: # probably List; replicate for all items + types = types * len(dlist[0]) + dlist_T = zip(*dlist) + res_T = ( + _dataclass_list_from_dict_list(pos_list, tp) + for pos_list, tp in zip(dlist_T, types) + ) + if issubclass(cls, tuple): + return list(zip(*res_T)) + else: + return [cls(converted_as_tuple) for converted_as_tuple in zip(*res_T)] + elif issubclass(cls, dict): + # For the dictionary, call the function recursively on concatenated keys and vertices + key_t, val_t = get_args(typeannot) + all_keys_res = _dataclass_list_from_dict_list( + [k for obj in dlist for k in obj.keys()], key_t + ) + all_vals_res = _dataclass_list_from_dict_list( + 
[k for obj in dlist for k in obj.values()], val_t + ) + indices = np.cumsum([len(obj) for obj in dlist]) + assert indices[-1] == len(all_keys_res) + + keys = np.split(list(all_keys_res), indices[:-1]) + all_vals_res_iter = iter(all_vals_res) + return [cls(zip(k, all_vals_res_iter)) for k in keys] + elif not dataclasses.is_dataclass(typeannot): + return dlist + + # dataclass node: 2nd recursion base; call the function recursively on the lists + # of the corresponding fields + assert dataclasses.is_dataclass(cls) + fieldtypes = { + f.name: (_unwrap_type(f.type), _get_dataclass_field_default(f)) + for f in dataclasses.fields(typeannot) + } + + # NOTE the default object is shared here + key_lists = ( + _dataclass_list_from_dict_list([obj.get(k, default) for obj in dlist], type_) + for k, (type_, default) in fieldtypes.items() + ) + transposed = zip(*key_lists) + return [cls(*vals_as_tuple) for vals_as_tuple in transposed] + + +def _dataclass_from_dict(d, typeannot): + if d is None or typeannot is Any: + return d + is_optional, contained_type = _resolve_optional(typeannot) + if is_optional: + # an Optional not set to None, just use the contents of the Optional. 
+ return _dataclass_from_dict(d, contained_type) + + cls = get_origin(typeannot) or typeannot + if issubclass(cls, tuple) and hasattr(cls, "_fields"): # namedtuple + types = cls.__annotations__.values() + return cls(*[_dataclass_from_dict(v, tp) for v, tp in zip(d, types)]) + elif issubclass(cls, (list, tuple)): + types = get_args(typeannot) + if len(types) == 1: # probably List; replicate for all items + types = types * len(d) + return cls(_dataclass_from_dict(v, tp) for v, tp in zip(d, types)) + elif issubclass(cls, dict): + key_t, val_t = get_args(typeannot) + return cls( + (_dataclass_from_dict(k, key_t), _dataclass_from_dict(v, val_t)) + for k, v in d.items() + ) + elif not dataclasses.is_dataclass(typeannot): + return d + + assert dataclasses.is_dataclass(cls) + fieldtypes = {f.name: _unwrap_type(f.type) for f in dataclasses.fields(typeannot)} + return cls(**{k: _dataclass_from_dict(v, fieldtypes[k]) for k, v in d.items()}) + + +def _unwrap_type(tp): + # strips Optional wrapper, if any + if get_origin(tp) is Union: + args = get_args(tp) + if len(args) == 2 and any(a is type(None) for a in args): # noqa: E721 + # this is typing.Optional + return args[0] if args[1] is type(None) else args[1] # noqa: E721 + return tp + + +def _get_dataclass_field_default(field: Field) -> Any: + if field.default_factory is not MISSING: + # pyre-fixme[29]: `Union[dataclasses._MISSING_TYPE, + # dataclasses._DefaultFactory[typing.Any]]` is not a function. + return field.default_factory() + elif field.default is not MISSING: + return field.default + else: + return None + + +def _asdict_rec(obj): + return dataclasses._asdict_inner(obj, dict) + + +def dump_dataclass_jgzip(outfile: str, obj: Any) -> None: + """ + Dumps obj to a gzipped json outfile. + + Args: + obj: A @dataclass or collection hiererchy including dataclasses. + outfile: The path to the output file. 
+ """ + with gzip.GzipFile(outfile, "wb") as f: + dump_dataclass(obj, cast(IO, f), binary=True) + + +def load_dataclass_jgzip(outfile, cls): + """ + Loads a dataclass from a gzipped json outfile. + + Args: + outfile: The path to the loaded file. + cls: The type annotation of the loaded dataclass. + + Returns: + loaded_dataclass: The loaded dataclass. + """ + with gzip.GzipFile(outfile, "rb") as f: + return load_dataclass(cast(IO, f), cls, binary=True) + + +def _resolve_optional(type_: Any) -> Tuple[bool, Any]: + """Check whether `type_` is equivalent to `typing.Optional[T]` for some T.""" + if get_origin(type_) is Union: + args = get_args(type_) + if len(args) == 2 and args[1] == type(None): # noqa E721 + return True, args[0] + if type_ is Any: + return True, Any + + return False, type_ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/visualize.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/visualize.py new file mode 100644 index 0000000000000000000000000000000000000000..557f7b43d734ad4570dd3b279571379fa2a592fb --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/dataset/visualize.py @@ -0,0 +1,100 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + +from typing import cast, Optional, Tuple + +import torch +from pytorch3d.implicitron.tools.point_cloud_utils import get_rgbd_point_cloud +from pytorch3d.structures import Pointclouds + +from .frame_data import FrameData +from .json_index_dataset import JsonIndexDataset + + +def get_implicitron_sequence_pointcloud( + dataset: JsonIndexDataset, + sequence_name: Optional[str] = None, + mask_points: bool = True, + max_frames: int = -1, + num_workers: int = 0, + load_dataset_point_cloud: bool = False, +) -> Tuple[Pointclouds, FrameData]: + """ + Make a point cloud by sampling random points from each frame the dataset. + """ + + if len(dataset) == 0: + raise ValueError("The dataset is empty.") + + if not dataset.load_depths: + raise ValueError("The dataset has to load depths (dataset.load_depths=True).") + + if mask_points and not dataset.load_masks: + raise ValueError( + "For mask_points=True, the dataset has to load masks" + + " (dataset.load_masks=True)." + ) + + # setup the indices of frames loaded from the dataset db + sequence_entries = list(range(len(dataset))) + if sequence_name is not None: + sequence_entries = [ + ei + for ei in sequence_entries + # pyre-ignore[16] + if dataset.frame_annots[ei]["frame_annotation"].sequence_name + == sequence_name + ] + if len(sequence_entries) == 0: + raise ValueError( + f'There are no dataset entries for sequence name "{sequence_name}".' 
+ ) + + # subsample loaded frames if needed + if (max_frames > 0) and (len(sequence_entries) > max_frames): + sequence_entries = [ + sequence_entries[i] + for i in torch.randperm(len(sequence_entries))[:max_frames].sort().values + ] + + # take only the part of the dataset corresponding to the sequence entries + sequence_dataset = torch.utils.data.Subset(dataset, sequence_entries) + + # load the required part of the dataset + loader = torch.utils.data.DataLoader( + sequence_dataset, + batch_size=len(sequence_dataset), + shuffle=False, + num_workers=num_workers, + collate_fn=dataset.frame_data_type.collate, + ) + + frame_data = next(iter(loader)) # there's only one batch + + # scene point cloud + if load_dataset_point_cloud: + if not dataset.load_point_clouds: + raise ValueError( + "For load_dataset_point_cloud=True, the dataset has to" + + " load point clouds (dataset.load_point_clouds=True)." + ) + point_cloud = frame_data.sequence_point_cloud + + else: + point_cloud = get_rgbd_point_cloud( + frame_data.camera, + frame_data.image_rgb, + frame_data.depth_map, + ( + (cast(torch.Tensor, frame_data.fg_probability) > 0.5).float() + if mask_points and frame_data.fg_probability is not None + else None + ), + ) + + return point_cloud, frame_data diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6ac1a72bde66f104691245d2de4e83c6863718d5 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/camera_utils.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/camera_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0c76373dda4a47fc3ffe445a4b0b2136954dc140 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/camera_utils.py @@ -0,0 +1,144 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + + +# TODO: all this potentially goes to PyTorch3D + +import math +from typing import Tuple + +import pytorch3d as pt3d +import torch +from pytorch3d.renderer.cameras import CamerasBase + + +def jitter_extrinsics( + R: torch.Tensor, + T: torch.Tensor, + max_angle: float = (math.pi * 2.0), + translation_std: float = 1.0, + scale_std: float = 0.3, +): + """ + Jitter the extrinsic camera parameters `R` and `T` with a random similarity + transformation. The transformation rotates by a random angle between [0, max_angle]; + scales by a random factor exp(N(0, scale_std)), where N(0, scale_std) is + a random sample from a normal distrubtion with zero mean and variance scale_std; + and translates by a 3D offset sampled from N(0, translation_std). 
+ """ + assert all(x >= 0.0 for x in (max_angle, translation_std, scale_std)) + N = R.shape[0] + R_jit = pt3d.transforms.random_rotations(1, device=R.device) + R_jit = pt3d.transforms.so3_exponential_map( + pt3d.transforms.so3_log_map(R_jit) * max_angle + ) + T_jit = torch.randn_like(R_jit[:1, :, 0]) * translation_std + rigid_transform = pt3d.ops.eyes(dim=4, N=N, device=R.device) + rigid_transform[:, :3, :3] = R_jit.expand(N, 3, 3) + rigid_transform[:, 3, :3] = T_jit.expand(N, 3) + scale_jit = torch.exp(torch.randn_like(T_jit[:, 0]) * scale_std).expand(N) + return apply_camera_alignment(R, T, rigid_transform, scale_jit) + + +def apply_camera_alignment( + R: torch.Tensor, + T: torch.Tensor, + rigid_transform: torch.Tensor, + scale: torch.Tensor, +): + """ + Args: + R: Camera rotation matrix of shape (N, 3, 3). + T: Camera translation of shape (N, 3). + rigid_transform: A tensor of shape (N, 4, 4) representing a batch of + N 4x4 tensors that map the scene pointcloud from misaligned coords + to the aligned space. + scale: A list of N scaling factors. A tensor of shape (N,) + + Returns: + R_aligned: The aligned rotations R. + T_aligned: The aligned translations T. 
+ """ + R_rigid = rigid_transform[:, :3, :3] + T_rigid = rigid_transform[:, 3:, :3] + R_aligned = R_rigid.permute(0, 2, 1).bmm(R) + T_aligned = scale[:, None] * (T - (T_rigid @ R_aligned)[:, 0]) + return R_aligned, T_aligned + + +def get_min_max_depth_bounds(cameras, scene_center, scene_extent): + """ + Estimate near/far depth plane as: + near = dist(cam_center, self.scene_center) - self.scene_extent + far = dist(cam_center, self.scene_center) + self.scene_extent + """ + cam_center = cameras.get_camera_center() + center_dist = ( + ((cam_center - scene_center.to(cameras.R)[None]) ** 2) + .sum(dim=-1) + .clamp(0.001) + .sqrt() + ) + center_dist = center_dist.clamp(scene_extent + 1e-3) + min_depth = center_dist - scene_extent + max_depth = center_dist + scene_extent + return min_depth, max_depth + + +def volumetric_camera_overlaps( + cameras: CamerasBase, + scene_extent: float = 8.0, + scene_center: Tuple[float, float, float] = (0.0, 0.0, 0.0), + resol: int = 16, + weigh_by_ray_angle: bool = True, +): + """ + Compute the overlaps between viewing frustrums of all pairs of cameras + in `cameras`. 
+ """ + device = cameras.device + ba = cameras.R.shape[0] + n_vox = int(resol**3) + grid = pt3d.structures.Volumes( + densities=torch.zeros([1, 1, resol, resol, resol], device=device), + volume_translation=-torch.FloatTensor(scene_center)[None].to(device), + voxel_size=2.0 * scene_extent / resol, + ).get_coord_grid(world_coordinates=True) + + grid = grid.view(1, n_vox, 3).expand(ba, n_vox, 3) + gridp = cameras.transform_points(grid, eps=1e-2) + proj_in_camera = ( + torch.prod((gridp[..., :2].abs() <= 1.0), dim=-1) + * (gridp[..., 2] > 0.0).float() + ) # ba x n_vox + + if weigh_by_ray_angle: + rays = torch.nn.functional.normalize( + grid - cameras.get_camera_center()[:, None], dim=-1 + ) + rays_masked = rays * proj_in_camera[..., None] + + # - slow and readable: + # inter = torch.zeros(ba, ba) + # for i1 in range(ba): + # for i2 in range(ba): + # inter[i1, i2] = ( + # 1 + (rays_masked[i1] * rays_masked[i2] + # ).sum(dim=-1)).sum() + + # - fast: + rays_masked = rays_masked.view(ba, n_vox * 3) + inter = n_vox + (rays_masked @ rays_masked.t()) + + else: + inter = proj_in_camera @ proj_in_camera.t() + + mass = torch.diag(inter) + iou = inter / (mass[:, None] + mass[None, :] - inter).clamp(0.1) + + return iou diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/image_utils.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/image_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7e8b3b04dbb9c1c9fd59d88cbdef0cf39623bbf2 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/image_utils.py @@ -0,0 +1,57 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + + +from typing import Sequence, Union + +import torch + + +def mask_background( + image_rgb: torch.Tensor, + mask_fg: torch.Tensor, + dim_color: int = 1, + bg_color: Union[torch.Tensor, Sequence, str, float] = 0.0, +) -> torch.Tensor: + """ + Mask the background input image tensor `image_rgb` with `bg_color`. + The background regions are obtained from the binary foreground segmentation + mask `mask_fg`. + """ + tgt_view = [1, 1, 1, 1] + tgt_view[dim_color] = 3 + # obtain the background color tensor + if isinstance(bg_color, torch.Tensor): + bg_color_t = bg_color.view(1, 3, 1, 1).clone().to(image_rgb) + elif isinstance(bg_color, (float, tuple, list)): + if isinstance(bg_color, float): + bg_color = [bg_color] * 3 + bg_color_t = torch.tensor( + bg_color, device=image_rgb.device, dtype=image_rgb.dtype + ).view(*tgt_view) + elif isinstance(bg_color, str): + if bg_color == "white": + bg_color_t = image_rgb.new_ones(tgt_view) + elif bg_color == "black": + bg_color_t = image_rgb.new_zeros(tgt_view) + else: + raise ValueError(_invalid_color_error_msg(bg_color)) + else: + raise ValueError(_invalid_color_error_msg(bg_color)) + # cast to the image_rgb's type + mask_fg = mask_fg.type_as(image_rgb) + # mask the bg + image_masked = mask_fg * image_rgb + (1 - mask_fg) * bg_color_t + return image_masked + + +def _invalid_color_error_msg(bg_color) -> str: + return ( + f"Invalid bg_color={bg_color}. Plese set bg_color to a 3-element" + + " tensor. or a string (white | black), or a float." 
+ ) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/rasterize_mc.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/rasterize_mc.py new file mode 100644 index 0000000000000000000000000000000000000000..9615987eeda913011f06c4a21a2d1e7f24395998 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/implicitron/tools/rasterize_mc.py @@ -0,0 +1,147 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +import math +from typing import Optional, Tuple + +import pytorch3d + +import torch +from pytorch3d.ops import packed_to_padded +from pytorch3d.renderer import PerspectiveCameras +from pytorch3d.structures import Pointclouds + +from .point_cloud_utils import render_point_cloud_pytorch3d + + +@torch.no_grad() +def rasterize_sparse_ray_bundle( + ray_bundle: "pytorch3d.implicitron.models.renderer.base.ImplicitronRayBundle", + features: torch.Tensor, + image_size_hw: Tuple[int, int], + depth: torch.Tensor, + masks: Optional[torch.Tensor] = None, +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Rasterizes sparse features corresponding to the coordinates defined by + the rays in the bundle. + + Args: + ray_bundle: ray bundle object with B x ... x 2 pixel coordinates, + it can be packed. + features: B x ... x C tensor containing per-point rendered features. + image_size_hw: Tuple[image_height, image_width] containing + the size of rasterized image. + depth: B x ... x 1 tensor containing per-point rendered depth. + masks: B x ... x 1 tensor containing the alpha mask of the + rendered features. 
+ + Returns: + - image_render: B x C x H x W tensor of rasterized features + - depths_render: B x 1 x H x W tensor of rasterized depth maps + - masks_render: B x 1 x H x W tensor of opacities after splatting + """ + # Flatten the features and xy locations. + features_depth_ras = torch.cat( + (features.flatten(1, -2), depth.flatten(1, -2)), dim=-1 + ) + xys = ray_bundle.xys + masks_ras = None + if ray_bundle.is_packed(): + camera_counts = ray_bundle.camera_counts + assert camera_counts is not None + xys, first_idxs, _ = ray_bundle.get_padded_xys() + masks_ras = ( + torch.arange(xys.shape[1], device=xys.device)[:, None] + < camera_counts[:, None, None] + ) + + max_size = torch.max(camera_counts).item() + features_depth_ras = packed_to_padded( + features_depth_ras[:, 0], first_idxs, max_size + ) + if masks is not None: + padded_mask = packed_to_padded(masks.flatten(1, -1), first_idxs, max_size) + masks_ras = padded_mask * masks_ras + + xys_ras = xys.flatten(1, -2) + + if masks_ras is None: + assert not ray_bundle.is_packed() + masks_ras = masks.flatten(1, -2) if masks is not None else None + + if min(*image_size_hw) <= 0: + raise ValueError( + "Need to specify a positive output_size_hw for bundle rasterisation." + ) + + # Estimate the rasterization point radius so that we approximately fill + # the whole image given the number of rasterized points. + pt_radius = 2.0 / math.sqrt(xys.shape[1]) + + # Rasterize the samples. 
def rasterize_mc_samples(
    xys: torch.Tensor,
    feats: torch.Tensor,
    image_size_hw: Tuple[int, int],
    radius: float = 0.03,
    topk: int = 5,
    masks: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Rasterizes Monte-Carlo sampled features back onto the image.

    Specifically, the code uses the PyTorch3D point rasterizer to render
    a z-flat point cloud composed of the xy MC locations and their features.

    Args:
        xys: B x N x 2 2D point locations in PyTorch3D NDC convention
        feats: B x N x dim tensor containing per-point rendered features.
        image_size_hw: Tuple[image_height, image_width] containing
            the size of rasterized image.
        radius: Rasterization point radius.
        topk: The maximum z-buffer size for the PyTorch3D point cloud rasterizer.
        masks: B x N x 1 tensor containing the alpha mask of the
            rendered features.
    """
    # Default to fully opaque samples when no alpha mask is supplied.
    alpha = torch.ones_like(xys[..., :1]) if masks is None else masks

    # Append the alpha channel to the features so it is rasterized jointly
    # with them and can be recovered after rendering.
    feats_with_alpha = torch.cat((feats, alpha), dim=-1)

    # Lift the 2D NDC sample locations to a z=1 flat point cloud.
    flat_z = torch.ones_like(xys[..., :1])
    point_cloud = Pointclouds(
        points=torch.cat([xys, flat_z], dim=-1),
        features=feats_with_alpha,
    )

    rendered, hit_mask, _ = render_point_cloud_pytorch3d(
        PerspectiveCameras(device=feats_with_alpha.device),
        point_cloud,
        render_size=image_size_hw,
        point_radius=radius,
        topk=topk,
    )

    # Split the rasterized alpha channel back off the feature channels and
    # combine it with the rasterizer's own coverage mask.
    rendered, alpha_rendered = rendered.split([rendered.shape[1] - 1, 1], dim=1)
    hit_mask = alpha_rendered * hit_mask

    return rendered, hit_mask
# pyre-unsafe

import gzip
import json
import logging
import time
import warnings
from collections.abc import Iterable
from itertools import cycle

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import colors as mcolors
from pytorch3d.implicitron.tools.vis_utils import get_visdom_connection

logger = logging.getLogger(__name__)


class AverageMeter:
    """Computes and stores the average and current value"""

    def __init__(self):
        # history[epoch] is the list of per-update values (val / n) seen in that epoch.
        self.history = []
        self.reset()

    def reset(self):
        # Zero the running statistics; the per-epoch history is intentionally kept.
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1, epoch=0):
        """
        Record a new observation.

        Args:
            val: The observed value.
            n: The number of samples `val` aggregates.
            epoch: The epoch index under which to store the value in `history`.
        """
        # make sure the history is of the same len as epoch
        while len(self.history) <= epoch:
            self.history.append([])

        # NOTE(review): history stores val / n while the running sum adds
        # val * n; for n != 1 these two aggregations disagree — confirm the
        # intended semantics of `val` (per-sample vs aggregate).
        self.history[epoch].append(val / n)
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def get_epoch_averages(self, epoch=-1):
        """
        Return the mean of the recorded values for one epoch, or, for
        epoch == -1, a list of per-epoch means (NaN for epochs with no data).
        Returns None if nothing has ever been recorded.
        """
        if len(self.history) == 0:  # no stats here
            return None
        elif epoch == -1:
            return [
                (float(np.array(x).mean()) if len(x) > 0 else float("NaN"))
                for x in self.history
            ]
        else:
            return float(np.array(self.history[epoch]).mean())

    def get_all_values(self):
        # All recorded values across epochs as a single flat numpy array.
        all_vals = [np.array(x) for x in self.history]
        all_vals = np.concatenate(all_vals)
        return all_vals

    def get_epoch(self):
        # Number of epochs for which history slots exist.
        return len(self.history)

    @staticmethod
    def from_json_str(json_str):
        # Rebuild a meter from the JSON produced by StatsJSONEncoder.
        self = AverageMeter()
        self.__dict__.update(json.loads(json_str))
        return self


class Stats:
    # TODO: update this with context manager
    """
    stats logging object useful for gathering statistics of training a deep net in pytorch
    Example::

        # init stats structure that logs statistics 'objective' and 'top1e'
        stats = Stats( ('objective','top1e') )
        network = init_net()  # init a pytorch module (=neural network)
        dataloader = init_dataloader()  # init a dataloader
        for epoch in range(10):
            # start of epoch -> call new_epoch
            stats.new_epoch()

            # iterate over batches
            for batch in dataloader:

                output = network(batch)  # run and save into a dict of output variables

                # stats.update() automatically parses the 'objective' and 'top1e' from
                # the "output" dict and stores this into the db
                stats.update(output)
                # prints the metric averages over given epoch
                std_out = stats.get_status_string()
                logger.info(std_out)
            # stores the training plots into '/tmp/epoch_stats.pdf'
            # and plots into a visdom server running at localhost (if running)
            stats.plot_stats(plot_file='/tmp/epoch_stats.pdf')

    """

    def __init__(
        self,
        log_vars,
        epoch=-1,
        visdom_env="main",
        do_plot=True,
        plot_file=None,
        visdom_server="http://localhost",
        visdom_port=8097,
    ):
        """
        Args:
            log_vars: Names of the statistics to track.
            epoch: Initial epoch index (-1 means uninitialized).
            visdom_env: Name of the visdom environment used for plotting.
            do_plot: If True, plot stats on entering/leaving the epoch context.
            plot_file: Optional path of a file to dump the matplotlib charts to.
            visdom_server: Visdom server address.
            visdom_port: Visdom server port.
        """
        self.log_vars = log_vars
        self.visdom_env = visdom_env
        self.visdom_server = visdom_server
        self.visdom_port = visdom_port
        self.plot_file = plot_file
        self.do_plot = do_plot
        self.hard_reset(epoch=epoch)
        # timestamp of the previous update(); used to compute the "sec/it" stat
        self._t_last_update = None

    @staticmethod
    def from_json_str(json_str):
        """Rebuild a Stats object from its StatsJSONEncoder serialization."""
        self = Stats([])
        # load the global state
        self.__dict__.update(json.loads(json_str))
        # recover the AverageMeters
        # (nested meters were serialized as JSON *strings* by StatsJSONEncoder,
        #  hence the second round of parsing here)
        for stat_set in self.stats:
            self.stats[stat_set] = {
                log_var: AverageMeter.from_json_str(log_vals_json_str)
                for log_var, log_vals_json_str in self.stats[stat_set].items()
            }
        return self

    @staticmethod
    def load(flpath, postfix=".jgz"):
        """Load a Stats object from a gzipped-json file."""
        flpath = _get_postfixed_filename(flpath, postfix)
        with gzip.open(flpath, "r") as fin:
            data = json.loads(fin.read().decode("utf-8"))
        return Stats.from_json_str(data)

    def save(self, flpath, postfix=".jgz"):
        """Save this Stats object to a gzipped-json file."""
        flpath = _get_postfixed_filename(flpath, postfix)
        # store into a gzipped-json
        with gzip.open(flpath, "w") as fout:
            fout.write(json.dumps(self, cls=StatsJSONEncoder).encode("utf-8"))

    # some sugar to be used with "with stats:" at the beginning of the epoch
    def __enter__(self):
        if self.do_plot and self.epoch >= 0:
            self.plot_stats(self.visdom_env)
        self.new_epoch()
    def __exit__(self, type, value, traceback):
        """Leave the epoch context; plot stats unless the block raised."""
        # KeyboardInterrupt is not an Exception subclass, so test it separately.
        iserr = type is not None and issubclass(type, Exception)
        iserr = iserr or (type is KeyboardInterrupt)
        if iserr:
            logger.error("error inside 'with' block")
            return
        if self.do_plot:
            self.plot_stats(self.visdom_env)

    def reset(self):  # to be called after each epoch
        """Zero all meters and iteration counters; the epoch index is kept."""
        stat_sets = list(self.stats.keys())
        logger.debug(f"stats: epoch {self.epoch} - reset")
        self.it = {k: -1 for k in stat_sets}
        for stat_set in stat_sets:
            for stat in self.stats[stat_set]:
                self.stats[stat_set][stat].reset()

    def hard_reset(self, epoch=-1):  # to be called during object __init__
        """Drop all recorded statistics and set the epoch index."""
        self.epoch = epoch
        logger.debug(f"stats: epoch {self.epoch} - hard reset")
        self.stats = {}
        # reset
        self.reset()

    def new_epoch(self):
        """Advance the epoch counter and zero the per-epoch meters."""
        logger.debug(f"stats: new epoch {(self.epoch + 1)}")
        self.epoch += 1
        self.reset()  # zero the stats + increase epoch counter

    def gather_value(self, val):
        """Convert a scalar / tensor prediction to a plain python float (sum over elements)."""
        if isinstance(val, (float, int)):
            val = float(val)
        else:
            val = val.data.cpu().numpy()
            val = float(val.sum())
        return val

    def add_log_vars(self, added_log_vars):
        """Extend the set of tracked statistics with new names."""
        for add_log_var in added_log_vars:
            # NOTE(review): membership is tested against self.stats (whose keys
            # are stat-set names like "train"), not self.log_vars — duplicates
            # can accumulate in log_vars; likely intended:
            # `if add_log_var not in self.log_vars`. Confirm before changing.
            if add_log_var not in self.stats:
                logger.debug(f"Adding {add_log_var}")
                self.log_vars.append(add_log_var)

    def update(self, preds, time_start=None, freeze_iter=False, stat_set="train"):
        """
        Record one iteration's worth of statistics.

        Args:
            preds: Dict of predictions; each tracked log_var present in it is logged.
            time_start: If given, enables the synthetic "sec/it" timing stat.
            freeze_iter: If True, do not advance the iteration counter.
            stat_set: Which stat set ("train"/"val"/...) to log into.
        """
        if self.epoch == -1:  # uninitialized
            logger.warning(
                "epoch==-1 means uninitialized stats structure -> new_epoch() called"
            )
            self.new_epoch()

        if stat_set not in self.stats:
            self.stats[stat_set] = {}
            self.it[stat_set] = -1

        if not freeze_iter:
            self.it[stat_set] += 1

        epoch = self.epoch

        for stat in self.log_vars:

            if stat not in self.stats[stat_set]:
                self.stats[stat_set][stat] = AverageMeter()

            if stat == "sec/it":  # compute speed
                if time_start is None:
                    time_per_it = 0.0
                else:
                    # Time since the previous update (or since time_start on
                    # the very first call, when _t_last_update is still None).
                    now = time.time()
                    time_per_it = now - (self._t_last_update or time_start)
                    self._t_last_update = now
                val = time_per_it
            else:
                if stat in preds:
                    try:
                        val = self.gather_value(preds[stat])
                    except KeyError:
                        # NOTE(review): the backslash continuation embeds the
                        # next line's leading whitespace into the message.
                        raise ValueError(
                            "could not extract prediction %s\
                            from the prediction dictionary"
                            % stat
                        ) from None
                else:
                    val = None

            if val is not None:
                self.stats[stat_set][stat].update(val, epoch=epoch, n=1)

    def get_epoch_averages(self, epoch=None):
        """
        Return per-stat-set dicts of epoch averages.

        Args:
            epoch: Epoch index; None means the current epoch, -1 means all
                epochs before the current one (range(self.epoch)).
        """
        stat_sets = list(self.stats.keys())

        if epoch is None:
            epoch = self.epoch
        if epoch == -1:
            epoch = list(range(self.epoch))

        outvals = {}
        for stat_set in stat_sets:
            outvals[stat_set] = {
                "epoch": epoch,
                "it": self.it[stat_set],
                "epoch_max": self.epoch,
            }

            for stat in self.stats[stat_set].keys():
                if self.stats[stat_set][stat].count == 0:
                    continue
                if isinstance(epoch, Iterable):
                    avgs = self.stats[stat_set][stat].get_epoch_averages()
                    avgs = [avgs[e] for e in epoch]
                else:
                    avgs = self.stats[stat_set][stat].get_epoch_averages(epoch=epoch)
                outvals[stat_set][stat] = avgs

        return outvals

    def print(
        self,
        max_it=None,
        stat_set="train",
        vars_print=None,  # NOTE(review): unused parameter, kept for API compat
        get_str=False,
        skip_nan=False,
        stat_format=lambda s: s.replace("loss_", "").replace("prev_stage_", "ps_"),
    ):
        """
        stats.print() is deprecated. Please use get_status_string() instead.
        example:
            std_out = stats.get_status_string()
            logger.info(std_out)
        """

        epoch = self.epoch
        stats = self.stats

        str_out = ""

        it = self.it[stat_set]
        stat_str = ""
        stats_print = sorted(stats[stat_set].keys())
        for stat in stats_print:
            if stats[stat_set][stat].count == 0:
                continue
            if skip_nan and not np.isfinite(stats[stat_set][stat].avg):
                continue
            stat_str += " {0:.12}: {1:1.3f} |".format(
                stat_format(stat), stats[stat_set][stat].avg
            )

        head_str = "[%s] | epoch %3d | it %5d" % (stat_set, epoch, it)
        if max_it:
            head_str += "/ %d" % max_it

        str_out = "%s | %s" % (head_str, stat_str)

        if get_str:
            return str_out
        else:
            # NOTE(review): the message below has a typo ("get receive") but is
            # a runtime string and is therefore left untouched here.
            warnings.warn(
                "get_str=False is deprecated."
                "Please enable this flag to get receive the output string.",
                DeprecationWarning,
            )
            print(str_out)

    def get_status_string(
        self,
        max_it=None,
        stat_set="train",
        vars_print=None,
        skip_nan=False,
        stat_format=lambda s: s.replace("loss_", "").replace("prev_stage_", "ps_"),
    ):
        """Return a one-line human-readable summary of the current epoch averages."""
        return self.print(
            max_it=max_it,
            stat_set=stat_set,
            vars_print=vars_print,
            get_str=True,
            skip_nan=skip_nan,
            stat_format=stat_format,
        )

    def plot_stats(
        self, visdom_env=None, plot_file=None, visdom_server=None, visdom_port=None
    ):
        """
        Plot all tracked statistics to visdom (if reachable) and optionally
        dump the same charts into `plot_file` via matplotlib.
        """
        # use the cached visdom env if none supplied
        if visdom_env is None:
            visdom_env = self.visdom_env
        if visdom_server is None:
            visdom_server = self.visdom_server
        if visdom_port is None:
            visdom_port = self.visdom_port
        if plot_file is None:
            plot_file = self.plot_file

        stat_sets = list(self.stats.keys())

        logger.debug(
            f"printing charts to visdom env '{visdom_env}' ({visdom_server}:{visdom_port})"
        )

        novisdom = False

        viz = get_visdom_connection(server=visdom_server, port=visdom_port)
        if viz is None or not viz.check_connection():
            logger.info("no visdom server! -> skipping visdom plots")
            novisdom = True

        lines = []

        # plot metrics
        if not novisdom:
            viz.close(env=visdom_env, win=None)

        for stat in self.log_vars:
            vals = []
            stat_sets_now = []
            for stat_set in stat_sets:
                # NOTE(review): raises KeyError if `stat` was never logged into
                # this stat_set (log_vars added after the set was created) —
                # confirm callers guarantee consistency or guard with .get().
                val = self.stats[stat_set][stat].get_epoch_averages()
                if val is None:
                    continue
                else:
                    val = np.array(val).reshape(-1)
                    stat_sets_now.append(stat_set)
                    vals.append(val)

            if len(vals) == 0:
                continue

            lines.append((stat_sets_now, stat, vals))

        if not novisdom:
            for tmodes, stat, vals in lines:
                title = "%s" % stat
                opts = {"title": title, "legend": list(tmodes)}
                for i, (tmode, val) in enumerate(zip(tmodes, vals)):
                    # first trace creates the window, later ones append to it
                    update = "append" if i > 0 else None
                    valid = np.where(np.isfinite(val))[0]
                    if len(valid) == 0:
                        continue
                    x = np.arange(len(val))
                    viz.line(
                        Y=val[valid],
                        X=x[valid],
                        env=visdom_env,
                        opts=opts,
                        win=f"stat_plot_{title}",
                        name=tmode,
                        update=update,
                    )

        if plot_file:
            logger.info(f"plotting stats to {plot_file}")
            ncol = 3
            nrow = int(np.ceil(float(len(lines)) / ncol))
            matplotlib.rcParams.update({"font.size": 5})
            color = cycle(plt.cm.tab10(np.linspace(0, 1, 10)))
            fig = plt.figure(1)
            plt.clf()
            for idx, (tmodes, stat, vals) in enumerate(lines):
                c = next(color)
                plt.subplot(nrow, ncol, idx + 1)
                plt.gca()
                for vali, vals_ in enumerate(vals):
                    # darken the line colour for each additional stat set
                    c_ = c * (1.0 - float(vali) * 0.3)
                    valid = np.where(np.isfinite(vals_))[0]
                    if len(valid) == 0:
                        continue
                    x = np.arange(len(vals_))
                    plt.plot(x[valid], vals_[valid], c=c_, linewidth=1)
                plt.ylabel(stat)
                plt.xlabel("epoch")
                plt.gca().yaxis.label.set_color(c[0:3] * 0.75)
                plt.legend(tmodes)
                gcolor = np.array(mcolors.to_rgba("lightgray"))
                grid_params = {"visible": True, "color": gcolor}
                plt.grid(**grid_params, which="major", linestyle="-", linewidth=0.4)
                plt.grid(**grid_params, which="minor", linestyle="--", linewidth=0.2)
                plt.minorticks_on()

            plt.tight_layout()
            plt.show()
            try:
                fig.savefig(plot_file)
            except PermissionError:
                warnings.warn("Cant dump stats due to insufficient permissions!")

    def synchronize_logged_vars(self, log_vars, default_val=float("NaN")):
        """
        Make the recorded stats consistent with `log_vars`: drop stats that are
        not in `log_vars` and pad missing/incomplete ones with `default_val`
        so that every meter has exactly epoch+1 history entries.
        """
        stat_sets = list(self.stats.keys())

        # remove the additional log_vars
        for stat_set in stat_sets:
            for stat in self.stats[stat_set].keys():
                if stat not in log_vars:
                    logger.warning(f"additional stat {stat_set}:{stat} -> removing")

            self.stats[stat_set] = {
                stat: v for stat, v in self.stats[stat_set].items() if stat in log_vars
            }

        self.log_vars = log_vars  # !!!

        for stat_set in stat_sets:
            for stat in log_vars:
                if stat not in self.stats[stat_set]:
                    logger.info(
                        "missing stat %s:%s -> filling with default values (%1.2f)"
                        % (stat_set, stat, default_val)
                    )
                elif len(self.stats[stat_set][stat].history) != self.epoch + 1:
                    h = self.stats[stat_set][stat].history
                    if len(h) == 0:  # just never updated stat ... skip
                        continue
                    else:
                        logger.info(
                            "incomplete stat %s:%s -> reseting with default values (%1.2f)"
                            % (stat_set, stat, default_val)
                        )
                else:
                    continue

                # replace the meter and back-fill every epoch with default_val
                self.stats[stat_set][stat] = AverageMeter()
                self.stats[stat_set][stat].reset()

                lastep = self.epoch + 1
                for ep in range(lastep):
                    self.stats[stat_set][stat].update(default_val, n=1, epoch=ep)
                epoch_generated = self.stats[stat_set][stat].get_epoch()
                assert (
                    epoch_generated == self.epoch + 1
                ), "bad epoch of synchronized log_var! %d vs %d" % (
                    self.epoch + 1,
                    epoch_generated,
                )


class StatsJSONEncoder(json.JSONEncoder):
    # Serializes AverageMeter/Stats by JSON-encoding their __dict__ into a
    # *string* (note: nested objects therefore end up doubly encoded, which is
    # what Stats.from_json_str / AverageMeter.from_json_str undo).
    def default(self, o):
        if isinstance(o, (AverageMeter, Stats)):
            enc = self.encode(o.__dict__)
            return enc
        else:
            raise TypeError(
                f"Object of type {o.__class__.__name__} " f"is not JSON serializable"
            )


def _get_postfixed_filename(fl, postfix):
    # Append `postfix` to `fl` unless it is already there.
    return fl if fl.endswith(postfix) else fl + postfix
+ """ + try: + t = t.cpu() + except AttributeError: + pass + return t + + +def dict_to_cuda(batch: Dict[Any, Any]) -> Dict[Any, Any]: + """ + Move all values in a dictionary to cuda if supported. + + Args: + batch: Input dict. + + Returns: + batch_cuda: `batch` moved to a cuda device, if supported. + """ + return {k: try_to_cuda(v) for k, v in batch.items()} + + +def dict_to_cpu(batch): + """ + Move all values in a dictionary to cpu if supported. + + Args: + batch: Input dict. + + Returns: + batch_cpu: `batch` moved to a cpu device, if supported. + """ + return {k: try_to_cpu(v) for k, v in batch.items()} + + +def dataclass_to_cuda_(obj): + """ + Move all contents of a dataclass to cuda inplace if supported. + + Args: + batch: Input dataclass. + + Returns: + batch_cuda: `batch` moved to a cuda device, if supported. + """ + for f in dataclasses.fields(obj): + setattr(obj, f.name, try_to_cuda(getattr(obj, f.name))) + return obj + + +def dataclass_to_cpu_(obj): + """ + Move all contents of a dataclass to cpu inplace if supported. + + Args: + batch: Input dataclass. + + Returns: + batch_cuda: `batch` moved to a cpu device, if supported. + """ + for f in dataclasses.fields(obj): + setattr(obj, f.name, try_to_cpu(getattr(obj, f.name))) + return obj + + +# TODO: test it +def cat_dataclass(batch, tensor_collator: Callable): + """ + Concatenate all fields of a list of dataclasses `batch` to a single + dataclass object using `tensor_collator`. + + Args: + batch: Input list of dataclasses. + + Returns: + concatenated_batch: All elements of `batch` concatenated to a single + dataclass object. + tensor_collator: The function used to concatenate tensor fields. 
+ """ + + elem = batch[0] + collated = {} + + for f in dataclasses.fields(elem): + elem_f = getattr(elem, f.name) + if elem_f is None: + collated[f.name] = None + elif torch.is_tensor(elem_f): + collated[f.name] = tensor_collator([getattr(e, f.name) for e in batch]) + elif dataclasses.is_dataclass(elem_f): + collated[f.name] = cat_dataclass( + [getattr(e, f.name) for e in batch], tensor_collator + ) + elif isinstance(elem_f, collections.abc.Mapping): + collated[f.name] = { + k: ( + tensor_collator([getattr(e, f.name)[k] for e in batch]) + if elem_f[k] is not None + else None + ) + for k in elem_f + } + else: + raise ValueError("Unsupported field type for concatenation") + + return type(elem)(**collated) + + +def recursive_visitor(it: Iterable[Any]) -> Iterator[Any]: + for x in it: + if isinstance(x, Iterable) and not isinstance(x, (str, bytes)): + yield from recursive_visitor(x) + else: + yield x + + +def get_inlier_indicators( + tensor: torch.Tensor, dim: int, outlier_rate: float +) -> torch.Tensor: + remove_elements = int(min(outlier_rate, 1.0) * tensor.shape[dim] / 2) + hi = torch.topk(tensor, remove_elements, dim=dim).indices.tolist() + lo = torch.topk(-tensor, remove_elements, dim=dim).indices.tolist() + remove_indices = set(recursive_visitor([hi, lo])) + keep_indices = tensor.new_ones(tensor.shape[dim : dim + 1], dtype=torch.bool) + keep_indices[list(remove_indices)] = False + return keep_indices + + +class Timer: + """ + A simple class for timing execution. 
_DEFAULT_FFMPEG = os.environ.get("FFMPEG", "ffmpeg")

matplotlib.use("Agg")


class VideoWriter:
    """
    A class for exporting videos.

    Frames are staged as PNG files inside `cache_dir` and encoded into a
    single video file by `get_video()` using ffmpeg.
    """

    def __init__(
        self,
        cache_dir: Optional[str] = None,
        ffmpeg_bin: str = _DEFAULT_FFMPEG,
        out_path: str = "/tmp/video.mp4",
        fps: int = 20,
        output_format: str = "visdom",
        rmdir_allowed: bool = False,
        **kwargs,
    ) -> None:
        """
        Args:
            cache_dir: A directory for storing the video frames. If `None`,
                a temporary directory will be used.
            ffmpeg_bin: The path to an `ffmpeg` executable.
            out_path: The path to the output video.
            fps: The speed of the generated video in frames-per-second.
            output_format: Format of the output video. Currently only `"visdom"`
                is supported.
            rmdir_allowed: If `True` delete and create `cache_dir` in case
                it is not empty.
        """
        self.rmdir_allowed = rmdir_allowed
        self.output_format = output_format
        self.fps = fps
        self.out_path = out_path
        self.cache_dir = cache_dir
        self.ffmpeg_bin = ffmpeg_bin
        self.frames = []
        # printf-style pattern for frame files; also handed to ffmpeg via `-i`
        self.regexp = "frame_%08d.png"
        self.frame_num = 0

        if self.cache_dir is not None:
            self.tmp_dir = None
            if os.path.isdir(self.cache_dir):
                if rmdir_allowed:
                    shutil.rmtree(self.cache_dir)
                else:
                    warnings.warn(
                        f"Warning: cache directory not empty ({self.cache_dir})."
                    )
            os.makedirs(self.cache_dir, exist_ok=True)
        else:
            self.tmp_dir = tempfile.TemporaryDirectory()
            self.cache_dir = self.tmp_dir.name

    def write_frame(
        self,
        frame: Union[matplotlib.figure.Figure, np.ndarray, Image.Image, str],
        resize: Optional[Union[float, Tuple[int, int]]] = None,
    ) -> None:
        """
        Write a frame to the video.

        Args:
            frame: An object containing the frame image: a matplotlib figure,
                a numpy array, a PIL image, or a path to an image file.
            resize: Either a float defining the image rescaling factor
                or a 2-tuple defining the size of the output image.
        """
        outfile = os.path.join(self.cache_dir, self.regexp % self.frame_num)

        if isinstance(frame, matplotlib.figure.Figure):
            # Save the figure that was actually passed in; plt.savefig would
            # silently save whatever figure happens to be "current" instead.
            frame.savefig(outfile)
            im = Image.open(outfile)
        elif isinstance(frame, np.ndarray):
            if frame.dtype in (np.float64, np.float32, float):
                # assumes a C x H x W float array in [0, 1] — TODO confirm
                frame = (np.transpose(frame, (1, 2, 0)) * 255.0).astype(np.uint8)
            im = Image.fromarray(frame)
        elif isinstance(frame, Image.Image):
            im = frame
        elif isinstance(frame, str):
            im = Image.open(frame).convert("RGB")
        else:
            raise ValueError("Cant convert type %s" % str(type(frame)))

        if resize is not None:
            if isinstance(resize, float):
                resize = [int(resize * s) for s in im.size]
        else:
            resize = im.size
        # make sure size is divisible by 2 (required by h264/yuv420p)
        resize = tuple([resize[i] + resize[i] % 2 for i in (0, 1)])
        # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the equivalent
        # filter (Image.Resampling.LANCZOS on Pillow >= 9.1, the top-level
        # Image.LANCZOS alias before that).
        im = im.resize(resize, getattr(Image, "Resampling", Image).LANCZOS)
        im.save(outfile)

        self.frames.append(outfile)
        self.frame_num += 1

    def get_video(self, quiet: bool = True) -> str:
        """
        Generate the video from the written frames.

        Args:
            quiet: If `True`, suppresses logging messages.

        Returns:
            video_path: The path to the generated video if any frames were added.
                Otherwise returns an empty string.

        Raises:
            ValueError: If the ffmpeg binary cannot be found or the output
                format is not supported.
        """
        if self.frame_num == 0:
            return ""

        regexp = os.path.join(self.cache_dir, self.regexp)

        if shutil.which(self.ffmpeg_bin) is None:
            raise ValueError(
                f"Cannot find ffmpeg as `{self.ffmpeg_bin}`. "
                + "Please set FFMPEG in the environment or ffmpeg_bin on this class."
            )

        if self.output_format == "visdom":  # works for ppt too
            args = [
                self.ffmpeg_bin,
                "-r",
                str(self.fps),
                "-i",
                regexp,
                "-vcodec",
                "h264",
                "-f",
                "mp4",
                "-y",
                "-crf",
                "18",
                "-b",
                "2000k",
                "-pix_fmt",
                "yuv420p",
                self.out_path,
            ]
            if quiet:
                subprocess.check_call(
                    args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL
                )
            else:
                subprocess.check_call(args)
        else:
            raise ValueError("no such output type %s" % str(self.output_format))

        return self.out_path

    def __del__(self) -> None:
        # Clean up the temporary frame directory if we created one.
        if self.tmp_dir is not None:
            self.tmp_dir.cleanup()
+ """ + if len(visdom_env) == 0: + visdom_env = exp_dir.split("/")[-1] + else: + visdom_env = visdom_env + return visdom_env + + +# TODO: a proper singleton +_viz_singleton = None + + +def get_visdom_connection( + server: str = "http://localhost", + port: int = 8097, +) -> Optional["Visdom"]: + """ + Obtain a connection to a visdom server if visdom is installed. + + Args: + server: Server address. + port: Server port. + + Returns: + connection: The connection object. + """ + try: + from visdom import Visdom + except ImportError: + logger.debug("Cannot load visdom") + return None + + if server == "None": + return None + + global _viz_singleton + if _viz_singleton is None: + _viz_singleton = Visdom(server=server, port=port) + return _viz_singleton + + +def visualize_basics( + viz: "Visdom", + preds: Dict[str, Any], + visdom_env_imgs: str, + title: str = "", + visualize_preds_keys: Tuple[str, ...] = ( + "image_rgb", + "images_render", + "fg_probability", + "masks_render", + "depths_render", + "depth_map", + ), + store_history: bool = False, +) -> None: + """ + Visualize basic outputs of a `GenericModel` to visdom. + + Args: + viz: The visdom object. + preds: A dictionary containing `GenericModel` outputs. + visdom_env_imgs: Target visdom environment name. + title: The title of produced visdom window. + visualize_preds_keys: The list of keys of `preds` for visualization. + store_history: Store the history buffer in visdom windows. 
+ """ + imout = {} + for k in visualize_preds_keys: + if k not in preds or preds[k] is None: + logger.info(f"cant show {k}") + continue + v = preds[k].cpu().detach().clone() + if k.startswith("depth"): + # divide by 95th percentile + normfac = ( + v.view(v.shape[0], -1) + .topk(k=int(0.05 * (v.numel() // v.shape[0])), dim=-1) + .values[:, -1] + ) + v = v / normfac[:, None, None, None].clamp(1e-4) + if v.shape[1] == 1: + v = v.repeat(1, 3, 1, 1) + v = torch.nn.functional.interpolate( + v, + scale_factor=( + 600.0 + if ( + "_eval" in visdom_env_imgs + and k in ("images_render", "depths_render") + ) + else 200.0 + ) + / v.shape[2], + mode="bilinear", + ) + imout[k] = v + + # TODO: handle errors on the outside + try: + imout = {"all": torch.cat(list(imout.values()), dim=2)} + except RuntimeError as e: + print("cant cat!", e.args) + + for k, v in imout.items(): + viz.images( + v.clamp(0.0, 1.0), + win=k, + env=visdom_env_imgs, + opts={"title": title + "_" + k, "store_history": store_history}, + ) + + +def make_depth_image( + depths: torch.Tensor, + masks: torch.Tensor, + max_quantile: float = 0.98, + min_quantile: float = 0.02, + min_out_depth: float = 0.1, + max_out_depth: float = 0.9, +) -> torch.Tensor: + """ + Convert a batch of depth maps to a grayscale image. + + Args: + depths: A tensor of shape `(B, 1, H, W)` containing a batch of depth maps. + masks: A tensor of shape `(B, 1, H, W)` containing a batch of foreground masks. + max_quantile: The quantile of the input depth values which will + be mapped to `max_out_depth`. + min_quantile: The quantile of the input depth values which will + be mapped to `min_out_depth`. + min_out_depth: The minimal value in each depth map will be assigned this color. + max_out_depth: The maximal value in each depth map will be assigned this color. + + Returns: + depth_image: A tensor of shape `(B, 1, H, W)` a batch of grayscale + depth images. 
+ """ + normfacs = [] + for d, m in zip(depths, masks): + ok = (d.view(-1) > 1e-6) * (m.view(-1) > 0.5) + if ok.sum() <= 1: + logger.info("empty depth!") + normfacs.append(torch.zeros(2).type_as(depths)) + continue + dok = d.view(-1)[ok].view(-1) + _maxk = max(int(round((1 - max_quantile) * (dok.numel()))), 1) + _mink = max(int(round(min_quantile * (dok.numel()))), 1) + normfac_max = dok.topk(k=_maxk, dim=-1).values[-1] + normfac_min = dok.topk(k=_mink, dim=-1, largest=False).values[-1] + normfacs.append(torch.stack([normfac_min, normfac_max])) + normfacs = torch.stack(normfacs) + _min, _max = (normfacs[:, 0].view(-1, 1, 1, 1), normfacs[:, 1].view(-1, 1, 1, 1)) + depths = (depths - _min) / (_max - _min).clamp(1e-4) + depths = ( + (depths * (max_out_depth - min_out_depth) + min_out_depth) * masks.float() + ).clamp(0.0, 1.0) + return depths diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__pycache__/__init__.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d1591a5ccde4065d5c1a2d1e2826c6a91abaae7 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__pycache__/__init__.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__pycache__/mtl_io.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__pycache__/mtl_io.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a68d2275dad3d47493882296ad4257161c99c7eb Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__pycache__/mtl_io.cpython-310.pyc differ diff --git 
a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__pycache__/obj_io.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__pycache__/obj_io.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53dc3097ef3288ccd2655d6c43c70a2bd5fa9ab6 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__pycache__/obj_io.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__pycache__/off_io.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__pycache__/off_io.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e395be755f6ef7bccc59d94772c109516df34900 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__pycache__/off_io.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__pycache__/pluggable.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__pycache__/pluggable.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bc7caac10915e81e668bd9fad220ea9874b33573 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__pycache__/pluggable.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__pycache__/pluggable_formats.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__pycache__/pluggable_formats.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..469c52e5e84abad5c52b7f25e283ace77f450e32 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__pycache__/pluggable_formats.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__pycache__/ply_io.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__pycache__/ply_io.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a54eb9ddc1b0e647006068565be19e670259496 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__pycache__/ply_io.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__pycache__/utils.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3d4e1de0b8b5b87c0a655c3a97ec7fdb97f11ee Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/__pycache__/utils.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/experimental_gltf_io.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/experimental_gltf_io.py new file mode 100644 index 0000000000000000000000000000000000000000..937147cebfebdd7ec6df570bfa58ecef216d451c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/io/experimental_gltf_io.py @@ -0,0 +1,864 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
+# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + + +""" +This module implements loading meshes from glTF 2 assets stored in a +GLB container file or a glTF JSON file with embedded binary data. +It is experimental. + +The module provides a MeshFormatInterpreter called +MeshGlbFormat which must be used explicitly. +e.g. + +.. code-block:: python + + from pytorch3d.io import IO + from pytorch3d.io.experimental_gltf_io import MeshGlbFormat + + io = IO() + io.register_meshes_format(MeshGlbFormat()) + io.load_mesh(...) + +This implementation is quite restricted in what it supports. + + - It does not try to validate the input against the standard. + - It loads the default scene only. + - Only triangulated geometry is supported. + - The geometry of all meshes of the entire scene is aggregated into a single mesh. + Use `load_meshes()` instead to get un-aggregated (but transformed) ones. + - All material properties are ignored except for either vertex color, baseColorTexture + or baseColorFactor. If available, one of these (in this order) is exclusively + used which does not match the semantics of the standard. 
+""" + +import json +import struct +import warnings +from base64 import b64decode +from collections import defaultdict, deque +from enum import IntEnum +from io import BytesIO +from typing import Any, BinaryIO, cast, Dict, List, Optional, Tuple, Union + +import numpy as np +import torch +from iopath.common.file_io import PathManager +from PIL import Image +from pytorch3d.io.utils import _open_file, PathOrStr +from pytorch3d.renderer.mesh import TexturesBase, TexturesUV, TexturesVertex +from pytorch3d.structures import join_meshes_as_scene, Meshes +from pytorch3d.transforms import quaternion_to_matrix, Transform3d + +from .pluggable_formats import endswith, MeshFormatInterpreter + + +_GLTF_MAGIC = 0x46546C67 +_JSON_CHUNK_TYPE = 0x4E4F534A +_BINARY_CHUNK_TYPE = 0x004E4942 +_DATA_URI_PREFIX = "data:application/octet-stream;base64," + + +class _PrimitiveMode(IntEnum): + POINTS = 0 + LINES = 1 + LINE_LOOP = 2 + LINE_STRIP = 3 + TRIANGLES = 4 + TRIANGLE_STRIP = 5 + TRIANGLE_FAN = 6 + + +class _ComponentType(IntEnum): + BYTE = 5120 + UNSIGNED_BYTE = 5121 + SHORT = 5122 + UNSIGNED_SHORT = 5123 + UNSIGNED_INT = 5125 + FLOAT = 5126 + + +_ITEM_TYPES: Dict[int, Any] = { + 5120: np.int8, + 5121: np.uint8, + 5122: np.int16, + 5123: np.uint16, + 5125: np.uint32, + 5126: np.float32, +} + + +_ElementShape = Union[Tuple[int], Tuple[int, int]] +_ELEMENT_SHAPES: Dict[str, _ElementShape] = { + "SCALAR": (1,), + "VEC2": (2,), + "VEC3": (3,), + "VEC4": (4,), + "MAT2": (2, 2), + "MAT3": (3, 3), + "MAT4": (4, 4), +} + +_DTYPE_BYTES: Dict[Any, int] = { + np.int8: 1, + np.uint8: 1, + np.int16: 2, + np.uint16: 2, + np.uint32: 4, + np.float32: 4, +} + + +class _TargetType(IntEnum): + ARRAY_BUFFER = 34962 + ELEMENT_ARRAY_BUFFER = 34963 + + +class OurEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, np.int64): + return str(obj) + return super(OurEncoder, self).default(obj) + + +def _read_header(stream: BinaryIO) -> Optional[Tuple[int, int]]: + header = stream.read(12) + 
magic, version, length = struct.unpack(" Optional[Tuple[Dict[str, Any], np.ndarray]]: + """ + Get the json header and the binary data from a + GLB file. + """ + json_data = None + binary_data = None + + while stream.tell() < length: + chunk_header = stream.read(8) + chunk_length, chunk_type = struct.unpack(" Transform3d: + """ + Convert a transform from the json data in to a PyTorch3D + Transform3d format. + """ + array = node.get("matrix") + if array is not None: # Stored in column-major order + M = np.array(array, dtype=np.float32).reshape(4, 4, order="F") + return Transform3d(matrix=torch.from_numpy(M)) + + out = Transform3d() + + # Given some of (scale/rotation/translation), we do them in that order to + # get points in to the world space. + # See https://github.com/KhronosGroup/glTF/issues/743 . + + array = node.get("scale", None) + if array is not None: + scale_vector = torch.FloatTensor(array) + out = out.scale(scale_vector[None]) + + # Rotation quaternion (x, y, z, w) where w is the scalar + array = node.get("rotation", None) + if array is not None: + x, y, z, w = array + # We negate w. This is equivalent to inverting the rotation. + # This is needed as quaternion_to_matrix makes a matrix which + # operates on column vectors, whereas Transform3d wants a + # matrix which operates on row vectors. 
+ rotation_quaternion = torch.FloatTensor([-w, x, y, z]) + rotation_matrix = quaternion_to_matrix(rotation_quaternion) + out = out.rotate(R=rotation_matrix) + + array = node.get("translation", None) + if array is not None: + translation_vector = torch.FloatTensor(array) + out = out.translate(x=translation_vector[None]) + + return out + + +class _GLTFLoader: + def __init__(self, stream: BinaryIO) -> None: + self._json_data = None + # Map from buffer index to (decoded) binary data + self._binary_data = {} + + version_and_length = _read_header(stream) + if version_and_length is None: # GLTF + stream.seek(0) + json_data = json.load(stream) + else: # GLB + version, length = version_and_length + if version != 2: + warnings.warn("Unsupported version") + return + json_and_binary_data = _read_chunks(stream, length) + if json_and_binary_data is None: + raise ValueError("Data not found") + json_data, binary_data = json_and_binary_data + self._binary_data[0] = binary_data + + self._json_data = json_data + self._accessors = json_data.get("accessors", []) + self._buffer_views = json_data.get("bufferViews", []) + self._buffers = json_data.get("buffers", []) + self._texture_map_images = {} + + def _access_image(self, image_index: int) -> np.ndarray: + """ + Get the data for an image from the file. This is only called + by _get_texture_map_image which caches it. 
+ """ + + image_json = self._json_data["images"][image_index] + buffer_view = self._buffer_views[image_json["bufferView"]] + if "byteStride" in buffer_view: + raise NotImplementedError("strided buffer views") + + length = buffer_view["byteLength"] + offset = buffer_view.get("byteOffset", 0) + + binary_data = self.get_binary_data(buffer_view["buffer"]) + bytesio = BytesIO(binary_data[offset : offset + length].tobytes()) + with Image.open(bytesio) as f: + array = np.array(f) + if array.dtype == np.uint8: + return array.astype(np.float32) / 255.0 + else: + return array + + def _get_texture_map_image(self, image_index: int) -> torch.Tensor: + """ + Return a texture map image as a torch tensor. + Calling this function repeatedly with the same arguments returns + the very same tensor, this allows a memory optimization to happen + later in TexturesUV.join_scene. + Any alpha channel is ignored. + """ + im = self._texture_map_images.get(image_index) + if im is not None: + return im + + im = torch.from_numpy(self._access_image(image_index))[:, :, :3] + self._texture_map_images[image_index] = im + return im + + def _access_data(self, accessor_index: int) -> np.ndarray: + """ + Get the raw data from an accessor as a numpy array. 
+ """ + accessor = self._accessors[accessor_index] + + buffer_view_index = accessor.get("bufferView") + # Undefined buffer view (all zeros) are not (yet) supported + if buffer_view_index is None: + raise NotImplementedError("Undefined buffer view") + + accessor_byte_offset = accessor.get("byteOffset", 0) + component_type = accessor["componentType"] + element_count = accessor["count"] + element_type = accessor["type"] + + # Sparse accessors are not (yet) supported + if accessor.get("sparse") is not None: + raise NotImplementedError("Sparse Accessors") + + buffer_view = self._buffer_views[buffer_view_index] + buffer_index = buffer_view["buffer"] + buffer_byte_length = buffer_view["byteLength"] + element_byte_offset = buffer_view.get("byteOffset", 0) + element_byte_stride = buffer_view.get("byteStride", 0) + if element_byte_stride != 0 and element_byte_stride < 4: + raise ValueError("Stride is too small.") + if element_byte_stride > 252: + raise ValueError("Stride is too big.") + + element_shape = _ELEMENT_SHAPES[element_type] + item_type = _ITEM_TYPES[component_type] + item_dtype = np.dtype(item_type) + item_count = np.prod(element_shape) + item_size = item_dtype.itemsize + size = element_count * item_count * item_size + if size > buffer_byte_length: + raise ValueError("Buffer did not have enough data for the accessor") + + buffer_ = self._buffers[buffer_index] + binary_data = self.get_binary_data(buffer_index) + if len(binary_data) < buffer_["byteLength"]: + raise ValueError("Not enough binary data for the buffer") + + if element_byte_stride == 0: + element_byte_stride = item_size * item_count + # The same buffer can store interleaved elements + if element_byte_stride < item_size * item_count: + raise ValueError("Items should not overlap") + + dtype = np.dtype( + { + "names": ["element"], + "formats": [str(element_shape) + item_dtype.str], + "offsets": [0], + "itemsize": element_byte_stride, + } + ) + + byte_offset = accessor_byte_offset + element_byte_offset + if 
byte_offset % item_size != 0: + raise ValueError("Misaligned data") + byte_length = element_count * element_byte_stride + buffer_view = binary_data[byte_offset : byte_offset + byte_length].view(dtype)[ + "element" + ] + + # Convert matrix data from column-major (OpenGL) to row-major order + if element_type in ("MAT2", "MAT3", "MAT4"): + buffer_view = np.transpose(buffer_view, (0, 2, 1)) + + return buffer_view + + def _get_primitive_attribute( + self, primitive_attributes: Dict[str, Any], key: str, dtype + ) -> Optional[np.ndarray]: + accessor_index = primitive_attributes.get(key) + if accessor_index is None: + return None + primitive_attribute = self._access_data(accessor_index) + if key == "JOINTS_0": + pass + elif dtype == np.uint8: + primitive_attribute /= 255.0 + elif dtype == np.uint16: + primitive_attribute /= 65535.0 + else: + if dtype != np.float32: + raise ValueError("Unexpected data type") + primitive_attribute = primitive_attribute.astype(dtype) + return primitive_attribute + + def get_binary_data(self, buffer_index: int): + """ + Get the binary data from a buffer as a 1D numpy array of bytes. + This is implemented for explicit uri data buffers or the main GLB data + segment. + """ + buffer_ = self._buffers[buffer_index] + binary_data = self._binary_data.get(buffer_index) + if binary_data is None: # Lazily decode binary data + uri = buffer_.get("uri") + if not uri.startswith(_DATA_URI_PREFIX): + raise NotImplementedError("Unexpected URI type") + binary_data = b64decode(uri[len(_DATA_URI_PREFIX) :]) + binary_data = np.frombuffer(binary_data, dtype=np.uint8) + self._binary_data[buffer_index] = binary_data + return binary_data + + def get_texture_for_mesh( + self, primitive: Dict[str, Any], indices: torch.Tensor + ) -> Optional[TexturesBase]: + """ + Get the texture object representing the given mesh primitive. + + Args: + primitive: the mesh primitive being loaded. 
+ indices: the face indices of the mesh + """ + attributes = primitive["attributes"] + vertex_colors = self._get_primitive_attribute(attributes, "COLOR_0", np.float32) + if vertex_colors is not None: + return TexturesVertex([torch.from_numpy(vertex_colors)]) + + vertex_texcoords_0 = self._get_primitive_attribute( + attributes, "TEXCOORD_0", np.float32 + ) + if vertex_texcoords_0 is not None: + verts_uvs = torch.from_numpy(vertex_texcoords_0) + verts_uvs[:, 1] = 1 - verts_uvs[:, -1] + faces_uvs = indices + material_index = primitive.get("material", 0) + material = self._json_data["materials"][material_index] + material_roughness = material["pbrMetallicRoughness"] + if "baseColorTexture" in material_roughness: + texture_index = material_roughness["baseColorTexture"]["index"] + texture_json = self._json_data["textures"][texture_index] + # Todo - include baseColorFactor when also given + # Todo - look at the sampler + image_index = texture_json["source"] + map = self._get_texture_map_image(image_index) + elif "baseColorFactor" in material_roughness: + # Constant color? + map = torch.FloatTensor(material_roughness["baseColorFactor"])[ + None, None, :3 + ] + texture = TexturesUV( + # pyre-fixme[61]: `map` may not be initialized here. + maps=[map], # alpha channel ignored + faces_uvs=[faces_uvs], + verts_uvs=[verts_uvs], + ) + return texture + + return None + + def load(self, include_textures: bool) -> List[Tuple[Optional[str], Meshes]]: + """ + Attempt to load all the meshes making up the default scene from + the file as a list of possibly-named Meshes objects. + + Args: + include_textures: Whether to try loading textures. + + Returns: + Meshes object containing one mesh. + """ + if self._json_data is None: + raise ValueError("Initialization problem") + + # This loads the default scene from the file. + # This is usually the only one. + # It is possible to have multiple scenes, in which case + # you could choose another here instead of taking the default. 
+ scene_index = self._json_data.get("scene") + + if scene_index is None: + raise ValueError("Default scene is not specified.") + + scene = self._json_data["scenes"][scene_index] + nodes = self._json_data.get("nodes", []) + meshes = self._json_data.get("meshes", []) + root_node_indices = scene["nodes"] + + mesh_transform = Transform3d() + names_meshes_list: List[Tuple[Optional[str], Meshes]] = [] + + # Keep track and apply the transform of the scene node to mesh vertices + Q = deque([(Transform3d(), node_index) for node_index in root_node_indices]) + + while Q: + parent_transform, current_node_index = Q.popleft() + + current_node = nodes[current_node_index] + + transform = _make_node_transform(current_node) + current_transform = transform.compose(parent_transform) + + if "mesh" in current_node: + mesh_index = current_node["mesh"] + mesh = meshes[mesh_index] + mesh_name = mesh.get("name", None) + mesh_transform = current_transform + + for primitive in mesh["primitives"]: + attributes = primitive["attributes"] + accessor_index = attributes["POSITION"] + positions = torch.from_numpy( + self._access_data(accessor_index).copy() + ) + positions = mesh_transform.transform_points(positions) + + mode = primitive.get("mode", _PrimitiveMode.TRIANGLES) + if mode != _PrimitiveMode.TRIANGLES: + raise NotImplementedError("Non triangular meshes") + + if "indices" in primitive: + accessor_index = primitive["indices"] + indices = self._access_data(accessor_index).astype(np.int64) + else: + indices = np.arange(0, len(positions), dtype=np.int64) + indices = torch.from_numpy(indices.reshape(-1, 3)) + + texture = None + if include_textures: + texture = self.get_texture_for_mesh(primitive, indices) + + mesh_obj = Meshes( + verts=[positions], faces=[indices], textures=texture + ) + names_meshes_list.append((mesh_name, mesh_obj)) + + if "children" in current_node: + children_node_indices = current_node["children"] + Q.extend( + [ + (current_transform, node_index) + for node_index in 
children_node_indices + ] + ) + + return names_meshes_list + + +def load_meshes( + path: PathOrStr, + path_manager: PathManager, + include_textures: bool = True, +) -> List[Tuple[Optional[str], Meshes]]: + """ + Loads all the meshes from the default scene in the given GLB file. + and returns them separately. + + Args: + path: path to read from + path_manager: PathManager object for interpreting the path + include_textures: whether to load textures + + Returns: + List of (name, mesh) pairs, where the name is the optional name property + from the GLB file, or None if it is absent, and the mesh is a Meshes + object containing one mesh. + """ + with _open_file(path, path_manager, "rb") as f: + loader = _GLTFLoader(cast(BinaryIO, f)) + names_meshes_list = loader.load(include_textures=include_textures) + return names_meshes_list + + +class _GLTFWriter: + def __init__(self, data: Meshes, buffer_stream: BinaryIO) -> None: + self._json_data = defaultdict(list) + self.mesh = data + self.buffer_stream = buffer_stream + + # initialize json with one scene and one node + scene_index = 0 + # pyre-fixme[6]: Incompatible parameter type + self._json_data["scene"] = scene_index + self._json_data["scenes"].append({"nodes": [scene_index]}) + self._json_data["asset"] = {"version": "2.0"} + node = {"name": "Node", "mesh": 0} + self._json_data["nodes"].append(node) + + # mesh primitives + meshes = defaultdict(list) + # pyre-fixme[6]: Incompatible parameter type + meshes["name"] = "Node-Mesh" + if isinstance(self.mesh.textures, TexturesVertex): + primitives = { + "attributes": {"POSITION": 0, "COLOR_0": 2}, + "indices": 1, + "mode": _PrimitiveMode.TRIANGLES, + } + elif isinstance(self.mesh.textures, TexturesUV): + primitives = { + "attributes": {"POSITION": 0, "TEXCOORD_0": 2}, + "indices": 1, + "mode": _PrimitiveMode.TRIANGLES, + "material": 0, + } + else: + primitives = { + "attributes": {"POSITION": 0}, + "indices": 1, + "mode": _PrimitiveMode.TRIANGLES, + } + + 
meshes["primitives"].append(primitives) + self._json_data["meshes"].append(meshes) + + # default material + material = { + "name": "material_1", + "pbrMetallicRoughness": { + "baseColorTexture": {"index": 0}, + "baseColorFactor": [1, 1, 1, 1], + "metallicFactor": 0, + "roughnessFactor": 0.99, + }, + "emissiveFactor": [0, 0, 0], + "alphaMode": "OPAQUE", + } + self._json_data["materials"].append(material) + + # default sampler + sampler = {"magFilter": 9729, "minFilter": 9986, "wrapS": 10497, "wrapT": 10497} + self._json_data["samplers"].append(sampler) + + # default textures + texture = {"sampler": 0, "source": 0} + self._json_data["textures"].append(texture) + + def _write_accessor_json(self, key: str) -> Tuple[int, np.ndarray]: + name = "Node-Mesh_%s" % key + byte_offset = 0 + if key == "positions": + data = self.mesh.verts_packed().cpu().numpy() + component_type = _ComponentType.FLOAT + element_type = "VEC3" + buffer_view = 0 + element_min = list(map(float, np.min(data, axis=0))) + element_max = list(map(float, np.max(data, axis=0))) + byte_per_element = 3 * _DTYPE_BYTES[_ITEM_TYPES[_ComponentType.FLOAT]] + elif key == "texcoords": + component_type = _ComponentType.FLOAT + data = self.mesh.textures.verts_uvs_list()[0].cpu().numpy() + data[:, 1] = 1 - data[:, -1] # flip y tex-coordinate + element_type = "VEC2" + buffer_view = 2 + element_min = list(map(float, np.min(data, axis=0))) + element_max = list(map(float, np.max(data, axis=0))) + byte_per_element = 2 * _DTYPE_BYTES[_ITEM_TYPES[_ComponentType.FLOAT]] + elif key == "texvertices": + component_type = _ComponentType.FLOAT + data = self.mesh.textures.verts_features_list()[0].cpu().numpy() + element_type = "VEC3" + buffer_view = 2 + element_min = list(map(float, np.min(data, axis=0))) + element_max = list(map(float, np.max(data, axis=0))) + byte_per_element = 3 * _DTYPE_BYTES[_ITEM_TYPES[_ComponentType.FLOAT]] + elif key == "indices": + component_type = _ComponentType.UNSIGNED_SHORT + data = ( + 
self.mesh.faces_packed() + .cpu() + .numpy() + .astype(_ITEM_TYPES[component_type]) + ) + element_type = "SCALAR" + buffer_view = 1 + element_min = list(map(int, np.min(data, keepdims=True))) + element_max = list(map(int, np.max(data, keepdims=True))) + byte_per_element = ( + 3 * _DTYPE_BYTES[_ITEM_TYPES[_ComponentType.UNSIGNED_SHORT]] + ) + else: + raise NotImplementedError( + "invalid key accessor, should be one of positions, indices or texcoords" + ) + + count = int(data.shape[0]) + byte_length = count * byte_per_element + accessor_json = { + "name": name, + "componentType": component_type, + "type": element_type, + "bufferView": buffer_view, + "byteOffset": byte_offset, + "min": element_min, + "max": element_max, + "count": count * 3 if key == "indices" else count, + } + self._json_data["accessors"].append(accessor_json) + return (byte_length, data) + + def _write_bufferview(self, key: str, **kwargs): + if key not in ["positions", "texcoords", "texvertices", "indices"]: + raise ValueError( + "key must be one of positions, texcoords, texvertices or indices" + ) + + bufferview = { + "name": "bufferView_%s" % key, + "buffer": 0, + } + target = _TargetType.ARRAY_BUFFER + if key == "positions": + byte_per_element = 3 * _DTYPE_BYTES[_ITEM_TYPES[_ComponentType.FLOAT]] + bufferview["byteStride"] = int(byte_per_element) + elif key == "texcoords": + byte_per_element = 2 * _DTYPE_BYTES[_ITEM_TYPES[_ComponentType.FLOAT]] + target = _TargetType.ARRAY_BUFFER + bufferview["byteStride"] = int(byte_per_element) + elif key == "texvertices": + byte_per_element = 3 * _DTYPE_BYTES[_ITEM_TYPES[_ComponentType.FLOAT]] + target = _TargetType.ELEMENT_ARRAY_BUFFER + bufferview["byteStride"] = int(byte_per_element) + elif key == "indices": + byte_per_element = ( + 3 * _DTYPE_BYTES[_ITEM_TYPES[_ComponentType.UNSIGNED_SHORT]] + ) + target = _TargetType.ELEMENT_ARRAY_BUFFER + + bufferview["target"] = target + bufferview["byteOffset"] = kwargs.get("offset") + bufferview["byteLength"] = 
kwargs.get("byte_length") + self._json_data["bufferViews"].append(bufferview) + + def _write_image_buffer(self, **kwargs) -> Tuple[int, bytes]: + image_np = self.mesh.textures.maps_list()[0].cpu().numpy() + image_array = (image_np * 255.0).astype(np.uint8) + im = Image.fromarray(image_array) + with BytesIO() as f: + im.save(f, format="PNG") + image_data = f.getvalue() + + image_data_byte_length = len(image_data) + bufferview_image = { + "buffer": 0, + } + bufferview_image["byteOffset"] = kwargs.get("offset") + bufferview_image["byteLength"] = image_data_byte_length + self._json_data["bufferViews"].append(bufferview_image) + + image = {"name": "texture", "mimeType": "image/png", "bufferView": 3} + self._json_data["images"].append(image) + return (image_data_byte_length, image_data) + + def save(self): + # check validity of mesh + if self.mesh.verts_packed() is None or self.mesh.faces_packed() is None: + raise ValueError("invalid mesh to save, verts or face indices are empty") + + # accessors for positions, texture uvs and face indices + pos_byte, pos_data = self._write_accessor_json("positions") + idx_byte, idx_data = self._write_accessor_json("indices") + include_textures = False + if self.mesh.textures is not None: + if hasattr(self.mesh.textures, "verts_features_list"): + tex_byte, tex_data = self._write_accessor_json("texvertices") + include_textures = True + texcoords = False + elif self.mesh.textures.verts_uvs_list()[0] is not None: + tex_byte, tex_data = self._write_accessor_json("texcoords") + include_textures = True + texcoords = True + + # bufferViews for positions, texture coords and indices + byte_offset = 0 + self._write_bufferview("positions", byte_length=pos_byte, offset=byte_offset) + byte_offset += pos_byte + + self._write_bufferview("indices", byte_length=idx_byte, offset=byte_offset) + byte_offset += idx_byte + + if include_textures: + if texcoords: + self._write_bufferview( + "texcoords", byte_length=tex_byte, offset=byte_offset + ) + else: + 
self._write_bufferview( + "texvertices", byte_length=tex_byte, offset=byte_offset + ) + byte_offset += tex_byte + + # image bufferView + include_image = False + if self.mesh.textures is not None and hasattr(self.mesh.textures, "maps_list"): + include_image = True + image_byte, image_data = self._write_image_buffer(offset=byte_offset) + byte_offset += image_byte + + # buffers + self._json_data["buffers"].append({"byteLength": int(byte_offset)}) + + # organize into a glb + json_bytes = bytes(json.dumps(self._json_data, cls=OurEncoder), "utf-8") + json_length = len(json_bytes) + + # write header + version = 2 + total_header_length = 28 # (file header = 12) + 2 * (chunk header = 8) + file_length = json_length + byte_offset + total_header_length + header = struct.pack(" None: + self.known_suffixes = (".glb",) + + def read( + self, + path: PathOrStr, + include_textures: bool, + device, + path_manager: PathManager, + **kwargs, + ) -> Optional[Meshes]: + if not endswith(path, self.known_suffixes): + return None + + names_meshes_list = load_meshes( + path=path, + path_manager=path_manager, + include_textures=include_textures, + ) + + meshes_list = [mesh for name, mesh in names_meshes_list] + mesh = join_meshes_as_scene(meshes_list) + return mesh.to(device) + + def save( + self, + data: Meshes, + path: PathOrStr, + path_manager: PathManager, + binary: Optional[bool], + **kwargs, + ) -> bool: + """ + Writes all the meshes from the default scene to GLB file. 
+ + Args: + data: meshes to save + path: path of the GLB file to write into + path_manager: PathManager object for interpreting the path + + Return True if saving succeeds and False otherwise + """ + + if not endswith(path, self.known_suffixes): + return False + + with _open_file(path, path_manager, "wb") as f: + writer = _GLTFWriter(data, cast(BinaryIO, f)) + writer.save() + return True diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..95f5c65aeba630bfa51ffb4575cf32003c8fb7f2 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__init__.py @@ -0,0 +1,49 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + +from .ball_query import ball_query +from .cameras_alignment import corresponding_cameras_alignment + +from .cubify import cubify +from .graph_conv import GraphConv +from .interp_face_attrs import interpolate_face_attributes +from .iou_box3d import box3d_overlap +from .knn import knn_gather, knn_points +from .laplacian_matrices import cot_laplacian, laplacian, norm_laplacian + +from .mesh_face_areas_normals import mesh_face_areas_normals +from .mesh_filtering import taubin_smoothing + +from .packed_to_padded import packed_to_padded, padded_to_packed +from .perspective_n_points import efficient_pnp +from .points_alignment import corresponding_points_alignment, iterative_closest_point +from .points_normals import ( + estimate_pointcloud_local_coord_frames, + estimate_pointcloud_normals, +) +from .points_to_volumes import ( + add_pointclouds_to_volumes, + add_points_features_to_volume_densities_features, +) + +from .sample_farthest_points import sample_farthest_points + +from .sample_points_from_meshes import sample_points_from_meshes +from .subdivide_meshes import SubdivideMeshes +from .utils import ( + convert_pointclouds_to_tensor, + eyes, + get_point_covariances, + is_pointclouds, + wmean, +) + +from .vert_align import vert_align + + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/__init__.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72c11ca40b059422fa6153615fc01a906e4e2556 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/ball_query.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/ball_query.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0aa917c4227c5c56da0666fe7ee09de945b65e62 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/ball_query.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/cameras_alignment.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/cameras_alignment.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..410c6a1e4bac02e8f32613c9090e9cf139f05a3c Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/cameras_alignment.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/cubify.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/cubify.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b5973db918d70f98e04284ef2b927449fb071c1 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/cubify.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/graph_conv.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/graph_conv.cpython-310.pyc new file mode 
100644 index 0000000000000000000000000000000000000000..197fcecc2df449cc323d64e2a13d762ca1f92b97 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/graph_conv.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/interp_face_attrs.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/interp_face_attrs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d6c6de4d9c65c1e388e9c2a0275c0da1ab6f6ae9 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/interp_face_attrs.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/iou_box3d.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/iou_box3d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25d738d7d0ab49c676f2bf4b2d672d145cfe6795 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/iou_box3d.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/knn.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/knn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1c58345a11c6961da37efc7a0d1f30a713557aa9 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/knn.cpython-310.pyc differ diff --git 
a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/laplacian_matrices.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/laplacian_matrices.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d69b2e1c9e24b8e6738312b226da16be60beca5 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/laplacian_matrices.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/mesh_face_areas_normals.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/mesh_face_areas_normals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81d506356bdfcfc626fe751ee42629c6e35bf966 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/mesh_face_areas_normals.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/mesh_filtering.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/mesh_filtering.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf9e11095d40c616a4b4e96be9ab531aba7749de Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/mesh_filtering.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/packed_to_padded.cpython-310.pyc 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/packed_to_padded.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72ef07ed5865587809b83f5a570d626127a0b3e1 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/packed_to_padded.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/perspective_n_points.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/perspective_n_points.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f80c18182b4afe15a63e0166039ba0a082e6782b Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/perspective_n_points.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/points_alignment.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/points_alignment.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d72998b9dea3bd0a845bf05e0452370039c0fdd Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/points_alignment.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/points_normals.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/points_normals.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..bdd23997bcf2525275e371b8741895bf7e6682f8 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/points_normals.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/points_to_volumes.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/points_to_volumes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..913f82aa842cc19d92706a1fb6e6a849e4574530 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/points_to_volumes.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/sample_farthest_points.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/sample_farthest_points.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..334d97d78df3da8cacbdbc783fd592441f69e49c Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/sample_farthest_points.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/sample_points_from_meshes.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/sample_points_from_meshes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed5c56999645860505e490d05aa19eaed02bebe1 Binary files /dev/null and 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/sample_points_from_meshes.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/subdivide_meshes.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/subdivide_meshes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0758fcdb1e4a04193d62ea119336a6422d432957 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/subdivide_meshes.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/utils.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b5320775177bf4f160030a39c4075c416b09dcd Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/utils.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/vert_align.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/vert_align.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..44db773790e3d53de504803cc81d91b6cc054031 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/__pycache__/vert_align.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/ball_query.py 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/ball_query.py new file mode 100644 index 0000000000000000000000000000000000000000..31266c4d2b6ad36f27086ff5f108ba6b464fe698 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/ball_query.py @@ -0,0 +1,142 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from typing import Union + +import torch +from pytorch3d import _C +from torch.autograd import Function +from torch.autograd.function import once_differentiable + +from .knn import _KNN +from .utils import masked_gather + + +class _ball_query(Function): + """ + Torch autograd Function wrapper for Ball Query C++/CUDA implementations. + """ + + @staticmethod + def forward(ctx, p1, p2, lengths1, lengths2, K, radius): + """ + Arguments defintions the same as in the ball_query function + """ + idx, dists = _C.ball_query(p1, p2, lengths1, lengths2, K, radius) + ctx.save_for_backward(p1, p2, lengths1, lengths2, idx) + ctx.mark_non_differentiable(idx) + return dists, idx + + @staticmethod + @once_differentiable + def backward(ctx, grad_dists, grad_idx): + p1, p2, lengths1, lengths2, idx = ctx.saved_tensors + # TODO(gkioxari) Change cast to floats once we add support for doubles. 
+ if not (grad_dists.dtype == torch.float32): + grad_dists = grad_dists.float() + if not (p1.dtype == torch.float32): + p1 = p1.float() + if not (p2.dtype == torch.float32): + p2 = p2.float() + + # Reuse the KNN backward function + # by default, norm is 2 + grad_p1, grad_p2 = _C.knn_points_backward( + p1, p2, lengths1, lengths2, idx, 2, grad_dists + ) + return grad_p1, grad_p2, None, None, None, None + + +def ball_query( + p1: torch.Tensor, + p2: torch.Tensor, + lengths1: Union[torch.Tensor, None] = None, + lengths2: Union[torch.Tensor, None] = None, + K: int = 500, + radius: float = 0.2, + return_nn: bool = True, +): + """ + Ball Query is an alternative to KNN. It can be + used to find all points in p2 that are within a specified radius + to the query point in p1 (with an upper limit of K neighbors). + + The neighbors returned are not necssarily the *nearest* to the + point in p1, just the first K values in p2 which are within the + specified radius. + + This method is faster than kNN when there are large numbers of points + in p2 and the ordering of neighbors is not important compared to the + distance being within the radius threshold. + + "Ball query’s local neighborhood guarantees a fixed region scale thus + making local region features more generalizable across space, which is + preferred for tasks requiring local pattern recognition + (e.g. semantic point labeling)" [1]. + + [1] Charles R. Qi et al, "PointNet++: Deep Hierarchical Feature Learning + on Point Sets in a Metric Space", NeurIPS 2017. + + Args: + p1: Tensor of shape (N, P1, D) giving a batch of N point clouds, each + containing up to P1 points of dimension D. These represent the centers of + the ball queries. + p2: Tensor of shape (N, P2, D) giving a batch of N point clouds, each + containing up to P2 points of dimension D. + lengths1: LongTensor of shape (N,) of values in the range [0, P1], giving the + length of each pointcloud in p1. Or None to indicate that every cloud has + length P1. 
+ lengths2: LongTensor of shape (N,) of values in the range [0, P2], giving the + length of each pointcloud in p2. Or None to indicate that every cloud has + length P2. + K: Integer giving the upper bound on the number of samples to take + within the radius + radius: the radius around each point within which the neighbors need to be located + return_nn: If set to True returns the K neighbor points in p2 for each point in p1. + + Returns: + dists: Tensor of shape (N, P1, K) giving the squared distances to + the neighbors. This is padded with zeros both where a cloud in p2 + has fewer than S points and where a cloud in p1 has fewer than P1 points + and also if there are fewer than K points which satisfy the radius threshold. + + idx: LongTensor of shape (N, P1, K) giving the indices of the + S neighbors in p2 for points in p1. + Concretely, if `p1_idx[n, i, k] = j` then `p2[n, j]` is the k-th + neighbor to `p1[n, i]` in `p2[n]`. This is padded with -1 both where a cloud + in p2 has fewer than S points and where a cloud in p1 has fewer than P1 + points and also if there are fewer than K points which satisfy the radius threshold. + + nn: Tensor of shape (N, P1, K, D) giving the K neighbors in p2 for + each point in p1. Concretely, `p2_nn[n, i, k]` gives the k-th neighbor + for `p1[n, i]`. Returned if `return_nn` is True. The output is a tensor + of shape (N, P1, K, U). 
+ + """ + if p1.shape[0] != p2.shape[0]: + raise ValueError("pts1 and pts2 must have the same batch dimension.") + if p1.shape[2] != p2.shape[2]: + raise ValueError("pts1 and pts2 must have the same point dimension.") + + p1 = p1.contiguous() + p2 = p2.contiguous() + P1 = p1.shape[1] + P2 = p2.shape[1] + N = p1.shape[0] + + if lengths1 is None: + lengths1 = torch.full((N,), P1, dtype=torch.int64, device=p1.device) + if lengths2 is None: + lengths2 = torch.full((N,), P2, dtype=torch.int64, device=p1.device) + + dists, idx = _ball_query.apply(p1, p2, lengths1, lengths2, K, radius) + + # Gather the neighbors if needed + points_nn = masked_gather(p2, idx) if return_nn else None + + return _KNN(dists=dists, idx=idx, knn=points_nn) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/cameras_alignment.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/cameras_alignment.py new file mode 100644 index 0000000000000000000000000000000000000000..40e00aba10d2281df3d59d9f1185b12142f03d3e --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/cameras_alignment.py @@ -0,0 +1,224 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from typing import TYPE_CHECKING + +import torch + +from .. import ops + + +if TYPE_CHECKING: + from pytorch3d.renderer.cameras import CamerasBase + + +def corresponding_cameras_alignment( + cameras_src: "CamerasBase", + cameras_tgt: "CamerasBase", + estimate_scale: bool = True, + mode: str = "extrinsics", + eps: float = 1e-9, +) -> "CamerasBase": # pragma: no cover + """ + .. warning:: + The `corresponding_cameras_alignment` API is experimental + and subject to change! 
+ + Estimates a single similarity transformation between two sets of cameras + `cameras_src` and `cameras_tgt` and returns an aligned version of + `cameras_src`. + + Given source cameras [(R_1, T_1), (R_2, T_2), ..., (R_N, T_N)] and + target cameras [(R_1', T_1'), (R_2', T_2'), ..., (R_N', T_N')], + where (R_i, T_i) is a 2-tuple of the camera rotation and translation matrix + respectively, the algorithm finds a global rotation, translation and scale + (R_A, T_A, s_A) which aligns all source cameras with the target cameras + such that the following holds: + + Under the change of coordinates using a similarity transform + (R_A, T_A, s_A) a 3D point X' is mapped to X with: :: + + X = (X' R_A + T_A) / s_A + + Then, for all cameras `i`, we assume that the following holds: :: + + X R_i + T_i = s' (X' R_i' + T_i'), + + i.e. an adjusted point X' is mapped by a camera (R_i', T_i') + to the same point as imaged from camera (R_i, T_i) after resolving + the scale ambiguity with a global scalar factor s'. + + Substituting for X above gives rise to the following: :: + + (X' R_A + T_A) / s_A R_i + T_i = s' (X' R_i' + T_i') // · s_A + (X' R_A + T_A) R_i + T_i s_A = (s' s_A) (X' R_i' + T_i') + s' := 1 / s_A # without loss of generality + (X' R_A + T_A) R_i + T_i s_A = X' R_i' + T_i' + X' R_A R_i + T_A R_i + T_i s_A = X' R_i' + T_i' + ^^^^^^^ ^^^^^^^^^^^^^^^^^ + ~= R_i' ~= T_i' + + i.e. after estimating R_A, T_A, s_A, the aligned source cameras have + extrinsics: :: + + cameras_src_align = (R_A R_i, T_A R_i + T_i s_A) ~= (R_i', T_i') + + We support two ways `R_A, T_A, s_A` can be estimated: + 1) `mode=='centers'` + Estimates the similarity alignment between camera centers using + Umeyama's algorithm (see `pytorch3d.ops.corresponding_points_alignment` + for details) and transforms camera extrinsics accordingly. 
+ + 2) `mode=='extrinsics'` + Defines the alignment problem as a system + of the following equations: :: + + for all i: + [ R_A 0 ] x [ R_i 0 ] = [ R_i' 0 ] + [ T_A^T 1 ] [ (s_A T_i^T) 1 ] [ T_i' 1 ] + + `R_A, T_A` and `s_A` are then obtained by solving the + system in the least squares sense. + + The estimated camera transformation is a true similarity transform, i.e. + it cannot be a reflection. + + Args: + cameras_src: `N` cameras to be aligned. + cameras_tgt: `N` target cameras. + estimate_scale: Controls whether the alignment transform is rigid + (`estimate_scale=False`), or a similarity (`estimate_scale=True`). + `s_A` is set to `1` if `estimate_scale==False`. + mode: Controls the alignment algorithm. + Can be one either `'centers'` or `'extrinsics'`. Please refer to the + description above for details. + eps: A scalar for clamping to avoid dividing by zero. + Active when `estimate_scale==True`. + + Returns: + cameras_src_aligned: `cameras_src` after applying the alignment transform. + """ + + if cameras_src.R.shape[0] != cameras_tgt.R.shape[0]: + raise ValueError( + "cameras_src and cameras_tgt have to contain the same number of cameras!" 
+ ) + + if mode == "centers": + align_fun = _align_camera_centers + elif mode == "extrinsics": + align_fun = _align_camera_extrinsics + else: + raise ValueError("mode has to be one of (centers, extrinsics)") + + align_t_R, align_t_T, align_t_s = align_fun( + cameras_src, cameras_tgt, estimate_scale=estimate_scale, eps=eps + ) + + # create a new cameras object and set the R and T accordingly + cameras_src_aligned = cameras_src.clone() + cameras_src_aligned.R = torch.bmm(align_t_R.expand_as(cameras_src.R), cameras_src.R) + cameras_src_aligned.T = ( + torch.bmm( + align_t_T[:, None].repeat(cameras_src.R.shape[0], 1, 1), + cameras_src.R, + )[:, 0] + + cameras_src.T * align_t_s + ) + + return cameras_src_aligned + + +def _align_camera_centers( + cameras_src: "CamerasBase", + cameras_tgt: "CamerasBase", + estimate_scale: bool = True, + eps: float = 1e-9, +): # pragma: no cover + """ + Use Umeyama's algorithm to align the camera centers. + """ + centers_src = cameras_src.get_camera_center() + centers_tgt = cameras_tgt.get_camera_center() + align_t = ops.corresponding_points_alignment( + centers_src[None], + centers_tgt[None], + estimate_scale=estimate_scale, + allow_reflection=False, + eps=eps, + ) + # the camera transform is the inverse of the estimated transform between centers + align_t_R = align_t.R.permute(0, 2, 1) + align_t_T = -(torch.bmm(align_t.T[:, None], align_t_R))[:, 0] + align_t_s = align_t.s[0] + + return align_t_R, align_t_T, align_t_s + + +def _align_camera_extrinsics( + cameras_src: "CamerasBase", + cameras_tgt: "CamerasBase", + estimate_scale: bool = True, + eps: float = 1e-9, +): # pragma: no cover + """ + Get the global rotation R_A with svd of cov(RR^T): + ``` + R_A R_i = R_i' for all i + R_A [R_1 R_2 ... R_N] = [R_1' R_2' ... R_N'] + U, _, V = svd([R_1 R_2 ... R_N]^T [R_1' R_2' ... 
R_N']) + R_A = (U V^T)^T + ``` + """ + RRcov = torch.bmm(cameras_src.R, cameras_tgt.R.transpose(2, 1)).mean(0) + U, _, V = torch.svd(RRcov) + align_t_R = V @ U.t() + + """ + The translation + scale `T_A` and `s_A` is computed by finding + a translation and scaling that aligns two tensors `A, B` + defined as follows: + ``` + T_A R_i + s_A T_i = T_i' ; for all i // · R_i^T + s_A T_i R_i^T + T_A = T_i' R_i^T ; for all i + ^^^^^^^^^ ^^^^^^^^^^ + A_i B_i + + A_i := T_i R_i^T + A = [A_1 A_2 ... A_N] + B_i := T_i' R_i^T + B = [B_1 B_2 ... B_N] + ``` + The scale s_A can be retrieved by matching the correlations of + the points sets A and B: + ``` + s_A = (A-mean(A))*(B-mean(B)).sum() / ((A-mean(A))**2).sum() + ``` + The translation `T_A` is then defined as: + ``` + T_A = mean(B) - mean(A) * s_A + ``` + """ + A = torch.bmm(cameras_src.R, cameras_src.T[:, :, None])[:, :, 0] + B = torch.bmm(cameras_src.R, cameras_tgt.T[:, :, None])[:, :, 0] + Amu = A.mean(0, keepdim=True) + Bmu = B.mean(0, keepdim=True) + if estimate_scale and A.shape[0] > 1: + # get the scaling component by matching covariances + # of centered A and centered B + Ac = A - Amu + Bc = B - Bmu + # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. 
+ align_t_s = (Ac * Bc).mean() / (Ac**2).mean().clamp(eps) + else: + # set the scale to identity + align_t_s = 1.0 + # get the translation as the difference between the means of A and B + align_t_T = Bmu - align_t_s * Amu + + return align_t_R, align_t_T, align_t_s diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/cubify.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/cubify.py new file mode 100644 index 0000000000000000000000000000000000000000..ceab4bc70de23fc42364d5a670be8b92e6ea5dfc --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/cubify.py @@ -0,0 +1,275 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + + +from typing import Optional + +import torch +import torch.nn.functional as F + +from pytorch3d.common.compat import meshgrid_ij + +from pytorch3d.structures import Meshes + + +def unravel_index(idx, dims) -> torch.Tensor: + r""" + Equivalent to np.unravel_index + Args: + idx: A LongTensor whose elements are indices into the + flattened version of an array of dimensions dims. + dims: The shape of the array to be indexed. + Implemented only for dims=(N, H, W, D) + """ + if len(dims) != 4: + raise ValueError("Expects a 4-element list.") + N, H, W, D = dims + n = idx // (H * W * D) + h = (idx - n * H * W * D) // (W * D) + w = (idx - n * H * W * D - h * W * D) // D + d = idx - n * H * W * D - h * W * D - w * D + return torch.stack((n, h, w, d), dim=1) + + +def ravel_index(idx, dims) -> torch.Tensor: + """ + Computes the linear index in an array of shape dims. + It performs the reverse functionality of unravel_index + Args: + idx: A LongTensor of shape (N, 3). 
Each row corresponds to indices into an + array of dimensions dims. + dims: The shape of the array to be indexed. + Implemented only for dims=(H, W, D) + """ + if len(dims) != 3: + raise ValueError("Expects a 3-element list") + if idx.shape[1] != 3: + raise ValueError("Expects an index tensor of shape Nx3") + H, W, D = dims + linind = idx[:, 0] * W * D + idx[:, 1] * D + idx[:, 2] + return linind + + +@torch.no_grad() +def cubify( + voxels: torch.Tensor, + thresh: float, + *, + feats: Optional[torch.Tensor] = None, + device=None, + align: str = "topleft" +) -> Meshes: + r""" + Converts a voxel to a mesh by replacing each occupied voxel with a cube + consisting of 12 faces and 8 vertices. Shared vertices are merged, and + internal faces are removed. + Args: + voxels: A FloatTensor of shape (N, D, H, W) containing occupancy probabilities. + thresh: A scalar threshold. If a voxel occupancy is larger than + thresh, the voxel is considered occupied. + feats: A FloatTensor of shape (N, K, D, H, W) containing the color information + of each voxel. K is the number of channels. This is supported only when + align == "center" + device: The device of the output meshes + align: Defines the alignment of the mesh vertices and the grid locations. + Has to be one of {"topleft", "corner", "center"}. See below for explanation. + Default is "topleft". + Returns: + meshes: A Meshes object of the corresponding meshes. + + + The alignment between the vertices of the cubified mesh and the voxel locations (or pixels) + is defined by the choice of `align`. We support three modes, as shown below for a 2x2 grid: + + X---X---- X-------X --------- + | | | | | | | X | X | + X---X---- --------- --------- + | | | | | | | X | X | + --------- X-------X --------- + + topleft corner center + + In the figure, X denote the grid locations and the squares represent the added cuboids. + When `align="topleft"`, then the top left corner of each cuboid corresponds to the + pixel coordinate of the input grid. 
+ When `align="corner"`, then the corners of the output mesh span the whole grid. + When `align="center"`, then the grid locations form the center of the cuboids. + """ + + if device is None: + device = voxels.device + + if align not in ["topleft", "corner", "center"]: + raise ValueError("Align mode must be one of (topleft, corner, center).") + + if len(voxels) == 0: + return Meshes(verts=[], faces=[]) + + N, D, H, W = voxels.size() + # vertices corresponding to a unit cube: 8x3 + cube_verts = torch.tensor( + [ + [0, 0, 0], + [0, 0, 1], + [0, 1, 0], + [0, 1, 1], + [1, 0, 0], + [1, 0, 1], + [1, 1, 0], + [1, 1, 1], + ], + dtype=torch.int64, + device=device, + ) + + # faces corresponding to a unit cube: 12x3 + cube_faces = torch.tensor( + [ + [0, 1, 2], + [1, 3, 2], # left face: 0, 1 + [2, 3, 6], + [3, 7, 6], # bottom face: 2, 3 + [0, 2, 6], + [0, 6, 4], # front face: 4, 5 + [0, 5, 1], + [0, 4, 5], # up face: 6, 7 + [6, 7, 5], + [6, 5, 4], # right face: 8, 9 + [1, 7, 3], + [1, 5, 7], # back face: 10, 11 + ], + dtype=torch.int64, + device=device, + ) + + wx = torch.tensor([0.5, 0.5], device=device).view(1, 1, 1, 1, 2) + wy = torch.tensor([0.5, 0.5], device=device).view(1, 1, 1, 2, 1) + wz = torch.tensor([0.5, 0.5], device=device).view(1, 1, 2, 1, 1) + + voxelt = voxels.ge(thresh).float() + # N x 1 x D x H x W + voxelt = voxelt.view(N, 1, D, H, W) + + # N x 1 x (D-1) x (H-1) x (W-1) + voxelt_x = F.conv3d(voxelt, wx).gt(0.5).float() + voxelt_y = F.conv3d(voxelt, wy).gt(0.5).float() + voxelt_z = F.conv3d(voxelt, wz).gt(0.5).float() + + # 12 x N x 1 x D x H x W + faces_idx = torch.ones((cube_faces.size(0), N, 1, D, H, W), device=device) + + # add left face + faces_idx[0, :, :, :, :, 1:] = 1 - voxelt_x + faces_idx[1, :, :, :, :, 1:] = 1 - voxelt_x + # add bottom face + faces_idx[2, :, :, :, :-1, :] = 1 - voxelt_y + faces_idx[3, :, :, :, :-1, :] = 1 - voxelt_y + # add front face + faces_idx[4, :, :, 1:, :, :] = 1 - voxelt_z + faces_idx[5, :, :, 1:, :, :] = 1 - voxelt_z + # 
add up face + faces_idx[6, :, :, :, 1:, :] = 1 - voxelt_y + faces_idx[7, :, :, :, 1:, :] = 1 - voxelt_y + # add right face + faces_idx[8, :, :, :, :, :-1] = 1 - voxelt_x + faces_idx[9, :, :, :, :, :-1] = 1 - voxelt_x + # add back face + faces_idx[10, :, :, :-1, :, :] = 1 - voxelt_z + faces_idx[11, :, :, :-1, :, :] = 1 - voxelt_z + + faces_idx *= voxelt + + # N x H x W x D x 12 + faces_idx = faces_idx.permute(1, 2, 4, 5, 3, 0).squeeze(1) + # (NHWD) x 12 + faces_idx = faces_idx.contiguous() + faces_idx = faces_idx.view(-1, cube_faces.size(0)) + + # boolean to linear index + # NF x 2 + linind = torch.nonzero(faces_idx, as_tuple=False) + + # NF x 4 + nyxz = unravel_index(linind[:, 0], (N, H, W, D)) + + # NF x 3: faces + faces = torch.index_select(cube_faces, 0, linind[:, 1]) + + grid_faces = [] + for d in range(cube_faces.size(1)): + # NF x 3 + xyz = torch.index_select(cube_verts, 0, faces[:, d]) + permute_idx = torch.tensor([1, 0, 2], device=device) + yxz = torch.index_select(xyz, 1, permute_idx) + yxz += nyxz[:, 1:] + # NF x 1 + temp = ravel_index(yxz, (H + 1, W + 1, D + 1)) + grid_faces.append(temp) + # NF x 3 + grid_faces = torch.stack(grid_faces, dim=1) + + y, x, z = meshgrid_ij(torch.arange(H + 1), torch.arange(W + 1), torch.arange(D + 1)) + y = y.to(device=device, dtype=torch.float32) + x = x.to(device=device, dtype=torch.float32) + z = z.to(device=device, dtype=torch.float32) + + if align == "center": + x = x - 0.5 + y = y - 0.5 + z = z - 0.5 + + margin = 0.0 if align == "corner" else 1.0 + y = y * 2.0 / (H - margin) - 1.0 + x = x * 2.0 / (W - margin) - 1.0 + z = z * 2.0 / (D - margin) - 1.0 + + # ((H+1)(W+1)(D+1)) x 3 + grid_verts = torch.stack((x, y, z), dim=3).view(-1, 3) + + if len(nyxz) == 0: + verts_list = [torch.tensor([], dtype=torch.float32, device=device)] * N + faces_list = [torch.tensor([], dtype=torch.int64, device=device)] * N + return Meshes(verts=verts_list, faces=faces_list) + + num_verts = grid_verts.size(0) + grid_faces += nyxz[:, 0].view(-1, 
1) * num_verts + idleverts = torch.ones(num_verts * N, dtype=torch.uint8, device=device) + + indices = grid_faces.flatten() + if device.type == "cpu": + indices = torch.unique(indices) + idleverts.scatter_(0, indices, 0) + grid_faces -= nyxz[:, 0].view(-1, 1) * num_verts + split_size = torch.bincount(nyxz[:, 0], minlength=N) + faces_list = list(torch.split(grid_faces, split_size.tolist(), 0)) + + idleverts = idleverts.view(N, num_verts) + idlenum = idleverts.cumsum(1) + + verts_list = [ + grid_verts.index_select(0, (idleverts[n] == 0).nonzero(as_tuple=False)[:, 0]) + for n in range(N) + ] + + textures_list = None + if feats is not None and align == "center": + # We return a TexturesAtlas containing one color for each face + # N x K x D x H x W -> N x H x W x D x K + feats = feats.permute(0, 3, 4, 2, 1) + + # (NHWD) x K + feats = feats.reshape(-1, feats.size(4)) + feats = torch.index_select(feats, 0, linind[:, 0]) + feats = feats.reshape(-1, 1, 1, feats.size(1)) + feats_list = list(torch.split(feats, split_size.tolist(), 0)) + from pytorch3d.renderer.mesh.textures import TexturesAtlas + + textures_list = TexturesAtlas(feats_list) + + faces_list = [nface - idlenum[n][nface] for n, nface in enumerate(faces_list)] + return Meshes(verts=verts_list, faces=faces_list, textures=textures_list) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/interp_face_attrs.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/interp_face_attrs.py new file mode 100644 index 0000000000000000000000000000000000000000..c1f04891233e2566e93066f0057f2da4afdbb7cf --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/interp_face_attrs.py @@ -0,0 +1,101 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
def interpolate_face_attributes(
    pix_to_face: torch.Tensor,
    barycentric_coords: torch.Tensor,
    face_attributes: torch.Tensor,
) -> torch.Tensor:
    """
    Interpolate arbitrary per-vertex face attributes at rasterized pixels
    using the pixels' barycentric coordinates.

    Args:
        pix_to_face: LongTensor of shape (N, H, W, K) giving, for each pixel,
            the index of the overlapping face in the packed representation.
            A value < 0 marks a pixel with no overlapping face; it is skipped.
        barycentric_coords: FloatTensor of shape (N, H, W, K, 3) giving the
            barycentric coordinates of each pixel relative to its face.
        face_attributes: packed attributes of shape (total_faces, 3, D), one
            attribute value per vertex of each face.

    Returns:
        pixel_vals: tensor of shape (N, H, W, K, D) with the interpolated
            attribute value at each pixel.
    """
    # Validate input shapes before dispatching to either backend.
    total_faces, verts_per_face, attr_dim = face_attributes.shape
    if verts_per_face != 3:
        raise ValueError("Faces can only have three vertices; got %r" % verts_per_face)
    N, H, W, K, _ = barycentric_coords.shape
    if pix_to_face.shape != (N, H, W, K):
        msg = "pix_to_face must have shape (batch_size, H, W, K); got %r"
        raise ValueError(msg % (pix_to_face.shape,))

    # The custom kernel only exists for CUDA tensors; use the pure-PyTorch
    # implementation on CPU.
    # TODO: Implement a C++ version of this function
    if not pix_to_face.is_cuda:
        return interpolate_face_attributes_python(
            pix_to_face, barycentric_coords, face_attributes
        )

    # Flatten the pixel dimensions, run the custom autograd op, and restore
    # the (N, H, W, K, D) layout on the way out.
    flat_pix = pix_to_face.view(-1)
    flat_bary = barycentric_coords.view(N * H * W * K, 3)
    flat_out = _InterpFaceAttrs.apply(flat_pix, flat_bary, face_attributes)
    return flat_out.view(N, H, W, K, -1)


class _InterpFaceAttrs(Function):
    """Autograd wrapper for the C++/CUDA face-attribute interpolation kernels."""

    @staticmethod
    def forward(ctx, pix_to_face, barycentric_coords, face_attrs):
        ctx.save_for_backward(pix_to_face, barycentric_coords, face_attrs)
        return _C.interp_face_attrs_forward(
            pix_to_face, barycentric_coords, face_attrs
        )

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_pix_attrs):
        saved = ctx.saved_tensors
        grads = _C.interp_face_attrs_backward(*saved, grad_pix_attrs)
        # pix_to_face holds integer indices, so it receives no gradient.
        return None, grads[0], grads[1]


def interpolate_face_attributes_python(
    pix_to_face: torch.Tensor,
    barycentric_coords: torch.Tensor,
    face_attributes: torch.Tensor,
) -> torch.Tensor:
    """
    Pure-PyTorch reference implementation of :func:`interpolate_face_attributes`.
    See that function for argument and return descriptions.
    """
    total_faces, verts_per_face, attr_dim = face_attributes.shape
    N, H, W, K, _ = barycentric_coords.shape

    # Background pixels carry an index < 0. Remap them to face 0 so the
    # gather below stays in bounds, then zero their outputs at the end.
    background = pix_to_face < 0
    safe_pix = pix_to_face.clone()
    safe_pix[background] = 0

    gather_idx = safe_pix.view(N * H * W * K, 1, 1).expand(N * H * W * K, 3, attr_dim)
    per_pixel_attrs = face_attributes.gather(0, gather_idx).view(
        N, H, W, K, 3, attr_dim
    )
    # Barycentric-weighted sum over the three face vertices.
    pixel_vals = (barycentric_coords[..., None] * per_pixel_attrs).sum(dim=-2)
    pixel_vals[background] = 0  # zero out pixels with no overlapping face
    return pixel_vals
+_box_planes gives the quad faces of the 3D box +_box_triangles gives the triangle faces of the 3D box +""" +_box_planes = [ + [0, 1, 2, 3], + [3, 2, 6, 7], + [0, 1, 5, 4], + [0, 3, 7, 4], + [1, 2, 6, 5], + [4, 5, 6, 7], +] +_box_triangles = [ + [0, 1, 2], + [0, 3, 2], + [4, 5, 6], + [4, 6, 7], + [1, 5, 6], + [1, 6, 2], + [0, 4, 7], + [0, 7, 3], + [3, 2, 6], + [3, 6, 7], + [0, 1, 5], + [0, 4, 5], +] + + +def _check_coplanar(boxes: torch.Tensor, eps: float = 1e-4) -> None: + faces = torch.tensor(_box_planes, dtype=torch.int64, device=boxes.device) + verts = boxes.index_select(index=faces.view(-1), dim=1) + B = boxes.shape[0] + P, V = faces.shape + # (B, P, 4, 3) -> (B, P, 3) + v0, v1, v2, v3 = verts.reshape(B, P, V, 3).unbind(2) + + # Compute the normal + e0 = F.normalize(v1 - v0, dim=-1) + e1 = F.normalize(v2 - v0, dim=-1) + normal = F.normalize(torch.cross(e0, e1, dim=-1), dim=-1) + + # Check the fourth vertex is also on the same plane + mat1 = (v3 - v0).view(B, 1, -1) # (B, 1, P*3) + mat2 = normal.view(B, -1, 1) # (B, P*3, 1) + if not (mat1.bmm(mat2).abs() < eps).all().item(): + msg = "Plane vertices are not coplanar" + raise ValueError(msg) + + return + + +def _check_nonzero(boxes: torch.Tensor, eps: float = 1e-4) -> None: + """ + Checks that the sides of the box have a non zero area + """ + faces = torch.tensor(_box_triangles, dtype=torch.int64, device=boxes.device) + verts = boxes.index_select(index=faces.view(-1), dim=1) + B = boxes.shape[0] + T, V = faces.shape + # (B, T, 3, 3) -> (B, T, 3) + v0, v1, v2 = verts.reshape(B, T, V, 3).unbind(2) + + normals = torch.cross(v1 - v0, v2 - v0, dim=-1) # (B, T, 3) + face_areas = normals.norm(dim=-1) / 2 + + if (face_areas < eps).any().item(): + msg = "Planes have zero areas" + raise ValueError(msg) + + return + + +class _box3d_overlap(Function): + """ + Torch autograd Function wrapper for box3d_overlap C++/CUDA implementations. + Backward is not supported. 
+ """ + + @staticmethod + def forward(ctx, boxes1, boxes2): + """ + Arguments defintions the same as in the box3d_overlap function + """ + vol, iou = _C.iou_box3d(boxes1, boxes2) + return vol, iou + + @staticmethod + def backward(ctx, grad_vol, grad_iou): + raise ValueError("box3d_overlap backward is not supported") + + +def box3d_overlap( + boxes1: torch.Tensor, boxes2: torch.Tensor, eps: float = 1e-4 +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Computes the intersection of 3D boxes1 and boxes2. + + Inputs boxes1, boxes2 are tensors of shape (B, 8, 3) + (where B doesn't have to be the same for boxes1 and boxes2), + containing the 8 corners of the boxes, as follows: + + (4) +---------+. (5) + | ` . | ` . + | (0) +---+-----+ (1) + | | | | + (7) +-----+---+. (6)| + ` . | ` . | + (3) ` +---------+ (2) + + + NOTE: Throughout this implementation, we assume that boxes + are defined by their 8 corners exactly in the order specified in the + diagram above for the function to give correct results. In addition + the vertices on each plane must be coplanar. 
+ As an alternative to the diagram, this is a unit bounding + box which has the correct vertex ordering: + + box_corner_vertices = [ + [0, 0, 0], + [1, 0, 0], + [1, 1, 0], + [0, 1, 0], + [0, 0, 1], + [1, 0, 1], + [1, 1, 1], + [0, 1, 1], + ] + + Args: + boxes1: tensor of shape (N, 8, 3) of the coordinates of the 1st boxes + boxes2: tensor of shape (M, 8, 3) of the coordinates of the 2nd boxes + Returns: + vol: (N, M) tensor of the volume of the intersecting convex shapes + iou: (N, M) tensor of the intersection over union which is + defined as: `iou = vol / (vol1 + vol2 - vol)` + """ + if not all((8, 3) == box.shape[1:] for box in [boxes1, boxes2]): + raise ValueError("Each box in the batch must be of shape (8, 3)") + + _check_coplanar(boxes1, eps) + _check_coplanar(boxes2, eps) + _check_nonzero(boxes1, eps) + _check_nonzero(boxes2, eps) + + vol, iou = _box3d_overlap.apply(boxes1, boxes2) + + return vol, iou diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/knn.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/knn.py new file mode 100644 index 0000000000000000000000000000000000000000..114334fdab5832562327b47ac8d1c34271961ff4 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/knn.py @@ -0,0 +1,250 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from collections import namedtuple +from typing import Union + +import torch +from pytorch3d import _C +from torch.autograd import Function +from torch.autograd.function import once_differentiable + + +_KNN = namedtuple("KNN", "dists idx knn") + + +class _knn_points(Function): + """ + Torch autograd Function wrapper for KNN C++/CUDA implementations. 
+ """ + + @staticmethod + # pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently. + def forward( + ctx, + p1, + p2, + lengths1, + lengths2, + K, + version, + norm: int = 2, + return_sorted: bool = True, + ): + """ + K-Nearest neighbors on point clouds. + + Args: + p1: Tensor of shape (N, P1, D) giving a batch of N point clouds, each + containing up to P1 points of dimension D. + p2: Tensor of shape (N, P2, D) giving a batch of N point clouds, each + containing up to P2 points of dimension D. + lengths1: LongTensor of shape (N,) of values in the range [0, P1], giving the + length of each pointcloud in p1. Or None to indicate that every cloud has + length P1. + lengths2: LongTensor of shape (N,) of values in the range [0, P2], giving the + length of each pointcloud in p2. Or None to indicate that every cloud has + length P2. + K: Integer giving the number of nearest neighbors to return. + version: Which KNN implementation to use in the backend. If version=-1, + the correct implementation is selected based on the shapes of the inputs. + norm: (int) indicating the norm. Only supports 1 (for L1) and 2 (for L2). + return_sorted: (bool) whether to return the nearest neighbors sorted in + ascending order of distance. + + Returns: + p1_dists: Tensor of shape (N, P1, K) giving the squared distances to + the nearest neighbors. This is padded with zeros both where a cloud in p2 + has fewer than K points and where a cloud in p1 has fewer than P1 points. + + p1_idx: LongTensor of shape (N, P1, K) giving the indices of the + K nearest neighbors from points in p1 to points in p2. + Concretely, if `p1_idx[n, i, k] = j` then `p2[n, j]` is the k-th nearest + neighbors to `p1[n, i]` in `p2[n]`. This is padded with zeros both where a cloud + in p2 has fewer than K points and where a cloud in p1 has fewer than P1 points. 
+ """ + if not ((norm == 1) or (norm == 2)): + raise ValueError("Support for 1 or 2 norm.") + + idx, dists = _C.knn_points_idx(p1, p2, lengths1, lengths2, norm, K, version) + + # sort KNN in ascending order if K > 1 + if K > 1 and return_sorted: + if lengths2.min() < K: + P1 = p1.shape[1] + mask = lengths2[:, None] <= torch.arange(K, device=dists.device)[None] + # mask has shape [N, K], true where dists irrelevant + mask = mask[:, None].expand(-1, P1, -1) + # mask has shape [N, P1, K], true where dists irrelevant + dists[mask] = float("inf") + dists, sort_idx = dists.sort(dim=2) + dists[mask] = 0 + else: + dists, sort_idx = dists.sort(dim=2) + idx = idx.gather(2, sort_idx) + + ctx.save_for_backward(p1, p2, lengths1, lengths2, idx) + ctx.mark_non_differentiable(idx) + ctx.norm = norm + return dists, idx + + @staticmethod + @once_differentiable + def backward(ctx, grad_dists, grad_idx): + p1, p2, lengths1, lengths2, idx = ctx.saved_tensors + norm = ctx.norm + # TODO(gkioxari) Change cast to floats once we add support for doubles. + if not (grad_dists.dtype == torch.float32): + grad_dists = grad_dists.float() + if not (p1.dtype == torch.float32): + p1 = p1.float() + if not (p2.dtype == torch.float32): + p2 = p2.float() + grad_p1, grad_p2 = _C.knn_points_backward( + p1, p2, lengths1, lengths2, idx, norm, grad_dists + ) + return grad_p1, grad_p2, None, None, None, None, None, None + + +def knn_points( + p1: torch.Tensor, + p2: torch.Tensor, + lengths1: Union[torch.Tensor, None] = None, + lengths2: Union[torch.Tensor, None] = None, + norm: int = 2, + K: int = 1, + version: int = -1, + return_nn: bool = False, + return_sorted: bool = True, +) -> _KNN: + """ + K-Nearest neighbors on point clouds. + + Args: + p1: Tensor of shape (N, P1, D) giving a batch of N point clouds, each + containing up to P1 points of dimension D. + p2: Tensor of shape (N, P2, D) giving a batch of N point clouds, each + containing up to P2 points of dimension D. 
+ lengths1: LongTensor of shape (N,) of values in the range [0, P1], giving the + length of each pointcloud in p1. Or None to indicate that every cloud has + length P1. + lengths2: LongTensor of shape (N,) of values in the range [0, P2], giving the + length of each pointcloud in p2. Or None to indicate that every cloud has + length P2. + norm: Integer indicating the norm of the distance. Supports only 1 for L1, 2 for L2. + K: Integer giving the number of nearest neighbors to return. + version: Which KNN implementation to use in the backend. If version=-1, + the correct implementation is selected based on the shapes of the inputs. + return_nn: If set to True returns the K nearest neighbors in p2 for each point in p1. + return_sorted: (bool) whether to return the nearest neighbors sorted in + ascending order of distance. + + Returns: + dists: Tensor of shape (N, P1, K) giving the squared distances to + the nearest neighbors. This is padded with zeros both where a cloud in p2 + has fewer than K points and where a cloud in p1 has fewer than P1 points. + + idx: LongTensor of shape (N, P1, K) giving the indices of the + K nearest neighbors from points in p1 to points in p2. + Concretely, if `p1_idx[n, i, k] = j` then `p2[n, j]` is the k-th nearest + neighbors to `p1[n, i]` in `p2[n]`. This is padded with zeros both where a cloud + in p2 has fewer than K points and where a cloud in p1 has fewer than P1 + points. + + nn: Tensor of shape (N, P1, K, D) giving the K nearest neighbors in p2 for + each point in p1. Concretely, `p2_nn[n, i, k]` gives the k-th nearest neighbor + for `p1[n, i]`. Returned if `return_nn` is True. + The nearest neighbors are collected using `knn_gather` + + .. code-block:: + + p2_nn = knn_gather(p2, p1_idx, lengths2) + + which is a helper function that allows indexing any tensor of shape (N, P2, U) with + the indices `p1_idx` returned by `knn_points`. The output is a tensor + of shape (N, P1, K, U). 
+ + """ + if p1.shape[0] != p2.shape[0]: + raise ValueError("pts1 and pts2 must have the same batch dimension.") + if p1.shape[2] != p2.shape[2]: + raise ValueError("pts1 and pts2 must have the same point dimension.") + + p1 = p1.contiguous() + p2 = p2.contiguous() + + P1 = p1.shape[1] + P2 = p2.shape[1] + + if lengths1 is None: + lengths1 = torch.full((p1.shape[0],), P1, dtype=torch.int64, device=p1.device) + if lengths2 is None: + lengths2 = torch.full((p1.shape[0],), P2, dtype=torch.int64, device=p1.device) + + p1_dists, p1_idx = _knn_points.apply( + p1, p2, lengths1, lengths2, K, version, norm, return_sorted + ) + + p2_nn = None + if return_nn: + p2_nn = knn_gather(p2, p1_idx, lengths2) + + return _KNN(dists=p1_dists, idx=p1_idx, knn=p2_nn if return_nn else None) + + +def knn_gather( + x: torch.Tensor, idx: torch.Tensor, lengths: Union[torch.Tensor, None] = None +): + """ + A helper function for knn that allows indexing a tensor x with the indices `idx` + returned by `knn_points`. + + For example, if `dists, idx = knn_points(p, x, lengths_p, lengths, K)` + where p is a tensor of shape (N, L, D) and x a tensor of shape (N, M, D), + then one can compute the K nearest neighbors of p with `p_nn = knn_gather(x, idx, lengths)`. + It can also be applied for any tensor x of shape (N, M, U) where U != D. + + Args: + x: Tensor of shape (N, M, U) containing U-dimensional features to + be gathered. + idx: LongTensor of shape (N, L, K) giving the indices returned by `knn_points`. + lengths: LongTensor of shape (N,) of values in the range [0, M], giving the + length of each example in the batch in x. Or None to indicate that every + example has length M. + Returns: + x_out: Tensor of shape (N, L, K, U) resulting from gathering the elements of x + with idx, s.t. `x_out[n, l, k] = x[n, idx[n, l, k]]`. + If `k > lengths[n]` then `x_out[n, l, k]` is filled with 0.0. 
+ """ + N, M, U = x.shape + _N, L, K = idx.shape + + if N != _N: + raise ValueError("x and idx must have same batch dimension.") + + if lengths is None: + lengths = torch.full((x.shape[0],), M, dtype=torch.int64, device=x.device) + + idx_expanded = idx[:, :, :, None].expand(-1, -1, -1, U) + # idx_expanded has shape [N, L, K, U] + + x_out = x[:, :, None].expand(-1, -1, K, -1).gather(1, idx_expanded) + # p2_nn has shape [N, L, K, U] + + needs_mask = lengths.min() < K + if needs_mask: + # mask has shape [N, K], true where idx is irrelevant because + # there is less number of points in p2 than K + mask = lengths[:, None] <= torch.arange(K, device=x.device)[None] + + # expand mask to shape [N, L, K, U] + mask = mask[:, None].expand(-1, L, -1) + mask = mask[:, :, :, None].expand(-1, -1, -1, U) + x_out[mask] = 0.0 + + return x_out diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/laplacian_matrices.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/laplacian_matrices.py new file mode 100644 index 0000000000000000000000000000000000000000..6400923f385446767728a8d1b733b7955ee1391a --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/laplacian_matrices.py @@ -0,0 +1,182 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from typing import Tuple + +import torch + + +# ------------------------ Laplacian Matrices ------------------------ # +# This file contains implementations of differentiable laplacian matrices. 
+# These include +# 1) Standard Laplacian matrix +# 2) Cotangent Laplacian matrix +# 3) Norm Laplacian matrix +# -------------------------------------------------------------------- # + + +def laplacian(verts: torch.Tensor, edges: torch.Tensor) -> torch.Tensor: + """ + Computes the laplacian matrix. + The definition of the laplacian is + L[i, j] = -1 , if i == j + L[i, j] = 1 / deg(i) , if (i, j) is an edge + L[i, j] = 0 , otherwise + where deg(i) is the degree of the i-th vertex in the graph. + + Args: + verts: tensor of shape (V, 3) containing the vertices of the graph + edges: tensor of shape (E, 2) containing the vertex indices of each edge + Returns: + L: Sparse FloatTensor of shape (V, V) + """ + V = verts.shape[0] + + e0, e1 = edges.unbind(1) + + idx01 = torch.stack([e0, e1], dim=1) # (E, 2) + idx10 = torch.stack([e1, e0], dim=1) # (E, 2) + idx = torch.cat([idx01, idx10], dim=0).t() # (2, 2*E) + + # First, we construct the adjacency matrix, + # i.e. A[i, j] = 1 if (i,j) is an edge, or + # A[e0, e1] = 1 & A[e1, e0] = 1 + ones = torch.ones(idx.shape[1], dtype=torch.float32, device=verts.device) + # pyre-fixme[16]: Module `sparse` has no attribute `FloatTensor`. + A = torch.sparse.FloatTensor(idx, ones, (V, V)) + + # the sum of i-th row of A gives the degree of the i-th vertex + deg = torch.sparse.sum(A, dim=1).to_dense() + + # We construct the Laplacian matrix by adding the non diagonal values + # i.e. L[i, j] = 1 ./ deg(i) if (i, j) is an edge + deg0 = deg[e0] + # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`. + deg0 = torch.where(deg0 > 0.0, 1.0 / deg0, deg0) + deg1 = deg[e1] + # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`. + deg1 = torch.where(deg1 > 0.0, 1.0 / deg1, deg1) + val = torch.cat([deg0, deg1]) + # pyre-fixme[16]: Module `sparse` has no attribute `FloatTensor`. + L = torch.sparse.FloatTensor(idx, val, (V, V)) + + # Then we add the diagonal values L[i, i] = -1. 
+ idx = torch.arange(V, device=verts.device) + idx = torch.stack([idx, idx], dim=0) + ones = torch.ones(idx.shape[1], dtype=torch.float32, device=verts.device) + # pyre-fixme[16]: Module `sparse` has no attribute `FloatTensor`. + L -= torch.sparse.FloatTensor(idx, ones, (V, V)) + + return L + + +def cot_laplacian( + verts: torch.Tensor, faces: torch.Tensor, eps: float = 1e-12 +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Returns the Laplacian matrix with cotangent weights and the inverse of the + face areas. + + Args: + verts: tensor of shape (V, 3) containing the vertices of the graph + faces: tensor of shape (F, 3) containing the vertex indices of each face + Returns: + 2-element tuple containing + - **L**: Sparse FloatTensor of shape (V,V) for the Laplacian matrix. + Here, L[i, j] = cot a_ij + cot b_ij iff (i, j) is an edge in meshes. + See the description above for more clarity. + - **inv_areas**: FloatTensor of shape (V,) containing the inverse of sum of + face areas containing each vertex + """ + V, F = verts.shape[0], faces.shape[0] + + face_verts = verts[faces] + v0, v1, v2 = face_verts[:, 0], face_verts[:, 1], face_verts[:, 2] + + # Side lengths of each triangle, of shape (sum(F_n),) + # A is the side opposite v1, B is opposite v2, and C is opposite v3 + A = (v1 - v2).norm(dim=1) + B = (v0 - v2).norm(dim=1) + C = (v0 - v1).norm(dim=1) + + # Area of each triangle (with Heron's formula); shape is (sum(F_n),) + s = 0.5 * (A + B + C) + # note that the area can be negative (close to 0) causing nans after sqrt() + # we clip it to a small positive value + # pyre-fixme[16]: `float` has no attribute `clamp_`. 
+ area = (s * (s - A) * (s - B) * (s - C)).clamp_(min=eps).sqrt() + + # Compute cotangents of angles, of shape (sum(F_n), 3) + A2, B2, C2 = A * A, B * B, C * C + cota = (B2 + C2 - A2) / area + cotb = (A2 + C2 - B2) / area + cotc = (A2 + B2 - C2) / area + cot = torch.stack([cota, cotb, cotc], dim=1) + cot /= 4.0 + + # Construct a sparse matrix by basically doing: + # L[v1, v2] = cota + # L[v2, v0] = cotb + # L[v0, v1] = cotc + ii = faces[:, [1, 2, 0]] + jj = faces[:, [2, 0, 1]] + idx = torch.stack([ii, jj], dim=0).view(2, F * 3) + # pyre-fixme[16]: Module `sparse` has no attribute `FloatTensor`. + L = torch.sparse.FloatTensor(idx, cot.view(-1), (V, V)) + + # Make it symmetric; this means we are also setting + # L[v2, v1] = cota + # L[v0, v2] = cotb + # L[v1, v0] = cotc + L += L.t() + + # For each vertex, compute the sum of areas for triangles containing it. + idx = faces.view(-1) + inv_areas = torch.zeros(V, dtype=torch.float32, device=verts.device) + val = torch.stack([area] * 3, dim=1).view(-1) + inv_areas.scatter_add_(0, idx, val) + idx = inv_areas > 0 + # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`. + inv_areas[idx] = 1.0 / inv_areas[idx] + inv_areas = inv_areas.view(-1, 1) + + return L, inv_areas + + +def norm_laplacian( + verts: torch.Tensor, edges: torch.Tensor, eps: float = 1e-12 +) -> torch.Tensor: + """ + Norm laplacian computes a variant of the laplacian matrix which weights each + affinity with the normalized distance of the neighboring nodes. + More concretely, + L[i, j] = 1. 
/ wij where wij = ||vi - vj|| if (vi, vj) are neighboring nodes + + Args: + verts: tensor of shape (V, 3) containing the vertices of the graph + edges: tensor of shape (E, 2) containing the vertex indices of each edge + Returns: + L: Sparse FloatTensor of shape (V, V) + """ + edge_verts = verts[edges] # (E, 2, 3) + v0, v1 = edge_verts[:, 0], edge_verts[:, 1] + + # Side lengths of each edge, of shape (E,) + w01 = 1.0 / ((v0 - v1).norm(dim=1) + eps) + + # Construct a sparse matrix by basically doing: + # L[v0, v1] = w01 + # L[v1, v0] = w01 + e01 = edges.t() # (2, E) + + V = verts.shape[0] + # pyre-fixme[16]: Module `sparse` has no attribute `FloatTensor`. + L = torch.sparse.FloatTensor(e01, w01, (V, V)) + L = L + L.t() + + return L diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/marching_cubes.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/marching_cubes.py new file mode 100644 index 0000000000000000000000000000000000000000..17236857d607a17270120606288451f51cb999a7 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/marching_cubes.py @@ -0,0 +1,305 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
# pyre-unsafe

from typing import List, Optional, Tuple

import torch
from pytorch3d import _C
from pytorch3d.ops.marching_cubes_data import EDGE_TO_VERTICES, FACE_TABLE, INDEX
from pytorch3d.transforms import Translate
from torch.autograd import Function


# Tolerance used when comparing scalar-field values against the isolevel
# during vertex interpolation.
EPS = 0.00001


class Cube:
    def __init__(
        self,
        bfl_v: Tuple[int, int, int],
        volume: torch.Tensor,
        isolevel: float,
    ) -> None:
        """
        Initializes a cube given the bottom front left vertex coordinate
        and computes the cube configuration index from the vertex values
        and the isolevel.

        Edge and vertex convention:

                    v4_______e4____________v5
                    /|                    /|
                   / |                   / |
                e7/  |                e5/  |
                 /___|______e6_________/   |
              v7|    |                 |v6 |e9
                |    |                 |   |
                |    |e8               |e10|
             e11|    |                 |   |
                |    |______e0_________|___|
                |   / v0(bfl_v)        |   /v1
                |  /                   |  /
                | /e3                  | /e1
                |/_____________________|/
                v3         e2          v2

        Args:
            bfl_v: a tuple of size 3 corresponding to the bottom front left
                vertex of the cube in (x, y, z) format
            volume: the 3D scalar data
            isolevel: the isosurface value used as a threshold for determining
                whether a point is inside/outside the volume
        """
        x, y, z = bfl_v
        self.x, self.y, self.z = x, y, z
        self.bfl_v = bfl_v
        # Enumerate the 8 corners from the bits of v:
        # bit 0 -> x offset, bit 1 -> y offset, bit 2 -> z offset.
        self.verts = [
            [x + (v & 1), y + (v >> 1 & 1), z + (v >> 2 & 1)] for v in range(8)
        ]  # vertex position (x, y, z) for v0-v1-v4-v5-v3-v2-v7-v6

        # Calculates cube configuration index given values of the cube vertices:
        # bit i is set when the INDEX[i]-th corner is below the isolevel.
        self.cube_index = 0
        for i in range(8):
            v = self.verts[INDEX[i]]
            # NOTE: volume is indexed as [z][y][x].
            value = volume[v[2]][v[1]][v[0]]
            if value < isolevel:
                self.cube_index |= 1 << i

    def get_vpair_from_edge(self, edge: int, W: int, H: int) -> Tuple[int, int]:
        """
        Get a tuple of global vertex IDs from a local edge ID.
        A global vertex ID is computed as x + y * W + z * W * H.

        Args:
            edge: local edge ID in the cube
            W: grid width (x extent)
            H: grid height (y extent)

        Returns:
            a pair of global vertex IDs identifying the edge endpoints
        """
        v1, v2 = EDGE_TO_VERTICES[edge]  # two end-points on the edge
        v1_id = self.verts[v1][0] + self.verts[v1][1] * W + self.verts[v1][2] * W * H
        v2_id = self.verts[v2][0] + self.verts[v2][1] * W + self.verts[v2][2] * W * H
        return (v1_id, v2_id)

    def vert_interp(
        self,
        isolevel: float,
        edge: int,
        vol: torch.Tensor,
    ) -> List:
        """
        Linearly interpolate the position where the isosurface cuts an edge,
        based on the scalar values at the edge's two endpoint vertices.

        Args:
            isolevel: the isosurface value to use as the threshold to determine
                whether points are within a volume.
            edge: edge (ID) to interpolate
            vol: 3D scalar field

        Returns:
            interpolated vertex: [x, y, z] position of the interpolated vertex
        """
        v1, v2 = EDGE_TO_VERTICES[edge]
        p1, p2 = self.verts[v1], self.verts[v2]
        val1, val2 = (
            vol[p1[2]][p1[1]][p1[0]],
            vol[p2[2]][p2[1]][p2[0]],
        )
        # Snap to an endpoint when the isolevel (nearly) coincides with it,
        # or when the two endpoint values are (nearly) equal — this avoids a
        # division by ~0 in the interpolation below.
        point = None
        if abs(isolevel - val1) < EPS:
            point = p1
        elif abs(isolevel - val2) < EPS:
            point = p2
        elif abs(val1 - val2) < EPS:
            point = p1

        if point is None:
            # Standard linear interpolation along the edge.
            mu = (isolevel - val1) / (val2 - val1)
            x1, y1, z1 = p1
            x2, y2, z2 = p2
            x = x1 + mu * (x2 - x1)
            y = y1 + mu * (y2 - y1)
            z = z1 + mu * (z2 - z1)
        else:
            x, y, z = point
        return [x, y, z]


def marching_cubes_naive(
    vol_batch: torch.Tensor,
    isolevel: Optional[float] = None,
    return_local_coords: bool = True,
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
    """
    Runs the classic marching cubes algorithm, iterating over
    the coordinates of the volume and using a given isolevel
    for determining intersected edges of cubes.
    Returns vertices and faces of the obtained mesh.
    This operation is non-differentiable.

    Args:
        vol_batch: a Tensor of size (N, D, H, W) corresponding to
            a batch of 3D scalar fields
        isolevel: the isosurface value to use as the threshold to determine
            whether points are within a volume. If None, then the average of the
            maximum and minimum value of the scalar field will be used.
        return_local_coords: bool. If True the output vertices will be in local
            coordinates in the range [-1, 1] x [-1, 1] x [-1, 1]. If False they
            will be in the range [0, W-1] x [0, H-1] x [0, D-1]
    Returns:
        verts: [{V_0}, {V_1}, ...] List of N sets of vertices of shape (|V_i|, 3)
            in FloatTensor
        faces: [{F_0}, {F_1}, ...] List of N sets of faces of shape (|F_i|, 3)
            in LongTensors
    """
    batched_verts, batched_faces = [], []
    D, H, W = vol_batch.shape[1:]

    # each edge is represented with its two endpoints (represented with global id)
    for i in range(len(vol_batch)):
        vol = vol_batch[i]
        thresh = ((vol.max() + vol.min()) / 2).item() if isolevel is None else isolevel
        vpair_to_edge = {}  # maps from tuple of edge endpoints to edge_id
        edge_id_to_v = {}  # maps from edge ID to vertex position
        uniq_edge_id = {}  # maps from edge ID to output vertex index
        verts = []  # store vertex positions
        faces = []  # store face indices
        # enumerate each cell in the 3d grid
        for z in range(0, D - 1):
            for y in range(0, H - 1):
                for x in range(0, W - 1):
                    cube = Cube((x, y, z), vol, thresh)
                    edge_indices = FACE_TABLE[cube.cube_index]
                    # cube is entirely in/out of the surface
                    if len(edge_indices) == 0:
                        continue

                    # gather mesh vertices/faces by processing each cube.
                    # NOTE: the entries of interp_points are replaced wholesale
                    # (never mutated in place), so the repeated-list aliasing
                    # from `* 12` is harmless here.
                    interp_points = [[0.0, 0.0, 0.0]] * 12
                    # triangle vertex IDs and positions
                    tri = []
                    ps = []
                    # NOTE(review): this inner `i` shadows the outer batch
                    # index `i`; benign since the outer `i` is not used again
                    # within this iteration, but worth renaming upstream.
                    for i, edge in enumerate(edge_indices):
                        interp_points[edge] = cube.vert_interp(thresh, edge, vol)

                        # Bind interpolated vertex with a global edge_id, which
                        # is represented by a pair of vertex ids (v1_id, v2_id)
                        # corresponding to a local edge.
                        (v1_id, v2_id) = cube.get_vpair_from_edge(edge, W, H)
                        edge_id = vpair_to_edge.setdefault(
                            (v1_id, v2_id), len(vpair_to_edge)
                        )
                        tri.append(edge_id)
                        ps.append(interp_points[edge])
                        # When the isolevel equals the edge endpoints' values,
                        # the interpolated vertices can share the same position
                        # and lead to degenerate triangles — emit a triangle
                        # only when its three corner positions are distinct.
                        if (
                            (i + 1) % 3 == 0
                            and ps[0] != ps[1]
                            and ps[1] != ps[2]
                            and ps[2] != ps[0]
                        ):
                            for j, edge_id in enumerate(tri):
                                edge_id_to_v[edge_id] = ps[j]
                                if edge_id not in uniq_edge_id:
                                    uniq_edge_id[edge_id] = len(verts)
                                    verts.append(edge_id_to_v[edge_id])
                            faces.append([uniq_edge_id[tri[j]] for j in range(3)])
                            tri = []
                            ps = []

        if len(faces) > 0 and len(verts) > 0:
            verts = torch.tensor(verts, dtype=vol.dtype)
            # Convert from world coordinates ([0, D-1], [0, H-1], [0, W-1]) to
            # local coordinates in the range [-1, 1]
            if return_local_coords:
                verts = (
                    Translate(x=+1.0, y=+1.0, z=+1.0, device=vol_batch.device)
                    .scale((vol_batch.new_tensor([W, H, D])[None] - 1) * 0.5)
                    .inverse()
                ).transform_points(verts[None])[0]
            batched_verts.append(verts)
            batched_faces.append(torch.tensor(faces, dtype=torch.int64))
        else:
            # No surface crossing in this volume: append empty placeholders.
            batched_verts.append([])
            batched_faces.append([])
    return batched_verts, batched_faces


########################################
# Marching Cubes Implementation in C++/Cuda
########################################
class _marching_cubes(Function):
    """
    Torch Function wrapper for the marching_cubes C++/CUDA implementation.
    This function is not differentiable. An autograd wrapper is used
    to ensure an error if the user tries to get gradients.
    """

    @staticmethod
    def forward(ctx, vol, isolevel):
        # `ids` are per-vertex identifiers used below for deduplication
        # of vertices produced by the CUDA kernel.
        verts, faces, ids = _C.marching_cubes(vol, isolevel)
        return verts, faces, ids

    @staticmethod
    def backward(ctx, grad_verts, grad_faces):
        raise ValueError("marching_cubes backward is not supported")


def marching_cubes(
    vol_batch: torch.Tensor,
    isolevel: Optional[float] = None,
    return_local_coords: bool = True,
) -> Tuple[List[torch.Tensor], List[torch.Tensor]]:
    """
    Run marching cubes over a volume scalar field with a designated isolevel.
    Returns vertices and faces of the obtained mesh.
    This operation is non-differentiable.

    Args:
        vol_batch: a Tensor of size (N, D, H, W) corresponding to
            a batch of 3D scalar fields
        isolevel: float used as threshold to determine if a point is
            inside/outside the volume. If None, then the average of the
            maximum and minimum value of the scalar field is used.
        return_local_coords: bool. If True the output vertices will be in local
            coordinates in the range [-1, 1] x [-1, 1] x [-1, 1]. If False they
            will be in the range [0, W-1] x [0, H-1] x [0, D-1]

    Returns:
        verts: [{V_0}, {V_1}, ...] List of N sets of vertices of shape (|V_i|, 3)
            in FloatTensor
        faces: [{F_0}, {F_1}, ...] List of N sets of faces of shape (|F_i|, 3)
            in LongTensors
    """
    batched_verts, batched_faces = [], []
    D, H, W = vol_batch.shape[1:]
    for i in range(len(vol_batch)):
        vol = vol_batch[i]
        thresh = ((vol.max() + vol.min()) / 2).item() if isolevel is None else isolevel
        verts, faces, ids = _marching_cubes.apply(vol, thresh)
        if len(faces) > 0 and len(verts) > 0:
            # Convert from world coordinates ([0, D-1], [0, H-1], [0, W-1]) to
            # local coordinates in the range [-1, 1]
            if return_local_coords:
                verts = (
                    Translate(x=+1.0, y=+1.0, z=+1.0, device=vol.device)
                    .scale((vol.new_tensor([W, H, D])[None] - 1) * 0.5)
                    .inverse()
                ).transform_points(verts[None])[0]
            # Deduplicate vertices emitted by the CUDA kernel: vertices with
            # equal ids are the same point; remap faces accordingly.
            if vol.is_cuda:
                unique_ids, inverse_idx = torch.unique(ids, return_inverse=True)
                verts_ = verts.new_zeros(unique_ids.shape[0], 3)
                verts_[inverse_idx] = verts
                verts = verts_
                faces = inverse_idx[faces]
            batched_verts.append(verts)
            batched_faces.append(faces)
        else:
            # No surface crossing in this volume: append empty placeholders.
            batched_verts.append([])
            batched_faces.append([])
    return batched_verts, batched_faces
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe


# Maps each edge (by index 0-11) to the pair of cube corners it connects.
# Corner numbering follows the convention encoded by INDEX at the bottom of
# this file.
EDGE_TO_VERTICES = [
    [0, 1],
    [1, 5],
    [4, 5],
    [0, 4],
    [2, 3],
    [3, 7],
    [6, 7],
    [2, 6],
    [0, 2],
    [1, 3],
    [5, 7],
    [4, 6],
]

# A list of lists mapping a cube_index (a given configuration) 
# to a list of faces corresponding to that configuration. Each face is represented
# by 3 consecutive numbers. A configuration will at most have 5 faces.
# There are 256 entries, one per 8-bit corner occupancy pattern; each number is
# an edge index (see EDGE_TO_VERTICES above) on which the face vertex lies.
#
# Table taken from http://paulbourke.net/geometry/polygonise/
FACE_TABLE = [
    [],
    [0, 8, 3],
    [0, 1, 9],
    [1, 8, 3, 9, 8, 1],
    [1, 2, 10],
    [0, 8, 3, 1, 2, 10],
    [9, 2, 10, 0, 2, 9],
    [2, 8, 3, 2, 10, 8, 10, 9, 8],
    [3, 11, 2],
    [0, 11, 2, 8, 11, 0],
    [1, 9, 0, 2, 3, 11],
    [1, 11, 2, 1, 9, 11, 9, 8, 11],
    [3, 10, 1, 11, 10, 3],
    [0, 10, 1, 0, 8, 10, 8, 11, 10],
    [3, 9, 0, 3, 11, 9, 11, 10, 9],
    [9, 8, 10, 10, 8, 11],
    [4, 7, 8],
    [4, 3, 0, 7, 3, 4],
    [0, 1, 9, 8, 4, 7],
    [4, 1, 9, 4, 7, 1, 7, 3, 1],
    [1, 2, 10, 8, 4, 7],
    [3, 4, 7, 3, 0, 4, 1, 2, 10],
    [9, 2, 10, 9, 0, 2, 8, 4, 7],
    [2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4],
    [8, 4, 7, 3, 11, 2],
    [11, 4, 7, 11, 2, 4, 2, 0, 4],
    [9, 0, 1, 8, 4, 7, 2, 3, 11],
    [4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1],
    [3, 10, 1, 3, 11, 10, 7, 8, 4],
    [1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4],
    [4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3],
    [4, 7, 11, 4, 11, 9, 9, 11, 10],
    [9, 5, 4],
    [9, 5, 4, 0, 8, 3],
    [0, 5, 4, 1, 5, 0],
    [8, 5, 4, 8, 3, 5, 3, 1, 5],
    [1, 2, 10, 9, 5, 4],
    [3, 0, 8, 1, 2, 10, 4, 9, 5],
    [5, 2, 10, 5, 4, 2, 4, 0, 2],
    [2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8],
    [9, 5, 4, 2, 3, 11],
    [0, 11, 2, 0, 8, 11, 4, 9, 5],
    [0, 5, 4, 0, 1, 5, 2, 3, 11],
    [2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5],
    [10, 3, 11, 10, 1, 3, 9, 5, 4],
    [4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10],
    [5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3],
    [5, 4, 8, 5, 8, 10, 10, 8, 11],
    [9, 7, 8, 5, 7, 9],
    [9, 3, 0, 9, 5, 3, 5, 7, 3],
    [0, 7, 8, 0, 1, 7, 1, 5, 7],
    [1, 5, 3, 3, 5, 7],
    [9, 7, 8, 9, 5, 7, 10, 1, 2],
    [10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3],
    [8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2],
    [2, 10, 5, 2, 5, 3, 3, 5, 7],
    [7, 9, 5, 7, 8, 9, 3, 11, 2],
    [9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11],
    [2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7],
    [11, 2, 1, 11, 1, 7, 7, 1, 5],
    [9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11],
    [5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0],
    [11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0],
    [11, 10, 5, 7, 11, 5],
    [10, 6, 5],
    [0, 8, 3, 5, 10, 6],
    [9, 0, 1, 5, 10, 6],
    [1, 8, 3, 1, 9, 8, 5, 10, 6],
    [1, 6, 5, 2, 6, 1],
    [1, 6, 5, 1, 2, 6, 3, 0, 8],
    [9, 6, 5, 9, 0, 6, 0, 2, 6],
    [5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8],
    [2, 3, 11, 10, 6, 5],
    [11, 0, 8, 11, 2, 0, 10, 6, 5],
    [0, 1, 9, 2, 3, 11, 5, 10, 6],
    [5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11],
    [6, 3, 11, 6, 5, 3, 5, 1, 3],
    [0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6],
    [3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9],
    [6, 5, 9, 6, 9, 11, 11, 9, 8],
    [5, 10, 6, 4, 7, 8],
    [4, 3, 0, 4, 7, 3, 6, 5, 10],
    [1, 9, 0, 5, 10, 6, 8, 4, 7],
    [10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4],
    [6, 1, 2, 6, 5, 1, 4, 7, 8],
    [1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7],
    [8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6],
    [7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9],
    [3, 11, 2, 7, 8, 4, 10, 6, 5],
    [5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11],
    [0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6],
    [9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6],
    [8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6],
    [5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11],
    [0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7],
    [6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9],
    [10, 4, 9, 6, 4, 10],
    [4, 10, 6, 4, 9, 10, 0, 8, 3],
    [10, 0, 1, 10, 6, 0, 6, 4, 0],
    [8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10],
    [1, 4, 9, 1, 2, 4, 2, 6, 4],
    [3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4],
    [0, 2, 4, 4, 2, 6],
    [8, 3, 2, 8, 2, 4, 4, 2, 6],
    [10, 4, 9, 10, 6, 4, 11, 2, 3],
    [0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6],
    [3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10],
    [6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1],
    [9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3],
    [8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1],
    [3, 11, 6, 3, 6, 0, 0, 6, 4],
    [6, 4, 8, 11, 6, 8],
    [7, 10, 6, 7, 8, 10, 8, 9, 10],
    [0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10],
    [10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0],
    [10, 6, 7, 10, 7, 1, 1, 7, 3],
    [1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7],
    [2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9],
    [7, 8, 0, 7, 0, 6, 6, 0, 2],
    [7, 3, 2, 6, 7, 2],
    [2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7],
    [2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7],
    [1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11],
    [11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1],
    [8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6],
    [0, 9, 1, 11, 6, 7],
    [7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0],
    [7, 11, 6],
    [7, 6, 11],
    [3, 0, 8, 11, 7, 6],
    [0, 1, 9, 11, 7, 6],
    [8, 1, 9, 8, 3, 1, 11, 7, 6],
    [10, 1, 2, 6, 11, 7],
    [1, 2, 10, 3, 0, 8, 6, 11, 7],
    [2, 9, 0, 2, 10, 9, 6, 11, 7],
    [6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8],
    [7, 2, 3, 6, 2, 7],
    [7, 0, 8, 7, 6, 0, 6, 2, 0],
    [2, 7, 6, 2, 3, 7, 0, 1, 9],
    [1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6],
    [10, 7, 6, 10, 1, 7, 1, 3, 7],
    [10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8],
    [0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7],
    [7, 6, 10, 7, 10, 8, 8, 10, 9],
    [6, 8, 4, 11, 8, 6],
    [3, 6, 11, 3, 0, 6, 0, 4, 6],
    [8, 6, 11, 8, 4, 6, 9, 0, 1],
    [9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6],
    [6, 8, 4, 6, 11, 8, 2, 10, 1],
    [1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6],
    [4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9],
    [10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3],
    [8, 2, 3, 8, 4, 2, 4, 6, 2],
    [0, 4, 2, 4, 6, 2],
    [1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8],
    [1, 9, 4, 1, 4, 2, 2, 4, 6],
    [8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1],
    [10, 1, 0, 10, 0, 6, 6, 0, 4],
    [4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3],
    [10, 9, 4, 6, 10, 4],
    [4, 9, 5, 7, 6, 11],
    [0, 8, 3, 4, 9, 5, 11, 7, 6],
    [5, 0, 1, 5, 4, 0, 7, 6, 11],
    [11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5],
    [9, 5, 4, 10, 1, 2, 7, 6, 11],
    [6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5],
    [7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2],
    [3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6],
    [7, 2, 3, 7, 6, 2, 5, 4, 9],
    [9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7],
    [3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0],
    [6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8],
    [9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7],
    [1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4],
    [4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10],
    [7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10],
    [6, 9, 5, 6, 11, 9, 11, 8, 9],
    [3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5],
    [0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11],
    [6, 11, 3, 6, 3, 5, 5, 3, 1],
    [1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6],
    [0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10],
    [11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5],
    [6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3],
    [5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2],
    [9, 5, 6, 9, 6, 0, 0, 6, 2],
    [1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8],
    [1, 5, 6, 2, 1, 6],
    [1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6],
    [10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0],
    [0, 3, 8, 5, 6, 10],
    [10, 5, 6],
    [11, 5, 10, 7, 5, 11],
    [11, 5, 10, 11, 7, 5, 8, 3, 0],
    [5, 11, 7, 5, 10, 11, 1, 9, 0],
    [10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1],
    [11, 1, 2, 11, 7, 1, 7, 5, 1],
    [0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11],
    [9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7],
    [7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2],
    [2, 5, 10, 2, 3, 5, 3, 7, 5],
    [8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5],
    [9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2],
    [9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2],
    [1, 3, 5, 3, 7, 5],
    [0, 8, 7, 0, 7, 1, 1, 7, 5],
    [9, 0, 3, 9, 3, 5, 5, 3, 7],
    [9, 8, 7, 5, 9, 7],
    [5, 8, 4, 5, 10, 8, 10, 11, 8],
    [5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0],
    [0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5],
    [10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4],
    [2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8],
    [0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11],
    [0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5],
    [9, 4, 5, 2, 11, 3],
    [2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4],
    [5, 10, 2, 5, 2, 4, 4, 2, 0],
    [3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9],
    [5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2],
    [8, 4, 5, 8, 5, 3, 3, 5, 1],
    [0, 4, 5, 1, 0, 5],
    [8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5],
    [9, 4, 5],
    [4, 11, 7, 4, 9, 11, 9, 10, 11],
    [0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11],
    [1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11],
    [3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4],
    [4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2],
    [9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3],
    [11, 7, 4, 11, 4, 2, 2, 4, 0],
    [11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4],
    [2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9],
    [9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7],
    [3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10],
    [1, 10, 2, 8, 7, 4],
    [4, 9, 1, 4, 1, 7, 7, 1, 3],
    [4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1],
    [4, 0, 3, 7, 4, 3],
    [4, 8, 7],
    [9, 10, 8, 10, 11, 8],
    [3, 0, 9, 3, 9, 11, 11, 9, 10],
    [0, 1, 10, 0, 10, 8, 8, 10, 11],
    [3, 1, 10, 11, 3, 10],
    [1, 2, 11, 1, 11, 9, 9, 11, 8],
    [3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9],
    [0, 2, 11, 8, 0, 11],
    [3, 2, 11],
    [2, 3, 8, 2, 8, 10, 10, 8, 9],
    [9, 10, 2, 0, 9, 2],
    [2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8],
    [1, 10, 2],
    [1, 3, 8, 9, 1, 8],
    [0, 9, 1],
    [0, 3, 8],
    [],
]

# mapping from 0-7 to v0-v7 in cube.vertices
INDEX = [0, 1, 5, 4, 2, 3, 7, 6]
class _MeshFaceAreasNormals(Function):
    """
    Torch autograd Function wrapper for face areas & normals C++/CUDA implementations.
    """

    @staticmethod
    def forward(ctx, verts, faces):
        """
        Compute per-face areas and unit normals via the compiled extension.

        Args:
            ctx: Context object used to calculate gradients.
            verts: FloatTensor of shape (V, 3), representing the packed
                batch verts tensor.
            faces: LongTensor of shape (F, 3), representing the packed
                batch faces tensor

        Returns:
            areas: FloatTensor of shape (F,) with the areas of each face
            normals: FloatTensor of shape (F,3) with the normals of each face
        """
        # Validate shapes/dtypes before handing the tensors to the C++ op.
        # The two shape checks per tensor raise the same message, so they are
        # merged into a single condition here.
        if verts.dim() != 2 or verts.shape[1] != 3:
            raise ValueError("verts need to be of shape Vx3.")
        if faces.dim() != 2 or faces.shape[1] != 3:
            raise ValueError("faces need to be of shape Fx3.")
        if faces.dtype != torch.int64:
            raise ValueError("faces need to be of type torch.int64.")
        # TODO(gkioxari) Change cast to floats once we add support for doubles.
        if verts.dtype != torch.float32:
            verts = verts.float()

        # Save the (possibly cast) verts so backward sees the same values the
        # forward kernel used.
        ctx.save_for_backward(verts, faces)
        return _C.face_areas_normals_forward(verts, faces)

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_areas, grad_normals):
        verts, faces = ctx.saved_tensors
        # TODO(gkioxari) Change cast to floats once we add support for doubles.
        grad_areas = grad_areas.contiguous()
        if grad_areas.dtype != torch.float32:
            grad_areas = grad_areas.float()
        grad_normals = grad_normals.contiguous()
        if grad_normals.dtype != torch.float32:
            grad_normals = grad_normals.float()
        # Gradients flow only to verts; faces are integer indices.
        return (
            _C.face_areas_normals_backward(grad_areas, grad_normals, verts, faces),
            None,
        )


# Public entry point: mesh_face_areas_normals(verts, faces) -> (areas, normals).
mesh_face_areas_normals = _MeshFaceAreasNormals.apply
class _PackedToPadded(Function):
    """
    Torch autograd Function wrapper for packed_to_padded C++/CUDA implementations.
    """

    @staticmethod
    def forward(ctx, inputs, first_idxs, max_size):
        """
        Scatter a packed (F, D) tensor into a zero-padded (N, max_size, D) one.

        Args:
            ctx: Context object used to calculate gradients.
            inputs: FloatTensor of shape (F, D), the packed batch tensor
                (e.g. areas for faces in a batch of meshes).
            first_idxs: LongTensor of shape (N,); `first_idxs[i] = f` means the
                inputs for batch element i begin at `inputs[f]`.
            max_size: Max length of an element in the batch.

        Returns:
            inputs_padded: FloatTensor of shape (N, max_size, D); rows of batch
                element i are copied from `inputs[first_idxs[i]:]`, the rest is
                zero padding.
        """
        if inputs.dim() != 2:
            raise ValueError("input can only be 2-dimensional.")
        if first_idxs.dim() != 1:
            raise ValueError("first_idxs can only be 1-dimensional.")
        if inputs.dtype != torch.float32:
            raise ValueError("input has to be of type torch.float32.")
        if first_idxs.dtype != torch.int64:
            raise ValueError("first_idxs has to be of type torch.int64.")
        if not isinstance(max_size, int):
            raise ValueError("max_size has to be int.")

        # backward needs F (the packed length) to rebuild the packed gradient.
        ctx.save_for_backward(first_idxs)
        ctx.num_inputs = int(inputs.shape[0])
        return _C.packed_to_padded(
            inputs.contiguous(), first_idxs.contiguous(), max_size
        )

    @staticmethod
    @once_differentiable
    def backward(ctx, grad_output):
        # The adjoint of packed_to_padded is padded_to_packed.
        (first_idxs,) = ctx.saved_tensors
        grad_input = _C.padded_to_packed(
            grad_output.contiguous(), first_idxs, ctx.num_inputs
        )
        return grad_input, None, None


def packed_to_padded(
    inputs: torch.Tensor, first_idxs: torch.LongTensor, max_size: int
) -> torch.Tensor:
    """
    Torch wrapper that handles allowed input shapes. See description below.

    Args:
        inputs: FloatTensor of shape (F,) or (F, ...), representing the packed
            batch tensor, e.g. areas for faces in a batch of meshes.
        first_idxs: LongTensor of shape (N,) where N is the number of
            elements in the batch and `first_idxs[i] = f`
            means that the inputs for batch element i begin at `inputs[f]`.
        max_size: Max length of an element in the batch.

    Returns:
        inputs_padded: FloatTensor of shape (N, max_size) or (N, max_size, ...)
            where max_size is max of `sizes`. The values for batch element i
            which start at `inputs[first_idxs[i]]` will be copied to
            `inputs_padded[i, :]`, with zeros padding out the extra inputs.

    To handle the allowed input shapes, a (F,) input is viewed as (F, 1) and
    trailing dims of a (F, ...) input are flattened before the kernel call;
    the output is reshaped back accordingly.
    """
    original_shape = inputs.shape
    ndim = inputs.dim()
    # Kernel expects exactly (F, D): lift (F,) to (F, 1), flatten (F, ...).
    as_2d = inputs.unsqueeze(1) if ndim == 1 else inputs.reshape(original_shape[0], -1)
    padded = _PackedToPadded.apply(as_2d, first_idxs, max_size)
    # Undo the reshaping on the (N, max_size, D) output.
    if ndim == 1:
        return padded.squeeze(2)
    if ndim == 2:
        return padded
    return padded.view(*padded.shape[:2], *original_shape[1:])
+ """ + if not (inputs.dim() == 3): + raise ValueError("input can only be 3-dimensional.") + if not (first_idxs.dim() == 1): + raise ValueError("first_idxs can only be 1-dimensional.") + if not (inputs.dtype == torch.float32): + raise ValueError("input has to be of type torch.float32.") + if not (first_idxs.dtype == torch.int64): + raise ValueError("first_idxs has to be of type torch.int64.") + if not isinstance(num_inputs, int): + raise ValueError("max_size has to be int.") + + ctx.save_for_backward(first_idxs) + ctx.max_size = inputs.shape[1] + inputs, first_idxs = inputs.contiguous(), first_idxs.contiguous() + inputs_packed = _C.padded_to_packed(inputs, first_idxs, num_inputs) + return inputs_packed + + @staticmethod + @once_differentiable + def backward(ctx, grad_output): + grad_output = grad_output.contiguous() + first_idxs = ctx.saved_tensors[0] + max_size = ctx.max_size + grad_input = _C.packed_to_padded(grad_output, first_idxs, max_size) + return grad_input, None, None + + +def padded_to_packed( + inputs: torch.Tensor, + first_idxs: torch.LongTensor, + num_inputs: int, + max_size_dim: int = 1, +) -> torch.Tensor: + """ + Torch wrapper that handles allowed input shapes. See description below. + + Args: + inputs: FloatTensor of shape (N, ..., max_size) or (N, ..., max_size, ...), + representing the padded tensor, e.g. areas for faces in a batch of + meshes, where max_size occurs on max_size_dim-th position. + first_idxs: LongTensor of shape (N,) where N is the number of + elements in the batch and `first_idxs[i] = f` + means that the inputs for batch element i begin at `inputs_packed[f]`. + num_inputs: Number of packed entries (= F) + max_size_dim: the dimension to be packed + + Returns: + inputs_packed: FloatTensor of shape (F,) or (F, ...) where + `inputs_packed[first_idx[i]:first_idx[i+1]] = inputs[i, ..., :delta[i]]`, + where `delta[i] = first_idx[i+1] - first_idx[i]`. 
+ + To handle the allowed input shapes, we convert the inputs tensor of shape + (N, max_size) to (N, max_size, 1). We reshape the output back to (F,) from + (F, 1). + """ + n_dims = inputs.dim() + # move the variable dim to position 1 + inputs = inputs.movedim(max_size_dim, 1) + + # if inputs is of shape (N, max_size), reshape into (N, max_size, 1)) + input_shape = inputs.shape + if n_dims == 2: + inputs = inputs.unsqueeze(2) + else: + inputs = inputs.reshape(*input_shape[:2], -1) + inputs_packed = _PaddedToPacked.apply(inputs, first_idxs, num_inputs) + # if input is flat, reshape output to (F,) from (F, 1) + # else reshape output to (F, ...) + if n_dims == 2: + return inputs_packed.squeeze(1) + + return inputs_packed.view(-1, *input_shape[2:]) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/perspective_n_points.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/perspective_n_points.py new file mode 100644 index 0000000000000000000000000000000000000000..db5e0057bceb6cf7b18b8916a81cad6424701056 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/perspective_n_points.py @@ -0,0 +1,412 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +""" +This file contains Efficient PnP algorithm for Perspective-n-Points problem. +It finds a camera position (defined by rotation `R` and translation `T`) that +minimizes re-projection error between the given 3D points `x` and +the corresponding uncalibrated 2D points `y`. 
+""" + +import warnings +from typing import NamedTuple, Optional + +import torch +import torch.nn.functional as F +from pytorch3d.ops import points_alignment, utils as oputil + + +class EpnpSolution(NamedTuple): + x_cam: torch.Tensor + R: torch.Tensor + T: torch.Tensor + err_2d: torch.Tensor + err_3d: torch.Tensor + + +def _define_control_points(x, weight, storage_opts=None): + """ + Returns control points that define barycentric coordinates + Args: + x: Batch of 3-dimensional points of shape `(minibatch, num_points, 3)`. + weight: Batch of non-negative weights of + shape `(minibatch, num_point)`. `None` means equal weights. + storage_opts: dict of keyword arguments to the tensor constructor. + """ + storage_opts = storage_opts or {} + x_mean = oputil.wmean(x, weight) + c_world = F.pad(torch.eye(3, **storage_opts), (0, 0, 0, 1), value=0.0).expand_as( + x[:, :4, :] + ) + return c_world + x_mean + + +def _compute_alphas(x, c_world): + """ + Computes barycentric coordinates of x in the frame c_world. + Args: + x: Batch of 3-dimensional points of shape `(minibatch, num_points, 3)`. + c_world: control points in world coordinates. + """ + x = F.pad(x, (0, 1), value=1.0) + c = F.pad(c_world, (0, 1), value=1.0) + return torch.matmul(x, torch.inverse(c)) # B x N x 4 + + +def _build_M(y, alphas, weight): + """Returns the matrix defining the reprojection equations. + Args: + y: projected points in camera coordinates of size B x N x 2 + alphas: barycentric coordinates of size B x N x 4 + weight: Batch of non-negative weights of + shape `(minibatch, num_point)`. `None` means equal weights. 
+ """ + bs, n, _ = y.size() + + # prepend t with the column of v's + def prepad(t, v): + return F.pad(t, (1, 0), value=v) + + if weight is not None: + # weight the alphas in order to get a correctly weighted version of M + alphas = alphas * weight[:, :, None] + + # outer left-multiply by alphas + def lm_alphas(t): + return torch.matmul(alphas[..., None], t).reshape(bs, n, 12) + + M = torch.cat( + ( + lm_alphas( + prepad(prepad(-y[:, :, 0, None, None], 0.0), 1.0) + ), # u constraints + lm_alphas( + prepad(prepad(-y[:, :, 1, None, None], 1.0), 0.0) + ), # v constraints + ), + dim=-1, + ).reshape(bs, -1, 12) + + return M + + +def _null_space(m, kernel_dim): + """Finds the null space (kernel) basis of the matrix + Args: + m: the batch of input matrices, B x N x 12 + kernel_dim: number of dimensions to approximate the kernel + Returns: + * a batch of null space basis vectors + of size B x 4 x 3 x kernel_dim + * a batch of spectral values where near-0s correspond to actual + kernel vectors, of size B x kernel_dim + """ + mTm = torch.bmm(m.transpose(1, 2), m) + s, v = torch.linalg.eigh(mTm) + return v[:, :, :kernel_dim].reshape(-1, 4, 3, kernel_dim), s[:, :kernel_dim] + + +def _reproj_error(y_hat, y, weight, eps=1e-9): + """Projects estimated 3D points and computes the reprojection error + Args: + y_hat: a batch of predicted 2D points in homogeneous coordinates + y: a batch of ground-truth 2D points + weight: Batch of non-negative weights of + shape `(minibatch, num_point)`. `None` means equal weights. + Returns: + Optionally weighted RMSE of difference between y and y_hat. + """ + y_hat = y_hat / torch.clamp(y_hat[..., 2:], eps) + dist = ((y - y_hat[..., :2]) ** 2).sum(dim=-1, keepdim=True) ** 0.5 + return oputil.wmean(dist, weight)[:, 0, 0] + + +def _algebraic_error(x_w_rotated, x_cam, weight): + """Computes the residual of Umeyama in 3D. + Args: + x_w_rotated: The given 3D points rotated with the predicted camera. 
def _algebraic_error(x_w_rotated, x_cam, weight):
    """Computes the residual of Umeyama in 3D.
    Args:
        x_w_rotated: The given 3D points rotated with the predicted camera.
        x_cam: the lifted 2D points y
        weight: Batch of non-negative weights of
            shape `(minibatch, num_point)`. `None` means equal weights.
    Returns:
        Optionally weighted mean of per-point squared L2 distances between
        x_w_rotated and x_cam, of shape `(minibatch,)`.
    """
    dist = ((x_w_rotated - x_cam) ** 2).sum(dim=-1, keepdim=True)
    return oputil.wmean(dist, weight)[:, 0, 0]


def _compute_norm_sign_scaling_factor(c_cam, alphas, x_world, y, weight, eps=1e-9):
    """Given a solution, adjusts the scale and flip
    Args:
        c_cam: control points in camera coordinates
        alphas: barycentric coordinates of the points
        x_world: Batch of 3-dimensional points of shape `(minibatch, num_points, 3)`.
        y: Batch of 2-dimensional points of shape `(minibatch, num_points, 2)`.
        weight: Batch of non-negative weights of
            shape `(minibatch, num_point)`. `None` means equal weights.
        eps: epsilon to threshold negative `z` values
    Returns:
        `EpnpSolution` with the sign/scale-corrected camera estimate and its
        2D and 3D errors.
    """
    # position of reference points in camera coordinates
    x_cam = torch.matmul(alphas, c_cam)

    # Flip the whole solution when the (weighted) mean depth is negative:
    # reconstructed points should lie in front of the camera (z > 0).
    x_cam = x_cam * (1.0 - 2.0 * (oputil.wmean(x_cam[..., 2:], weight) < 0).float())
    if torch.any(x_cam[..., 2:] < -eps):
        # Some points are still behind the camera after the flip; report the
        # (weighted) fraction as a warning rather than failing.
        neg_rate = oputil.wmean((x_cam[..., 2:] < 0).float(), weight, dim=(0, 1)).item()
        warnings.warn("\nEPnP: %2.2f%% points have z<0." % (neg_rate * 100.0))

    # Similarity alignment of world points onto the camera-frame estimate
    # recovers R, T and an overall scale s.
    R, T, s = points_alignment.corresponding_points_alignment(
        x_world, x_cam, weight, estimate_scale=True
    )
    # Fold the scale back into the camera-frame points and translation.
    s = s.clamp(eps)
    x_cam = x_cam / s[:, None, None]
    T = T / s[:, None]
    x_w_rotated = torch.matmul(x_world, R) + T[:, None, :]
    err_2d = _reproj_error(x_w_rotated, y, weight)
    err_3d = _algebraic_error(x_w_rotated, x_cam, weight)

    return EpnpSolution(x_cam, R, T, err_2d, err_3d)


def _gen_pairs(input, dim=-2, reducer=lambda a, b: ((a - b) ** 2).sum(dim=-1)):
    """Generates all pairs of different rows and then applies the reducer
    Args:
        input: a tensor
        dim: a dimension to generate pairs across
        reducer: a function of generated pair of rows to apply (beyond just concat)
    Returns:
        for default args, for A x B x C input, will output A x (B choose 2)
    """
    n = input.size()[dim]
    # All index pairs (i, j) with i < j along `dim`.
    # NOTE(review): `range` shadows the builtin; kept as-is for a doc-only edit.
    range = torch.arange(n)
    idx = torch.combinations(range).to(input).long()
    left = input.index_select(dim, idx[:, 0])
    right = input.index_select(dim, idx[:, 1])
    return reducer(left, right)
+ """ + dv = _gen_pairs(v, dim=-3, reducer=lambda a, b: a - b) # B x 6 x 3 x D + + # we should take dot-product of all (i,j), i < j, with coeff 2 + rows_2ij = 2.0 * _gen_pairs(dv, dim=-1, reducer=lambda a, b: (a * b).sum(dim=-2)) + # this should produce B x 6 x (D choose 2) tensor + + # we should take dot-product of all (i,i) + rows_ii = (dv**2).sum(dim=-2) + # this should produce B x 6 x D tensor + + return torch.cat((rows_ii, rows_2ij), dim=-1) + + +def _solve_lstsq_subcols(rhs, lhs, lhs_col_idx): + """Solves an over-determined linear system for selected LHS columns. + A batched version of `torch.lstsq`. + Args: + rhs: right-hand side vectors + lhs: left-hand side matrices + lhs_col_idx: a slice of columns in lhs + Returns: + a least-squares solution for lhs * X = rhs + """ + lhs = lhs.index_select(-1, torch.tensor(lhs_col_idx, device=lhs.device).long()) + return torch.matmul(torch.pinverse(lhs), rhs[:, :, None]) + + +def _binary_sign(t): + return (t >= 0).to(t) * 2.0 - 1.0 + + +def _find_null_space_coords_1(kernel_dsts, cw_dst, eps=1e-9): + """Solves case 1 from the paper [1]; solve for 4 coefficients: + [B11 B22 B33 B44 B12 B13 B14 B23 B24 B34] + ^ ^ ^ ^ + Args: + kernel_dsts: distances between kernel vectors + cw_dst: distances between control points + Returns: + coefficients to weight kernel vectors + [1] Moreno-Noguer, F., Lepetit, V., & Fua, P. (2009). + EPnP: An Accurate O(n) solution to the PnP problem. + International Journal of Computer Vision. 
+ https://www.epfl.ch/labs/cvlab/software/multi-view-stereo/epnp/ + """ + beta = _solve_lstsq_subcols(cw_dst, kernel_dsts, [0, 4, 5, 6]) + + beta = beta * _binary_sign(beta[:, :1, :]) + return beta / torch.clamp(beta[:, :1, :] ** 0.5, eps) + + +def _find_null_space_coords_2(kernel_dsts, cw_dst): + """Solves case 2 from the paper; solve for 3 coefficients: + [B11 B22 B33 B44 B12 B13 B14 B23 B24 B34] + ^ ^ ^ + Args: + kernel_dsts: distances between kernel vectors + cw_dst: distances between control points + Returns: + coefficients to weight kernel vectors + [1] Moreno-Noguer, F., Lepetit, V., & Fua, P. (2009). + EPnP: An Accurate O(n) solution to the PnP problem. + International Journal of Computer Vision. + https://www.epfl.ch/labs/cvlab/software/multi-view-stereo/epnp/ + """ + beta = _solve_lstsq_subcols(cw_dst, kernel_dsts, [0, 4, 1]) + + coord_0 = (beta[:, :1, :].abs() ** 0.5) * _binary_sign(beta[:, 1:2, :]) + coord_1 = (beta[:, 2:3, :].abs() ** 0.5) * ( + (beta[:, :1, :] >= 0) == (beta[:, 2:3, :] >= 0) + ).float() + + return torch.cat((coord_0, coord_1, torch.zeros_like(beta[:, :2, :])), dim=1) + + +def _find_null_space_coords_3(kernel_dsts, cw_dst, eps=1e-9): + """Solves case 3 from the paper; solve for 5 coefficients: + [B11 B22 B33 B44 B12 B13 B14 B23 B24 B34] + ^ ^ ^ ^ ^ + Args: + kernel_dsts: distances between kernel vectors + cw_dst: distances between control points + Returns: + coefficients to weight kernel vectors + [1] Moreno-Noguer, F., Lepetit, V., & Fua, P. (2009). + EPnP: An Accurate O(n) solution to the PnP problem. + International Journal of Computer Vision. 
def _find_null_space_coords_3(kernel_dsts, cw_dst, eps=1e-9):
    """Solves case 3 from the paper; solve for 5 coefficients:
       [B11 B22 B33 B44 B12 B13 B14 B23 B24 B34]
         ^   ^           ^    ^         ^
    Args:
        kernel_dsts: distances between kernel vectors
        cw_dst: distances between control points
        eps: lower bound for the B1 denominator when recovering B3.
    Returns:
        coefficients to weight kernel vectors
    [1] Moreno-Noguer, F., Lepetit, V., & Fua, P. (2009).
    EPnP: An Accurate O(n) solution to the PnP problem.
    International Journal of Computer Vision.
    https://www.epfl.ch/labs/cvlab/software/multi-view-stereo/epnp/
    """
    # Least squares over the columns for [B11 B12 B22 B13 B23].
    beta = _solve_lstsq_subcols(cw_dst, kernel_dsts, [0, 4, 1, 5, 7])

    # B1 = sqrt(|B11|) with the sign of B12; B2 = sqrt(|B22|) kept only when
    # B11 and B22 agree in sign; B3 = B13 / B1 (clamped denominator).
    coord_0 = (beta[:, :1, :].abs() ** 0.5) * _binary_sign(beta[:, 1:2, :])
    coord_1 = (beta[:, 2:3, :].abs() ** 0.5) * (
        (beta[:, :1, :] >= 0) == (beta[:, 2:3, :] >= 0)
    ).float()
    coord_2 = beta[:, 3:4, :] / torch.clamp(coord_0[:, :1, :], eps)

    # B4 is fixed to zero in this case.
    return torch.cat(
        (coord_0, coord_1, coord_2, torch.zeros_like(beta[:, :1, :])), dim=1
    )


def efficient_pnp(
    x: torch.Tensor,
    y: torch.Tensor,
    weights: Optional[torch.Tensor] = None,
    skip_quadratic_eq: bool = False,
) -> EpnpSolution:
    """
    Implements Efficient PnP algorithm [1] for Perspective-n-Points problem:
    finds a camera position (defined by rotation `R` and translation `T`) that
    minimizes re-projection error between the given 3D points `x` and
    the corresponding uncalibrated 2D points `y`, i.e. solves

    `y[i] = Proj(x[i] R[i] + T[i])`

    in the least-squares sense, where `i` are indices within the batch, and
    `Proj` is the perspective projection operator: `Proj([x y z]) = [x/z y/z]`.
    In the noise-less case, 4 points are enough to find the solution as long
    as they are not co-planar.

    Args:
        x: Batch of 3-dimensional points of shape `(minibatch, num_points, 3)`.
        y: Batch of 2-dimensional points of shape `(minibatch, num_points, 2)`.
        weights: Batch of non-negative weights of
            shape `(minibatch, num_point)`. `None` means equal weights.
        skip_quadratic_eq: If True, assumes the solution space for the
            linear system is one-dimensional, i.e. takes the scaled eigenvector
            that corresponds to the smallest eigenvalue as a solution.
            If False, finds the candidate coordinates in the potentially
            4D null space by approximately solving the systems of quadratic
            equations. The best candidate is chosen by examining the 2D
            re-projection error. While this option finds a better solution,
            especially when the number of points is small or perspective
            distortions are low (the points are far away), it may be more
            difficult to back-propagate through.

    Returns:
        `EpnpSolution` namedtuple containing elements:
        **x_cam**: Batch of transformed points `x` that is used to find
            the camera parameters, of shape `(minibatch, num_points, 3)`.
            In the general (noisy) case, they are not exactly equal to
            `x[i] R[i] + T[i]` but are some affine transform of `x[i]`s.
        **R**: Batch of rotation matrices of shape `(minibatch, 3, 3)`.
        **T**: Batch of translation vectors of shape `(minibatch, 3)`.
        **err_2d**: Batch of mean 2D re-projection errors of shape
            `(minibatch,)`. Specifically, if `yhat` is the re-projection for
            the `i`-th batch element, it returns `sum_j norm(yhat_j - y_j)`
            where `j` iterates over points and `norm` denotes the L2 norm.
        **err_3d**: Batch of mean algebraic errors of shape `(minibatch,)`.
            Specifically, those are squared distances between `x_world` and
            estimated points on the rays defined by `y`.

    [1] Moreno-Noguer, F., Lepetit, V., & Fua, P. (2009).
    EPnP: An Accurate O(n) solution to the PnP problem.
    International Journal of Computer Vision.
    https://www.epfl.ch/labs/cvlab/software/multi-view-stereo/epnp/
    """
    # define control points in a world coordinate system (centered on the 3d
    # points centroid); 4 x 3
    # TODO: more stable when initialised with the center and eigenvectors!
    c_world = _define_control_points(
        x.detach(), weights, storage_opts={"dtype": x.dtype, "device": x.device}
    )

    # find the linear combination of the control points to represent the 3d points
    alphas = _compute_alphas(x, c_world)

    # Two reprojection constraints per point over the 12 unknowns.
    M = _build_M(y, alphas, weights)

    # Compute kernel M
    # NOTE(review): `spectrum` (eigenvalues of the kernel candidates) is
    # computed but unused beyond this point; kept for parity with upstream.
    kernel, spectrum = _null_space(M, 4)

    c_world_distances = _gen_pairs(c_world)
    kernel_dsts = _kernel_vec_distances(kernel)

    # Candidate kernel-vector weightings from the three quadratic sub-cases
    # of the paper (skipped entirely when skip_quadratic_eq=True).
    betas = (
        []
        if skip_quadratic_eq
        else [
            fnsc(kernel_dsts, c_world_distances)
            for fnsc in [
                _find_null_space_coords_1,
                _find_null_space_coords_2,
                _find_null_space_coords_3,
            ]
        ]
    )

    # Candidate control points in camera coordinates: the raw kernel plus one
    # weighted combination per beta.
    c_cam_variants = [kernel] + [
        torch.matmul(kernel, beta[:, None, :, :]) for beta in betas
    ]

    # Score each candidate by re-projection / algebraic error.
    solutions = [
        _compute_norm_sign_scaling_factor(c_cam[..., 0], alphas, x, y, weights)
        for c_cam in c_cam_variants
    ]

    # Stack candidates along dim 0, then pick the one with the lowest 2D
    # re-projection error independently per batch element.
    sol_zipped = EpnpSolution(*(torch.stack(list(col)) for col in zip(*solutions)))
    best = torch.argmin(sol_zipped.err_2d, dim=0)

    def gather1d(source, idx):
        # reduces the dim=1 by picking the slices in a 1D tensor idx
        # in other words, it is batched index_select.
        return source.gather(
            0,
            idx.reshape(1, -1, *([1] * (len(source.shape) - 2))).expand_as(source[:1]),
        )[0]

    return EpnpSolution(*[gather1d(sol_col, best) for sol_col in sol_zipped])
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

import warnings
from typing import List, NamedTuple, Optional, TYPE_CHECKING, Union

import torch
from pytorch3d.ops import knn_points
from pytorch3d.structures import utils as strutil

from . import utils as oputil


if TYPE_CHECKING:
    from pytorch3d.structures.pointclouds import Pointclouds


# named tuples for inputs/outputs
class SimilarityTransform(NamedTuple):
    # R: batch of orthonormal matrices of shape (minibatch, d, d)
    R: torch.Tensor
    # T: batch of translations of shape (minibatch, d)
    T: torch.Tensor
    # s: batch of scaling factors of shape (minibatch,)
    s: torch.Tensor


class ICPSolution(NamedTuple):
    # converged: whether ICP terminated via the relative-rmse criterion
    converged: bool
    # rmse: final root mean squared error (None if never computed)
    rmse: Union[torch.Tensor, None]
    # Xt: the input X transformed with the final (R, T, s)
    Xt: torch.Tensor
    # RTs: the final similarity transform
    RTs: SimilarityTransform
    # t_history: transform estimated after each ICP iteration
    t_history: List[SimilarityTransform]


def iterative_closest_point(
    X: Union[torch.Tensor, "Pointclouds"],
    Y: Union[torch.Tensor, "Pointclouds"],
    init_transform: Optional[SimilarityTransform] = None,
    max_iterations: int = 100,
    relative_rmse_thr: float = 1e-6,
    estimate_scale: bool = False,
    allow_reflection: bool = False,
    verbose: bool = False,
) -> ICPSolution:
    """
    Executes the iterative closest point (ICP) algorithm [1, 2] in order to find
    a similarity transformation (rotation `R`, translation `T`, and
    optionally scale `s`) between two given differently-sized sets of
    `d`-dimensional points `X` and `Y`, such that:

    `s[i] X[i] R[i] + T[i] = Y[NN[i]]`,

    for all batch indices `i` in the least squares sense. Here, Y[NN[i]] stands
    for the indices of nearest neighbors from `Y` to each point in `X`.
    Note, however, that the solution is only a local optimum.

    Args:
        **X**: Batch of `d`-dimensional points
            of shape `(minibatch, num_points_X, d)` or a `Pointclouds` object.
        **Y**: Batch of `d`-dimensional points
            of shape `(minibatch, num_points_Y, d)` or a `Pointclouds` object.
        **init_transform**: A named-tuple `SimilarityTransform` of tensors
            `R`, `T`, `s`, where `R` is a batch of orthonormal matrices of
            shape `(minibatch, d, d)`, `T` is a batch of translations
            of shape `(minibatch, d)` and `s` is a batch of scaling factors
            of shape `(minibatch,)`.
        **max_iterations**: The maximum number of ICP iterations.
        **relative_rmse_thr**: A threshold on the relative root mean squared error
            used to terminate the algorithm.
        **estimate_scale**: If `True`, also estimates a scaling component `s`
            of the transformation. Otherwise assumes the identity
            scale and returns a tensor of ones.
        **allow_reflection**: If `True`, allows the algorithm to return `R`
            which is orthonormal but has determinant==-1.
        **verbose**: If `True`, prints status messages during each ICP iteration.

    Returns:
        A named tuple `ICPSolution` with the following fields:
        **converged**: A boolean flag denoting whether the algorithm converged
            successfully (=`True`) or not (=`False`).
        **rmse**: Attained root mean squared error after termination of ICP.
        **Xt**: The point cloud `X` transformed with the final transformation
            (`R`, `T`, `s`). If `X` is a `Pointclouds` object, returns an
            instance of `Pointclouds`, otherwise returns `torch.Tensor`.
        **RTs**: A named tuple `SimilarityTransform` containing
            a batch of similarity transforms with fields:
            **R**: Batch of orthonormal matrices of shape `(minibatch, d, d)`.
            **T**: Batch of translations of shape `(minibatch, d)`.
            **s**: batch of scaling factors of shape `(minibatch, )`.
        **t_history**: A list of named tuples `SimilarityTransform`
            the transformation parameters after each ICP iteration.

    References:
        [1] Besl & McKay: A Method for Registration of 3-D Shapes. TPAMI, 1992.
        [2] https://en.wikipedia.org/wiki/Iterative_closest_point
    """

    # make sure we convert input Pointclouds structures to
    # padded tensors of shape (N, P, 3)
    Xt, num_points_X = oputil.convert_pointclouds_to_tensor(X)
    Yt, num_points_Y = oputil.convert_pointclouds_to_tensor(Y)

    b, size_X, dim = Xt.shape

    if (Xt.shape[2] != Yt.shape[2]) or (Xt.shape[0] != Yt.shape[0]):
        raise ValueError(
            "Point sets X and Y have to have the same "
            + "number of batches and data dimensions."
        )

    if ((num_points_Y < Yt.shape[1]).any() or (num_points_X < Xt.shape[1]).any()) and (
        num_points_Y != num_points_X
    ).any():
        # we have a heterogeneous input (e.g. because X/Y is
        # an instance of Pointclouds); build a per-point validity mask so
        # padded entries do not contribute to the alignment
        mask_X = (
            torch.arange(size_X, dtype=torch.int64, device=Xt.device)[None]
            < num_points_X[:, None]
        ).type_as(Xt)
    else:
        mask_X = Xt.new_ones(b, size_X)

    # clone the initial point cloud; each iteration re-aligns Xt_init (not the
    # incrementally-updated Xt) so transforms in t_history are absolute
    Xt_init = Xt.clone()

    if init_transform is not None:
        # parse the initial transform from the input and apply to Xt
        try:
            R, T, s = init_transform
            assert (
                R.shape == torch.Size((b, dim, dim))
                and T.shape == torch.Size((b, dim))
                and s.shape == torch.Size((b,))
            )
        except Exception:
            raise ValueError(
                "The initial transformation init_transform has to be "
                "a named tuple SimilarityTransform with elements (R, T, s). "
                "R are dim x dim orthonormal matrices of shape "
                "(minibatch, dim, dim), T is a batch of dim-dimensional "
                "translations of shape (minibatch, dim) and s is a batch "
                "of scalars of shape (minibatch,)."
            ) from None
        # apply the init transform to the input point cloud
        Xt = _apply_similarity_transform(Xt, R, T, s)
    else:
        # initialize the transformation with identity
        R = oputil.eyes(dim, b, device=Xt.device, dtype=Xt.dtype)
        T = Xt.new_zeros((b, dim))
        s = Xt.new_ones(b)

    prev_rmse = None
    rmse = None
    iteration = -1
    converged = False

    # initialize the transformation history
    t_history = []

    # the main loop over ICP iterations
    for iteration in range(max_iterations):
        # 1-NN correspondence: for each point of Xt, its nearest point in Yt
        Xt_nn_points = knn_points(
            Xt, Yt, lengths1=num_points_X, lengths2=num_points_Y, K=1, return_nn=True
        ).knn[:, :, 0, :]

        # get the alignment of the nearest neighbors from Yt with Xt_init
        R, T, s = corresponding_points_alignment(
            Xt_init,
            Xt_nn_points,
            weights=mask_X,
            estimate_scale=estimate_scale,
            allow_reflection=allow_reflection,
        )

        # apply the estimated similarity transform to Xt_init
        Xt = _apply_similarity_transform(Xt_init, R, T, s)

        # add the current transformation to the history
        t_history.append(SimilarityTransform(R, T, s))

        # compute the root mean squared error
        # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
        Xt_sq_diff = ((Xt - Xt_nn_points) ** 2).sum(2)
        rmse = oputil.wmean(Xt_sq_diff[:, :, None], mask_X).sqrt()[:, 0, 0]

        # compute the relative rmse
        if prev_rmse is None:
            relative_rmse = rmse.new_ones(b)
        else:
            relative_rmse = (prev_rmse - rmse) / prev_rmse

        if verbose:
            rmse_msg = (
                f"ICP iteration {iteration}: mean/max rmse = "
                + f"{rmse.mean():1.2e}/{rmse.max():1.2e} "
                + f"; mean relative rmse = {relative_rmse.mean():1.2e}"
            )
            print(rmse_msg)

        # check for convergence; all batch elements must pass the threshold
        if (relative_rmse <= relative_rmse_thr).all():
            converged = True
            break

        # update the previous rmse
        prev_rmse = rmse

    if verbose:
        if converged:
            print(f"ICP has converged in {iteration + 1} iterations.")
        else:
            print(f"ICP has not converged in {max_iterations} iterations.")

    if oputil.is_pointclouds(X):
        # return the same container type that was passed in
        Xt = X.update_padded(Xt)  # type: ignore

    return ICPSolution(converged, rmse, Xt, SimilarityTransform(R, T, s), t_history)


# threshold for checking that point crosscorelation
# is full rank in corresponding_points_alignment
AMBIGUOUS_ROT_SINGULAR_THR = 1e-15


def corresponding_points_alignment(
    X: Union[torch.Tensor, "Pointclouds"],
    Y: Union[torch.Tensor, "Pointclouds"],
    weights: Union[torch.Tensor, List[torch.Tensor], None] = None,
    estimate_scale: bool = False,
    allow_reflection: bool = False,
    eps: float = 1e-9,
) -> SimilarityTransform:
    """
    Finds a similarity transformation (rotation `R`, translation `T`
    and optionally scale `s`) between two given sets of corresponding
    `d`-dimensional points `X` and `Y` such that:

    `s[i] X[i] R[i] + T[i] = Y[i]`,

    for all batch indexes `i` in the least squares sense.

    The algorithm is also known as Umeyama [1].

    Args:
        **X**: Batch of `d`-dimensional points of shape `(minibatch, num_point, d)`
            or a `Pointclouds` object.
        **Y**: Batch of `d`-dimensional points of shape `(minibatch, num_point, d)`
            or a `Pointclouds` object.
        **weights**: Batch of non-negative weights of
            shape `(minibatch, num_point)` or list of `minibatch` 1-dimensional
            tensors that may have different shapes; in that case, the length of
            i-th tensor should be equal to the number of points in X_i and Y_i.
            Passing `None` means uniform weights.
        **estimate_scale**: If `True`, also estimates a scaling component `s`
            of the transformation. Otherwise assumes an identity
            scale and returns a tensor of ones.
        **allow_reflection**: If `True`, allows the algorithm to return `R`
            which is orthonormal but has determinant==-1.
        **eps**: A scalar for clamping to avoid dividing by zero. Active for the
            code that estimates the output scale `s`.

    Returns:
        3-element named tuple `SimilarityTransform` containing
        - **R**: Batch of orthonormal matrices of shape `(minibatch, d, d)`.
        - **T**: Batch of translations of shape `(minibatch, d)`.
        - **s**: batch of scaling factors of shape `(minibatch, )`.

    References:
        [1] Shinji Umeyama: Least-Squares Estimation of
        Transformation Parameters Between Two Point Patterns
    """

    # make sure we convert input Pointclouds structures to tensors
    Xt, num_points = oputil.convert_pointclouds_to_tensor(X)
    Yt, num_points_Y = oputil.convert_pointclouds_to_tensor(Y)

    if (Xt.shape != Yt.shape) or (num_points != num_points_Y).any():
        raise ValueError(
            "Point sets X and Y have to have the same \
            number of batches, points and dimensions."
        )
    if weights is not None:
        if isinstance(weights, list):
            if any(np != w.shape[0] for np, w in zip(num_points, weights)):
                raise ValueError(
                    "number of weights should equal to the "
                    + "number of points in the point cloud."
                )
            # pad the ragged per-cloud weights to a single (b, n) tensor
            weights = [w[..., None] for w in weights]
            weights = strutil.list_to_padded(weights)[..., 0]

        if Xt.shape[:2] != weights.shape:
            raise ValueError("weights should have the same first two dimensions as X.")

    b, n, dim = Xt.shape

    if (num_points < Xt.shape[1]).any() or (num_points < Yt.shape[1]).any():
        # in case we got Pointclouds as input, mask the unused entries in Xc, Yc
        mask = (
            torch.arange(n, dtype=torch.int64, device=Xt.device)[None]
            < num_points[:, None]
        ).type_as(Xt)
        weights = mask if weights is None else mask * weights.type_as(Xt)

    # compute the centroids of the point sets
    Xmu = oputil.wmean(Xt, weight=weights, eps=eps)
    Ymu = oputil.wmean(Yt, weight=weights, eps=eps)

    # mean-center the point sets
    Xc = Xt - Xmu
    Yc = Yt - Ymu

    total_weight = torch.clamp(num_points, 1)
    # special handling for heterogeneous point clouds and/or input weights
    if weights is not None:
        Xc *= weights[:, :, None]
        Yc *= weights[:, :, None]
        total_weight = torch.clamp(weights.sum(1), eps)

    if (num_points < (dim + 1)).any():
        warnings.warn(
            "The size of one of the point clouds is <= dim+1. "
            + "corresponding_points_alignment cannot return a unique rotation."
        )

    # compute the covariance XYcov between the point sets Xc, Yc
    XYcov = torch.bmm(Xc.transpose(2, 1), Yc)
    XYcov = XYcov / total_weight[:, None, None]

    # decompose the covariance matrix XYcov
    U, S, V = torch.svd(XYcov)

    # catch ambiguous rotation by checking the magnitude of singular values
    if (S.abs() <= AMBIGUOUS_ROT_SINGULAR_THR).any() and not (
        num_points < (dim + 1)
    ).any():
        warnings.warn(
            "Excessively low rank of "
            + "cross-correlation between aligned point clouds. "
            + "corresponding_points_alignment cannot return a unique rotation."
        )

    # identity matrix used for fixing reflections
    E = torch.eye(dim, dtype=XYcov.dtype, device=XYcov.device)[None].repeat(b, 1, 1)

    if not allow_reflection:
        # reflection test:
        #   checks whether the estimated rotation has det==1,
        #   if not, finds the nearest rotation s.t. det==1 by
        #   flipping the sign of the last singular vector U
        R_test = torch.bmm(U, V.transpose(2, 1))
        E[:, -1, -1] = torch.det(R_test)

    # find the rotation matrix by composing U and V again
    R = torch.bmm(torch.bmm(U, E), V.transpose(2, 1))

    if estimate_scale:
        # estimate the scaling component of the transformation
        trace_ES = (torch.diagonal(E, dim1=1, dim2=2) * S).sum(1)
        Xcov = (Xc * Xc).sum((1, 2)) / total_weight

        # the scaling component
        s = trace_ES / torch.clamp(Xcov, eps)

        # translation component
        T = Ymu[:, 0, :] - s[:, None] * torch.bmm(Xmu, R)[:, 0, :]
    else:
        # translation component
        T = Ymu[:, 0, :] - torch.bmm(Xmu, R)[:, 0, :]

        # unit scaling since we do not estimate scale
        s = T.new_ones(b)

    return SimilarityTransform(R, T, s)


def _apply_similarity_transform(
    X: torch.Tensor, R: torch.Tensor, T: torch.Tensor, s: torch.Tensor
) -> torch.Tensor:
    """
    Applies a similarity transformation parametrized with a batch of orthonormal
    matrices `R` of shape `(minibatch, d, d)`, a batch of translations `T`
    of shape `(minibatch, d)` and a batch of scaling factors `s`
    of shape `(minibatch,)` to a given `d`-dimensional cloud `X`
    of shape `(minibatch, num_points, d)`

    Returns:
        The transformed cloud `s X R + T` of shape `(minibatch, num_points, d)`.
    """
    # note points are row vectors, hence X @ R (not R @ X)
    X = s[:, None, None] * torch.bmm(X, R) + T[:, None, :]
    return X
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

from typing import Tuple, TYPE_CHECKING, Union

import torch
from pytorch3d.common.workaround import symeig3x3

from .utils import convert_pointclouds_to_tensor, get_point_covariances


if TYPE_CHECKING:
    from ..structures import Pointclouds


def estimate_pointcloud_normals(
    pointclouds: Union[torch.Tensor, "Pointclouds"],
    neighborhood_size: int = 50,
    disambiguate_directions: bool = True,
    *,
    use_symeig_workaround: bool = True,
) -> torch.Tensor:
    """
    Estimates the normals of a batch of `pointclouds`.

    The function uses `estimate_pointcloud_local_coord_frames` to estimate
    the normals. Please refer to that function for more detailed information.

    Args:
        **pointclouds**: Batch of 3-dimensional points of shape
            `(minibatch, num_point, 3)` or a `Pointclouds` object.
        **neighborhood_size**: The size of the neighborhood used to estimate the
            geometry around each point.
        **disambiguate_directions**: If `True`, uses the algorithm from [1] to
            ensure sign consistency of the normals of neighboring points.
        **use_symeig_workaround**: If `True`, uses a custom eigenvalue
            calculation.

    Returns:
        **normals**: A tensor of normals for each input point
        of shape `(minibatch, num_point, 3)`.
        If `pointclouds` are of `Pointclouds` class, returns a padded tensor.

    References:
        [1] Tombari, Salti, Di Stefano: Unique Signatures of Histograms for
        Local Surface Description, ECCV 2010.
    """

    curvatures, local_coord_frames = estimate_pointcloud_local_coord_frames(
        pointclouds,
        neighborhood_size=neighborhood_size,
        disambiguate_directions=disambiguate_directions,
        use_symeig_workaround=use_symeig_workaround,
    )

    # the normals correspond to the first vector of each local coord frame
    normals = local_coord_frames[:, :, :, 0]

    return normals


def estimate_pointcloud_local_coord_frames(
    pointclouds: Union[torch.Tensor, "Pointclouds"],
    neighborhood_size: int = 50,
    disambiguate_directions: bool = True,
    *,
    use_symeig_workaround: bool = True,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Estimates the principal directions of curvature (which includes normals)
    of a batch of `pointclouds`.

    The algorithm first finds `neighborhood_size` nearest neighbors for each
    point of the point clouds, followed by obtaining principal vectors of
    covariance matrices of each of the point neighborhoods.
    The main principal vector corresponds to the normals, while the
    other 2 are the direction of the highest curvature and the 2nd highest
    curvature.

    Note that each principal direction is given up to a sign. Hence,
    the function implements `disambiguate_directions` switch that allows
    to ensure consistency of the sign of neighboring normals. The implementation
    follows the sign disabiguation from SHOT descriptors [1].

    The algorithm also returns the curvature values themselves.
    These are the eigenvalues of the estimated covariance matrices
    of each point neighborhood.

    Args:
        **pointclouds**: Batch of 3-dimensional points of shape
            `(minibatch, num_point, 3)` or a `Pointclouds` object.
        **neighborhood_size**: The size of the neighborhood used to estimate the
            geometry around each point.
        **disambiguate_directions**: If `True`, uses the algorithm from [1] to
            ensure sign consistency of the normals of neighboring points.
        **use_symeig_workaround**: If `True`, uses a custom eigenvalue
            calculation.

    Returns:
        **curvatures**: The three principal curvatures of each point
            of shape `(minibatch, num_point, 3)`.
            If `pointclouds` are of `Pointclouds` class, returns a padded tensor.
        **local_coord_frames**: The three principal directions of the curvature
            around each point of shape `(minibatch, num_point, 3, 3)`.
            The principal directions are stored in columns of the output.
            E.g. `local_coord_frames[i, j, :, 0]` is the normal of
            `j`-th point in the `i`-th pointcloud.
            If `pointclouds` are of `Pointclouds` class, returns a padded tensor.

    References:
        [1] Tombari, Salti, Di Stefano: Unique Signatures of Histograms for
        Local Surface Description, ECCV 2010.
    """

    points_padded, num_points = convert_pointclouds_to_tensor(pointclouds)

    ba, N, dim = points_padded.shape
    if dim != 3:
        raise ValueError(
            "The pointclouds argument has to be of shape (minibatch, N, 3)"
        )

    if (num_points <= neighborhood_size).any():
        raise ValueError(
            "The neighborhood_size argument has to be"
            + " >= size of each of the point clouds."
        )

    # undo global mean for stability
    # TODO: replace with tutil.wmean once landed
    pcl_mean = points_padded.sum(1) / num_points[:, None]
    points_centered = points_padded - pcl_mean[:, None, :]

    # get the per-point covariance and nearest neighbors used to compute it
    cov, knns = get_point_covariances(points_centered, num_points, neighborhood_size)

    # get the local coord frames as principal directions of
    # the per-point covariance
    # this is done with torch.symeig / torch.linalg.eigh, which returns the
    # eigenvectors (=principal directions) in an ascending order of their
    # corresponding eigenvalues, and the smallest eigenvalue's eigenvector
    # corresponds to the normal direction; or with a custom equivalent.
    if use_symeig_workaround:
        curvatures, local_coord_frames = symeig3x3(cov, eigenvectors=True)
    else:
        curvatures, local_coord_frames = torch.linalg.eigh(cov)

    # disambiguate the directions of individual principal vectors
    if disambiguate_directions:
        # disambiguate normal
        n = _disambiguate_vector_directions(
            points_centered, knns, local_coord_frames[:, :, :, 0]
        )
        # disambiguate the main curvature
        z = _disambiguate_vector_directions(
            points_centered, knns, local_coord_frames[:, :, :, 2]
        )
        # the secondary curvature is just a cross between n and z
        y = torch.cross(n, z, dim=2)
        # cat to form the set of principal directions
        local_coord_frames = torch.stack((n, y, z), dim=3)

    return curvatures, local_coord_frames


def _disambiguate_vector_directions(pcl, knns, vecs: torch.Tensor) -> torch.Tensor:
    """
    Disambiguates normal directions according to [1].

    Each direction vector is flipped so that it points towards the majority of
    its point's nearest neighbors.

    References:
        [1] Tombari, Salti, Di Stefano: Unique Signatures of Histograms for
        Local Surface Description, ECCV 2010.
    """
    # parse out K from the shape of knns
    K = knns.shape[2]
    # the difference between the mean of each neighborhood and
    # each element of the neighborhood
    df = knns - pcl[:, :, None]
    # projection of the difference on the principal direction
    proj = (vecs[:, :, None] * df).sum(3)
    # check how many projections are positive
    n_pos = (proj > 0).type_as(knns).sum(2, keepdim=True)
    # flip the principal directions where number of positive correlations
    # is below half the neighborhood size (majority vote on the sign)
    flip = (n_pos < (0.5 * K)).type_as(knns)
    vecs = (1.0 - 2.0 * flip) * vecs
    return vecs


# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

from typing import Optional, Tuple, TYPE_CHECKING

import torch
from pytorch3d import _C
from torch.autograd import Function
from torch.autograd.function import once_differentiable


if TYPE_CHECKING:
    from ..structures import Pointclouds, Volumes


class _points_to_volumes_function(Function):
    """
    For each point in a pointcloud, add point_weight to the
    corresponding volume density and point_weight times its features
    to the corresponding volume features.

    This function does not require any contiguity internally and therefore
    doesn't need to make copies of its inputs, which is useful when GPU memory
    is at a premium.
(An implementation requiring contiguous inputs might be faster + though). The volumes are modified in place. + + This function is differentiable with respect to + points_features, volume_densities and volume_features. + If splat is True then it is also differentiable with respect to + points_3d. + + It may be useful to think about this function as a sort of opposite to + torch.nn.functional.grid_sample with 5D inputs. + + Args: + points_3d: Batch of 3D point cloud coordinates of shape + `(minibatch, N, 3)` where N is the number of points + in each point cloud. Coordinates have to be specified in the + local volume coordinates (ranging in [-1, 1]). + points_features: Features of shape `(minibatch, N, feature_dim)` + corresponding to the points of the input point cloud `points_3d`. + volume_features: Batch of input feature volumes + of shape `(minibatch, feature_dim, D, H, W)` + volume_densities: Batch of input feature volume densities + of shape `(minibatch, 1, D, H, W)`. Each voxel should + contain a non-negative number corresponding to its + opaqueness (the higher, the less transparent). + + grid_sizes: `LongTensor` of shape (minibatch, 3) representing the + spatial resolutions of each of the the non-flattened `volumes` + tensors. Note that the following has to hold: + `torch.prod(grid_sizes, dim=1)==N_voxels`. + + point_weight: A scalar controlling how much weight a single point has. + + mask: A binary mask of shape `(minibatch, N)` determining + which 3D points are going to be converted to the resulting + volume. Set to `None` if all points are valid. + + align_corners: as for grid_sample. + + splat: if true, trilinear interpolation. If false all the weight goes in + the nearest voxel. + + Returns: + volume_densities and volume_features, which have been modified in place. + """ + + @staticmethod + # pyre-fixme[14]: `forward` overrides method defined in `Function` inconsistently. 
+ def forward( + ctx, + points_3d: torch.Tensor, + points_features: torch.Tensor, + volume_densities: torch.Tensor, + volume_features: torch.Tensor, + grid_sizes: torch.LongTensor, + point_weight: float, + mask: torch.Tensor, + align_corners: bool, + splat: bool, + ): + + ctx.mark_dirty(volume_densities, volume_features) + + N, P, D = points_3d.shape + if D != 3: + raise ValueError("points_3d must be 3D") + if points_3d.dtype != torch.float32: + raise ValueError("points_3d must be float32") + if points_features.dtype != torch.float32: + raise ValueError("points_features must be float32") + N1, P1, C = points_features.shape + if N1 != N or P1 != P: + raise ValueError("Bad points_features shape") + if volume_densities.dtype != torch.float32: + raise ValueError("volume_densities must be float32") + N2, one, D, H, W = volume_densities.shape + if N2 != N or one != 1: + raise ValueError("Bad volume_densities shape") + if volume_features.dtype != torch.float32: + raise ValueError("volume_features must be float32") + N3, C1, D1, H1, W1 = volume_features.shape + if N3 != N or C1 != C or D1 != D or H1 != H or W1 != W: + raise ValueError("Bad volume_features shape") + if grid_sizes.dtype != torch.int64: + raise ValueError("grid_sizes must be int64") + N4, D1 = grid_sizes.shape + if N4 != N or D1 != 3: + raise ValueError("Bad grid_sizes.shape") + if mask.dtype != torch.float32: + raise ValueError("mask must be float32") + N5, P2 = mask.shape + if N5 != N or P2 != P: + raise ValueError("Bad mask shape") + + # pyre-fixme[16]: Module `pytorch3d` has no attribute `_C`. 
+ _C.points_to_volumes_forward( + points_3d, + points_features, + volume_densities, + volume_features, + grid_sizes, + mask, + point_weight, + align_corners, + splat, + ) + if splat: + ctx.save_for_backward(points_3d, points_features, grid_sizes, mask) + else: + ctx.save_for_backward(points_3d, grid_sizes, mask) + ctx.point_weight = point_weight + ctx.splat = splat + ctx.align_corners = align_corners + return volume_densities, volume_features + + @staticmethod + @once_differentiable + def backward(ctx, grad_volume_densities, grad_volume_features): + splat = ctx.splat + N, C = grad_volume_features.shape[:2] + if splat: + points_3d, points_features, grid_sizes, mask = ctx.saved_tensors + P = points_3d.shape[1] + grad_points_3d = torch.zeros_like(points_3d) + else: + points_3d, grid_sizes, mask = ctx.saved_tensors + P = points_3d.shape[1] + ones = points_3d.new_zeros(1, 1, 1) + # There is no gradient. Just need something to let its accessors exist. + grad_points_3d = ones.expand_as(points_3d) + # points_features not needed. Just need something to let its accessors exist. 
_points_to_volumes = _points_to_volumes_function.apply


def add_pointclouds_to_volumes(
    pointclouds: "Pointclouds",
    initial_volumes: "Volumes",
    mode: str = "trilinear",
    min_weight: float = 1e-4,
    rescale_features: bool = True,
    _python: bool = False,
) -> "Volumes":
    """
    Splat a batch of point clouds into a batch of pre-initialized volumes.

    Every point of `pointclouds` casts weighted votes (weights chosen by
    `mode`) into the pre-initialized `features` and `densities` buffers of
    `initial_volumes`; a copy of the volumes holding the updated buffers is
    returned.

    Example::

        # init a random point cloud
        pointclouds = Pointclouds(
            points=torch.randn(4, 100, 3), features=torch.rand(4, 100, 5)
        )
        # init an empty volume centered around [0.5, 0.5, 0.5] in world
        # coordinates with a voxel size of 1.0.
        initial_volumes = Volumes(
            features = torch.zeros(4, 5, 25, 25, 25),
            densities = torch.zeros(4, 1, 25, 25, 25),
            volume_translation = [-0.5, -0.5, -0.5],
            voxel_size = 1.0,
        )
        # add the pointcloud to the 'initial_volumes' buffer using
        # trilinear splatting
        updated_volumes = add_pointclouds_to_volumes(
            pointclouds=pointclouds,
            initial_volumes=initial_volumes,
            mode="trilinear",
        )

    Args:
        pointclouds: Batch of 3D point clouds; `pointclouds.features` must
            be defined.
        initial_volumes: Batch of initial `Volumes` with 1-dimensional
            non-negative densities (higher = less transparent).
        mode: `"nearest"` — each point votes for its rounded lattice cell
            (not differentiable w.r.t. point locations), or `"trilinear"` —
            each point casts 8 trilinearly-weighted votes to its
            8-neighborhood (fully differentiable).
        min_weight: Lower bound on the total per-voxel weight used to
            normalize accumulated features; `mode=="trilinear"` only.
        rescale_features: If True, output features are averaged over the
            accumulated vote weight; if False they are plain sums. Output
            densities are summed without rescaling in both cases.
        _python: Use the pure-Python implementation (e.g. for testing);
            slower and more memory-hungry.

    Returns:
        updated_volumes: `Volumes` structure containing the conversion result.

    Raises:
        ValueError: on batch-size mismatch or missing point features.
    """
    if len(initial_volumes) != len(pointclouds):
        raise ValueError(
            "'initial_volumes' and 'pointclouds' have to have the same batch size."
        )

    point_features = pointclouds.features_padded()
    if point_features is None:
        raise ValueError("'pointclouds' have to have their 'features' defined.")
    points_world = pointclouds.points_padded()

    # Build a validity mask flagging the real (non-padding) points of each
    # cloud in the padded representation.
    cloud_sizes = pointclouds.num_points_per_cloud().type_as(point_features)
    point_rank = torch.arange(
        # pyre-fixme[6]: For 1st param expected `Union[bool, float, int]` but got `Tensor`.
        cloud_sizes.max(),
        dtype=point_features.dtype,
        device=point_features.device,
    )
    valid_mask = (point_rank[None, :] < cloud_sizes[:, None]).type_as(point_rank)

    # Express the points in the local [-1, 1] coordinate frame of the volume.
    points_local = initial_volumes.world_to_local_coords(points_world)

    features_new, densities_new = add_points_features_to_volume_densities_features(
        points_3d=points_local,
        points_features=point_features,
        volume_features=initial_volumes.features(),
        volume_densities=initial_volumes.densities(),
        min_weight=min_weight,
        grid_sizes=initial_volumes.get_grid_sizes(),
        mask=valid_mask,
        mode=mode,
        rescale_features=rescale_features,
        align_corners=initial_volumes.get_align_corners(),
        _python=_python,
    )

    return initial_volumes.update_padded(
        new_densities=densities_new, new_features=features_new
    )
def add_points_features_to_volume_densities_features(
    points_3d: torch.Tensor,
    points_features: torch.Tensor,
    volume_densities: torch.Tensor,
    volume_features: Optional[torch.Tensor],
    mode: str = "trilinear",
    min_weight: float = 1e-4,
    mask: Optional[torch.Tensor] = None,
    grid_sizes: Optional[torch.LongTensor] = None,
    rescale_features: bool = True,
    _python: bool = False,
    align_corners: bool = True,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Splat a batch of point clouds, given as raw coordinate/feature tensors,
    into density and feature volumes.

    Args:
        points_3d: `(minibatch, N, 3)` point coordinates, expressed in the
            local volume frame (range [-1, 1]).
        points_features: `(minibatch, N, feature_dim)` per-point features.
        volume_densities: `(minibatch, 1, D, H, W)` non-negative opacities
            (higher = less transparent).
        volume_features: `(minibatch, feature_dim, D, H, W)` feature volume,
            or `None` to start from an automatically-created zero volume.
        mode: `"nearest"` — round each point to the lattice (gradients w.r.t.
            `points_3d` are *not* defined), or `"trilinear"` — cast 8
            trilinearly-weighted votes per point (fully differentiable).
        min_weight: Lower bound on the total per-voxel normalization weight;
            only active for `mode=="trilinear"`.
        mask: `(minibatch, N)` binary mask selecting which points are
            converted; `None` means all points are valid.
        grid_sizes: `(minibatch, 3)` spatial resolutions of each volume, or
            `None` to use the whole volume for every batch element.
        rescale_features: If True, output features are averaged by the total
            vote weight; if False they are plain sums. Output densities are
            summed without rescaling in both cases.
        _python: Use the pure-Python implementation.
        align_corners: Same meaning as for `torch.nn.functional.grid_sample`.

    Returns:
        volume_features: `(minibatch, feature_dim, D, H, W)` output volume.
        volume_densities: `(minibatch, 1, D, H, W)` total vote weight cast
            to each voxel.

    Raises:
        ValueError: for multi-channel densities or an unknown `mode`.
    """
    # Unpacking also asserts that points_features is a rank-3 tensor.
    batch, n_points, feature_dim = points_features.shape
    _, density_dim = volume_densities.shape[:2]
    if density_dim != 1:
        raise ValueError("Only one-dimensional densities are allowed.")

    if grid_sizes is None:
        # Default: every batch element uses the full (D, H, W) extent.
        grid_sizes = (
            torch.LongTensor(list(volume_densities.shape[2:]))
            .to(volume_densities.device)
            .expand(volume_densities.shape[0], 3)
        )

    if _python:
        return _add_points_features_to_volume_densities_features_python(
            points_3d=points_3d,
            points_features=points_features,
            volume_densities=volume_densities,
            volume_features=volume_features,
            mode=mode,
            min_weight=min_weight,
            mask=mask,
            # pyre-fixme[6]: For 8th param expected `LongTensor` but got `Tensor`.
            grid_sizes=grid_sizes,
        )

    splat_by_mode = {"trilinear": True, "nearest": False}
    if mode not in splat_by_mode:
        raise ValueError('No such interpolation mode "%s"' % mode)
    splat = splat_by_mode[mode]

    if mask is None:
        mask = points_3d.new_ones(1).expand(points_3d.shape[:2])

    volume_densities, volume_features = _points_to_volumes(
        points_3d,
        points_features,
        volume_densities,
        volume_features,
        grid_sizes,
        1.0,  # point_weight
        mask,
        align_corners,
        splat,
    )

    if rescale_features:
        # Trilinear votes are normalized by their accumulated weight (bounded
        # below by min_weight); nearest-neighbor votes by the vote count
        # (bounded below by 1 so empty voxels stay zero).
        floor = min_weight if splat else 1.0
        volume_features = volume_features / volume_densities.clamp(floor)

    return volume_features, volume_densities
def _add_points_features_to_volume_densities_features_python(
    *,
    points_3d: torch.Tensor,
    points_features: torch.Tensor,
    volume_densities: torch.Tensor,
    volume_features: Optional[torch.Tensor],
    mode: str,
    min_weight: float,
    mask: Optional[torch.Tensor],
    grid_sizes: torch.LongTensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Pure-Python implementation of add_points_features_to_volume_densities_features.

    Flattens the spatial dimensions, dispatches to the trilinear or nearest
    splatting kernel, and reshapes the result back to volume shape.

    Returns:
        volume_features: Output volume of shape `(minibatch, feature_dim, D, H, W)`
        volume_densities: Occupancy volume of shape `(minibatch, 1, D, H, W)`
            containing the total amount of votes cast to each of the voxels.
    """
    batch, _, feature_dim = points_features.shape
    spatial_shape = volume_densities.shape[2:]

    # Work on flattened (batch, n_voxels, ...) views of the volumes.
    densities_flat = volume_densities.view(batch, -1, 1)
    n_voxels = densities_flat.shape[1]

    if volume_features is None:
        # No feature volume supplied: start from zeros of the right size.
        features_flat = volume_densities.new_zeros(batch, feature_dim, n_voxels)
    else:
        features_flat = volume_features.view(batch, feature_dim, n_voxels)

    if mode == "trilinear":
        # Differentiable trilinear splatting.
        kernel = _splat_points_to_volumes
        extra_kwargs = {"min_weight": min_weight}
    elif mode == "nearest":
        # Non-differentiable nearest-neighbor rounding.
        kernel = _round_points_to_volumes
        extra_kwargs = {}
    else:
        raise ValueError('No such interpolation mode "%s"' % mode)

    volume_features, volume_densities = kernel(
        points_3d,
        points_features,
        densities_flat,
        features_flat,
        grid_sizes,
        mask=mask,
        **extra_kwargs,
    )

    # Restore the (D, H, W) spatial layout.
    return (
        volume_features.view(batch, feature_dim, *spatial_shape),
        volume_densities.view(batch, 1, *spatial_shape),
    )
+ """ + ba, n_points, feature_dim = points_features.shape + + # flatten densities and features + v_shape = volume_densities.shape[2:] + volume_densities_flatten = volume_densities.view(ba, -1, 1) + n_voxels = volume_densities_flatten.shape[1] + + if volume_features is None: + # initialize features if not passed in + volume_features_flatten = volume_densities.new_zeros(ba, feature_dim, n_voxels) + else: + # otherwise just flatten + volume_features_flatten = volume_features.view(ba, feature_dim, n_voxels) + + if mode == "trilinear": # do the splatting (trilinear interp) + volume_features, volume_densities = _splat_points_to_volumes( + points_3d, + points_features, + volume_densities_flatten, + volume_features_flatten, + grid_sizes, + mask=mask, + min_weight=min_weight, + ) + elif mode == "nearest": # nearest neighbor interp + volume_features, volume_densities = _round_points_to_volumes( + points_3d, + points_features, + volume_densities_flatten, + volume_features_flatten, + grid_sizes, + mask=mask, + ) + else: + raise ValueError('No such interpolation mode "%s"' % mode) + + # reshape into the volume shape + volume_features = volume_features.view(ba, feature_dim, *v_shape) + volume_densities = volume_densities.view(ba, 1, *v_shape) + return volume_features, volume_densities + + +def _check_points_to_volumes_inputs( + points_3d: torch.Tensor, + points_features: torch.Tensor, + volume_densities: torch.Tensor, + volume_features: torch.Tensor, + grid_sizes: torch.LongTensor, + mask: Optional[torch.Tensor] = None, +) -> None: + + max_grid_size = grid_sizes.max(dim=0).values + if torch.prod(max_grid_size) > volume_densities.shape[1]: + raise ValueError( + "One of the grid sizes corresponds to a larger number" + + " of elements than the number of elements in volume_densities." 
def _splat_points_to_volumes(
    points_3d: torch.Tensor,
    points_features: torch.Tensor,
    volume_densities: torch.Tensor,
    volume_features: torch.Tensor,
    grid_sizes: torch.LongTensor,
    min_weight: float = 1e-4,
    mask: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Convert a batch of point clouds to a batch of volumes using trilinear
    splatting into a volume.

    Note: `volume_densities` and `volume_features` are accumulated into
    IN PLACE via `scatter_add_`.

    Args:
        points_3d: Batch of 3D point cloud coordinates of shape
            `(minibatch, N, 3)` where N is the number of points
            in each point cloud. Coordinates have to be specified in the
            local volume coordinates (ranging in [-1, 1]).
        points_features: Features of shape `(minibatch, N, feature_dim)`
            corresponding to the points of the input point cloud `points_3d`.
        volume_features: Batch of input *flattened* feature volumes
            of shape `(minibatch, feature_dim, N_voxels)`
        volume_densities: Batch of input *flattened* feature volume densities
            of shape `(minibatch, N_voxels, 1)`. Each voxel should
            contain a non-negative number corresponding to its
            opaqueness (the higher, the less transparent).
        grid_sizes: `LongTensor` of shape (minibatch, 3) representing the
            spatial resolutions of each of the non-flattened `volumes` tensors.
            Note that the following has to hold:
            `torch.prod(grid_sizes, dim=1)==N_voxels`
        min_weight: A scalar controlling the lowest possible total per-voxel
            weight used to normalize the features accumulated in a voxel.
        mask: A binary mask of shape `(minibatch, N)` determining which 3D points
            are going to be converted to the resulting volume.
            Set to `None` if all points are valid.

    Returns:
        volume_features: Output volume of shape `(minibatch, D, N_voxels)`.
        volume_densities: Occupancy volume of shape `(minibatch, 1, N_voxels)`
            containing the total amount of votes cast to each of the voxels.
    """

    _check_points_to_volumes_inputs(
        points_3d,
        points_features,
        volume_densities,
        volume_features,
        grid_sizes,
        mask=mask,
    )

    _, n_voxels, density_dim = volume_densities.shape
    ba, n_points, feature_dim = points_features.shape

    # minibatch x n_points x feature_dim -> minibatch x feature_dim x n_points
    points_features = points_features.permute(0, 2, 1).contiguous()

    # XYZ = the upper-left volume index of the 8-neighborhood of every point
    # grid_sizes is of the form (minibatch, depth-height-width)
    grid_sizes_xyz = grid_sizes[:, [2, 1, 0]]

    # Convert from points_3d in the range [-1, 1] to
    # indices in the volume grid in the range [0, grid_sizes_xyz-1]
    points_3d_indices = ((points_3d + 1) * 0.5) * (
        grid_sizes_xyz[:, None].type_as(points_3d) - 1
    )
    XYZ = points_3d_indices.floor().long()
    rXYZ = points_3d_indices - XYZ.type_as(points_3d)  # remainder of floor

    # split into separate coordinate vectors
    X, Y, Z = XYZ.split(1, dim=2)
    # rX = remainder after floor = 1-"the weight of each vote into
    # the X coordinate of the 8-neighborhood"
    rX, rY, rZ = rXYZ.split(1, dim=2)

    # get random indices for the purpose of adding out-of-bounds values
    rand_idx = X.new_zeros(X.shape).random_(0, n_voxels)

    # iterate over the x, y, z indices of the 8-neighborhood (xdiff, ydiff, zdiff)
    for xdiff in (0, 1):
        X_ = X + xdiff
        # weight is (1 - r) for the floor corner and r for the ceil corner
        wX = (1 - xdiff) + (2 * xdiff - 1) * rX
        for ydiff in (0, 1):
            Y_ = Y + ydiff
            wY = (1 - ydiff) + (2 * ydiff - 1) * rY
            for zdiff in (0, 1):
                Z_ = Z + zdiff
                wZ = (1 - zdiff) + (2 * zdiff - 1) * rZ

                # weight of each vote into the given cell of 8-neighborhood
                w = wX * wY * wZ

                # valid - binary indicators of votes that fall into the volume
                # pyre-fixme[16]: `int` has no attribute `long`.
                valid = (
                    (0 <= X_)
                    * (X_ < grid_sizes_xyz[:, None, 0:1])
                    * (0 <= Y_)
                    * (Y_ < grid_sizes_xyz[:, None, 1:2])
                    * (0 <= Z_)
                    * (Z_ < grid_sizes_xyz[:, None, 2:3])
                ).long()

                # linearized indices into the volume (row-major over D, H, W)
                idx = (Z_ * grid_sizes[:, None, 1:2] + Y_) * grid_sizes[
                    :, None, 2:3
                ] + X_

                # out-of-bounds features added to a random voxel idx with weight=0.
                idx_valid = idx * valid + rand_idx * (1 - valid)
                w_valid = w * valid.type_as(w)
                if mask is not None:
                    # zero out the votes of masked-out (padding) points
                    # pyre-fixme[6]: For 1st argument expected `Tensor` but got `int`.
                    w_valid = w_valid * mask.type_as(w)[:, :, None]

                # scatter add casts the votes into the weight accumulator
                # and the feature accumulator
                # pyre-fixme[6]: For 3rd argument expected `Tensor` but got
                #  `Union[int, Tensor]`.
                volume_densities.scatter_add_(1, idx_valid, w_valid)

                # reshape idx_valid -> (minibatch, feature_dim, n_points)
                idx_valid = idx_valid.view(ba, 1, n_points).expand_as(points_features)
                # pyre-fixme[16]: Item `int` of `Union[int, Tensor]` has no
                #  attribute `view`.
                w_valid = w_valid.view(ba, 1, n_points)

                # volume_features of shape (minibatch, feature_dim, n_voxels)
                volume_features.scatter_add_(2, idx_valid, w_valid * points_features)

    # divide each feature by the total weight of the votes; min_weight avoids
    # division by ~0 in voxels that received (almost) no votes
    volume_features = volume_features / volume_densities.view(ba, 1, n_voxels).clamp(
        min_weight
    )

    return volume_features, volume_densities
def _round_points_to_volumes(
    points_3d: torch.Tensor,
    points_features: torch.Tensor,
    volume_densities: torch.Tensor,
    volume_features: torch.Tensor,
    grid_sizes: torch.LongTensor,
    mask: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Convert a batch of point clouds to a batch of volumes using rounding to the
    nearest integer coordinate of the volume. Features that fall into the same
    voxel are averaged.

    Note: `volume_densities` and `volume_features` are accumulated into
    IN PLACE via `scatter_add_`; point coordinates are detached, so this
    conversion has no gradient w.r.t. `points_3d`.

    Args:
        points_3d: Batch of 3D point cloud coordinates of shape
            `(minibatch, N, 3)` where N is the number of points
            in each point cloud. Coordinates have to be specified in the
            local volume coordinates (ranging in [-1, 1]).
        points_features: Features of shape `(minibatch, N, feature_dim)`
            corresponding to the points of the input point cloud `points_3d`.
        volume_features: Batch of input *flattened* feature volumes
            of shape `(minibatch, feature_dim, N_voxels)`
        volume_densities: Batch of input *flattened* feature volume densities
            of shape `(minibatch, 1, N_voxels)`. Each voxel should
            contain a non-negative number corresponding to its
            opaqueness (the higher, the less transparent).
        grid_sizes: `LongTensor` of shape (minibatch, 3) representing the
            spatial resolutions of each of the non-flattened `volumes` tensors.
            Note that the following has to hold:
            `torch.prod(grid_sizes, dim=1)==N_voxels`
        mask: A binary mask of shape `(minibatch, N)` determining which 3D points
            are going to be converted to the resulting volume.
            Set to `None` if all points are valid.

    Returns:
        volume_features: Output volume of shape `(minibatch, D, N_voxels)`.
        volume_densities: Occupancy volume of shape `(minibatch, 1, N_voxels)`
            containing the total amount of votes cast to each of the voxels.
    """

    _check_points_to_volumes_inputs(
        points_3d,
        points_features,
        volume_densities,
        volume_features,
        grid_sizes,
        mask=mask,
    )

    _, n_voxels, density_dim = volume_densities.shape
    ba, n_points, feature_dim = points_features.shape

    # minibatch x n_points x feature_dim -> minibatch x feature_dim x n_points
    points_features = points_features.permute(0, 2, 1).contiguous()

    # round the coordinates to nearest integer
    # grid_sizes is of the form (minibatch, depth-height-width)
    grid_sizes_xyz = grid_sizes[:, [2, 1, 0]]
    XYZ = ((points_3d.detach() + 1) * 0.5) * (
        grid_sizes_xyz[:, None].type_as(points_3d) - 1
    )
    XYZ = torch.round(XYZ).long()

    # split into separate coordinate vectors
    X, Y, Z = XYZ.split(1, dim=2)

    # valid - binary indicators of votes that fall into the volume
    # pyre-fixme[9]: grid_sizes has type `LongTensor`; used as `Tensor`.
    grid_sizes = grid_sizes.type_as(XYZ)
    # pyre-fixme[16]: `int` has no attribute `long`.
    valid = (
        (0 <= X)
        * (X < grid_sizes_xyz[:, None, 0:1])
        * (0 <= Y)
        * (Y < grid_sizes_xyz[:, None, 1:2])
        * (0 <= Z)
        * (Z < grid_sizes_xyz[:, None, 2:3])
    ).long()
    if mask is not None:
        # masked-out (padding) points contribute nothing
        valid = valid * mask[:, :, None].long()

    # get random indices for the purpose of adding out-of-bounds values
    rand_idx = valid.new_zeros(X.shape).random_(0, n_voxels)

    # linearized indices into the volume (row-major over D, H, W)
    idx = (Z * grid_sizes[:, None, 1:2] + Y) * grid_sizes[:, None, 2:3] + X

    # out-of-bounds features added to a random voxel idx with weight=0.
    idx_valid = idx * valid + rand_idx * (1 - valid)
    w_valid = valid.type_as(volume_features)

    # scatter add casts the votes into the weight accumulator
    # and the feature accumulator
    volume_densities.scatter_add_(1, idx_valid, w_valid)

    # reshape idx_valid -> (minibatch, feature_dim, n_points)
    idx_valid = idx_valid.view(ba, 1, n_points).expand_as(points_features)
    w_valid = w_valid.view(ba, 1, n_points)

    # volume_features of shape (minibatch, feature_dim, n_voxels)
    volume_features.scatter_add_(2, idx_valid, w_valid * points_features)

    # divide each feature by the total weight of the votes; clamping the
    # denominator at 1 makes this a mean over contributing points while
    # leaving empty voxels at zero
    volume_features = volume_features / volume_densities.view(ba, 1, n_voxels).clamp(
        1.0
    )

    return volume_features, volume_densities
def sample_farthest_points(
    points: torch.Tensor,
    lengths: Optional[torch.Tensor] = None,
    K: Union[int, List, torch.Tensor] = 50,
    random_start_point: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Iterative farthest point sampling algorithm [1] to subsample a set of
    K points from a given pointcloud. At each iteration, a point is selected
    which has the largest nearest neighbor distance to any of the
    already selected points.

    Farthest point sampling provides more uniform coverage of the input
    point cloud compared to uniform random sampling.

    [1] Charles R. Qi et al, "PointNet++: Deep Hierarchical Feature Learning
        on Point Sets in a Metric Space", NeurIPS 2017.

    Args:
        points: (N, P, D) array containing the batch of pointclouds.
        lengths: (N,) number of points in each pointcloud (to support
            heterogeneous batches of pointclouds).
        K: samples required in each sampled point cloud (typically << P).
            An int selects the same number per cloud; a length-(N,) tensor
            or list gives a per-cloud count.
        random_start_point: if True, a random point is selected as the
            starting point for iterative sampling.

    Returns:
        selected_points: (N, K, D) selected values from points. For tensor K
            the shape is (N, max(K), D), padded with 0.0 where k_i < max(K).
        selected_indices: (N, K) selected indices. For tensor K the shape is
            (N, max(K)), padded with -1 where k_i < max(K).

    Raises:
        ValueError: for inconsistent `lengths` or `K` batch dimensions, or
            a length exceeding P.
    """
    N, P, D = points.shape
    device = points.device

    # Validate / default the per-cloud lengths.
    if lengths is None:
        lengths = torch.full((N,), P, dtype=torch.int64, device=device)
    elif lengths.shape != (N,):
        raise ValueError("points and lengths must have same batch dimension.")
    elif lengths.max() > P:
        raise ValueError("A value in lengths was too large.")

    # TODO: support providing K as a ratio of the total number of points instead of as an int
    if isinstance(K, int):
        K = torch.full((N,), K, dtype=torch.int64, device=device)
    elif isinstance(K, list):
        K = torch.tensor(K, dtype=torch.int64, device=device)

    if K.shape[0] != N:
        raise ValueError("K and points must have the same batch dimension")

    # Coerce dtypes expected by the C++/CUDA kernel.
    if points.dtype != torch.float32:
        points = points.to(torch.float32)
    if lengths.dtype != torch.int64:
        lengths = lengths.to(torch.int64)
    if K.dtype != torch.int64:
        K = K.to(torch.int64)

    # Pick the first sampled index of each cloud (0, or uniformly random).
    start_idxs = torch.zeros_like(lengths)
    if random_start_point:
        for b in range(N):
            # pyre-fixme[6]: For 1st param expected `int` but got `Tensor`.
            start_idxs[b] = torch.randint(high=lengths[b], size=(1,)).item()

    # The kernel is not differentiable; run it without autograd tracking.
    with torch.no_grad():
        # pyre-fixme[16]: `pytorch3d_._C` has no attribute `sample_farthest_points`.
        idx = _C.sample_farthest_points(points, lengths, K, start_idxs)
    sampled_points = masked_gather(points, idx)

    return sampled_points, idx
def sample_farthest_points_naive(
    points: torch.Tensor,
    lengths: Optional[torch.Tensor] = None,
    K: Union[int, List, torch.Tensor] = 50,
    random_start_point: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Pure-Python reference implementation of iterative farthest point sampling.

    Same Args/Returns as sample_farthest_points. Runs an explicit per-batch,
    per-sample loop and is therefore much slower than the C++/CUDA version.
    """
    N, P, D = points.shape
    device = points.device

    # Validate inputs
    if lengths is None:
        lengths = torch.full((N,), P, dtype=torch.int64, device=device)
    else:
        if lengths.shape != (N,):
            raise ValueError("points and lengths must have same batch dimension.")
        if lengths.max() > P:
            raise ValueError("Invalid lengths.")

    # TODO: support providing K as a ratio of the total number of points instead of as an int
    if isinstance(K, int):
        K = torch.full((N,), K, dtype=torch.int64, device=device)
    elif isinstance(K, list):
        K = torch.tensor(K, dtype=torch.int64, device=device)

    if K.shape[0] != N:
        raise ValueError("K and points must have the same batch dimension")

    # Find max value of K
    max_K = torch.max(K)

    # List of selected indices from each batch element
    all_sampled_indices = []

    for n in range(N):
        # Initialize an array for the sampled indices, shape: (max_K,).
        # Slots beyond k_n remain -1 (padding for heterogeneous K).
        sample_idx_batch = torch.full(
            # pyre-fixme[6]: For 1st param expected `Union[List[int], Size,
            #  typing.Tuple[int, ...]]` but got `Tuple[Tensor]`.
            (max_K,),
            fill_value=-1,
            dtype=torch.int64,
            device=device,
        )

        # Initialize closest distances to inf, shape: (P,)
        # This will be updated at each iteration to track the closest distance of the
        # remaining points to any of the selected points
        closest_dists = points.new_full(
            # pyre-fixme[6]: For 1st param expected `Union[List[int], Size,
            #  typing.Tuple[int, ...]]` but got `Tuple[Tensor]`.
            (lengths[n],),
            float("inf"),
            dtype=torch.float32,
        )

        # Select a random point index and save it as the starting point
        # pyre-fixme[6]: For 2nd argument expected `int` but got `Tensor`.
        selected_idx = randint(0, lengths[n] - 1) if random_start_point else 0
        sample_idx_batch[0] = selected_idx

        # If the pointcloud has fewer than K points then only iterate over the min
        # pyre-fixme[6]: For 1st param expected `SupportsRichComparisonT` but got
        #  `Tensor`.
        # pyre-fixme[6]: For 2nd param expected `SupportsRichComparisonT` but got
        #  `Tensor`.
        k_n = min(lengths[n], K[n])

        # Iteratively select points for a maximum of k_n
        for i in range(1, k_n):
            # Find the distance between the last selected point
            # and all the other points. If a point has already been selected
            # its distance will be 0.0 so it will not be selected again as the max.
            dist = points[n, selected_idx, :] - points[n, : lengths[n], :]
            # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and
            #  `int`.
            dist_to_last_selected = (dist**2).sum(-1)  # (P - i)

            # If closer than currently saved distance to one of the selected
            # points, then update closest_dists
            closest_dists = torch.min(dist_to_last_selected, closest_dists)  # (P - i)

            # The aim is to pick the point that has the largest
            # nearest neighbour distance to any of the already selected points
            selected_idx = torch.argmax(closest_dists)
            sample_idx_batch[i] = selected_idx

        # Add the list of points for this batch to the final list
        all_sampled_indices.append(sample_idx_batch)

    all_sampled_indices = torch.stack(all_sampled_indices, dim=0)

    # Gather the points (padding indices of -1 become zero-filled rows)
    all_sampled_points = masked_gather(points, all_sampled_indices)

    # Return (N, max_K, D) subsampled points and indices
    return all_sampled_points, all_sampled_indices
+""" +import sys +from typing import Tuple, Union + +import torch + +from pytorch3d.ops.mesh_face_areas_normals import mesh_face_areas_normals + +from pytorch3d.ops.packed_to_padded import packed_to_padded +from pytorch3d.renderer.mesh.rasterizer import Fragments as MeshFragments + + +def sample_points_from_meshes( + meshes, + num_samples: int = 10000, + return_normals: bool = False, + return_textures: bool = False, +) -> Union[ + torch.Tensor, + Tuple[torch.Tensor, torch.Tensor], + Tuple[torch.Tensor, torch.Tensor, torch.Tensor], +]: + """ + Convert a batch of meshes to a batch of pointclouds by uniformly sampling + points on the surface of the mesh with probability proportional to the + face area. + + Args: + meshes: A Meshes object with a batch of N meshes. + num_samples: Integer giving the number of point samples per mesh. + return_normals: If True, return normals for the sampled points. + return_textures: If True, return textures for the sampled points. + + Returns: + 3-element tuple containing + + - **samples**: FloatTensor of shape (N, num_samples, 3) giving the + coordinates of sampled points for each mesh in the batch. For empty + meshes the corresponding row in the samples array will be filled with 0. + - **normals**: FloatTensor of shape (N, num_samples, 3) giving a normal vector + to each sampled point. Only returned if return_normals is True. + For empty meshes the corresponding row in the normals array will + be filled with 0. + - **textures**: FloatTensor of shape (N, num_samples, C) giving a C-dimensional + texture vector to each sampled point. Only returned if return_textures is True. + For empty meshes the corresponding row in the textures array will + be filled with 0. + + Note that in a future releases, we will replace the 3-element tuple output + with a `Pointclouds` datastructure, as follows + + .. 
code-block:: python + + Pointclouds(samples, normals=normals, features=textures) + """ + if meshes.isempty(): + raise ValueError("Meshes are empty.") + + verts = meshes.verts_packed() + if not torch.isfinite(verts).all(): + raise ValueError("Meshes contain nan or inf.") + + if return_textures and meshes.textures is None: + raise ValueError("Meshes do not contain textures.") + + faces = meshes.faces_packed() + mesh_to_face = meshes.mesh_to_faces_packed_first_idx() + num_meshes = len(meshes) + num_valid_meshes = torch.sum(meshes.valid) # Non empty meshes. + + # Initialize samples tensor with fill value 0 for empty meshes. + samples = torch.zeros((num_meshes, num_samples, 3), device=meshes.device) + + # Only compute samples for non empty meshes + with torch.no_grad(): + areas, _ = mesh_face_areas_normals(verts, faces) # Face areas can be zero. + max_faces = meshes.num_faces_per_mesh().max().item() + areas_padded = packed_to_padded( + areas, mesh_to_face[meshes.valid], max_faces + ) # (N, F) + + # TODO (gkioxari) Confirm multinomial bug is not present with real data. + sample_face_idxs = areas_padded.multinomial( + num_samples, replacement=True + ) # (N, num_samples) + sample_face_idxs += mesh_to_face[meshes.valid].view(num_valid_meshes, 1) + + # Get the vertex coordinates of the sampled faces. + face_verts = verts[faces] + v0, v1, v2 = face_verts[:, 0], face_verts[:, 1], face_verts[:, 2] + + # Randomly generate barycentric coords. + w0, w1, w2 = _rand_barycentric_coords( + num_valid_meshes, num_samples, verts.dtype, verts.device + ) + + # Use the barycentric coords to get a point on each sampled face. + a = v0[sample_face_idxs] # (N, num_samples, 3) + b = v1[sample_face_idxs] + c = v2[sample_face_idxs] + samples[meshes.valid] = w0[:, :, None] * a + w1[:, :, None] * b + w2[:, :, None] * c + + if return_normals: + # Initialize normals tensor with fill value 0 for empty meshes. 
+ # Normals for the sampled points are face normals computed from + # the vertices of the face in which the sampled point lies. + normals = torch.zeros((num_meshes, num_samples, 3), device=meshes.device) + vert_normals = (v1 - v0).cross(v2 - v1, dim=1) + vert_normals = vert_normals / vert_normals.norm(dim=1, p=2, keepdim=True).clamp( + min=sys.float_info.epsilon + ) + vert_normals = vert_normals[sample_face_idxs] + normals[meshes.valid] = vert_normals + + if return_textures: + # fragment data are of shape NxHxWxK. Here H=S, W=1 & K=1. + pix_to_face = sample_face_idxs.view(len(meshes), num_samples, 1, 1) # NxSx1x1 + bary = torch.stack((w0, w1, w2), dim=2).unsqueeze(2).unsqueeze(2) # NxSx1x1x3 + # zbuf and dists are not used in `sample_textures` so we initialize them with dummy + dummy = torch.zeros( + (len(meshes), num_samples, 1, 1), device=meshes.device, dtype=torch.float32 + ) # NxSx1x1 + fragments = MeshFragments( + pix_to_face=pix_to_face, zbuf=dummy, bary_coords=bary, dists=dummy + ) + textures = meshes.sample_textures(fragments) # NxSx1x1xC + textures = textures[:, :, 0, 0, :] # NxSxC + + # return + # TODO(gkioxari) consider returning a Pointclouds instance [breaking] + if return_normals and return_textures: + # pyre-fixme[61]: `normals` may not be initialized here. + # pyre-fixme[61]: `textures` may not be initialized here. + return samples, normals, textures + if return_normals: # return_textures is False + # pyre-fixme[61]: `normals` may not be initialized here. + return samples, normals + if return_textures: # return_normals is False + # pyre-fixme[61]: `textures` may not be initialized here. + return samples, textures + return samples + + +def _rand_barycentric_coords( + size1, size2, dtype: torch.dtype, device: torch.device +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Helper function to generate random barycentric coordinates which are uniformly + distributed over a triangle. 
+ + Args: + size1, size2: The number of coordinates generated will be size1*size2. + Output tensors will each be of shape (size1, size2). + dtype: Datatype to generate. + device: A torch.device object on which the outputs will be allocated. + + Returns: + w0, w1, w2: Tensors of shape (size1, size2) giving random barycentric + coordinates + """ + uv = torch.rand(2, size1, size2, dtype=dtype, device=device) + u, v = uv[0], uv[1] + u_sqrt = u.sqrt() + w0 = 1.0 - u_sqrt + w1 = u_sqrt * (1.0 - v) + w2 = u_sqrt * v + return w0, w1, w2 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/subdivide_meshes.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/subdivide_meshes.py new file mode 100644 index 0000000000000000000000000000000000000000..86204309a4a79896028ba82c9df0df273c3c15b4 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/subdivide_meshes.py @@ -0,0 +1,472 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + + +import torch +import torch.nn as nn +from pytorch3d.structures import Meshes + + +class SubdivideMeshes(nn.Module): + """ + Subdivide a triangle mesh by adding a new vertex at the center of each edge + and dividing each face into four new faces. Vectors of vertex + attributes can also be subdivided by averaging the values of the attributes + at the two vertices which form each edge. This implementation + preserves face orientation - if the vertices of a face are all ordered + counter-clockwise, then the faces in the subdivided meshes will also have + their vertices ordered counter-clockwise. 
+ + If meshes is provided as an input, the initializer performs the relatively + expensive computation of determining the new face indices. This one-time + computation can be reused for all meshes with the same face topology + but different vertex positions. + """ + + def __init__(self, meshes=None) -> None: + """ + Args: + meshes: Meshes object or None. If a meshes object is provided, + the first mesh is used to compute the new faces of the + subdivided topology which can be reused for meshes with + the same input topology. + """ + super(SubdivideMeshes, self).__init__() + + self.precomputed = False + self._N = -1 + if meshes is not None: + # This computation is on indices, so gradients do not need to be + # tracked. + mesh = meshes[0] + with torch.no_grad(): + subdivided_faces = self.subdivide_faces(mesh) + if subdivided_faces.shape[1] != 3: + raise ValueError("faces can only have three vertices") + self.register_buffer("_subdivided_faces", subdivided_faces) + self.precomputed = True + + def subdivide_faces(self, meshes): + r""" + Args: + meshes: a Meshes object. + + Returns: + subdivided_faces_packed: (4*sum(F_n), 3) shape LongTensor of + original and new faces. + + Refer to pytorch3d.structures.meshes.py for more details on packed + representations of faces. + + Each face is split into 4 faces e.g. Input face + :: + v0 + /\ + / \ + / \ + e1 / \ e0 + / \ + / \ + / \ + /______________\ + v2 e2 v1 + + faces_packed = [[0, 1, 2]] + faces_packed_to_edges_packed = [[2, 1, 0]] + + `faces_packed_to_edges_packed` is used to represent all the new + vertex indices corresponding to the mid-points of edges in the mesh. + The actual vertex coordinates will be computed in the forward function. + To get the indices of the new vertices, offset + `faces_packed_to_edges_packed` by the total number of vertices. + :: + faces_packed_to_edges_packed = [[2, 1, 0]] + 3 = [[5, 4, 3]] + + e.g. 
subdivided face + :: + v0 + /\ + / \ + / f0 \ + v4 /______\ v3 + /\ /\ + / \ f3 / \ + / f2 \ / f1 \ + /______\/______\ + v2 v5 v1 + + f0 = [0, 3, 4] + f1 = [1, 5, 3] + f2 = [2, 4, 5] + f3 = [5, 4, 3] + + """ + verts_packed = meshes.verts_packed() + with torch.no_grad(): + faces_packed = meshes.faces_packed() + faces_packed_to_edges_packed = ( + meshes.faces_packed_to_edges_packed() + verts_packed.shape[0] + ) + + f0 = torch.stack( + [ + faces_packed[:, 0], + faces_packed_to_edges_packed[:, 2], + faces_packed_to_edges_packed[:, 1], + ], + dim=1, + ) + f1 = torch.stack( + [ + faces_packed[:, 1], + faces_packed_to_edges_packed[:, 0], + faces_packed_to_edges_packed[:, 2], + ], + dim=1, + ) + f2 = torch.stack( + [ + faces_packed[:, 2], + faces_packed_to_edges_packed[:, 1], + faces_packed_to_edges_packed[:, 0], + ], + dim=1, + ) + f3 = faces_packed_to_edges_packed + subdivided_faces_packed = torch.cat( + [f0, f1, f2, f3], dim=0 + ) # (4*sum(F_n), 3) + + return subdivided_faces_packed + + def forward(self, meshes, feats=None): + """ + Subdivide a batch of meshes by adding a new vertex on each edge, and + dividing each face into four new faces. New meshes contains two types + of vertices: + 1) Vertices that appear in the input meshes. + Data for these vertices are copied from the input meshes. + 2) New vertices at the midpoint of each edge. + Data for these vertices is the average of the data for the two + vertices that make up the edge. + + Args: + meshes: Meshes object representing a batch of meshes. + feats: Per-vertex features to be subdivided along with the verts. + Should be parallel to the packed vert representation of the + input meshes; so it should have shape (V, D) where V is the + total number of verts in the input meshes. Default: None. + + Returns: + 2-element tuple containing + + - **new_meshes**: Meshes object of a batch of subdivided meshes. 
+ - **new_feats**: (optional) Tensor of subdivided feats, parallel to the + (packed) vertices of the subdivided meshes. Only returned + if feats is not None. + + """ + self._N = len(meshes) + if self.precomputed: + return self.subdivide_homogeneous(meshes, feats) + else: + return self.subdivide_heterogenerous(meshes, feats) + + def subdivide_homogeneous(self, meshes, feats=None): + """ + Subdivide verts (and optionally features) of a batch of meshes + where each mesh has the same topology of faces. The subdivided faces + are precomputed in the initializer. + + Args: + meshes: Meshes object representing a batch of meshes. + feats: Per-vertex features to be subdivided along with the verts. + + Returns: + 2-element tuple containing + + - **new_meshes**: Meshes object of a batch of subdivided meshes. + - **new_feats**: (optional) Tensor of subdivided feats, parallel to the + (packed) vertices of the subdivided meshes. Only returned + if feats is not None. + """ + verts = meshes.verts_padded() # (N, V, D) + edges = meshes[0].edges_packed() + + # The set of faces is the same across the different meshes. + new_faces = self._subdivided_faces.view(1, -1, 3).expand(self._N, -1, -1) + + # Add one new vertex at the midpoint of each edge by taking the average + # of the vertices that form each edge. + new_verts = verts[:, edges].mean(dim=2) + new_verts = torch.cat([verts, new_verts], dim=1) # (sum(V_n)+sum(E_n), 3) + new_feats = None + + # Calculate features for new vertices. + if feats is not None: + if feats.dim() == 2: + # feats is in packed format, transform it from packed to + # padded, i.e. (N*V, D) to (N, V, D). + feats = feats.view(verts.size(0), verts.size(1), feats.size(1)) + if feats.dim() != 3: + raise ValueError("features need to be of shape (N, V, D) or (N*V, D)") + + # Take average of the features at the vertices that form each edge. 
+ new_feats = feats[:, edges].mean(dim=2) + new_feats = torch.cat([feats, new_feats], dim=1) # (sum(V_n)+sum(E_n), 3) + + new_meshes = Meshes(verts=new_verts, faces=new_faces) + + if feats is None: + return new_meshes + else: + return new_meshes, new_feats + + def subdivide_heterogenerous(self, meshes, feats=None): + """ + Subdivide faces, verts (and optionally features) of a batch of meshes + where each mesh can have different face topologies. + + Args: + meshes: Meshes object representing a batch of meshes. + feats: Per-vertex features to be subdivided along with the verts. + + Returns: + 2-element tuple containing + + - **new_meshes**: Meshes object of a batch of subdivided meshes. + - **new_feats**: (optional) Tensor of subdivided feats, parallel to the + (packed) vertices of the subdivided meshes. Only returned + if feats is not None. + """ + + # The computation of new faces is on face indices, so gradients do not + # need to be tracked. + verts = meshes.verts_packed() + with torch.no_grad(): + new_faces = self.subdivide_faces(meshes) + edges = meshes.edges_packed() + face_to_mesh_idx = meshes.faces_packed_to_mesh_idx() + edge_to_mesh_idx = meshes.edges_packed_to_mesh_idx() + num_edges_per_mesh = edge_to_mesh_idx.bincount(minlength=self._N) + num_verts_per_mesh = meshes.num_verts_per_mesh() + num_faces_per_mesh = meshes.num_faces_per_mesh() + + # Add one new vertex at the midpoint of each edge. + new_verts_per_mesh = num_verts_per_mesh + num_edges_per_mesh # (N,) + new_face_to_mesh_idx = torch.cat([face_to_mesh_idx] * 4, dim=0) + + # Calculate the indices needed to group the new and existing verts + # for each mesh. 
+ verts_sort_idx = _create_verts_index( + num_verts_per_mesh, num_edges_per_mesh, meshes.device + ) # (sum(V_n)+sum(E_n),) + + verts_ordered_idx_init = torch.zeros( + new_verts_per_mesh.sum(), dtype=torch.int64, device=meshes.device + ) # (sum(V_n)+sum(E_n),) + + # Reassign vertex indices so that existing and new vertices for each + # mesh are sequential. + verts_ordered_idx = verts_ordered_idx_init.scatter_add( + 0, + verts_sort_idx, + torch.arange(new_verts_per_mesh.sum(), device=meshes.device), + ) + + # Retrieve vertex indices for each face. + new_faces = verts_ordered_idx[new_faces] + + # Calculate the indices needed to group the existing and new faces + # for each mesh. + face_sort_idx = _create_faces_index( + num_faces_per_mesh, device=meshes.device + ) + + # Reorder the faces to sequentially group existing and new faces + # for each mesh. + new_faces = new_faces[face_sort_idx] + new_face_to_mesh_idx = new_face_to_mesh_idx[face_sort_idx] + new_faces_per_mesh = new_face_to_mesh_idx.bincount( + minlength=self._N + ) # (sum(F_n)*4) + + # Add one new vertex at the midpoint of each edge by taking the average + # of the verts that form each edge. + new_verts = verts[edges].mean(dim=1) + new_verts = torch.cat([verts, new_verts], dim=0) + + # Reorder the verts to sequentially group existing and new verts for + # each mesh. 
+ new_verts = new_verts[verts_sort_idx] + + if feats is not None: + new_feats = feats[edges].mean(dim=1) + new_feats = torch.cat([feats, new_feats], dim=0) + new_feats = new_feats[verts_sort_idx] + + verts_list = list(new_verts.split(new_verts_per_mesh.tolist(), 0)) + faces_list = list(new_faces.split(new_faces_per_mesh.tolist(), 0)) + new_verts_per_mesh_cumsum = torch.cat( + [ + new_verts_per_mesh.new_full(size=(1,), fill_value=0.0), + new_verts_per_mesh.cumsum(0)[:-1], + ], + dim=0, + ) + faces_list = [ + faces_list[n] - new_verts_per_mesh_cumsum[n] for n in range(self._N) + ] + if feats is not None: + feats_list = new_feats.split(new_verts_per_mesh.tolist(), 0) + new_meshes = Meshes(verts=verts_list, faces=faces_list) + + if feats is None: + return new_meshes + else: + new_feats = torch.cat(feats_list, dim=0) + return new_meshes, new_feats + + +def _create_verts_index(verts_per_mesh, edges_per_mesh, device=None): + """ + Helper function to group the vertex indices for each mesh. New vertices are + stacked at the end of the original verts tensor, so in order to have + sequential packing, the verts tensor needs to be reordered so that the + vertices corresponding to each mesh are grouped together. + + Args: + verts_per_mesh: Tensor of shape (N,) giving the number of vertices + in each mesh in the batch where N is the batch size. + edges_per_mesh: Tensor of shape (N,) giving the number of edges + in each mesh in the batch + + Returns: + verts_idx: A tensor with vert indices for each mesh ordered sequentially + by mesh index. + """ + # e.g. verts_per_mesh = (4, 5, 6) + # e.g. edges_per_mesh = (5, 7, 9) + + V = verts_per_mesh.sum() # e.g. 15 + E = edges_per_mesh.sum() # e.g. 21 + + verts_per_mesh_cumsum = verts_per_mesh.cumsum(dim=0) # (N,) e.g. (4, 9, 15) + edges_per_mesh_cumsum = edges_per_mesh.cumsum(dim=0) # (N,) e.g. (5, 12, 21) + + v_to_e_idx = verts_per_mesh_cumsum.clone() + + # vertex to edge index. + v_to_e_idx[1:] += edges_per_mesh_cumsum[ + :-1 + ] # e.g. 
(4, 9, 15) + (0, 5, 12) = (4, 14, 27) + + # vertex to edge offset. + v_to_e_offset = V - verts_per_mesh_cumsum # e.g. 15 - (4, 9, 15) = (11, 6, 0) + v_to_e_offset[1:] += edges_per_mesh_cumsum[ + :-1 + ] # e.g. (11, 6, 0) + (0, 5, 12) = (11, 11, 12) + e_to_v_idx = ( + verts_per_mesh_cumsum[:-1] + edges_per_mesh_cumsum[:-1] + ) # (4, 9) + (5, 12) = (9, 21) + e_to_v_offset = ( + verts_per_mesh_cumsum[:-1] - edges_per_mesh_cumsum[:-1] - V + ) # (4, 9) - (5, 12) - 15 = (-16, -18) + + # Add one new vertex per edge. + idx_diffs = torch.ones(V + E, device=device, dtype=torch.int64) # (36,) + idx_diffs[v_to_e_idx] += v_to_e_offset + idx_diffs[e_to_v_idx] += e_to_v_offset + + # e.g. + # [ + # 1, 1, 1, 1, 12, 1, 1, 1, 1, + # -15, 1, 1, 1, 1, 12, 1, 1, 1, 1, 1, 1, + # -17, 1, 1, 1, 1, 1, 13, 1, 1, 1, 1, 1, 1, 1 + # ] + + verts_idx = idx_diffs.cumsum(dim=0) - 1 + + # e.g. + # [ + # 0, 1, 2, 3, 15, 16, 17, 18, 19, --> mesh 0 + # 4, 5, 6, 7, 8, 20, 21, 22, 23, 24, 25, 26, --> mesh 1 + # 9, 10, 11, 12, 13, 14, 27, 28, 29, 30, 31, 32, 33, 34, 35 --> mesh 2 + # ] + # where for mesh 0, [0, 1, 2, 3] are the indices of the existing verts, and + # [15, 16, 17, 18, 19] are the indices of the new verts after subdivision. + + return verts_idx + + +def _create_faces_index(faces_per_mesh: torch.Tensor, device=None): + """ + Helper function to group the faces indices for each mesh. New faces are + stacked at the end of the original faces tensor, so in order to have + sequential packing, the faces tensor needs to be reordered to that faces + corresponding to each mesh are grouped together. + + Args: + faces_per_mesh: Tensor of shape (N,) giving the number of faces + in each mesh in the batch where N is the batch size. + + Returns: + faces_idx: A tensor with face indices for each mesh ordered sequentially + by mesh index. + """ + # e.g. faces_per_mesh = [2, 5, 3] + + F = faces_per_mesh.sum() # e.g. 10 + faces_per_mesh_cumsum = faces_per_mesh.cumsum(dim=0) # (N,) e.g. 
(2, 7, 10) + + switch1_idx = faces_per_mesh_cumsum.clone() + switch1_idx[1:] += ( + 3 * faces_per_mesh_cumsum[:-1] + ) # e.g. (2, 7, 10) + (0, 6, 21) = (2, 13, 31) + + switch2_idx = 2 * faces_per_mesh_cumsum # e.g. (4, 14, 20) + switch2_idx[1:] += ( + 2 * faces_per_mesh_cumsum[:-1] + ) # e.g. (4, 14, 20) + (0, 4, 14) = (4, 18, 34) + + switch3_idx = 3 * faces_per_mesh_cumsum # e.g. (6, 21, 30) + switch3_idx[1:] += faces_per_mesh_cumsum[ + :-1 + ] # e.g. (6, 21, 30) + (0, 2, 7) = (6, 23, 37) + + switch4_idx = 4 * faces_per_mesh_cumsum[:-1] # e.g. (8, 28) + + switch123_offset = F - faces_per_mesh # e.g. (8, 5, 7) + + # pyre-fixme[6]: For 1st param expected `Union[List[int], Size, + # typing.Tuple[int, ...]]` but got `Tensor`. + idx_diffs = torch.ones(4 * F, device=device, dtype=torch.int64) + idx_diffs[switch1_idx] += switch123_offset + idx_diffs[switch2_idx] += switch123_offset + idx_diffs[switch3_idx] += switch123_offset + idx_diffs[switch4_idx] -= 3 * F + + # e.g + # [ + # 1, 1, 9, 1, 9, 1, 9, 1, -> mesh 0 + # -29, 1, 1, 1, 1, 6, 1, 1, 1, 1, 6, 1, 1, 1, 1, 6, 1, 1, 1, 1, -> mesh 1 + # -29, 1, 1, 8, 1, 1, 8, 1, 1, 8, 1, 1 -> mesh 2 + # ] + + faces_idx = idx_diffs.cumsum(dim=0) - 1 + + # e.g. + # [ + # 0, 1, 10, 11, 20, 21, 30, 31, + # 2, 3, 4, 5, 6, 12, 13, 14, 15, 16, 22, 23, 24, 25, 26, 32, 33, 34, 35, 36, + # 7, 8, 9, 17, 18, 19, 27, 28, 29, 37, 38, 39 + # ] + # where for mesh 0, [0, 1] are the indices of the existing faces, and + # [10, 11, 20, 21, 30, 31] are the indices of the new faces after subdivision. 
+ + return faces_idx diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/utils.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..29afc3f9673bd2d81501464fa8a47edd01581f30 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/utils.py @@ -0,0 +1,207 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from typing import Optional, Tuple, TYPE_CHECKING, Union + +import torch + +from .knn import knn_points + + +if TYPE_CHECKING: + from pytorch3d.structures import Pointclouds + + +def masked_gather(points: torch.Tensor, idx: torch.Tensor) -> torch.Tensor: + """ + Helper function for torch.gather to collect the points at + the given indices in idx where some of the indices might be -1 to + indicate padding. These indices are first replaced with 0. + Then the points are gathered after which the padded values + are set to 0.0. + + Args: + points: (N, P, D) float32 tensor of points + idx: (N, K) or (N, P, K) long tensor of indices into points, where + some indices are -1 to indicate padding + + Returns: + selected_points: (N, K, D) float32 tensor of points + at the given indices + """ + + if len(idx) != len(points): + raise ValueError("points and idx must have the same batch dimension") + + N, P, D = points.shape + + if idx.ndim == 3: + # Case: KNN, Ball Query where idx is of shape (N, P', K) + # where P' is not necessarily the same as P as the + # points may be gathered from a different pointcloud. 
+ K = idx.shape[2] + # Match dimensions for points and indices + idx_expanded = idx[..., None].expand(-1, -1, -1, D) + points = points[:, :, None, :].expand(-1, -1, K, -1) + elif idx.ndim == 2: + # Farthest point sampling where idx is of shape (N, K) + idx_expanded = idx[..., None].expand(-1, -1, D) + else: + raise ValueError("idx format is not supported %s" % repr(idx.shape)) + + idx_expanded_mask = idx_expanded.eq(-1) + idx_expanded = idx_expanded.clone() + # Replace -1 values with 0 for gather + idx_expanded[idx_expanded_mask] = 0 + # Gather points + selected_points = points.gather(dim=1, index=idx_expanded) + # Replace padded values + selected_points[idx_expanded_mask] = 0.0 + return selected_points + + +def wmean( + x: torch.Tensor, + weight: Optional[torch.Tensor] = None, + dim: Union[int, Tuple[int]] = -2, + keepdim: bool = True, + eps: float = 1e-9, +) -> torch.Tensor: + """ + Finds the mean of the input tensor across the specified dimension. + If the `weight` argument is provided, computes weighted mean. + Args: + x: tensor of shape `(*, D)`, where D is assumed to be spatial; + weights: if given, non-negative tensor of shape `(*,)`. It must be + broadcastable to `x.shape[:-1]`. Note that the weights for + the last (spatial) dimension are assumed same; + dim: dimension(s) in `x` to average over; + keepdim: tells whether to keep the resulting singleton dimension. + eps: minimum clamping value in the denominator. + Returns: + the mean tensor: + * if `weights` is None => `mean(x, dim)`, + * otherwise => `sum(x*w, dim) / max{sum(w, dim), eps}`. + """ + args = {"dim": dim, "keepdim": keepdim} + + if weight is None: + # pyre-fixme[6]: For 1st param expected `Optional[dtype]` but got + # `Union[Tuple[int], int]`. 
+ return x.mean(**args) + + if any( + xd != wd and xd != 1 and wd != 1 + for xd, wd in zip(x.shape[-2::-1], weight.shape[::-1]) + ): + raise ValueError("wmean: weights are not compatible with the tensor") + + # pyre-fixme[6]: For 1st param expected `Optional[dtype]` but got + # `Union[Tuple[int], int]`. + return (x * weight[..., None]).sum(**args) / weight[..., None].sum(**args).clamp( + eps + ) + + +def eyes( + dim: int, + N: int, + device: Optional[torch.device] = None, + dtype: torch.dtype = torch.float32, +) -> torch.Tensor: + """ + Generates a batch of `N` identity matrices of shape `(N, dim, dim)`. + + Args: + **dim**: The dimensionality of the identity matrices. + **N**: The number of identity matrices. + **device**: The device to be used for allocating the matrices. + **dtype**: The datatype of the matrices. + + Returns: + **identities**: A batch of identity matrices of shape `(N, dim, dim)`. + """ + identities = torch.eye(dim, device=device, dtype=dtype) + return identities[None].repeat(N, 1, 1) + + +def convert_pointclouds_to_tensor(pcl: Union[torch.Tensor, "Pointclouds"]): + """ + If `type(pcl)==Pointclouds`, converts a `pcl` object to a + padded representation and returns it together with the number of points + per batch. Otherwise, returns the input itself with the number of points + set to the size of the second dimension of `pcl`. + """ + if is_pointclouds(pcl): + X = pcl.points_padded() # type: ignore + num_points = pcl.num_points_per_cloud() # type: ignore + elif torch.is_tensor(pcl): + X = pcl + num_points = X.shape[1] * torch.ones( # type: ignore + X.shape[0], + device=X.device, + dtype=torch.int64, + ) + else: + raise ValueError( + "The inputs X, Y should be either Pointclouds objects or tensors." 
+ ) + return X, num_points + + +def is_pointclouds(pcl: Union[torch.Tensor, "Pointclouds"]) -> bool: + """Checks whether the input `pcl` is an instance of `Pointclouds` + by checking the existence of `points_padded` and `num_points_per_cloud` + functions. + """ + return hasattr(pcl, "points_padded") and hasattr(pcl, "num_points_per_cloud") + + +def get_point_covariances( + points_padded: torch.Tensor, + num_points_per_cloud: torch.Tensor, + neighborhood_size: int, +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Computes the per-point covariance matrices by of the 3D locations of + K-nearest neighbors of each point. + + Args: + **points_padded**: Input point clouds as a padded tensor + of shape `(minibatch, num_points, dim)`. + **num_points_per_cloud**: Number of points per cloud + of shape `(minibatch,)`. + **neighborhood_size**: Number of nearest neighbors for each point + used to estimate the covariance matrices. + + Returns: + **covariances**: A batch of per-point covariance matrices + of shape `(minibatch, dim, dim)`. + **k_nearest_neighbors**: A batch of `neighborhood_size` nearest + neighbors for each of the point cloud points + of shape `(minibatch, num_points, neighborhood_size, dim)`. 
+ """ + # get K nearest neighbor idx for each point in the point cloud + k_nearest_neighbors = knn_points( + points_padded, + points_padded, + lengths1=num_points_per_cloud, + lengths2=num_points_per_cloud, + K=neighborhood_size, + return_nn=True, + ).knn + # obtain the mean of the neighborhood + pt_mean = k_nearest_neighbors.mean(2, keepdim=True) + # compute the diff of the neighborhood and the mean of the neighborhood + central_diff = k_nearest_neighbors - pt_mean + # per-nn-point covariances + per_pt_cov = central_diff.unsqueeze(4) * central_diff.unsqueeze(3) + # per-point covariances + covariances = per_pt_cov.mean(2) + + return covariances, k_nearest_neighbors diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/vert_align.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/vert_align.py new file mode 100644 index 0000000000000000000000000000000000000000..fd1a90d0f8836c87a279a2e7c1db6be0fa49b3e4 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/ops/vert_align.py @@ -0,0 +1,107 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + + +import torch +import torch.nn.functional as F + + +def vert_align( + feats, + verts, + return_packed: bool = False, + interp_mode: str = "bilinear", + padding_mode: str = "zeros", + align_corners: bool = True, +) -> torch.Tensor: + """ + Sample vertex features from a feature map. This operation is called + "perceptual feature pooling" in [1] or "vert align" in [2]. + + [1] Wang et al, "Pixel2Mesh: Generating 3D Mesh Models from Single + RGB Images", ECCV 2018. 
+ [2] Gkioxari et al, "Mesh R-CNN", ICCV 2019 + + Args: + feats: FloatTensor of shape (N, C, H, W) representing image features + from which to sample or a list of features each with potentially + different C, H or W dimensions. + verts: FloatTensor of shape (N, V, 3) or an object (e.g. Meshes or Pointclouds) + with `verts_padded' or `points_padded' as an attribute giving the (x, y, z) + vertex positions for which to sample. (x, y) verts should be normalized such + that (-1, -1) corresponds to top-left and (+1, +1) to bottom-right + location in the input feature map. + return_packed: (bool) Indicates whether to return packed features + interp_mode: (str) Specifies how to interpolate features. + ('bilinear' or 'nearest') + padding_mode: (str) Specifies how to handle vertices outside of the + [-1, 1] range. ('zeros', 'reflection', or 'border') + align_corners (bool): Geometrically, we consider the pixels of the + input as squares rather than points. + If set to ``True``, the extrema (``-1`` and ``1``) are considered as + referring to the center points of the input's corner pixels. If set + to ``False``, they are instead considered as referring to the corner + points of the input's corner pixels, making the sampling more + resolution agnostic. Default: ``True`` + + Returns: + feats_sampled: FloatTensor of shape (N, V, C) giving sampled features for each + vertex. If feats is a list, we return concatenated features in axis=2 of + shape (N, V, sum(C_n)) where C_n = feats[n].shape[1]. + If return_packed = True, the features are transformed to a packed + representation of shape (sum(V), C) + """ + if torch.is_tensor(verts): + if verts.dim() != 3: + raise ValueError("verts tensor should be 3 dimensional") + grid = verts + elif hasattr(verts, "verts_padded"): + grid = verts.verts_padded() + elif hasattr(verts, "points_padded"): + grid = verts.points_padded() + else: + raise ValueError( + "verts must be a tensor or have a " + + "`points_padded' or`verts_padded` attribute." 
+ ) + + grid = grid[:, None, :, :2] # (N, 1, V, 2) + + if torch.is_tensor(feats): + feats = [feats] + for feat in feats: + if feat.dim() != 4: + raise ValueError("feats must have shape (N, C, H, W)") + if grid.shape[0] != feat.shape[0]: + raise ValueError("inconsistent batch dimension") + + feats_sampled = [] + for feat in feats: + feat_sampled = F.grid_sample( + feat, + grid, + mode=interp_mode, + padding_mode=padding_mode, + align_corners=align_corners, + ) # (N, C, 1, V) + feat_sampled = feat_sampled.squeeze(dim=2).transpose(1, 2) # (N, V, C) + feats_sampled.append(feat_sampled) + feats_sampled = torch.cat(feats_sampled, dim=2) # (N, V, sum(C)) + + if return_packed: + # flatten the first two dimensions: (N*V, C) + feats_sampled = feats_sampled.view(-1, feats_sampled.shape[-1]) + if hasattr(verts, "verts_padded_to_packed_idx"): + idx = ( + verts.verts_padded_to_packed_idx() + .view(-1, 1) + .expand(-1, feats_sampled.shape[-1]) + ) + feats_sampled = feats_sampled.gather(0, idx) # (sum(V), C) + + return feats_sampled diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e19e3eef3372503da67cd4c4d6fbb0e706fbcf99 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/__init__.py @@ -0,0 +1,95 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + +import torch + +from .blending import ( + BlendParams, + hard_rgb_blend, + sigmoid_alpha_blend, + softmax_rgb_blend, +) +from .camera_utils import join_cameras_as_batch, rotate_on_spot +from .cameras import ( # deprecated # deprecated # deprecated # deprecated + camera_position_from_spherical_angles, + CamerasBase, + FoVOrthographicCameras, + FoVPerspectiveCameras, + get_world_to_view_transform, + look_at_rotation, + look_at_view_transform, + OpenGLOrthographicCameras, + OpenGLPerspectiveCameras, + OrthographicCameras, + PerspectiveCameras, + SfMOrthographicCameras, + SfMPerspectiveCameras, +) +from .implicit import ( + AbsorptionOnlyRaymarcher, + EmissionAbsorptionRaymarcher, + GridRaysampler, + HarmonicEmbedding, + HeterogeneousRayBundle, + ImplicitRenderer, + MonteCarloRaysampler, + MultinomialRaysampler, + NDCGridRaysampler, + NDCMultinomialRaysampler, + ray_bundle_to_ray_points, + ray_bundle_variables_to_ray_points, + RayBundle, + VolumeRenderer, + VolumeSampler, +) +from .lighting import AmbientLights, diffuse, DirectionalLights, PointLights, specular +from .materials import Materials +from .mesh import ( + gouraud_shading, + HardFlatShader, + HardGouraudShader, + HardPhongShader, + MeshRasterizer, + MeshRenderer, + MeshRendererWithFragments, + phong_shading, + RasterizationSettings, + rasterize_meshes, + SoftGouraudShader, + SoftPhongShader, + SoftSilhouetteShader, + SplatterPhongShader, + Textures, + TexturesAtlas, + TexturesUV, + TexturesVertex, +) + +from .points import ( + AlphaCompositor, + NormWeightedCompositor, + PointsRasterizationSettings, + PointsRasterizer, + PointsRenderer, + rasterize_points, +) + +# Pulsar is not enabled on amd. 
+if not torch.version.hip: + from .points import PulsarPointsRenderer + +from .splatter_blend import SplatterBlender +from .utils import ( + convert_to_tensors_and_broadcast, + ndc_grid_sample, + ndc_to_grid_sample_coords, + TensorProperties, +) + + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/blending.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/blending.py new file mode 100644 index 0000000000000000000000000000000000000000..b84b91ab6b7673a46f99fc058badb57ceb080043 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/blending.py @@ -0,0 +1,241 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from typing import NamedTuple, Sequence, Union + +import torch +from pytorch3d import _C +from pytorch3d.common.datatypes import Device + +# Example functions for blending the top K colors per pixel using the outputs +# from rasterization. +# NOTE: All blending function should return an RGBA image per batch element + + +class BlendParams(NamedTuple): + """ + Data class to store blending params with defaults + + Members: + sigma (float): For SoftmaxPhong, controls the width of the sigmoid + function used to calculate the 2D distance based probability. Determines + the sharpness of the edges of the shape. Higher => faces have less defined + edges. For SplatterPhong, this is the standard deviation of the Gaussian + kernel. Higher => splats have a stronger effect and the rendered image is + more blurry. + gamma (float): Controls the scaling of the exponential function used + to set the opacity of the color. 
+ Higher => faces are more transparent. + background_color: RGB values for the background color as a tuple or + as a tensor of three floats. + """ + + sigma: float = 1e-4 + gamma: float = 1e-4 + background_color: Union[torch.Tensor, Sequence[float]] = (1.0, 1.0, 1.0) + + +def _get_background_color( + blend_params: BlendParams, device: Device, dtype=torch.float32 +) -> torch.Tensor: + background_color_ = blend_params.background_color + if isinstance(background_color_, torch.Tensor): + background_color = background_color_.to(device) + else: + background_color = torch.tensor(background_color_, dtype=dtype, device=device) + return background_color + + +def hard_rgb_blend( + colors: torch.Tensor, fragments, blend_params: BlendParams +) -> torch.Tensor: + """ + Naive blending of top K faces to return an RGBA image + - **RGB** - choose color of the closest point i.e. K=0 + - **A** - 1.0 + + Args: + colors: (N, H, W, K, 3) RGB color for each of the top K faces per pixel. + fragments: the outputs of rasterization. From this we use + - pix_to_face: LongTensor of shape (N, H, W, K) specifying the indices + of the faces (in the packed representation) which + overlap each pixel in the image. This is used to + determine the output shape. + blend_params: BlendParams instance that contains a background_color + field specifying the color for the background + Returns: + RGBA pixel_colors: (N, H, W, 4) + """ + background_color = _get_background_color(blend_params, fragments.pix_to_face.device) + + # Mask for the background. + is_background = fragments.pix_to_face[..., 0] < 0 # (N, H, W) + + # Find out how much background_color needs to be expanded to be used for masked_scatter. + num_background_pixels = is_background.sum() + + # Set background color. + pixel_colors = colors[..., 0, :].masked_scatter( + is_background[..., None], + background_color[None, :].expand(num_background_pixels, -1), + ) # (N, H, W, 3) + + # Concat with the alpha channel. 
+ alpha = (~is_background).type_as(pixel_colors)[..., None] + + return torch.cat([pixel_colors, alpha], dim=-1) # (N, H, W, 4) + + +# Wrapper for the C++/CUDA Implementation of sigmoid alpha blend. +class _SigmoidAlphaBlend(torch.autograd.Function): + @staticmethod + def forward(ctx, dists, pix_to_face, sigma): + alphas = _C.sigmoid_alpha_blend(dists, pix_to_face, sigma) + ctx.save_for_backward(dists, pix_to_face, alphas) + ctx.sigma = sigma + return alphas + + @staticmethod + def backward(ctx, grad_alphas): + dists, pix_to_face, alphas = ctx.saved_tensors + sigma = ctx.sigma + grad_dists = _C.sigmoid_alpha_blend_backward( + grad_alphas, alphas, dists, pix_to_face, sigma + ) + return grad_dists, None, None + + +_sigmoid_alpha = _SigmoidAlphaBlend.apply + + +def sigmoid_alpha_blend(colors, fragments, blend_params: BlendParams) -> torch.Tensor: + """ + Silhouette blending to return an RGBA image + - **RGB** - choose color of the closest point. + - **A** - blend based on the 2D distance based probability map [1]. + + Args: + colors: (N, H, W, K, 3) RGB color for each of the top K faces per pixel. + fragments: the outputs of rasterization. From this we use + - pix_to_face: LongTensor of shape (N, H, W, K) specifying the indices + of the faces (in the packed representation) which + overlap each pixel in the image. + - dists: FloatTensor of shape (N, H, W, K) specifying + the 2D euclidean distance from the center of each pixel + to each of the top K overlapping faces. 
+ + Returns: + RGBA pixel_colors: (N, H, W, 4) + + [1] Liu et al, 'Soft Rasterizer: A Differentiable Renderer for Image-based + 3D Reasoning', ICCV 2019 + """ + N, H, W, K = fragments.pix_to_face.shape + pixel_colors = torch.ones((N, H, W, 4), dtype=colors.dtype, device=colors.device) + pixel_colors[..., :3] = colors[..., 0, :] + alpha = _sigmoid_alpha(fragments.dists, fragments.pix_to_face, blend_params.sigma) + pixel_colors[..., 3] = alpha + return pixel_colors + + +def softmax_rgb_blend( + colors: torch.Tensor, + fragments, + blend_params: BlendParams, + znear: Union[float, torch.Tensor] = 1.0, + zfar: Union[float, torch.Tensor] = 100, +) -> torch.Tensor: + """ + RGB and alpha channel blending to return an RGBA image based on the method + proposed in [1] + - **RGB** - blend the colors based on the 2D distance based probability map and + relative z distances. + - **A** - blend based on the 2D distance based probability map. + + Args: + colors: (N, H, W, K, 3) RGB color for each of the top K faces per pixel. + fragments: namedtuple with outputs of rasterization. We use properties + - pix_to_face: LongTensor of shape (N, H, W, K) specifying the indices + of the faces (in the packed representation) which + overlap each pixel in the image. + - dists: FloatTensor of shape (N, H, W, K) specifying + the 2D euclidean distance from the center of each pixel + to each of the top K overlapping faces. + - zbuf: FloatTensor of shape (N, H, W, K) specifying + the interpolated depth from each pixel to to each of the + top K overlapping faces. + blend_params: instance of BlendParams dataclass containing properties + - sigma: float, parameter which controls the width of the sigmoid + function used to calculate the 2D distance based probability. + Sigma controls the sharpness of the edges of the shape. + - gamma: float, parameter which controls the scaling of the + exponential function used to control the opacity of the color. 
+ - background_color: (3) element list/tuple/torch.Tensor specifying + the RGB values for the background color. + znear: float, near clipping plane in the z direction + zfar: float, far clipping plane in the z direction + + Returns: + RGBA pixel_colors: (N, H, W, 4) + + [0] Shichen Liu et al, 'Soft Rasterizer: A Differentiable Renderer for + Image-based 3D Reasoning' + """ + + N, H, W, K = fragments.pix_to_face.shape + pixel_colors = torch.ones((N, H, W, 4), dtype=colors.dtype, device=colors.device) + background_color = _get_background_color(blend_params, fragments.pix_to_face.device) + + # Weight for background color + eps = 1e-10 + + # Mask for padded pixels. + mask = fragments.pix_to_face >= 0 + + # Sigmoid probability map based on the distance of the pixel to the face. + prob_map = torch.sigmoid(-fragments.dists / blend_params.sigma) * mask + + # The cumulative product ensures that alpha will be 0.0 if at least 1 + # face fully covers the pixel as for that face, prob will be 1.0. + # This results in a multiplication by 0.0 because of the (1.0 - prob) + # term. Therefore 1.0 - alpha will be 1.0. + alpha = torch.prod((1.0 - prob_map), dim=-1) + + # Weights for each face. Adjust the exponential by the max z to prevent + # overflow. zbuf shape (N, H, W, K), find max over K. + # TODO: there may still be some instability in the exponent calculation. 
+ + # Reshape to be compatible with (N, H, W, K) values in fragments + if torch.is_tensor(zfar): + zfar = zfar[:, None, None, None] + if torch.is_tensor(znear): + znear = znear[:, None, None, None] + + # pyre-fixme[6]: Expected `float` but got `Union[float, Tensor]` + z_inv = (zfar - fragments.zbuf) / (zfar - znear) * mask + # pyre-fixme[6]: Expected `Tensor` but got `float` + z_inv_max = torch.max(z_inv, dim=-1).values[..., None].clamp(min=eps) + # pyre-fixme[6]: Expected `Tensor` but got `float` + weights_num = prob_map * torch.exp((z_inv - z_inv_max) / blend_params.gamma) + + # Also apply exp normalize trick for the background color weight. + # Clamp to ensure delta is never 0. + # pyre-fixme[6]: Expected `Tensor` for 1st param but got `float`. + delta = torch.exp((eps - z_inv_max) / blend_params.gamma).clamp(min=eps) + + # Normalize weights. + # weights_num shape: (N, H, W, K). Sum over K and divide through by the sum. + denom = weights_num.sum(dim=-1)[..., None] + delta + + # Sum: weights * textures + background color + weighted_colors = (weights_num[..., None] * colors).sum(dim=-2) + weighted_background = delta * background_color + pixel_colors[..., :3] = (weighted_colors + weighted_background) / denom + pixel_colors[..., 3] = 1.0 - alpha + + return pixel_colors diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/camera_conversions.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/camera_conversions.py new file mode 100644 index 0000000000000000000000000000000000000000..d18707a318dd9e66e4f5f7763ae0c00a03952310 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/camera_conversions.py @@ -0,0 +1,194 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
+# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +import logging +from typing import Tuple + +import torch + +from ..transforms import matrix_to_rotation_6d +from .cameras import PerspectiveCameras + + +LOGGER = logging.getLogger(__name__) + + +def _cameras_from_opencv_projection( + R: torch.Tensor, + tvec: torch.Tensor, + camera_matrix: torch.Tensor, + image_size: torch.Tensor, +) -> PerspectiveCameras: + focal_length = torch.stack([camera_matrix[:, 0, 0], camera_matrix[:, 1, 1]], dim=-1) + principal_point = camera_matrix[:, :2, 2] + + # Retype the image_size correctly and flip to width, height. + image_size_wh = image_size.to(R).flip(dims=(1,)) + + # Screen to NDC conversion: + # For non square images, we scale the points such that smallest side + # has range [-1, 1] and the largest side has range [-u, u], with u > 1. + # This convention is consistent with the PyTorch3D renderer, as well as + # the transformation function `get_ndc_to_screen_transform`. + scale = image_size_wh.to(R).min(dim=1, keepdim=True)[0] / 2.0 + scale = scale.expand(-1, 2) + c0 = image_size_wh / 2.0 + + # Get the PyTorch3D focal length and principal point. + focal_pytorch3d = focal_length / scale + p0_pytorch3d = -(principal_point - c0) / scale + + # For R, T we flip x, y axes (opencv screen space has an opposite + # orientation of screen axes). + # We also transpose R (opencv multiplies points from the opposite=left side). 
+ R_pytorch3d = R.clone().permute(0, 2, 1) + T_pytorch3d = tvec.clone() + R_pytorch3d[:, :, :2] *= -1 + T_pytorch3d[:, :2] *= -1 + + return PerspectiveCameras( + R=R_pytorch3d, + T=T_pytorch3d, + focal_length=focal_pytorch3d, + principal_point=p0_pytorch3d, + image_size=image_size, + device=R.device, + ) + + +def _opencv_from_cameras_projection( + cameras: PerspectiveCameras, + image_size: torch.Tensor, +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + R_pytorch3d = cameras.R.clone() + T_pytorch3d = cameras.T.clone() + focal_pytorch3d = cameras.focal_length + p0_pytorch3d = cameras.principal_point + T_pytorch3d[:, :2] *= -1 + R_pytorch3d[:, :, :2] *= -1 + tvec = T_pytorch3d + R = R_pytorch3d.permute(0, 2, 1) + + # Retype the image_size correctly and flip to width, height. + image_size_wh = image_size.to(R).flip(dims=(1,)) + + # NDC to screen conversion. + scale = image_size_wh.to(R).min(dim=1, keepdim=True)[0] / 2.0 + scale = scale.expand(-1, 2) + c0 = image_size_wh / 2.0 + + principal_point = -p0_pytorch3d * scale + c0 + focal_length = focal_pytorch3d * scale + + camera_matrix = torch.zeros_like(R) + camera_matrix[:, :2, 2] = principal_point + camera_matrix[:, 2, 2] = 1.0 + camera_matrix[:, 0, 0] = focal_length[:, 0] + camera_matrix[:, 1, 1] = focal_length[:, 1] + return R, tvec, camera_matrix + + +def _pulsar_from_opencv_projection( + R: torch.Tensor, + tvec: torch.Tensor, + camera_matrix: torch.Tensor, + image_size: torch.Tensor, + znear: float = 0.1, +) -> torch.Tensor: + assert len(camera_matrix.size()) == 3, "This function requires batched inputs!" + assert len(R.size()) == 3, "This function requires batched inputs!" + assert len(tvec.size()) in (2, 3), "This function reuqires batched inputs!" + + # Validate parameters. 
+ image_size_wh = image_size.to(R).flip(dims=(1,)) + assert torch.all( + image_size_wh > 0 + ), "height and width must be positive but min is: %s" % ( + str(image_size_wh.min().item()) + ) + assert ( + camera_matrix.size(1) == 3 and camera_matrix.size(2) == 3 + ), "Incorrect camera matrix shape: expected 3x3 but got %dx%d" % ( + camera_matrix.size(1), + camera_matrix.size(2), + ) + assert ( + R.size(1) == 3 and R.size(2) == 3 + ), "Incorrect R shape: expected 3x3 but got %dx%d" % ( + R.size(1), + R.size(2), + ) + if len(tvec.size()) == 2: + tvec = tvec.unsqueeze(2) + assert ( + tvec.size(1) == 3 and tvec.size(2) == 1 + ), "Incorrect tvec shape: expected 3x1 but got %dx%d" % ( + tvec.size(1), + tvec.size(2), + ) + # Check batch size. + batch_size = camera_matrix.size(0) + assert R.size(0) == batch_size, "Expected R to have batch size %d. Has size %d." % ( + batch_size, + R.size(0), + ) + assert ( + tvec.size(0) == batch_size + ), "Expected tvec to have batch size %d. Has size %d." % ( + batch_size, + tvec.size(0), + ) + # Check image sizes. + image_w = image_size_wh[0, 0] + image_h = image_size_wh[0, 1] + assert torch.all( + image_size_wh[:, 0] == image_w + ), "All images in a batch must have the same width!" + assert torch.all( + image_size_wh[:, 1] == image_h + ), "All images in a batch must have the same height!" + # Focal length. + fx = camera_matrix[:, 0, 0].unsqueeze(1) + fy = camera_matrix[:, 1, 1].unsqueeze(1) + # Check that we introduce less than 1% error by averaging the focal lengths. + fx_y = fx / fy + if torch.any(fx_y > 1.01) or torch.any(fx_y < 0.99): + LOGGER.warning( + "Pulsar only supports a single focal lengths. For converting OpenCV " + "focal lengths, we average them for x and y directions. " + "The focal lengths for x and y you provided differ by more than 1%, " + "which means this could introduce a noticeable error." + ) + f = (fx + fy) / 2 + # Normalize f into normalized device coordinates. 
+ focal_length_px = f / image_w + # Transfer into focal_length and sensor_width. + focal_length = torch.tensor([znear - 1e-5], dtype=torch.float32, device=R.device) + focal_length = focal_length[None, :].repeat(batch_size, 1) + sensor_width = focal_length / focal_length_px + # Principal point. + cx = camera_matrix[:, 0, 2].unsqueeze(1) + cy = camera_matrix[:, 1, 2].unsqueeze(1) + # Transfer principal point offset into centered offset. + cx = -(cx - image_w / 2) + cy = cy - image_h / 2 + # Concatenate to final vector. + param = torch.cat([focal_length, sensor_width, cx, cy], dim=1) + R_trans = R.permute(0, 2, 1) + cam_pos = -torch.bmm(R_trans, tvec).squeeze(2) + cam_rot = matrix_to_rotation_6d(R_trans) + cam_params = torch.cat([cam_pos, cam_rot, param], dim=1) + return cam_params + + +def _pulsar_from_cameras_projection( + cameras: PerspectiveCameras, + image_size: torch.Tensor, +) -> torch.Tensor: + opencv_R, opencv_T, opencv_K = _opencv_from_cameras_projection(cameras, image_size) + return _pulsar_from_opencv_projection(opencv_R, opencv_T, opencv_K, image_size) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/camera_utils.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/camera_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e56563156098ffeacd74ad692069a46838006f37 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/camera_utils.py @@ -0,0 +1,209 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + +from typing import Sequence, Tuple + +import torch +from pytorch3d.transforms import Transform3d + +from .cameras import CamerasBase + + +def camera_to_eye_at_up( + world_to_view_transform: Transform3d, +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Given a world to view transform, return the eye, at and up vectors which + represent its position. + + For example, if cam is a camera object, then after running + + .. code-block:: + + from cameras import look_at_view_transform + eye, at, up = camera_to_eye_at_up(cam.get_world_to_view_transform()) + R, T = look_at_view_transform(eye=eye, at=at, up=up) + + any other camera created from R and T will have the same world to view + transform as cam. + + Also, given a camera position R and T, then after running: + + .. code-block:: + + from cameras import get_world_to_view_transform, look_at_view_transform + eye, at, up = camera_to_eye_at_up(get_world_to_view_transform(R=R, T=T)) + R2, T2 = look_at_view_transform(eye=eye, at=at, up=up) + + R2 will equal R and T2 will equal T. + + Args: + world_to_view_transform: Transform3d representing the extrinsic + transformation of N cameras. + + Returns: + eye: FloatTensor of shape [N, 3] representing the camera centers in world space. + at: FloatTensor of shape [N, 3] representing points in world space directly in + front of the cameras e.g. the positions of objects to be viewed by the + cameras. + up: FloatTensor of shape [N, 3] representing vectors in world space which + when projected on to the camera plane point upwards. + """ + cam_trans = world_to_view_transform.inverse() + # In the PyTorch3D right handed coordinate system, the camera in view space + # is always at the origin looking along the +z axis. + + # The up vector is not a position so cannot be transformed with + # transform_points. 
However the position eye+up above the camera + # (whose position vector in the camera coordinate frame is an up vector) + # can be transformed with transform_points. + eye_at_up_view = torch.tensor( + [[0, 0, 0], [0, 0, 1], [0, 1, 0]], dtype=torch.float32, device=cam_trans.device + ) + eye_at_up_world = cam_trans.transform_points(eye_at_up_view).reshape(-1, 3, 3) + + eye, at, up_plus_eye = eye_at_up_world.unbind(1) + up = up_plus_eye - eye + return eye, at, up + + +def rotate_on_spot( + R: torch.Tensor, T: torch.Tensor, rotation: torch.Tensor +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Given a camera position as R and T (batched or not), + and a rotation matrix (batched or not) + return a new R and T representing camera position(s) + in the same location but rotated on the spot by the + given rotation. In particular the new world to view + rotation will be the previous one followed by the inverse + of the given rotation. + + For example, adding the following lines before constructing a camera + will make the camera point a little to the right of where it + otherwise would have been. + + .. code-block:: + + from math import radians + from pytorch3d.transforms import axis_angle_to_matrix + angles = [0, radians(10), 0] + rotation = axis_angle_to_matrix(torch.FloatTensor(angles)) + R, T = rotate_on_spot(R, T, rotation) + + Note here that if you have a column vector, then when you + premultiply it by this `rotation` (see the rotation_conversions doc), + then it will be rotated anticlockwise if facing the -y axis. + In our context, where we postmultiply row vectors to transform them, + `rotation` will rotate the camera clockwise around the -y axis + (i.e. when looking down), which is a turn to the right. + + If angles was [radians(10), 0, 0], the camera would get pointed + up a bit instead. + + If angles was [0, 0, radians(10)], the camera would be rotated anticlockwise + a bit, so the image would appear rotated clockwise from how it + otherwise would have been. 
+ + If you want to translate the camera from the origin in camera + coordinates, this is simple and does not need a separate function. + In particular, a translation by X = [a, b, c] would cause + the camera to move a units left, b units up, and c units + forward. This is achieved by using T-X in place of T. + + Args: + R: FloatTensor of shape [3, 3] or [N, 3, 3] + T: FloatTensor of shape [3] or [N, 3] + rotation: FloatTensor of shape [3, 3] or [n, 3, 3] + where if neither n nor N is 1, then n and N must be equal. + + Returns: + R: FloatTensor of shape [max(N, n), 3, 3] + T: FloatTensor of shape [max(N, n), 3] + """ + if R.ndim == 2: + R = R[None] + if T.ndim == 1: + T = T[None] + if rotation.ndim == 2: + rotation = rotation[None] + + if R.ndim != 3 or R.shape[1:] != (3, 3): + raise ValueError("Invalid R") + if T.ndim != 2 or T.shape[1] != 3: + raise ValueError("Invalid T") + if rotation.ndim != 3 or rotation.shape[1:] != (3, 3): + raise ValueError("Invalid rotation") + + new_R = R @ rotation.transpose(1, 2) + old_RT = torch.bmm(R, T[:, :, None]) + new_T = torch.matmul(new_R.transpose(1, 2), old_RT)[:, :, 0] + + return new_R, new_T + + +def join_cameras_as_batch(cameras_list: Sequence[CamerasBase]) -> CamerasBase: + """ + Create a batched cameras object by concatenating a list of input + cameras objects. All the tensor attributes will be joined along + the batch dimension. + + Args: + cameras_list: List of camera classes all of the same type and + on the same device. Each represents one or more cameras. + Returns: + cameras: single batched cameras object of the same + type as all the objects in the input list. 
+ """ + # Get the type and fields to join from the first camera in the batch + c0 = cameras_list[0] + fields = c0._FIELDS + shared_fields = c0._SHARED_FIELDS + + if not all(isinstance(c, CamerasBase) for c in cameras_list): + raise ValueError("cameras in cameras_list must inherit from CamerasBase") + + if not all(type(c) is type(c0) for c in cameras_list[1:]): + raise ValueError("All cameras must be of the same type") + + if not all(c.device == c0.device for c in cameras_list[1:]): + raise ValueError("All cameras in the batch must be on the same device") + + # Concat the fields to make a batched tensor + kwargs = {} + kwargs["device"] = c0.device + + for field in fields: + field_not_none = [(getattr(c, field) is not None) for c in cameras_list] + if not any(field_not_none): + continue + if not all(field_not_none): + raise ValueError(f"Attribute {field} is inconsistently present") + + attrs_list = [getattr(c, field) for c in cameras_list] + + if field in shared_fields: + # Only needs to be set once + if not all(a == attrs_list[0] for a in attrs_list): + raise ValueError(f"Attribute {field} is not constant across inputs") + + # e.g. 
"in_ndc" is set as attribute "_in_ndc" on the class + # but provided as "in_ndc" in the input args + if field.startswith("_"): + field = field[1:] + + kwargs[field] = attrs_list[0] + elif isinstance(attrs_list[0], torch.Tensor): + # In the init, all inputs will be converted to + # batched tensors before set as attributes + # Join as a tensor along the batch dimension + kwargs[field] = torch.cat(attrs_list, dim=0) + else: + raise ValueError(f"Field {field} type is not supported for batching") + + return c0.__class__(**kwargs) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/cameras.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/cameras.py new file mode 100644 index 0000000000000000000000000000000000000000..7ac72033bc61a5199792e228d25f75fd5fc50895 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/cameras.py @@ -0,0 +1,1874 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +import math +import warnings +from typing import Any, Dict, List, Optional, Sequence, Tuple, Union + +import numpy as np +import torch +import torch.nn.functional as F +from pytorch3d.common.datatypes import Device +from pytorch3d.transforms import Rotate, Transform3d, Translate + +from .utils import convert_to_tensors_and_broadcast, TensorProperties + + +# Default values for rotation and translation matrices. 
+_R = torch.eye(3)[None] # (1, 3, 3) +_T = torch.zeros(1, 3) # (1, 3) + +# An input which is a float per batch element +_BatchFloatType = Union[float, Sequence[float], torch.Tensor] + +# one or two floats per batch element +_FocalLengthType = Union[ + float, Sequence[Tuple[float]], Sequence[Tuple[float, float]], torch.Tensor +] + + +class CamerasBase(TensorProperties): + """ + `CamerasBase` implements a base class for all cameras. + + For cameras, there are four different coordinate systems (or spaces) + - World coordinate system: This is the system the object lives - the world. + - Camera view coordinate system: This is the system that has its origin on + the camera and the Z-axis perpendicular to the image plane. + In PyTorch3D, we assume that +X points left, and +Y points up and + +Z points out from the image plane. + The transformation from world --> view happens after applying a rotation (R) + and translation (T) + - NDC coordinate system: This is the normalized coordinate system that confines + points in a volume the rendered part of the object or scene, also known as + view volume. For square images, given the PyTorch3D convention, (+1, +1, znear) + is the top left near corner, and (-1, -1, zfar) is the bottom right far + corner of the volume. + The transformation from view --> NDC happens after applying the camera + projection matrix (P) if defined in NDC space. + For non square images, we scale the points such that smallest side + has range [-1, 1] and the largest side has range [-u, u], with u > 1. + - Screen coordinate system: This is another representation of the view volume with + the XY coordinates defined in image space instead of a normalized space. + + An illustration of the coordinate systems can be found in pytorch3d/docs/notes/cameras.md. 
+ + CameraBase defines methods that are common to all camera models: + - `get_camera_center` that returns the optical center of the camera in + world coordinates + - `get_world_to_view_transform` which returns a 3D transform from + world coordinates to the camera view coordinates (R, T) + - `get_full_projection_transform` which composes the projection + transform (P) with the world-to-view transform (R, T) + - `transform_points` which takes a set of input points in world coordinates and + projects to the space the camera is defined in (NDC or screen) + - `get_ndc_camera_transform` which defines the transform from screen/NDC to + PyTorch3D's NDC space + - `transform_points_ndc` which takes a set of points in world coordinates and + projects them to PyTorch3D's NDC space + - `transform_points_screen` which takes a set of points in world coordinates and + projects them to screen space + + For each new camera, one should implement the `get_projection_transform` + routine that returns the mapping from camera view coordinates to camera + coordinates (NDC or screen). + + Another useful function that is specific to each camera model is + `unproject_points` which sends points from camera coordinates (NDC or screen) + back to camera view or world coordinates depending on the `world_coordinates` + boolean argument of the function. + """ + + # Used in __getitem__ to index the relevant fields + # When creating a new camera, this should be set in the __init__ + _FIELDS: Tuple[str, ...] = () + + # Names of fields which are a constant property of the whole batch, rather + # than themselves a batch of data. + # When joining objects into a batch, they will have to agree. + _SHARED_FIELDS: Tuple[str, ...] = () + + def get_projection_transform(self, **kwargs): + """ + Calculate the projective transformation matrix. + + Args: + **kwargs: parameters for the projection can be passed in as keyword + arguments to override the default values set in `__init__`. 
+ + Return: + a `Transform3d` object which represents a batch of projection + matrices of shape (N, 3, 3) + """ + raise NotImplementedError() + + def unproject_points(self, xy_depth: torch.Tensor, **kwargs): + """ + Transform input points from camera coordinates (NDC or screen) + to the world / camera coordinates. + + Each of the input points `xy_depth` of shape (..., 3) is + a concatenation of the x, y location and its depth. + + For instance, for an input 2D tensor of shape `(num_points, 3)` + `xy_depth` takes the following form: + `xy_depth[i] = [x[i], y[i], depth[i]]`, + for a each point at an index `i`. + + The following example demonstrates the relationship between + `transform_points` and `unproject_points`: + + .. code-block:: python + + cameras = # camera object derived from CamerasBase + xyz = # 3D points of shape (batch_size, num_points, 3) + # transform xyz to the camera view coordinates + xyz_cam = cameras.get_world_to_view_transform().transform_points(xyz) + # extract the depth of each point as the 3rd coord of xyz_cam + depth = xyz_cam[:, :, 2:] + # project the points xyz to the camera + xy = cameras.transform_points(xyz)[:, :, :2] + # append depth to xy + xy_depth = torch.cat((xy, depth), dim=2) + # unproject to the world coordinates + xyz_unproj_world = cameras.unproject_points(xy_depth, world_coordinates=True) + print(torch.allclose(xyz, xyz_unproj_world)) # True + # unproject to the camera coordinates + xyz_unproj = cameras.unproject_points(xy_depth, world_coordinates=False) + print(torch.allclose(xyz_cam, xyz_unproj)) # True + + Args: + xy_depth: torch tensor of shape (..., 3). + world_coordinates: If `True`, unprojects the points back to world + coordinates using the camera extrinsics `R` and `T`. + `False` ignores `R` and `T` and unprojects to + the camera view coordinates. + from_ndc: If `False` (default), assumes xy part of input is in + NDC space if self.in_ndc(), otherwise in screen space. 
If + `True`, assumes xy is in NDC space even if the camera + is defined in screen space. + + Returns + new_points: unprojected points with the same shape as `xy_depth`. + """ + raise NotImplementedError() + + def get_camera_center(self, **kwargs) -> torch.Tensor: + """ + Return the 3D location of the camera optical center + in the world coordinates. + + Args: + **kwargs: parameters for the camera extrinsics can be passed in + as keyword arguments to override the default values + set in __init__. + + Setting R or T here will update the values set in init as these + values may be needed later on in the rendering pipeline e.g. for + lighting calculations. + + Returns: + C: a batch of 3D locations of shape (N, 3) denoting + the locations of the center of each camera in the batch. + """ + w2v_trans = self.get_world_to_view_transform(**kwargs) + P = w2v_trans.inverse().get_matrix() + # the camera center is the translation component (the first 3 elements + # of the last row) of the inverted world-to-view + # transform (4x4 RT matrix) + C = P[:, 3, :3] + return C + + def get_world_to_view_transform(self, **kwargs) -> Transform3d: + """ + Return the world-to-view transform. + + Args: + **kwargs: parameters for the camera extrinsics can be passed in + as keyword arguments to override the default values + set in __init__. + + Setting R and T here will update the values set in init as these + values may be needed later on in the rendering pipeline e.g. for + lighting calculations. + + Returns: + A Transform3d object which represents a batch of transforms + of shape (N, 3, 3) + """ + R: torch.Tensor = kwargs.get("R", self.R) + T: torch.Tensor = kwargs.get("T", self.T) + self.R = R + self.T = T + world_to_view_transform = get_world_to_view_transform(R=R, T=T) + return world_to_view_transform + + def get_full_projection_transform(self, **kwargs) -> Transform3d: + """ + Return the full world-to-camera transform composing the + world-to-view and view-to-camera transforms. 
+ If camera is defined in NDC space, the projected points are in NDC space. + If camera is defined in screen space, the projected points are in screen space. + + Args: + **kwargs: parameters for the projection transforms can be passed in + as keyword arguments to override the default values + set in __init__. + + Setting R and T here will update the values set in init as these + values may be needed later on in the rendering pipeline e.g. for + lighting calculations. + + Returns: + a Transform3d object which represents a batch of transforms + of shape (N, 3, 3) + """ + self.R: torch.Tensor = kwargs.get("R", self.R) + self.T: torch.Tensor = kwargs.get("T", self.T) + world_to_view_transform = self.get_world_to_view_transform(R=self.R, T=self.T) + view_to_proj_transform = self.get_projection_transform(**kwargs) + return world_to_view_transform.compose(view_to_proj_transform) + + def transform_points( + self, points, eps: Optional[float] = None, **kwargs + ) -> torch.Tensor: + """ + Transform input points from world to camera space. + If camera is defined in NDC space, the projected points are in NDC space. + If camera is defined in screen space, the projected points are in screen space. + + For `CamerasBase.transform_points`, setting `eps > 0` + stabilizes gradients since it leads to avoiding division + by excessively low numbers for points close to the camera plane. + + Args: + points: torch tensor of shape (..., 3). + eps: If eps!=None, the argument is used to clamp the + divisor in the homogeneous normalization of the points + transformed to the ndc space. Please see + `transforms.Transform3d.transform_points` for details. + + For `CamerasBase.transform_points`, setting `eps > 0` + stabilizes gradients since it leads to avoiding division + by excessively low numbers for points close to the + camera plane. + + Returns + new_points: transformed points with the same shape as the input. 
+ """ + world_to_proj_transform = self.get_full_projection_transform(**kwargs) + return world_to_proj_transform.transform_points(points, eps=eps) + + def get_ndc_camera_transform(self, **kwargs) -> Transform3d: + """ + Returns the transform from camera projection space (screen or NDC) to NDC space. + For cameras that can be specified in screen space, this transform + allows points to be converted from screen to NDC space. + The default transform scales the points from [0, W]x[0, H] + to [-1, 1]x[-u, u] or [-u, u]x[-1, 1] where u > 1 is the aspect ratio of the image. + This function should be modified per camera definitions if need be, + e.g. for Perspective/Orthographic cameras we provide a custom implementation. + This transform assumes PyTorch3D coordinate system conventions for + both the NDC space and the input points. + + This transform interfaces with the PyTorch3D renderer which assumes + input points to the renderer to be in NDC space. + """ + if self.in_ndc(): + return Transform3d(device=self.device, dtype=torch.float32) + else: + # For custom cameras which can be defined in screen space, + # users might might have to implement the screen to NDC transform based + # on the definition of the camera parameters. + # See PerspectiveCameras/OrthographicCameras for an example. + # We don't flip xy because we assume that world points are in + # PyTorch3D coordinates, and thus conversion from screen to ndc + # is a mere scaling from image to [-1, 1] scale. + image_size = kwargs.get("image_size", self.get_image_size()) + return get_screen_to_ndc_transform( + self, with_xyflip=False, image_size=image_size + ) + + def transform_points_ndc( + self, points, eps: Optional[float] = None, **kwargs + ) -> torch.Tensor: + """ + Transforms points from PyTorch3D world/camera space to NDC space. + Input points follow the PyTorch3D coordinate system conventions: +X left, +Y up. + Output points are in NDC space: +X left, +Y up, origin at image center. 
+ + Args: + points: torch tensor of shape (..., 3). + eps: If eps!=None, the argument is used to clamp the + divisor in the homogeneous normalization of the points + transformed to the ndc space. Please see + `transforms.Transform3d.transform_points` for details. + + For `CamerasBase.transform_points`, setting `eps > 0` + stabilizes gradients since it leads to avoiding division + by excessively low numbers for points close to the + camera plane. + + Returns + new_points: transformed points with the same shape as the input. + """ + world_to_ndc_transform = self.get_full_projection_transform(**kwargs) + if not self.in_ndc(): + to_ndc_transform = self.get_ndc_camera_transform(**kwargs) + world_to_ndc_transform = world_to_ndc_transform.compose(to_ndc_transform) + + return world_to_ndc_transform.transform_points(points, eps=eps) + + def transform_points_screen( + self, points, eps: Optional[float] = None, with_xyflip: bool = True, **kwargs + ) -> torch.Tensor: + """ + Transforms points from PyTorch3D world/camera space to screen space. + Input points follow the PyTorch3D coordinate system conventions: +X left, +Y up. + Output points are in screen space: +X right, +Y down, origin at top left corner. + + Args: + points: torch tensor of shape (..., 3). + eps: If eps!=None, the argument is used to clamp the + divisor in the homogeneous normalization of the points + transformed to the ndc space. Please see + `transforms.Transform3d.transform_points` for details. + + For `CamerasBase.transform_points`, setting `eps > 0` + stabilizes gradients since it leads to avoiding division + by excessively low numbers for points close to the + camera plane. + with_xyflip: If True, flip x and y directions. In world/camera/ndc coords, + +x points to the left and +y up. If with_xyflip is true, in screen + coords +x points right, and +y down, following the usual RGB image + convention. Warning: do not set to False unless you know what you're + doing! 
+ + Returns + new_points: transformed points with the same shape as the input. + """ + points_ndc = self.transform_points_ndc(points, eps=eps, **kwargs) + image_size = kwargs.get("image_size", self.get_image_size()) + return get_ndc_to_screen_transform( + self, with_xyflip=with_xyflip, image_size=image_size + ).transform_points(points_ndc, eps=eps) + + def clone(self): + """ + Returns a copy of `self`. + """ + cam_type = type(self) + other = cam_type(device=self.device) + return super().clone(other) + + def is_perspective(self): + raise NotImplementedError() + + def in_ndc(self): + """ + Specifies whether the camera is defined in NDC space + or in screen (image) space + """ + raise NotImplementedError() + + def get_znear(self): + return getattr(self, "znear", None) + + def get_image_size(self): + """ + Returns the image size, if provided, expected in the form of (height, width) + The image size is used for conversion of projected points to screen coordinates. + """ + return getattr(self, "image_size", None) + + def __getitem__( + self, index: Union[int, List[int], torch.BoolTensor, torch.LongTensor] + ) -> "CamerasBase": + """ + Override for the __getitem__ method in TensorProperties which needs to be + refactored. + + Args: + index: an integer index, list/tensor of integer indices, or tensor of boolean + indicators used to filter all the fields in the cameras given by self._FIELDS. + Returns: + an instance of the current cameras class with only the values at the selected index. + """ + + kwargs = {} + + tensor_types = { + # pyre-fixme[16]: Module `cuda` has no attribute `BoolTensor`. + "bool": (torch.BoolTensor, torch.cuda.BoolTensor), + # pyre-fixme[16]: Module `cuda` has no attribute `LongTensor`. 
+ "long": (torch.LongTensor, torch.cuda.LongTensor), + } + if not isinstance( + index, (int, list, *tensor_types["bool"], *tensor_types["long"]) + ) or ( + isinstance(index, list) + and not all(isinstance(i, int) and not isinstance(i, bool) for i in index) + ): + msg = ( + "Invalid index type, expected int, List[int] or Bool/LongTensor; got %r" + ) + raise ValueError(msg % type(index)) + + if isinstance(index, int): + index = [index] + + if isinstance(index, tensor_types["bool"]): + # pyre-fixme[16]: Item `List` of `Union[List[int], BoolTensor, + # LongTensor]` has no attribute `ndim`. + # pyre-fixme[16]: Item `List` of `Union[List[int], BoolTensor, + # LongTensor]` has no attribute `shape`. + if index.ndim != 1 or index.shape[0] != len(self): + raise ValueError( + # pyre-fixme[16]: Item `List` of `Union[List[int], BoolTensor, + # LongTensor]` has no attribute `shape`. + f"Boolean index of shape {index.shape} does not match cameras" + ) + elif max(index) >= len(self): + raise IndexError(f"Index {max(index)} is out of bounds for select cameras") + + for field in self._FIELDS: + val = getattr(self, field, None) + if val is None: + continue + + # e.g. 
"in_ndc" is set as attribute "_in_ndc" on the class + # but provided as "in_ndc" on initialization + if field.startswith("_"): + field = field[1:] + + if isinstance(val, (str, bool)): + kwargs[field] = val + elif isinstance(val, torch.Tensor): + # In the init, all inputs will be converted to + # tensors before setting as attributes + kwargs[field] = val[index] + else: + raise ValueError(f"Field {field} type is not supported for indexing") + + kwargs["device"] = self.device + return self.__class__(**kwargs) + + +############################################################ +# Field of View Camera Classes # +############################################################ + + +def OpenGLPerspectiveCameras( + znear: _BatchFloatType = 1.0, + zfar: _BatchFloatType = 100.0, + aspect_ratio: _BatchFloatType = 1.0, + fov: _BatchFloatType = 60.0, + degrees: bool = True, + R: torch.Tensor = _R, + T: torch.Tensor = _T, + device: Device = "cpu", +) -> "FoVPerspectiveCameras": + """ + OpenGLPerspectiveCameras has been DEPRECATED. Use FoVPerspectiveCameras instead. + Preserving OpenGLPerspectiveCameras for backward compatibility. + """ + + warnings.warn( + """OpenGLPerspectiveCameras is deprecated, + Use FoVPerspectiveCameras instead. + OpenGLPerspectiveCameras will be removed in future releases.""", + PendingDeprecationWarning, + ) + + return FoVPerspectiveCameras( + znear=znear, + zfar=zfar, + aspect_ratio=aspect_ratio, + fov=fov, + degrees=degrees, + R=R, + T=T, + device=device, + ) + + +class FoVPerspectiveCameras(CamerasBase): + """ + A class which stores a batch of parameters to generate a batch of + projection matrices by specifying the field of view. + The definitions of the parameters follow the OpenGL perspective camera. + + The extrinsics of the camera (R and T matrices) can also be set in the + initializer or passed in to `get_full_projection_transform` to get + the full transformation from world -> ndc. 
+ + The `transform_points` method calculates the full world -> ndc transform + and then applies it to the input points. + + The transforms can also be returned separately as Transform3d objects. + + * Setting the Aspect Ratio for Non Square Images * + + If the desired output image size is non square (i.e. a tuple of (H, W) where H != W) + the aspect ratio needs special consideration: There are two aspect ratios + to be aware of: + - the aspect ratio of each pixel + - the aspect ratio of the output image + The `aspect_ratio` setting in the FoVPerspectiveCameras sets the + pixel aspect ratio. When using this camera with the differentiable rasterizer + be aware that in the rasterizer we assume square pixels, but allow + variable image aspect ratio (i.e rectangle images). + + In most cases you will want to set the camera `aspect_ratio=1.0` + (i.e. square pixels) and only vary the output image dimensions in pixels + for rasterization. + """ + + # For __getitem__ + _FIELDS = ( + "K", + "znear", + "zfar", + "aspect_ratio", + "fov", + "R", + "T", + "degrees", + ) + + _SHARED_FIELDS = ("degrees",) + + def __init__( + self, + znear: _BatchFloatType = 1.0, + zfar: _BatchFloatType = 100.0, + aspect_ratio: _BatchFloatType = 1.0, + fov: _BatchFloatType = 60.0, + degrees: bool = True, + R: torch.Tensor = _R, + T: torch.Tensor = _T, + K: Optional[torch.Tensor] = None, + device: Device = "cpu", + ) -> None: + """ + + Args: + znear: near clipping plane of the view frustrum. + zfar: far clipping plane of the view frustrum. + aspect_ratio: aspect ratio of the image pixels. + 1.0 indicates square pixels. + fov: field of view angle of the camera. + degrees: bool, set to True if fov is specified in degrees. 
+ R: Rotation matrix of shape (N, 3, 3) + T: Translation matrix of shape (N, 3) + K: (optional) A calibration matrix of shape (N, 4, 4) + If provided, don't need znear, zfar, fov, aspect_ratio, degrees + device: Device (as str or torch.device) + """ + # The initializer formats all inputs to torch tensors and broadcasts + # all the inputs to have the same batch dimension where necessary. + super().__init__( + device=device, + znear=znear, + zfar=zfar, + aspect_ratio=aspect_ratio, + fov=fov, + R=R, + T=T, + K=K, + ) + + # No need to convert to tensor or broadcast. + self.degrees = degrees + + def compute_projection_matrix( + self, znear, zfar, fov, aspect_ratio, degrees: bool + ) -> torch.Tensor: + """ + Compute the calibration matrix K of shape (N, 4, 4) + + Args: + znear: near clipping plane of the view frustrum. + zfar: far clipping plane of the view frustrum. + fov: field of view angle of the camera. + aspect_ratio: aspect ratio of the image pixels. + 1.0 indicates square pixels. + degrees: bool, set to True if fov is specified in degrees. + + Returns: + torch.FloatTensor of the calibration matrix with shape (N, 4, 4) + """ + K = torch.zeros((self._N, 4, 4), device=self.device, dtype=torch.float32) + ones = torch.ones((self._N), dtype=torch.float32, device=self.device) + if degrees: + fov = (np.pi / 180) * fov + + if not torch.is_tensor(fov): + fov = torch.tensor(fov, device=self.device) + tanHalfFov = torch.tan((fov / 2)) + max_y = tanHalfFov * znear + min_y = -max_y + max_x = max_y * aspect_ratio + min_x = -max_x + + # NOTE: In OpenGL the projection matrix changes the handedness of the + # coordinate frame. i.e the NDC space positive z direction is the + # camera space negative z direction. This is because the sign of the z + # in the projection matrix is set to -1.0. + # In pytorch3d we maintain a right handed coordinate system throughout + # so the so the z sign is 1.0. 
+ z_sign = 1.0 + + # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`. + K[:, 0, 0] = 2.0 * znear / (max_x - min_x) + # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`. + K[:, 1, 1] = 2.0 * znear / (max_y - min_y) + K[:, 0, 2] = (max_x + min_x) / (max_x - min_x) + K[:, 1, 2] = (max_y + min_y) / (max_y - min_y) + K[:, 3, 2] = z_sign * ones + + # NOTE: This maps the z coordinate from [0, 1] where z = 0 if the point + # is at the near clipping plane and z = 1 when the point is at the far + # clipping plane. + K[:, 2, 2] = z_sign * zfar / (zfar - znear) + K[:, 2, 3] = -(zfar * znear) / (zfar - znear) + + return K + + def get_projection_transform(self, **kwargs) -> Transform3d: + """ + Calculate the perspective projection matrix with a symmetric + viewing frustrum. Use column major order. + The viewing frustrum will be projected into ndc, s.t. + (max_x, max_y) -> (+1, +1) + (min_x, min_y) -> (-1, -1) + + Args: + **kwargs: parameters for the projection can be passed in as keyword + arguments to override the default values set in `__init__`. + + Return: + a Transform3d object which represents a batch of projection + matrices of shape (N, 4, 4) + + .. 
code-block:: python + + h1 = (max_y + min_y)/(max_y - min_y) + w1 = (max_x + min_x)/(max_x - min_x) + tanhalffov = tan((fov/2)) + s1 = 1/tanhalffov + s2 = 1/(tanhalffov * (aspect_ratio)) + + # To map z to the range [0, 1] use: + f1 = far / (far - near) + f2 = -(far * near) / (far - near) + + # Projection matrix + K = [ + [s1, 0, w1, 0], + [0, s2, h1, 0], + [0, 0, f1, f2], + [0, 0, 1, 0], + ] + """ + K = kwargs.get("K", self.K) + if K is not None: + if K.shape != (self._N, 4, 4): + msg = "Expected K to have shape of (%r, 4, 4)" + raise ValueError(msg % (self._N)) + else: + K = self.compute_projection_matrix( + kwargs.get("znear", self.znear), + kwargs.get("zfar", self.zfar), + kwargs.get("fov", self.fov), + kwargs.get("aspect_ratio", self.aspect_ratio), + kwargs.get("degrees", self.degrees), + ) + + # Transpose the projection matrix as PyTorch3D transforms use row vectors. + transform = Transform3d( + matrix=K.transpose(1, 2).contiguous(), device=self.device + ) + return transform + + def unproject_points( + self, + xy_depth: torch.Tensor, + world_coordinates: bool = True, + scaled_depth_input: bool = False, + **kwargs, + ) -> torch.Tensor: + """>! + FoV cameras further allow for passing depth in world units + (`scaled_depth_input=False`) or in the [0, 1]-normalized units + (`scaled_depth_input=True`) + + Args: + scaled_depth_input: If `True`, assumes the input depth is in + the [0, 1]-normalized units. If `False` the input depth is in + the world units. 
+ """ + + # obtain the relevant transformation to ndc + if world_coordinates: + to_ndc_transform = self.get_full_projection_transform() + else: + to_ndc_transform = self.get_projection_transform() + + if scaled_depth_input: + # the input is scaled depth, so we don't have to do anything + xy_sdepth = xy_depth + else: + # parse out important values from the projection matrix + K_matrix = self.get_projection_transform(**kwargs.copy()).get_matrix() + # parse out f1, f2 from K_matrix + unsqueeze_shape = [1] * xy_depth.dim() + unsqueeze_shape[0] = K_matrix.shape[0] + f1 = K_matrix[:, 2, 2].reshape(unsqueeze_shape) + f2 = K_matrix[:, 3, 2].reshape(unsqueeze_shape) + # get the scaled depth + sdepth = (f1 * xy_depth[..., 2:3] + f2) / xy_depth[..., 2:3] + # concatenate xy + scaled depth + xy_sdepth = torch.cat((xy_depth[..., 0:2], sdepth), dim=-1) + + # unproject with inverse of the projection + unprojection_transform = to_ndc_transform.inverse() + return unprojection_transform.transform_points(xy_sdepth) + + def is_perspective(self): + return True + + def in_ndc(self): + return True + + +def OpenGLOrthographicCameras( + znear: _BatchFloatType = 1.0, + zfar: _BatchFloatType = 100.0, + top: _BatchFloatType = 1.0, + bottom: _BatchFloatType = -1.0, + left: _BatchFloatType = -1.0, + right: _BatchFloatType = 1.0, + scale_xyz=((1.0, 1.0, 1.0),), # (1, 3) + R: torch.Tensor = _R, + T: torch.Tensor = _T, + device: Device = "cpu", +) -> "FoVOrthographicCameras": + """ + OpenGLOrthographicCameras has been DEPRECATED. Use FoVOrthographicCameras instead. + Preserving OpenGLOrthographicCameras for backward compatibility. + """ + + warnings.warn( + """OpenGLOrthographicCameras is deprecated, + Use FoVOrthographicCameras instead. 
+ OpenGLOrthographicCameras will be removed in future releases.""", + PendingDeprecationWarning, + ) + + return FoVOrthographicCameras( + znear=znear, + zfar=zfar, + max_y=top, + min_y=bottom, + max_x=right, + min_x=left, + scale_xyz=scale_xyz, + R=R, + T=T, + device=device, + ) + + +class FoVOrthographicCameras(CamerasBase): + """ + A class which stores a batch of parameters to generate a batch of + projection matrices by specifying the field of view. + The definitions of the parameters follow the OpenGL orthographic camera. + """ + + # For __getitem__ + _FIELDS = ( + "K", + "znear", + "zfar", + "R", + "T", + "max_y", + "min_y", + "max_x", + "min_x", + "scale_xyz", + ) + + def __init__( + self, + znear: _BatchFloatType = 1.0, + zfar: _BatchFloatType = 100.0, + max_y: _BatchFloatType = 1.0, + min_y: _BatchFloatType = -1.0, + max_x: _BatchFloatType = 1.0, + min_x: _BatchFloatType = -1.0, + scale_xyz=((1.0, 1.0, 1.0),), # (1, 3) + R: torch.Tensor = _R, + T: torch.Tensor = _T, + K: Optional[torch.Tensor] = None, + device: Device = "cpu", + ): + """ + + Args: + znear: near clipping plane of the view frustrum. + zfar: far clipping plane of the view frustrum. + max_y: maximum y coordinate of the frustrum. + min_y: minimum y coordinate of the frustrum. + max_x: maximum x coordinate of the frustrum. + min_x: minimum x coordinate of the frustrum + scale_xyz: scale factors for each axis of shape (N, 3). + R: Rotation matrix of shape (N, 3, 3). + T: Translation of shape (N, 3). + K: (optional) A calibration matrix of shape (N, 4, 4) + If provided, don't need znear, zfar, max_y, min_y, max_x, min_x, scale_xyz + device: torch.device or string. + + Only need to set min_x, max_x, min_y, max_y for viewing frustrums + which are non symmetric about the origin. + """ + # The initializer formats all inputs to torch tensors and broadcasts + # all the inputs to have the same batch dimension where necessary. 
+ super().__init__( + device=device, + znear=znear, + zfar=zfar, + max_y=max_y, + min_y=min_y, + max_x=max_x, + min_x=min_x, + scale_xyz=scale_xyz, + R=R, + T=T, + K=K, + ) + + def compute_projection_matrix( + self, znear, zfar, max_x, min_x, max_y, min_y, scale_xyz + ) -> torch.Tensor: + """ + Compute the calibration matrix K of shape (N, 4, 4) + + Args: + znear: near clipping plane of the view frustrum. + zfar: far clipping plane of the view frustrum. + max_x: maximum x coordinate of the frustrum. + min_x: minimum x coordinate of the frustrum + max_y: maximum y coordinate of the frustrum. + min_y: minimum y coordinate of the frustrum. + scale_xyz: scale factors for each axis of shape (N, 3). + """ + K = torch.zeros((self._N, 4, 4), dtype=torch.float32, device=self.device) + ones = torch.ones((self._N), dtype=torch.float32, device=self.device) + # NOTE: OpenGL flips handedness of coordinate system between camera + # space and NDC space so z sign is -ve. In PyTorch3D we maintain a + # right handed coordinate system throughout. + z_sign = +1.0 + + K[:, 0, 0] = (2.0 / (max_x - min_x)) * scale_xyz[:, 0] + K[:, 1, 1] = (2.0 / (max_y - min_y)) * scale_xyz[:, 1] + K[:, 0, 3] = -(max_x + min_x) / (max_x - min_x) + K[:, 1, 3] = -(max_y + min_y) / (max_y - min_y) + K[:, 3, 3] = ones + + # NOTE: This maps the z coordinate to the range [0, 1] and replaces the + # the OpenGL z normalization to [-1, 1] + K[:, 2, 2] = z_sign * (1.0 / (zfar - znear)) * scale_xyz[:, 2] + K[:, 2, 3] = -znear / (zfar - znear) + + return K + + def get_projection_transform(self, **kwargs) -> Transform3d: + """ + Calculate the orthographic projection matrix. + Use column major order. + + Args: + **kwargs: parameters for the projection can be passed in to + override the default values set in __init__. + Return: + a Transform3d object which represents a batch of projection + matrices of shape (N, 4, 4) + + .. 
code-block:: python + + scale_x = 2 / (max_x - min_x) + scale_y = 2 / (max_y - min_y) + scale_z = 2 / (far-near) + mid_x = (max_x + min_x) / (max_x - min_x) + mix_y = (max_y + min_y) / (max_y - min_y) + mid_z = (far + near) / (far - near) + + K = [ + [scale_x, 0, 0, -mid_x], + [0, scale_y, 0, -mix_y], + [0, 0, -scale_z, -mid_z], + [0, 0, 0, 1], + ] + """ + K = kwargs.get("K", self.K) + if K is not None: + if K.shape != (self._N, 4, 4): + msg = "Expected K to have shape of (%r, 4, 4)" + raise ValueError(msg % (self._N)) + else: + K = self.compute_projection_matrix( + kwargs.get("znear", self.znear), + kwargs.get("zfar", self.zfar), + kwargs.get("max_x", self.max_x), + kwargs.get("min_x", self.min_x), + kwargs.get("max_y", self.max_y), + kwargs.get("min_y", self.min_y), + kwargs.get("scale_xyz", self.scale_xyz), + ) + + transform = Transform3d( + matrix=K.transpose(1, 2).contiguous(), device=self.device + ) + return transform + + def unproject_points( + self, + xy_depth: torch.Tensor, + world_coordinates: bool = True, + scaled_depth_input: bool = False, + **kwargs, + ) -> torch.Tensor: + """>! + FoV cameras further allow for passing depth in world units + (`scaled_depth_input=False`) or in the [0, 1]-normalized units + (`scaled_depth_input=True`) + + Args: + scaled_depth_input: If `True`, assumes the input depth is in + the [0, 1]-normalized units. If `False` the input depth is in + the world units. 
+ """ + + if world_coordinates: + to_ndc_transform = self.get_full_projection_transform(**kwargs.copy()) + else: + to_ndc_transform = self.get_projection_transform(**kwargs.copy()) + + if scaled_depth_input: + # the input depth is already scaled + xy_sdepth = xy_depth + else: + # we have to obtain the scaled depth first + K = self.get_projection_transform(**kwargs).get_matrix() + unsqueeze_shape = [1] * K.dim() + unsqueeze_shape[0] = K.shape[0] + mid_z = K[:, 3, 2].reshape(unsqueeze_shape) + scale_z = K[:, 2, 2].reshape(unsqueeze_shape) + scaled_depth = scale_z * xy_depth[..., 2:3] + mid_z + # cat xy and scaled depth + xy_sdepth = torch.cat((xy_depth[..., :2], scaled_depth), dim=-1) + # finally invert the transform + unprojection_transform = to_ndc_transform.inverse() + return unprojection_transform.transform_points(xy_sdepth) + + def is_perspective(self): + return False + + def in_ndc(self): + return True + + +############################################################ +# MultiView Camera Classes # +############################################################ +""" +Note that the MultiView Cameras accept parameters in NDC space. +""" + + +def SfMPerspectiveCameras( + focal_length: _FocalLengthType = 1.0, + principal_point=((0.0, 0.0),), + R: torch.Tensor = _R, + T: torch.Tensor = _T, + device: Device = "cpu", +) -> "PerspectiveCameras": + """ + SfMPerspectiveCameras has been DEPRECATED. Use PerspectiveCameras instead. + Preserving SfMPerspectiveCameras for backward compatibility. + """ + + warnings.warn( + """SfMPerspectiveCameras is deprecated, + Use PerspectiveCameras instead. 
+ SfMPerspectiveCameras will be removed in future releases.""", + PendingDeprecationWarning, + ) + + return PerspectiveCameras( + focal_length=focal_length, + principal_point=principal_point, + R=R, + T=T, + device=device, + ) + + +class PerspectiveCameras(CamerasBase): + """ + A class which stores a batch of parameters to generate a batch of + transformation matrices using the multi-view geometry convention for + perspective camera. + + Parameters for this camera are specified in NDC if `in_ndc` is set to True. + If parameters are specified in screen space, `in_ndc` must be set to False. + """ + + # For __getitem__ + _FIELDS = ( + "K", + "R", + "T", + "focal_length", + "principal_point", + "_in_ndc", # arg is in_ndc but attribute set as _in_ndc + "image_size", + ) + + _SHARED_FIELDS = ("_in_ndc",) + + def __init__( + self, + focal_length: _FocalLengthType = 1.0, + principal_point=((0.0, 0.0),), + R: torch.Tensor = _R, + T: torch.Tensor = _T, + K: Optional[torch.Tensor] = None, + device: Device = "cpu", + in_ndc: bool = True, + image_size: Optional[Union[List, Tuple, torch.Tensor]] = None, + ) -> None: + """ + + Args: + focal_length: Focal length of the camera in world units. + A tensor of shape (N, 1) or (N, 2) for + square and non-square pixels respectively. + principal_point: xy coordinates of the center of + the principal point of the camera in pixels. + A tensor of shape (N, 2). + in_ndc: True if camera parameters are specified in NDC. + If camera parameters are in screen space, it must + be set to False. + R: Rotation matrix of shape (N, 3, 3) + T: Translation matrix of shape (N, 3) + K: (optional) A calibration matrix of shape (N, 4, 4) + If provided, don't need focal_length, principal_point + image_size: (height, width) of image size. + A tensor of shape (N, 2) or a list/tuple. Required for screen cameras. 
+ device: torch.device or string + """ + # The initializer formats all inputs to torch tensors and broadcasts + # all the inputs to have the same batch dimension where necessary. + kwargs = {"image_size": image_size} if image_size is not None else {} + super().__init__( + device=device, + focal_length=focal_length, + principal_point=principal_point, + R=R, + T=T, + K=K, + _in_ndc=in_ndc, + **kwargs, # pyre-ignore + ) + if image_size is not None: + if (self.image_size < 1).any(): # pyre-ignore + raise ValueError("Image_size provided has invalid values") + else: + self.image_size = None + + # When focal length is provided as one value, expand to + # create (N, 2) shape tensor + if self.focal_length.ndim == 1: # (N,) + self.focal_length = self.focal_length[:, None] # (N, 1) + self.focal_length = self.focal_length.expand(-1, 2) # (N, 2) + + def get_projection_transform(self, **kwargs) -> Transform3d: + """ + Calculate the projection matrix using the + multi-view geometry convention. + + Args: + **kwargs: parameters for the projection can be passed in as keyword + arguments to override the default values set in __init__. + + Returns: + A `Transform3d` object with a batch of `N` projection transforms. + + .. 
code-block:: python + + fx = focal_length[:, 0] + fy = focal_length[:, 1] + px = principal_point[:, 0] + py = principal_point[:, 1] + + K = [ + [fx, 0, px, 0], + [0, fy, py, 0], + [0, 0, 0, 1], + [0, 0, 1, 0], + ] + """ + K = kwargs.get("K", self.K) + if K is not None: + if K.shape != (self._N, 4, 4): + msg = "Expected K to have shape of (%r, 4, 4)" + raise ValueError(msg % (self._N)) + else: + K = _get_sfm_calibration_matrix( + self._N, + self.device, + kwargs.get("focal_length", self.focal_length), + kwargs.get("principal_point", self.principal_point), + orthographic=False, + ) + + transform = Transform3d( + matrix=K.transpose(1, 2).contiguous(), device=self.device + ) + return transform + + def unproject_points( + self, + xy_depth: torch.Tensor, + world_coordinates: bool = True, + from_ndc: bool = False, + **kwargs, + ) -> torch.Tensor: + """ + Args: + from_ndc: If `False` (default), assumes xy part of input is in + NDC space if self.in_ndc(), otherwise in screen space. If + `True`, assumes xy is in NDC space even if the camera + is defined in screen space. + """ + if world_coordinates: + to_camera_transform = self.get_full_projection_transform(**kwargs) + else: + to_camera_transform = self.get_projection_transform(**kwargs) + if from_ndc: + to_camera_transform = to_camera_transform.compose( + self.get_ndc_camera_transform() + ) + + unprojection_transform = to_camera_transform.inverse() + xy_inv_depth = torch.cat( + (xy_depth[..., :2], 1.0 / xy_depth[..., 2:3]), dim=-1 # type: ignore + ) + return unprojection_transform.transform_points(xy_inv_depth) + + def get_principal_point(self, **kwargs) -> torch.Tensor: + """ + Return the camera's principal point + + Args: + **kwargs: parameters for the camera extrinsics can be passed in + as keyword arguments to override the default values + set in __init__. 
+ """ + proj_mat = self.get_projection_transform(**kwargs).get_matrix() + return proj_mat[:, 2, :2] + + def get_ndc_camera_transform(self, **kwargs) -> Transform3d: + """ + Returns the transform from camera projection space (screen or NDC) to NDC space. + If the camera is defined already in NDC space, the transform is identity. + For cameras defined in screen space, we adjust the principal point computation + which is defined in the image space (commonly) and scale the points to NDC space. + + This transform leaves the depth unchanged. + + Important: This transforms assumes PyTorch3D conventions for the input points, + i.e. +X left, +Y up. + """ + if self.in_ndc(): + ndc_transform = Transform3d(device=self.device, dtype=torch.float32) + else: + # when cameras are defined in screen/image space, the principal point is + # provided in the (+X right, +Y down), aka image, coordinate system. + # Since input points are defined in the PyTorch3D system (+X left, +Y up), + # we need to adjust for the principal point transform. 
+ pr_point_fix = torch.zeros( + (self._N, 4, 4), device=self.device, dtype=torch.float32 + ) + pr_point_fix[:, 0, 0] = 1.0 + pr_point_fix[:, 1, 1] = 1.0 + pr_point_fix[:, 2, 2] = 1.0 + pr_point_fix[:, 3, 3] = 1.0 + pr_point_fix[:, :2, 3] = -2.0 * self.get_principal_point(**kwargs) + pr_point_fix_transform = Transform3d( + matrix=pr_point_fix.transpose(1, 2).contiguous(), device=self.device + ) + image_size = kwargs.get("image_size", self.get_image_size()) + screen_to_ndc_transform = get_screen_to_ndc_transform( + self, with_xyflip=False, image_size=image_size + ) + ndc_transform = pr_point_fix_transform.compose(screen_to_ndc_transform) + + return ndc_transform + + def is_perspective(self): + return True + + def in_ndc(self): + return self._in_ndc + + +def SfMOrthographicCameras( + focal_length: _FocalLengthType = 1.0, + principal_point=((0.0, 0.0),), + R: torch.Tensor = _R, + T: torch.Tensor = _T, + device: Device = "cpu", +) -> "OrthographicCameras": + """ + SfMOrthographicCameras has been DEPRECATED. Use OrthographicCameras instead. + Preserving SfMOrthographicCameras for backward compatibility. + """ + + warnings.warn( + """SfMOrthographicCameras is deprecated, + Use OrthographicCameras instead. + SfMOrthographicCameras will be removed in future releases.""", + PendingDeprecationWarning, + ) + + return OrthographicCameras( + focal_length=focal_length, + principal_point=principal_point, + R=R, + T=T, + device=device, + ) + + +class OrthographicCameras(CamerasBase): + """ + A class which stores a batch of parameters to generate a batch of + transformation matrices using the multi-view geometry convention for + orthographic camera. + + Parameters for this camera are specified in NDC if `in_ndc` is set to True. + If parameters are specified in screen space, `in_ndc` must be set to False. 
+ """ + + # For __getitem__ + _FIELDS = ( + "K", + "R", + "T", + "focal_length", + "principal_point", + "_in_ndc", + "image_size", + ) + + _SHARED_FIELDS = ("_in_ndc",) + + def __init__( + self, + focal_length: _FocalLengthType = 1.0, + principal_point=((0.0, 0.0),), + R: torch.Tensor = _R, + T: torch.Tensor = _T, + K: Optional[torch.Tensor] = None, + device: Device = "cpu", + in_ndc: bool = True, + image_size: Optional[Union[List, Tuple, torch.Tensor]] = None, + ) -> None: + """ + + Args: + focal_length: Focal length of the camera in world units. + A tensor of shape (N, 1) or (N, 2) for + square and non-square pixels respectively. + principal_point: xy coordinates of the center of + the principal point of the camera in pixels. + A tensor of shape (N, 2). + in_ndc: True if camera parameters are specified in NDC. + If False, then camera parameters are in screen space. + R: Rotation matrix of shape (N, 3, 3) + T: Translation matrix of shape (N, 3) + K: (optional) A calibration matrix of shape (N, 4, 4) + If provided, don't need focal_length, principal_point, image_size + image_size: (height, width) of image size. + A tensor of shape (N, 2) or list/tuple. Required for screen cameras. + device: torch.device or string + """ + # The initializer formats all inputs to torch tensors and broadcasts + # all the inputs to have the same batch dimension where necessary. 
+ kwargs = {"image_size": image_size} if image_size is not None else {} + super().__init__( + device=device, + focal_length=focal_length, + principal_point=principal_point, + R=R, + T=T, + K=K, + _in_ndc=in_ndc, + **kwargs, # pyre-ignore + ) + if image_size is not None: + if (self.image_size < 1).any(): # pyre-ignore + raise ValueError("Image_size provided has invalid values") + else: + self.image_size = None + + # When focal length is provided as one value, expand to + # create (N, 2) shape tensor + if self.focal_length.ndim == 1: # (N,) + self.focal_length = self.focal_length[:, None] # (N, 1) + self.focal_length = self.focal_length.expand(-1, 2) # (N, 2) + + def get_projection_transform(self, **kwargs) -> Transform3d: + """ + Calculate the projection matrix using + the multi-view geometry convention. + + Args: + **kwargs: parameters for the projection can be passed in as keyword + arguments to override the default values set in __init__. + + Returns: + A `Transform3d` object with a batch of `N` projection transforms. + + .. 
code-block:: python + + fx = focal_length[:,0] + fy = focal_length[:,1] + px = principal_point[:,0] + py = principal_point[:,1] + + K = [ + [fx, 0, 0, px], + [0, fy, 0, py], + [0, 0, 1, 0], + [0, 0, 0, 1], + ] + """ + K = kwargs.get("K", self.K) + if K is not None: + if K.shape != (self._N, 4, 4): + msg = "Expected K to have shape of (%r, 4, 4)" + raise ValueError(msg % (self._N)) + else: + K = _get_sfm_calibration_matrix( + self._N, + self.device, + kwargs.get("focal_length", self.focal_length), + kwargs.get("principal_point", self.principal_point), + orthographic=True, + ) + + transform = Transform3d( + matrix=K.transpose(1, 2).contiguous(), device=self.device + ) + return transform + + def unproject_points( + self, + xy_depth: torch.Tensor, + world_coordinates: bool = True, + from_ndc: bool = False, + **kwargs, + ) -> torch.Tensor: + """ + Args: + from_ndc: If `False` (default), assumes xy part of input is in + NDC space if self.in_ndc(), otherwise in screen space. If + `True`, assumes xy is in NDC space even if the camera + is defined in screen space. + """ + if world_coordinates: + to_camera_transform = self.get_full_projection_transform(**kwargs) + else: + to_camera_transform = self.get_projection_transform(**kwargs) + if from_ndc: + to_camera_transform = to_camera_transform.compose( + self.get_ndc_camera_transform() + ) + + unprojection_transform = to_camera_transform.inverse() + return unprojection_transform.transform_points(xy_depth) + + def get_principal_point(self, **kwargs) -> torch.Tensor: + """ + Return the camera's principal point + + Args: + **kwargs: parameters for the camera extrinsics can be passed in + as keyword arguments to override the default values + set in __init__. + """ + proj_mat = self.get_projection_transform(**kwargs).get_matrix() + return proj_mat[:, 3, :2] + + def get_ndc_camera_transform(self, **kwargs) -> Transform3d: + """ + Returns the transform from camera projection space (screen or NDC) to NDC space. 
+ If the camera is defined already in NDC space, the transform is identity. + For cameras defined in screen space, we adjust the principal point computation + which is defined in the image space (commonly) and scale the points to NDC space. + + Important: This transforms assumes PyTorch3D conventions for the input points, + i.e. +X left, +Y up. + """ + if self.in_ndc(): + ndc_transform = Transform3d(device=self.device, dtype=torch.float32) + else: + # when cameras are defined in screen/image space, the principal point is + # provided in the (+X right, +Y down), aka image, coordinate system. + # Since input points are defined in the PyTorch3D system (+X left, +Y up), + # we need to adjust for the principal point transform. + pr_point_fix = torch.zeros( + (self._N, 4, 4), device=self.device, dtype=torch.float32 + ) + pr_point_fix[:, 0, 0] = 1.0 + pr_point_fix[:, 1, 1] = 1.0 + pr_point_fix[:, 2, 2] = 1.0 + pr_point_fix[:, 3, 3] = 1.0 + pr_point_fix[:, :2, 3] = -2.0 * self.get_principal_point(**kwargs) + pr_point_fix_transform = Transform3d( + matrix=pr_point_fix.transpose(1, 2).contiguous(), device=self.device + ) + image_size = kwargs.get("image_size", self.get_image_size()) + screen_to_ndc_transform = get_screen_to_ndc_transform( + self, with_xyflip=False, image_size=image_size + ) + ndc_transform = pr_point_fix_transform.compose(screen_to_ndc_transform) + + return ndc_transform + + def is_perspective(self): + return False + + def in_ndc(self): + return self._in_ndc + + +################################################ +# Helper functions for cameras # +################################################ + + +def _get_sfm_calibration_matrix( + N: int, + device: Device, + focal_length, + principal_point, + orthographic: bool = False, +) -> torch.Tensor: + """ + Returns a calibration matrix of a perspective/orthographic camera. + + Args: + N: Number of cameras. + focal_length: Focal length of the camera. 
+ principal_point: xy coordinates of the center of + the principal point of the camera in pixels. + orthographic: Boolean specifying if the camera is orthographic or not + + The calibration matrix `K` is set up as follows: + + .. code-block:: python + + fx = focal_length[:,0] + fy = focal_length[:,1] + px = principal_point[:,0] + py = principal_point[:,1] + + for orthographic==True: + K = [ + [fx, 0, 0, px], + [0, fy, 0, py], + [0, 0, 1, 0], + [0, 0, 0, 1], + ] + else: + K = [ + [fx, 0, px, 0], + [0, fy, py, 0], + [0, 0, 0, 1], + [0, 0, 1, 0], + ] + + Returns: + A calibration matrix `K` of the SfM-conventioned camera + of shape (N, 4, 4). + """ + + if not torch.is_tensor(focal_length): + focal_length = torch.tensor(focal_length, device=device) + + if focal_length.ndim in (0, 1) or focal_length.shape[1] == 1: + fx = fy = focal_length + else: + fx, fy = focal_length.unbind(1) + + if not torch.is_tensor(principal_point): + principal_point = torch.tensor(principal_point, device=device) + + px, py = principal_point.unbind(1) + + K = fx.new_zeros(N, 4, 4) + K[:, 0, 0] = fx + K[:, 1, 1] = fy + if orthographic: + K[:, 0, 3] = px + K[:, 1, 3] = py + K[:, 2, 2] = 1.0 + K[:, 3, 3] = 1.0 + else: + K[:, 0, 2] = px + K[:, 1, 2] = py + K[:, 3, 2] = 1.0 + K[:, 2, 3] = 1.0 + + return K + + +################################################ +# Helper functions for world to view transforms +################################################ + + +def get_world_to_view_transform( + R: torch.Tensor = _R, T: torch.Tensor = _T +) -> Transform3d: + """ + This function returns a Transform3d representing the transformation + matrix to go from world space to view space by applying a rotation and + a translation. + + PyTorch3D uses the same convention as Hartley & Zisserman. 
+ I.e., for camera extrinsic parameters R (rotation) and T (translation), + we map a 3D point `X_world` in world coordinates to + a point `X_cam` in camera coordinates with: + `X_cam = X_world R + T` + + Args: + R: (N, 3, 3) matrix representing the rotation. + T: (N, 3) matrix representing the translation. + + Returns: + a Transform3d object which represents the composed RT transformation. + + """ + # TODO: also support the case where RT is specified as one matrix + # of shape (N, 4, 4). + + if T.shape[0] != R.shape[0]: + msg = "Expected R, T to have the same batch dimension; got %r, %r" + raise ValueError(msg % (R.shape[0], T.shape[0])) + if T.dim() != 2 or T.shape[1:] != (3,): + msg = "Expected T to have shape (N, 3); got %r" + raise ValueError(msg % repr(T.shape)) + if R.dim() != 3 or R.shape[1:] != (3, 3): + msg = "Expected R to have shape (N, 3, 3); got %r" + raise ValueError(msg % repr(R.shape)) + + # Create a Transform3d object + T_ = Translate(T, device=T.device) + R_ = Rotate(R, device=R.device) + return R_.compose(T_) + + +def camera_position_from_spherical_angles( + distance: float, + elevation: float, + azimuth: float, + degrees: bool = True, + device: Device = "cpu", +) -> torch.Tensor: + """ + Calculate the location of the camera based on the distance away from + the target point, the elevation and azimuth angles. + + Args: + distance: distance of the camera from the object. + elevation, azimuth: angles. + The inputs distance, elevation and azimuth can be one of the following + - Python scalar + - Torch scalar + - Torch tensor of shape (N) or (1) + degrees: bool, whether the angles are specified in degrees or radians. + device: str or torch.device, device for new tensors to be placed on. + + The vectors are broadcast against each other so they all have shape (N, 1). + + Returns: + camera_position: (N, 3) xyz location of the camera. 
+ """ + broadcasted_args = convert_to_tensors_and_broadcast( + distance, elevation, azimuth, device=device + ) + dist, elev, azim = broadcasted_args + if degrees: + elev = math.pi / 180.0 * elev + azim = math.pi / 180.0 * azim + x = dist * torch.cos(elev) * torch.sin(azim) + y = dist * torch.sin(elev) + z = dist * torch.cos(elev) * torch.cos(azim) + camera_position = torch.stack([x, y, z], dim=1) + if camera_position.dim() == 0: + camera_position = camera_position.view(1, -1) # add batch dim. + return camera_position.view(-1, 3) + + +def look_at_rotation( + camera_position, at=((0, 0, 0),), up=((0, 1, 0),), device: Device = "cpu" +) -> torch.Tensor: + """ + This function takes a vector 'camera_position' which specifies the location + of the camera in world coordinates and two vectors `at` and `up` which + indicate the position of the object and the up directions of the world + coordinate system respectively. The object is assumed to be centered at + the origin. + + The output is a rotation matrix representing the transformation + from world coordinates -> view coordinates. + + Args: + camera_position: position of the camera in world coordinates + at: position of the object in world coordinates + up: vector specifying the up direction in the world coordinate frame. + + The inputs camera_position, at and up can each be a + - 3 element tuple/list + - torch tensor of shape (1, 3) + - torch tensor of shape (N, 3) + + The vectors are broadcast against each other so they all have shape (N, 3). 
+ + Returns: + R: (N, 3, 3) batched rotation matrices + """ + # Format input and broadcast + broadcasted_args = convert_to_tensors_and_broadcast( + camera_position, at, up, device=device + ) + camera_position, at, up = broadcasted_args + for t, n in zip([camera_position, at, up], ["camera_position", "at", "up"]): + if t.shape[-1] != 3: + msg = "Expected arg %s to have shape (N, 3); got %r" + raise ValueError(msg % (n, t.shape)) + z_axis = F.normalize(at - camera_position, eps=1e-5) + x_axis = F.normalize(torch.cross(up, z_axis, dim=1), eps=1e-5) + y_axis = F.normalize(torch.cross(z_axis, x_axis, dim=1), eps=1e-5) + is_close = torch.isclose(x_axis, torch.tensor(0.0), atol=5e-3).all( + dim=1, keepdim=True + ) + if is_close.any(): + replacement = F.normalize(torch.cross(y_axis, z_axis, dim=1), eps=1e-5) + x_axis = torch.where(is_close, replacement, x_axis) + R = torch.cat((x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), dim=1) + return R.transpose(1, 2) + + +def look_at_view_transform( + dist: _BatchFloatType = 1.0, + elev: _BatchFloatType = 0.0, + azim: _BatchFloatType = 0.0, + degrees: bool = True, + eye: Optional[Union[Sequence, torch.Tensor]] = None, + at=((0, 0, 0),), # (1, 3) + up=((0, 1, 0),), # (1, 3) + device: Device = "cpu", +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + This function returns a rotation and translation matrix + to apply the 'Look At' transformation from world -> view coordinates [0]. + + Args: + dist: distance of the camera from the object + elev: angle in degrees or radians. This is the angle between the + vector from the object to the camera, and the horizontal plane y = 0 (xz-plane). + azim: angle in degrees or radians. The vector from the object to + the camera is projected onto a horizontal plane y = 0. + azim is the angle between the projected vector and a + reference vector at (0, 0, 1) on the reference plane (the horizontal plane). + dist, elev and azim can be of shape (1), (N). 
+ degrees: boolean flag to indicate if the elevation and azimuth + angles are specified in degrees or radians. + eye: the position of the camera(s) in world coordinates. If eye is not + None, it will override the camera position derived from dist, elev, azim. + up: the direction of the x axis in the world coordinate system. + at: the position of the object(s) in world coordinates. + eye, up and at can be of shape (1, 3) or (N, 3). + + Returns: + 2-element tuple containing + + - **R**: the rotation to apply to the points to align with the camera. + - **T**: the translation to apply to the points to align with the camera. + + References: + [0] https://www.scratchapixel.com + """ + + if eye is not None: + broadcasted_args = convert_to_tensors_and_broadcast(eye, at, up, device=device) + eye, at, up = broadcasted_args + C = eye + else: + broadcasted_args = convert_to_tensors_and_broadcast( + dist, elev, azim, at, up, device=device + ) + dist, elev, azim, at, up = broadcasted_args + C = ( + camera_position_from_spherical_angles( + dist, elev, azim, degrees=degrees, device=device + ) + + at + ) + + R = look_at_rotation(C, at, up, device=device) + T = -torch.bmm(R.transpose(1, 2), C[:, :, None])[:, :, 0] + return R, T + + +def get_ndc_to_screen_transform( + cameras, + with_xyflip: bool = False, + image_size: Optional[Union[List, Tuple, torch.Tensor]] = None, +) -> Transform3d: + """ + PyTorch3D NDC to screen conversion. + Conversion from PyTorch3D's NDC space (+X left, +Y up) to screen/image space + (+X right, +Y down, origin top left). + + Args: + cameras + with_xyflip: flips x- and y-axis if set to True. + Optional kwargs: + image_size: ((height, width),) specifying the height, width + of the image. If not provided, it reads it from cameras. 
+ + We represent the NDC to screen conversion as a Transform3d + with projection matrix + + K = [ + [s, 0, 0, cx], + [0, s, 0, cy], + [0, 0, 1, 0], + [0, 0, 0, 1], + ] + + """ + # We require the image size, which is necessary for the transform + if image_size is None: + msg = "For NDC to screen conversion, image_size=(height, width) needs to be specified." + raise ValueError(msg) + + K = torch.zeros((cameras._N, 4, 4), device=cameras.device, dtype=torch.float32) + if not torch.is_tensor(image_size): + image_size = torch.tensor(image_size, device=cameras.device) + image_size = image_size.view(-1, 2) # of shape (1 or B)x2 + height, width = image_size.unbind(1) + + # For non square images, we scale the points such that smallest side + # has range [-1, 1] and the largest side has range [-u, u], with u > 1. + # This convention is consistent with the PyTorch3D renderer + scale = (image_size.min(dim=1).values - 0.0) / 2.0 + + K[:, 0, 0] = scale + K[:, 1, 1] = scale + K[:, 0, 3] = -1.0 * (width - 0.0) / 2.0 + K[:, 1, 3] = -1.0 * (height - 0.0) / 2.0 + K[:, 2, 2] = 1.0 + K[:, 3, 3] = 1.0 + + # Transpose the projection matrix as PyTorch3D transforms use row vectors. + transform = Transform3d( + matrix=K.transpose(1, 2).contiguous(), device=cameras.device + ) + + if with_xyflip: + # flip x, y axis + xyflip = torch.eye(4, device=cameras.device, dtype=torch.float32) + xyflip[0, 0] = -1.0 + xyflip[1, 1] = -1.0 + xyflip = xyflip.view(1, 4, 4).expand(cameras._N, -1, -1) + xyflip_transform = Transform3d( + matrix=xyflip.transpose(1, 2).contiguous(), device=cameras.device + ) + transform = transform.compose(xyflip_transform) + return transform + + +def get_screen_to_ndc_transform( + cameras, + with_xyflip: bool = False, + image_size: Optional[Union[List, Tuple, torch.Tensor]] = None, +) -> Transform3d: + """ + Screen to PyTorch3D NDC conversion. + Conversion from screen/image space (+X right, +Y down, origin top left) + to PyTorch3D's NDC space (+X left, +Y up). 
+ + Args: + cameras + with_xyflip: flips x- and y-axis if set to True. + Optional kwargs: + image_size: ((height, width),) specifying the height, width + of the image. If not provided, it reads it from cameras. + + We represent the screen to NDC conversion as a Transform3d + with projection matrix + + K = [ + [1/s, 0, 0, cx/s], + [ 0, 1/s, 0, cy/s], + [ 0, 0, 1, 0], + [ 0, 0, 0, 1], + ] + + """ + transform = get_ndc_to_screen_transform( + cameras, + with_xyflip=with_xyflip, + image_size=image_size, + ).inverse() + return transform + + +def try_get_projection_transform( + cameras: CamerasBase, cameras_kwargs: Dict[str, Any] +) -> Optional[Transform3d]: + """ + Try block to get projection transform from cameras and cameras_kwargs. + + Args: + cameras: cameras instance, can be linear cameras or nonliear cameras + cameras_kwargs: camera parameters to be passed to cameras + + Returns: + If the camera implemented projection_transform, return the + projection transform; Otherwise, return None + """ + + transform = None + try: + transform = cameras.get_projection_transform(**cameras_kwargs) + except NotImplementedError: + pass + return transform diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/compositing.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/compositing.py new file mode 100644 index 0000000000000000000000000000000000000000..507f3f90755309c8f98eadee6231c7e16223f37f --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/compositing.py @@ -0,0 +1,244 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + +import torch +from pytorch3d import _C + + +# Example functions for blending the top K features per pixel using the outputs +# from rasterization. +# NOTE: All blending function should return a (N, H, W, C) tensor per batch element. +# This can be an image (C=3) or a set of features. + + +class _CompositeAlphaPoints(torch.autograd.Function): + """ + Composite features within a z-buffer using alpha compositing. Given a z-buffer + with corresponding features and weights, these values are accumulated according + to their weights such that features nearer in depth contribute more to the final + feature than ones further away. + + Concretely this means: + weighted_fs[b,c,i,j] = sum_k cum_alpha_k * features[c,pointsidx[b,k,i,j]] + cum_alpha_k = alphas[b,k,i,j] * prod_l=0..k-1 (1 - alphas[b,l,i,j]) + + Args: + features: Packed Tensor of shape (C, P) giving the features of each point. + alphas: float32 Tensor of shape (N, points_per_pixel, image_size, + image_size) giving the weight of each point in the z-buffer. + Values should be in the interval [0, 1]. + pointsidx: int32 Tensor of shape (N, points_per_pixel, image_size, image_size) + giving the indices of the nearest points at each pixel, sorted in z-order. + Concretely pointsidx[n, k, y, x] = p means that features[:, p] is the + feature of the kth closest point (along the z-direction) to pixel (y, x) in + batch element n. This is weighted by alphas[n, k, y, x]. + + Returns: + weighted_fs: Tensor of shape (N, C, image_size, image_size) + giving the accumulated features at each point. 
+ """ + + @staticmethod + def forward(ctx, features, alphas, points_idx): + pt_cld = _C.accum_alphacomposite(features, alphas, points_idx) + + ctx.save_for_backward(features.clone(), alphas.clone(), points_idx.clone()) + return pt_cld + + @staticmethod + def backward(ctx, grad_output): + grad_features = None + grad_alphas = None + grad_points_idx = None + features, alphas, points_idx = ctx.saved_tensors + + grad_features, grad_alphas = _C.accum_alphacomposite_backward( + grad_output, features, alphas, points_idx + ) + + return grad_features, grad_alphas, grad_points_idx, None + + +def alpha_composite(pointsidx, alphas, pt_clds) -> torch.Tensor: + """ + Composite features within a z-buffer using alpha compositing. Given a z-buffer + with corresponding features and weights, these values are accumulated according + to their weights such that features nearer in depth contribute more to the final + feature than ones further away. + + Concretely this means: + weighted_fs[b,c,i,j] = sum_k cum_alpha_k * features[c,pointsidx[b,k,i,j]] + cum_alpha_k = alphas[b,k,i,j] * prod_l=0..k-1 (1 - alphas[b,l,i,j]) + + + Args: + pt_clds: Tensor of shape (N, C, P) giving the features of each point (can use + RGB for example). + alphas: float32 Tensor of shape (N, points_per_pixel, image_size, + image_size) giving the weight of each point in the z-buffer. + Values should be in the interval [0, 1]. + pointsidx: int32 Tensor of shape (N, points_per_pixel, image_size, image_size) + giving the indices of the nearest points at each pixel, sorted in z-order. + Concretely pointsidx[n, k, y, x] = p means that features[n, :, p] is the + feature of the kth closest point (along the z-direction) to pixel (y, x) in + batch element n. This is weighted by alphas[n, k, y, x]. + + Returns: + Combined features: Tensor of shape (N, C, image_size, image_size) + giving the accumulated features at each point. 
+ """ + return _CompositeAlphaPoints.apply(pt_clds, alphas, pointsidx) + + +class _CompositeNormWeightedSumPoints(torch.autograd.Function): + """ + Composite features within a z-buffer using normalized weighted sum. Given a z-buffer + with corresponding features and weights, these values are accumulated + according to their weights such that depth is ignored; the weights are used to + perform a weighted sum. + + Concretely this means: + weighted_fs[b,c,i,j] = + sum_k alphas[b,k,i,j] * features[c,pointsidx[b,k,i,j]] / sum_k alphas[b,k,i,j] + + Args: + features: Packed Tensor of shape (C, P) giving the features of each point. + alphas: float32 Tensor of shape (N, points_per_pixel, image_size, + image_size) giving the weight of each point in the z-buffer. + Values should be in the interval [0, 1]. + pointsidx: int32 Tensor of shape (N, points_per_pixel, image_size, image_size) + giving the indices of the nearest points at each pixel, sorted in z-order. + Concretely pointsidx[n, k, y, x] = p means that features[:, p] is the + feature of the kth closest point (along the z-direction) to pixel (y, x) in + batch element n. This is weighted by alphas[n, k, y, x]. + + Returns: + weighted_fs: Tensor of shape (N, C, image_size, image_size) + giving the accumulated features at each point. 
+ """ + + @staticmethod + def forward(ctx, features, alphas, points_idx): + pt_cld = _C.accum_weightedsumnorm(features, alphas, points_idx) + + ctx.save_for_backward(features.clone(), alphas.clone(), points_idx.clone()) + return pt_cld + + @staticmethod + def backward(ctx, grad_output): + grad_features = None + grad_alphas = None + grad_points_idx = None + features, alphas, points_idx = ctx.saved_tensors + + grad_features, grad_alphas = _C.accum_weightedsumnorm_backward( + grad_output, features, alphas, points_idx + ) + + return grad_features, grad_alphas, grad_points_idx, None + + +def norm_weighted_sum(pointsidx, alphas, pt_clds) -> torch.Tensor: + """ + Composite features within a z-buffer using normalized weighted sum. Given a z-buffer + with corresponding features and weights, these values are accumulated + according to their weights such that depth is ignored; the weights are used to + perform a weighted sum. + + Concretely this means: + weighted_fs[b,c,i,j] = + sum_k alphas[b,k,i,j] * features[c,pointsidx[b,k,i,j]] / sum_k alphas[b,k,i,j] + + Args: + pt_clds: Packed feature tensor of shape (C, P) giving the features of each point + (can use RGB for example). + alphas: float32 Tensor of shape (N, points_per_pixel, image_size, + image_size) giving the weight of each point in the z-buffer. + Values should be in the interval [0, 1]. + pointsidx: int32 Tensor of shape (N, points_per_pixel, image_size, image_size) + giving the indices of the nearest points at each pixel, sorted in z-order. + Concretely pointsidx[n, k, y, x] = p means that features[:, p] is the + feature of the kth closest point (along the z-direction) to pixel (y, x) in + batch element n. This is weighted by alphas[n, k, y, x]. + + Returns: + Combined features: Tensor of shape (N, C, image_size, image_size) + giving the accumulated features at each point. 
+ """ + return _CompositeNormWeightedSumPoints.apply(pt_clds, alphas, pointsidx) + + +class _CompositeWeightedSumPoints(torch.autograd.Function): + """ + Composite features within a z-buffer using normalized weighted sum. Given a z-buffer + with corresponding features and weights, these values are accumulated + according to their weights such that depth is ignored; the weights are used to + perform a weighted sum. As opposed to norm weighted sum, the weights are not + normalized to sum to 1. + + Concretely this means: + weighted_fs[b,c,i,j] = sum_k alphas[b,k,i,j] * features[c,pointsidx[b,k,i,j]] + + Args: + features: Packed Tensor of shape (C, P) giving the features of each point. + alphas: float32 Tensor of shape (N, points_per_pixel, image_size, + image_size) giving the weight of each point in the z-buffer. + Values should be in the interval [0, 1]. + pointsidx: int32 Tensor of shape (N, points_per_pixel, image_size, image_size) + giving the indices of the nearest points at each pixel, sorted in z-order. + Concretely pointsidx[n, k, y, x] = p means that features[:, p] is the + feature of the kth closest point (along the z-direction) to pixel (y, x) in + batch element n. This is weighted by alphas[n, k, y, x]. + + Returns: + weighted_fs: Tensor of shape (N, C, image_size, image_size) + giving the accumulated features at each point. 
+ """ + + @staticmethod + def forward(ctx, features, alphas, points_idx): + pt_cld = _C.accum_weightedsum(features, alphas, points_idx) + + ctx.save_for_backward(features.clone(), alphas.clone(), points_idx.clone()) + return pt_cld + + @staticmethod + def backward(ctx, grad_output): + grad_features = None + grad_alphas = None + grad_points_idx = None + features, alphas, points_idx = ctx.saved_tensors + + grad_features, grad_alphas = _C.accum_weightedsum_backward( + grad_output, features, alphas, points_idx + ) + + return grad_features, grad_alphas, grad_points_idx, None + + +def weighted_sum(pointsidx, alphas, pt_clds) -> torch.Tensor: + """ + Composite features within a z-buffer using normalized weighted sum. + + Args: + pt_clds: Packed Tensor of shape (C, P) giving the features of each point + (can use RGB for example). + alphas: float32 Tensor of shape (N, points_per_pixel, image_size, + image_size) giving the weight of each point in the z-buffer. + Values should be in the interval [0, 1]. + pointsidx: int32 Tensor of shape (N, points_per_pixel, image_size, image_size) + giving the indices of the nearest points at each pixel, sorted in z-order. + Concretely pointsidx[n, k, y, x] = p means that features[:, p] is the + feature of the kth closest point (along the z-direction) to pixel (y, x) in + batch element n. This is weighted by alphas[n, k, y, x]. + + Returns: + Combined features: Tensor of shape (N, C, image_size, image_size) + giving the accumulated features at each point. 
+ """ + return _CompositeWeightedSumPoints.apply(pt_clds, alphas, pointsidx) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/fisheyecameras.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/fisheyecameras.py new file mode 100644 index 0000000000000000000000000000000000000000..5b464a0be54317c259d658b695f4adacf4fdad21 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/fisheyecameras.py @@ -0,0 +1,585 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +import math +from typing import List, Optional, Tuple, Union + +import torch +from pytorch3d.common.datatypes import Device +from pytorch3d.renderer.cameras import _R, _T, CamerasBase + +_focal_length = torch.tensor(((1.0,),)) +_principal_point = torch.tensor(((0.0, 0.0),)) +_radial_params = torch.tensor(((0.0, 0.0, 0.0, 0.0, 0.0, 0.0),)) +_tangential_params = torch.tensor(((0.0, 0.0),)) +_thin_prism_params = torch.tensor(((0.0, 0.0, 0.0, 0.0),)) + + +class FishEyeCameras(CamerasBase): + """ + A class which extends Pinhole camera by considering radial, tangential and + thin-prism distortion. For the fisheye camera model, k1, k2, ..., k_n_radial are + polynomial coefficents to model radial distortions. Two common types of radial + distortions are barrel and pincusion radial distortions. + + a = x / z, b = y / z, r = (a*a+b*b)^(1/2) + th = atan(r) + [x_r] = (th+ k0 * th^3 + k1* th^5 + ...) [a/r] + [y_r] [b/r] [1] + + + The tangential distortion parameters are p1 and p2. The primary cause is + due to the lens assembly not being centered over and parallel to the image plane. 
+ tangentialDistortion = [(2 x_r^2 + rd^2)*p_0 + 2*x_r*y_r*p_1] + [(2 y_r^2 + rd^2)*p_1 + 2*x_r*y_r*p_0] [2] + where rd^2 = x_r^2 + y_r^2 + + The thin-prism distortion is modeled with s1, s2, s3, s4 coefficients + thinPrismDistortion = [s0 * rd^2 + s1 rd^4] + [s2 * rd^2 + s3 rd^4] [3] + + The projection + proj = diag(f, f) * uvDistorted + [cu; cv] + uvDistorted = [x_r] + tangentialDistortion + thinPrismDistortion [4] + [y_r] + f is the focal length and cu, cv are principal points in x, y axis. + + """ + + _FIELDS = ( + "focal_length", + "principal_point", + "R", + "T", + "radial_params", + "tangential_params", + "thin_prism_params", + "world_coordinates", + "use_radial", + "use_tangential", + "use_tin_prism", + "device", + "image_size", + ) + + def __init__( + self, + focal_length=_focal_length, + principal_point=_principal_point, + radial_params=_radial_params, + tangential_params=_tangential_params, + thin_prism_params=_thin_prism_params, + R: torch.Tensor = _R, + T: torch.Tensor = _T, + world_coordinates: bool = False, + use_radial: bool = True, + use_tangential: bool = True, + use_thin_prism: bool = True, + device: Device = "cpu", + image_size: Optional[Union[List, Tuple, torch.Tensor]] = None, + ) -> None: + """ + + Args: + focal_ength: Focal length of the camera in world units. + A tensor of shape (N, 1) for square pixels, + where N is number of transforms. + principal_point: xy coordinates of the center of + the principal point of the camera in pixels. + A tensor of shape (N, 2). + radial_params: parameters for radial distortions. + A tensor of shape (N, num_radial). + tangential_params:parameters for tangential distortions. + A tensor of shape (N, 2). + thin_prism_params: parameters for thin-prism distortions. + A tensor of shape (N, 4). 
+ R: Rotation matrix of shape (N, 3, 3) + T: Translation matrix of shape (N, 3) + world_coordinates: if True, project from world coordinates; otherwise from camera + coordinates + use_radial: radial_distortion, default to True + use_tangential: tangential distortion, default to True + use_thin_prism: thin prism distortion, default to True + device: torch.device or string + image_size: (height, width) of image size. + A tensor of shape (N, 2) or a list/tuple. Required for screen cameras. + + """ + + kwargs = {"image_size": image_size} if image_size is not None else {} + super().__init__( + device=device, + R=R, + T=T, + **kwargs, # pyre-ignore + ) + if image_size is not None: + if (self.image_size < 1).any(): # pyre-ignore + raise ValueError("Image_size provided has invalid values") + else: + self.image_size = None + + self.device = device + self.focal = focal_length.to(self.device) + self.principal_point = principal_point.to(self.device) + self.radial_params = radial_params.to(self.device) + self.tangential_params = tangential_params.to(self.device) + self.thin_prism_params = thin_prism_params.to(self.device) + self.R = R + self.T = T + self.world_coordinates = world_coordinates + self.use_radial = use_radial + self.use_tangential = use_tangential + self.use_thin_prism = use_thin_prism + self.epsilon = 1e-10 + self.num_distortion_iters = 50 + + self.R = self.R.to(self.device) + self.T = self.T.to(self.device) + self.num_radial = radial_params.shape[-1] + + def _project_points_batch( + self, + focal, + principal_point, + radial_params, + tangential_params, + thin_prism_params, + points, + ) -> torch.Tensor: + """ + Takes in points in the local reference frame of the camera and projects it + onto the image plan. Since this is a symmetric model, points with negative z are + projected to the positive sphere. 
def _project_points_batch(
    self,
    focal,
    principal_point,
    radial_params,
    tangential_params,
    thin_prism_params,
    points,
) -> torch.Tensor:
    """
    Takes points in the local reference frame of the camera and projects them
    onto the image plane. Since this is a symmetric model, points with negative
    z are projected to the positive sphere, i.e.
    project(1, 1, -1) == project(-1, -1, 1).

    Args:
        focal: (1)
        principal_point: (2)
        radial_params: (num_radial)
        tangential_params: (2)
        thin_prism_params: (4)
        points: points in the camera coordinate frame, (..., 3). E.g. (P, 3),
            (1, P, 3) or (M, P, 3) where P is the number of points.

    Returns:
        projected_points in the image plane: (..., 3). E.g., (P, 3) or
        (1, P, 3) or (M, P, 3), with a homogeneous 1 appended last.
    """
    assert points.shape[-1] == 3, "points shape incorrect"
    # Perspective divide; symmetric in the sign of z.
    ab = points[..., :2] / points[..., 2:]
    uv_distorted = ab

    r = ab.norm(dim=-1)
    th = r.atan()
    theta_sq = th * th

    # Radial distortion polynomial in theta^2, eq 1.
    # NOTE(review): six powers are hard-coded, so this assumes
    # num_radial == 6 — confirm against the default parameters.
    t = theta_sq
    theta_pow = torch.stack([t, t**2, t**3, t**4, t**5, t**6], dim=-1)
    th_radial = 1 + torch.sum(theta_pow * radial_params, dim=-1)

    # th / r with the correct limit value 1 as r -> 0.
    th_divr = th / r
    th_divr[abs(r) < self.epsilon] = 1.0

    # Radially distorted coordinates (before focal length / principal point).
    coeff = th_radial * th_divr
    xr_yr = coeff[..., None] * ab
    xr_yr_squared_norm = torch.pow(xr_yr, 2).sum(dim=-1, keepdim=True)

    if self.use_radial:
        uv_distorted = xr_yr

    # Tangential distortion, eq 2.
    if self.use_tangential:
        temp = 2 * torch.sum(
            xr_yr * tangential_params,
            dim=-1,
        )
        uv_distorted = uv_distorted + (
            temp[..., None] * xr_yr + xr_yr_squared_norm * tangential_params
        )

    # Thin-prism distortion, eq 3.
    sh = uv_distorted.shape[:-1]
    if self.use_thin_prism:
        radial_powers = torch.cat(
            [xr_yr_squared_norm, xr_yr_squared_norm * xr_yr_squared_norm], dim=-1
        )
        # FIX: the original added these terms with in-place indexed assignment
        # on `uv_distorted`, which aliases `xr_yr` whenever use_radial is True
        # and use_tangential is False, silently mutating `xr_yr`. Build the
        # correction out of place instead; the numeric result is identical.
        prism = torch.stack(
            [
                torch.sum(thin_prism_params[..., 0:2] * radial_powers, dim=-1),
                torch.sum(thin_prism_params[..., 2:4] * radial_powers, dim=-1),
            ],
            dim=-1,
        )
        uv_distorted = uv_distorted + prism

    # Focal length and principal point, eq 4; append homogeneous coordinate.
    projected_points = focal * uv_distorted + principal_point
    return torch.cat(
        [projected_points, torch.ones(list(sh) + [1], device=self.device)], dim=-1
    )

def check_input(self, points: torch.Tensor, batch_size: int):
    """
    Check if the shapes are broadcastable between points and transforms.
    Accepts points of shape (P, 3), (1, P, 3) or (M, P, 3). The batch_size
    for transforms must be 1 when points take (M, P, 3) with M > 1; it can
    be 1 or N when points take shape (P, 3).

    Args:
        points: tensor of shape (P, 3) or (1, P, 3) or (M, P, 3)
        batch_size: number of transforms

    Returns:
        True if the input shapes are compatible, else False.
    """
    if points.ndim > 3:
        return False
    if points.ndim == 3:
        M, P, K = points.shape
        if K != 3:
            return False
        if M > 1 and batch_size > 1:
            return False
    return True
def transform_points(
    self, points, eps: Optional[float] = None, **kwargs
) -> torch.Tensor:
    """
    Transform input points from camera space to image space.

    Args:
        points: tensor of (..., 3). E.g., (P, 3) or (1, P, 3), (M, P, 3)
        eps: tiny number to avoid zero division

    Returns:
        torch.Tensor
        when points take shape (P, 3) or (1, P, 3), output is (N, P, 3)
        when points take shape (M, P, 3), output is (M, P, 3)
        where N is the number of transforms, P number of points
    """
    # Optionally move from world space to camera space first.
    if self.world_coordinates:
        world_to_view_transform = self.get_world_to_view_transform(
            R=self.R, T=self.T
        )
        points = world_to_view_transform.transform_points(
            points.to(self.device), eps=eps
        )
    else:
        points = points.to(self.device)

    # Project from camera space to image space.
    N = len(self.radial_params)
    if not self.check_input(points, N):
        # FIX: the original built this message with a backslash-continued
        # string literal, which embedded a long run of source indentation
        # into the raised text; use implicit concatenation instead.
        msg = (
            "Expected points of (P, 3) with batch_size 1 or N, or shape "
            "(M, P, 3) with batch_size 1; got points of shape %r and "
            "batch_size %r"
        )
        raise ValueError(msg % (points.shape, N))

    if N == 1:
        return self._project_points_batch(
            self.focal[0],
            self.principal_point[0],
            self.radial_params[0],
            self.tangential_params[0],
            self.thin_prism_params[0],
            points,
        )
    outputs = torch.stack(
        [
            self._project_points_batch(
                self.focal[i],
                self.principal_point[i],
                self.radial_params[i],
                self.tangential_params[i],
                self.thin_prism_params[i],
                points,
            )
            for i in range(N)
        ],
        dim=0,
    )
    # NOTE(review): squeeze() drops *every* singleton dim, so a (N, 1, P, 3)
    # stack collapses to (N, P, 3) as documented, but P == 1 would also be
    # squeezed; kept as-is to preserve existing behavior.
    return outputs.squeeze()

def _unproject_points_batch(
    self,
    focal,
    principal_point,
    radial_params,
    tangential_params,
    thin_prism_params,
    xy: torch.Tensor,
) -> torch.Tensor:
    """
    Invert the distortion model for a single transform.

    Args:
        focal: (1)
        principal_point: (2)
        radial_params: (num_radial)
        tangential_params: (2)
        thin_prism_params: (4)
        xy: (..., 2)

    Returns:
        point3d_est: (..., 3), camera-frame points with z = 1
    """
    sh = list(xy.shape[:-1])
    assert xy.shape[-1] == 2, "xy_depth shape incorrect"
    # Undo focal length and principal point.
    uv_distorted = (xy - principal_point) / focal

    # Undo tangential/thin-prism terms to recover [x_r; y_r].
    xr_yr = self._compute_xr_yr_from_uv_distorted(
        tangential_params, thin_prism_params, uv_distorted
    )
    xr_yr_norm = torch.norm(xr_yr, dim=-1)

    # Invert the radial polynomial to find theta.
    theta = self._get_theta_from_norm_xr_yr(radial_params, xr_yr_norm)
    # NOTE(review): xr_yr_norm can be zero at the principal point, which
    # makes this division produce nan — confirm callers never hit it.
    point3d_est = theta.new_ones(*sh, 3)
    point3d_est[..., :2] = theta.tan()[..., None] / xr_yr_norm[..., None] * xr_yr
    return point3d_est

def unproject_points(
    self,
    xy_depth: torch.Tensor,
    world_coordinates: bool = True,
    scaled_depth_input: bool = False,
    **kwargs,
) -> torch.Tensor:
    """
    Takes 3-point ``xy_depth`` in the image plane of the camera and unprojects
    it into the reference frame of the camera. Inverse of ``transform_points``;
    in particular it holds that

        X = unproject(project(X))
        x = project(unproject(s*x))

    Args:
        xy_depth: points in the image plane of shape (..., 3). E.g.,
            (P, 3) or (1, P, 3) or (M, P, 3)
        world_coordinates: if the output is in world_coordinate, if False,
            convert to camera coordinate
        scaled_depth_input: False
            # NOTE(review): neither flag is consulted in this body — the
            # result is always the camera-frame point with z = 1; confirm
            # against the base-class contract.

    Returns:
        unprojected_points in the camera frame with z = 1
        when points take shape (P, 3) or (1, P, 3), output is (N, P, 3)
        when points take shape (M, P, 3), output is (M, P, 3)
        where N is the number of transforms, P number of points
    """
    xy_depth = xy_depth.to(self.device)
    N = len(self.radial_params)
    if N == 1:
        return self._unproject_points_batch(
            self.focal[0],
            self.principal_point[0],
            self.radial_params[0],
            self.tangential_params[0],
            self.thin_prism_params[0],
            xy_depth[..., 0:2],
        )
    outputs = torch.stack(
        [
            self._unproject_points_batch(
                self.focal[i],
                self.principal_point[i],
                self.radial_params[i],
                self.tangential_params[i],
                self.thin_prism_params[i],
                xy_depth[..., 0:2],
            )
            for i in range(N)
        ],
        dim=0,
    )
    # NOTE(review): same squeeze() caveat as transform_points.
    return outputs.squeeze()
tangential_params[..., 0:2] + ) + + if self.use_thin_prism: + radial_powers = torch.cat( + [xr_yr_squared_norm, xr_yr_squared_norm * xr_yr_squared_norm], + dim=-1, + ) + uv_distorted_est[..., 0] = uv_distorted_est[..., 0] + torch.sum( + thin_prism_params[..., 0:2] * radial_powers, + dim=-1, + ) + uv_distorted_est[..., 1] = uv_distorted_est[..., 1] + torch.sum( + thin_prism_params[..., 2:4] * radial_powers, + dim=-1, + ) + + # compute the derivative of uvDistorted wrt xr_yr + duv_distorted_dxryr = self._compute_duv_distorted_dxryr( + tangential_params, thin_prism_params, xr_yr, xr_yr_squared_norm[..., 0] + ) + # compute correction: + # note: the matrix duvDistorted_dxryr will be close to identity (for reasonable + # values of tangential/thin prism distortions) + correction = torch.linalg.solve( + duv_distorted_dxryr, (uv_distorted - uv_distorted_est)[..., None] + ) + xr_yr = xr_yr + correction[..., 0] + return xr_yr + + def _get_theta_from_norm_xr_yr( + self, radial_params, th_radial_desired + ) -> torch.Tensor: + """ + Helper function to compute the angle theta from the norm of the vector [x_r; y_r] + + Args: + radial_params: k1, k2, ..., k_num_radial, (num_radial) + th_radial_desired: desired angle of shape (...), E.g., (P), (1, P), (M, P) + + Returns: + th: angle theta (in radians) of shape (...), E.g., (P), (1, P), (M, P) + """ + sh = list(th_radial_desired.shape) + th = th_radial_desired + c = torch.tensor( + [2.0 * i + 3 for i in range(self.num_radial)], device=self.device + ) + for _ in range(self.num_distortion_iters): + theta_sq = th * th + th_radial = 1.0 + dthD_dth = 1.0 + + # compute the theta polynomial and its derivative wrt theta + t = theta_sq + theta_pow = torch.stack([t, t**2, t**3, t**4, t**5, t**6], dim=-1) + th_radial = th_radial + torch.sum(theta_pow * radial_params, dim=-1) + + dthD_dth = dthD_dth + torch.sum(c * radial_params * theta_pow, dim=-1) + th_radial = th_radial * th + + # compute the correction + step = torch.zeros(*sh, 
device=self.device) + # make sure don't divide by zero + nonzero_mask = dthD_dth.abs() > self.epsilon + step = step + nonzero_mask * (th_radial_desired - th_radial) / dthD_dth + # if derivative is close to zero, apply small correction in the appropriate + # direction to avoid numerical explosions + close_to_zero_mask = dthD_dth.abs() <= self.epsilon + dir_mask = (th_radial_desired - th_radial) * dthD_dth > 0.0 + boolean_mask = close_to_zero_mask & dir_mask + step = step + 10.0 * self.epsilon * boolean_mask + step = step - 10 * self.epsilon * (~nonzero_mask & ~boolean_mask) + + # apply correction + th = th + step + # revert to within 180 degrees FOV to avoid numerical overflow + idw = th.abs() >= math.pi / 2.0 + th[idw] = 0.999 * math.pi / 2.0 + return th + + def _compute_duv_distorted_dxryr( + self, tangential_params, thin_prism_params, xr_yr, xr_yr_squareNorm + ) -> torch.Tensor: + """ + Helper function, computes the Jacobian of uvDistorted wrt the vector [x_r;y_r] + + Args: + tangential_params: (2) + thin_prism_params: (4) + xr_yr: (P, 2) + xr_yr_squareNorm: (...), E.g., (P), (1, P), (M, P) + + Returns: + duv_distorted_dxryr: (..., 2, 2) Jacobian + """ + sh = list(xr_yr.shape[:-1]) + duv_distorted_dxryr = torch.empty((*sh, 2, 2), device=self.device) + if self.use_tangential: + duv_distorted_dxryr[..., 0, 0] = ( + 1.0 + + 6.0 * xr_yr[..., 0] * tangential_params[..., 0] + + 2.0 * xr_yr[..., 1] * tangential_params[..., 1] + ) + offdiag = 2.0 * ( + xr_yr[..., 0] * tangential_params[..., 1] + + xr_yr[..., 1] * tangential_params[..., 0] + ) + duv_distorted_dxryr[..., 0, 1] = offdiag + duv_distorted_dxryr[..., 1, 0] = offdiag + duv_distorted_dxryr[..., 1, 1] = ( + 1.0 + + 6.0 * xr_yr[..., 1] * tangential_params[..., 1] + + 2.0 * xr_yr[..., 0] * tangential_params[..., 0] + ) + else: + duv_distorted_dxryr = torch.eye(2).repeat(*sh, 1, 1) + + if self.use_thin_prism: + temp1 = 2.0 * ( + thin_prism_params[..., 0] + + 2.0 * thin_prism_params[..., 1] * xr_yr_squareNorm[...] 
+ ) + duv_distorted_dxryr[..., 0, 0] = ( + duv_distorted_dxryr[..., 0, 0] + xr_yr[..., 0] * temp1 + ) + duv_distorted_dxryr[..., 0, 1] = ( + duv_distorted_dxryr[..., 0, 1] + xr_yr[..., 1] * temp1 + ) + + temp2 = 2.0 * ( + thin_prism_params[..., 2] + + 2.0 * thin_prism_params[..., 3] * xr_yr_squareNorm[...] + ) + duv_distorted_dxryr[..., 1, 0] = ( + duv_distorted_dxryr[..., 1, 0] + xr_yr[..., 0] * temp2 + ) + duv_distorted_dxryr[..., 1, 1] = ( + duv_distorted_dxryr[..., 1, 1] + xr_yr[..., 1] * temp2 + ) + return duv_distorted_dxryr + + def in_ndc(self): + return True + + def is_perspective(self): + return False diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/implicit/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/implicit/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d13affae2db0c661ab1073e50945dd95db5eeba8 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/implicit/__init__.py @@ -0,0 +1,27 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
class HarmonicEmbedding(torch.nn.Module):
    def __init__(
        self,
        n_harmonic_functions: int = 6,
        omega_0: float = 1.0,
        logspace: bool = True,
        append_input: bool = True,
    ) -> None:
        """
        The harmonic embedding layer supports the classical
        Nerf positional encoding described in
        `NeRF <https://arxiv.org/abs/2003.08934>`_
        and the integrated position encoding in
        `MIP-NeRF <https://arxiv.org/abs/2103.13415>`_.

        During the inference you can provide the extra argument `diag_cov`.

        If `diag_cov is None`, it converts each feature
        (i.e. vector along the last dimension) in `x`
        into a series of harmonic features `embedding`,
        where for each i in range(dim) the following are present
        in embedding[...]::

            [
                sin(f_1*x[..., i]),
                sin(f_2*x[..., i]),
                ...
                sin(f_N * x[..., i]),
                cos(f_1*x[..., i]),
                cos(f_2*x[..., i]),
                ...
                cos(f_N * x[..., i]),
                x[..., i],              # only present if append_input is True.
            ]

        where N corresponds to `n_harmonic_functions-1`, and f_i is a scalar
        denoting the i-th frequency of the harmonic embedding.

        If `diag_cov is not None`, it approximates
        conical frustums following a ray bundle as gaussians,
        defined by x, the means of the gaussians and diag_cov,
        the diagonal covariances.
        Then it converts each gaussian
        into a series of harmonic features `embedding`,
        where for each i in range(dim) the following are present
        in embedding[...]::

            [
                sin(f_1*x[..., i]) * exp(-0.5 * f_1**2 * diag_cov[..., i,]),
                sin(f_2*x[..., i]) * exp(-0.5 * f_2**2 * diag_cov[..., i,]),
                ...
                sin(f_N * x[..., i]) * exp(-0.5 * f_N**2 * diag_cov[..., i,]),
                cos(f_1*x[..., i]) * exp(-0.5 * f_1**2 * diag_cov[..., i,]),
                cos(f_2*x[..., i]) * exp(-0.5 * f_2**2 * diag_cov[..., i,]),
                ...
                cos(f_N * x[..., i]) * exp(-0.5 * f_N**2 * diag_cov[..., i,]),
                x[..., i],              # only present if append_input is True.
            ]

        (doc fix: the attenuation factor is exp(-0.5 * f**2 * diag_cov) —
        the code below computes exp(-0.5 * x_var); the previous docstring
        stated a positive exponent.)

        where N equals `n_harmonic_functions-1`, and f_i is a scalar
        denoting the i-th frequency of the harmonic embedding.

        If `logspace==True`, the frequencies `[f_1, ..., f_N]` are
        powers of 2:
        `f_1, ..., f_N = 2**torch.arange(n_harmonic_functions)`

        If `logspace==False`, frequencies are linearly spaced between
        `1.0` and `2**(n_harmonic_functions-1)`:
        `f_1, ..., f_N = torch.linspace(
            1.0, 2**(n_harmonic_functions-1), n_harmonic_functions
        )`

        Note that `x` is also premultiplied by the base frequency `omega_0`
        before evaluating the harmonic functions.

        Args:
            n_harmonic_functions: int, number of harmonic
                features
            omega_0: float, base frequency
            logspace: bool, Whether to space the frequencies in
                logspace or linear space
            append_input: bool, whether to concat the original
                input to the harmonic embedding. If true the
                output is of the form (embed.sin(), embed.cos(), x)
        """
        super().__init__()

        if logspace:
            frequencies = 2.0 ** torch.arange(
                n_harmonic_functions,
                dtype=torch.float32,
            )
        else:
            frequencies = torch.linspace(
                1.0,
                2.0 ** (n_harmonic_functions - 1),
                n_harmonic_functions,
                dtype=torch.float32,
            )

        self.register_buffer("_frequencies", frequencies * omega_0, persistent=False)
        self.register_buffer(
            "_zero_half_pi", torch.tensor([0.0, 0.5 * torch.pi]), persistent=False
        )
        self.append_input = append_input

    def forward(
        self, x: torch.Tensor, diag_cov: Optional[torch.Tensor] = None, **kwargs
    ) -> torch.Tensor:
        """
        Args:
            x: tensor of shape [..., dim]
            diag_cov: An optional tensor of shape `(..., dim)`
                representing the diagonal covariance matrices of our Gaussians,
                joined with x as means of the Gaussians.

        Returns:
            embedding: a harmonic embedding of `x` of shape
            [..., (n_harmonic_functions * 2 + int(append_input)) * dim]
        """
        # [..., dim, n_harmonic_functions]
        embed = x[..., None] * self._frequencies
        # [..., 1, dim, n_harmonic_functions] + [2, 1, 1]
        #   => [..., 2, dim, n_harmonic_functions]
        embed = embed[..., None, :, :] + self._zero_half_pi[..., None, None]
        # Use the trig identity cos(x) = sin(x + pi/2)
        # and do one vectorized call to sin([x, x+pi/2]) instead of
        # (sin(x), cos(x)).
        embed = embed.sin()
        if diag_cov is not None:
            # Integrated positional encoding: attenuate each harmonic by
            # exp(-0.5 * f**2 * diag_cov).
            x_var = diag_cov[..., None] * torch.pow(self._frequencies, 2)
            exp_var = torch.exp(-0.5 * x_var)
            # [..., 2, dim, n_harmonic_functions]
            embed = embed * exp_var[..., None, :, :]

        embed = embed.reshape(*x.shape[:-1], -1)

        if self.append_input:
            return torch.cat([embed, x], dim=-1)
        return embed

    @staticmethod
    def get_output_dim_static(
        input_dims: int,
        n_harmonic_functions: int,
        append_input: bool,
    ) -> int:
        """
        Utility to help predict the shape of the output of `forward`.

        Args:
            input_dims: length of the last dimension of the input tensor
            n_harmonic_functions: number of embedding frequencies
            append_input: whether or not to concat the original
                input to the harmonic embedding
        Returns:
            int: the length of the last dimension of the output tensor
        """
        return input_dims * (2 * n_harmonic_functions + int(append_input))

    def get_output_dim(self, input_dims: int = 3) -> int:
        """
        Same as above. The default for input_dims is 3 for 3D applications
        which use harmonic embedding for positional encoding,
        so the input might be xyz.
        """
        return self.get_output_dim_static(
            input_dims, len(self._frequencies), self.append_input
        )
# pyre-unsafe

import warnings
from typing import Optional, Tuple, Union

import torch
from pytorch3d.common.compat import meshgrid_ij
from pytorch3d.ops import padded_to_packed
from pytorch3d.renderer.cameras import CamerasBase
from pytorch3d.renderer.implicit.utils import HeterogeneousRayBundle, RayBundle
from torch.nn import functional as F


"""
This file defines three raysampling techniques:
    - MultinomialRaysampler which can be used to sample rays from pixels of an image grid
    - NDCMultinomialRaysampler which can be used to sample rays from pixels of an image grid,
      which follows the pytorch3d convention for image grid coordinates
    - MonteCarloRaysampler which randomly selects real-valued locations in the image plane
      and emits rays from them
"""


class MultinomialRaysampler(torch.nn.Module):
    """
    Emits rays from the pixel centers of a rectangular image grid and samples
    a fixed number of points along each ray, with uniformly-spaced
    z-coordinates between a minimum and maximum depth.

    Each grid entry `[x, y, depth]` is unprojected with
    `cameras.unproject_points([x, y, depth])`, where `cameras` is the input to
    `forward`. The implementation supports any image-grid coordinate
    convention; `NDCMultinomialRaysampler` specializes it to the PyTorch3D
    convention.

    Attributes:
        min_x: The leftmost x-coordinate of each ray's source pixel's center.
        max_x: The rightmost x-coordinate of each ray's source pixel's center.
        min_y: The topmost y-coordinate of each ray's source pixel's center.
        max_y: The bottommost y-coordinate of each ray's source pixel's center.
    """

    def __init__(
        self,
        *,
        min_x: float,
        max_x: float,
        min_y: float,
        max_y: float,
        image_width: int,
        image_height: int,
        n_pts_per_ray: int,
        min_depth: float,
        max_depth: float,
        n_rays_per_image: Optional[int] = None,
        n_rays_total: Optional[int] = None,
        unit_directions: bool = False,
        stratified_sampling: bool = False,
    ) -> None:
        """
        Args:
            min_x: The leftmost x-coordinate of each ray's source pixel's center.
            max_x: The rightmost x-coordinate of each ray's source pixel's center.
            min_y: The topmost y-coordinate of each ray's source pixel's center.
            max_y: The bottommost y-coordinate of each ray's source pixel's center.
            image_width: The horizontal size of the image grid.
            image_height: The vertical size of the image grid.
            n_pts_per_ray: The number of points sampled along each ray.
            min_depth: The minimum depth of a ray-point.
            max_depth: The maximum depth of a ray-point.
            n_rays_per_image: If given, this amount of rays are sampled from
                the grid. Mutually exclusive with `n_rays_total`.
            n_rays_total: If given, how many rays in total to sample from the
                cameras provided, as if `n_rays_total` cameras were sampled
                with replacement and one ray emitted per sampled camera; the
                result is a HeterogeneousRayBundle with
                batch_size=n_rays_total. Mutually exclusive with
                `n_rays_per_image`.
            unit_directions: whether to normalize direction vectors in the
                ray bundle.
            stratified_sampling: if True, performs stratified random sampling
                along the ray; otherwise takes ray points at deterministic
                offsets.
        """
        super().__init__()
        self._n_pts_per_ray = n_pts_per_ray
        self._min_depth = min_depth
        self._max_depth = max_depth
        self._n_rays_per_image = n_rays_per_image
        self._n_rays_total = n_rays_total
        self._unit_directions = unit_directions
        self._stratified_sampling = stratified_sampling
        self.min_x, self.max_x = min_x, max_x
        self.min_y, self.max_y = min_y, max_y
        # Pixel-center coordinate grid of shape (H, W, 2), kept as a
        # non-persistent buffer so it follows the module across devices.
        grid_y, grid_x = meshgrid_ij(
            torch.linspace(min_y, max_y, image_height, dtype=torch.float32),
            torch.linspace(min_x, max_x, image_width, dtype=torch.float32),
        )
        self.register_buffer(
            "_xy_grid", torch.stack([grid_x, grid_y], dim=-1), persistent=False
        )

    def forward(
        self,
        cameras: CamerasBase,
        *,
        mask: Optional[torch.Tensor] = None,
        min_depth: Optional[float] = None,
        max_depth: Optional[float] = None,
        n_rays_per_image: Optional[int] = None,
        n_pts_per_ray: Optional[int] = None,
        stratified_sampling: Optional[bool] = None,
        n_rays_total: Optional[int] = None,
        **kwargs,
    ) -> Union[RayBundle, HeterogeneousRayBundle]:
        """
        Args:
            cameras: A batch of `batch_size` cameras from which rays are emitted.
            mask: if given, rays are sampled from the mask; size
                (batch_size, image_height, image_width).
            min_depth: The minimum depth of a ray-point (overrides __init__).
            max_depth: The maximum depth of a ray-point (overrides __init__).
            n_rays_per_image: if given, this amount of rays are sampled from
                the grid; mutually exclusive with `n_rays_total`.
            n_pts_per_ray: The number of points sampled along each ray
                (overrides __init__).
            stratified_sampling: if set, overrides the value from __init__.
            n_rays_total: total number of rays sampled across cameras drawn
                with replacement; mutually exclusive with `n_rays_per_image`.

        Returns:
            A RayBundle (or HeterogeneousRayBundle when `n_rays_total` is
            given) with fields origins `(batch_size, s1, s2, 3)`, directions
            `(batch_size, s1, s2, 3)`, lengths
            `(batch_size, s1, s2, n_pts_per_ray)` and xys
            `(batch_size, s1, s2, 2)`. The spatial dims `(s1, s2)` are, in
            priority order: `(1, 1)` when `n_rays_total` is given;
            `(n_rays_per_image, 1)` when `n_rays_per_image` is given;
            `(n, 1)` with n the minimum mask cardinality when `mask` is
            given; else `(image_height, image_width)`. A
            HeterogeneousRayBundle additionally carries `camera_ids` and
            `camera_counts` of shape (M,) for the M unique sampled cameras.
        """
        n_rays_total = n_rays_total or self._n_rays_total
        n_rays_per_image = n_rays_per_image or self._n_rays_per_image
        if (n_rays_total is not None) and (n_rays_per_image is not None):
            raise ValueError(
                "`n_rays_total` and `n_rays_per_image` cannot both be defined."
            )
        if n_rays_total:
            # Sample cameras with replacement; `n_rays_per_image` becomes the
            # max multiplicity of any sampled camera and surplus rays are
            # discarded when packing at the end.
            (
                cameras,
                mask,
                camera_ids,  # unique ids of sampled cameras
                camera_counts,  # how often each unique camera id was sampled
                # pyre-ignore[9]
                n_rays_per_image,
            ) = _sample_cameras_and_masks(n_rays_total, cameras, mask)
        else:
            # pyre-ignore[9]
            camera_ids: torch.LongTensor = torch.arange(len(cameras), dtype=torch.long)

        batch_size = cameras.R.shape[0]
        device = cameras.device

        # Expand the (H, W, 2) grid batch_size-times to (B, H, W, 2).
        grid_xy = self._xy_grid.to(device).expand(batch_size, -1, -1, -1)

        if mask is not None and n_rays_per_image is None:
            # Default the per-image ray budget to the smallest mask in the batch.
            n_rays_per_image = mask.sum(dim=(1, 2)).min().int().item()

        if n_rays_per_image is not None:
            if mask is not None:
                assert mask.shape == grid_xy.shape[:3]
                sample_weights = mask.reshape(batch_size, -1)
            else:
                # Uniform weights. torch.randperm would likely be more
                # efficient here but it is neither batched nor partial.
                _, dim1, dim2, _ = grid_xy.shape
                sample_weights = grid_xy.new_ones(batch_size, dim1 * dim2)
            # pyre-fixme[6]: For 2nd param expected `int` but got `Union[bool,
            #  float, int]`.
            gather_idx = _safe_multinomial(sample_weights, n_rays_per_image)[
                ..., None
            ].expand(-1, -1, 2)
            grid_xy = torch.gather(grid_xy.reshape(batch_size, -1, 2), 1, gather_idx)[
                :, :, None
            ]

        # Per-call overrides fall back to the values stored at construction.
        min_depth = self._min_depth if min_depth is None else min_depth
        max_depth = self._max_depth if max_depth is None else max_depth
        n_pts_per_ray = (
            self._n_pts_per_ray if n_pts_per_ray is None else n_pts_per_ray
        )
        stratified_sampling = (
            self._stratified_sampling
            if stratified_sampling is None
            else stratified_sampling
        )

        ray_bundle = _xy_to_ray_bundle(
            cameras,
            grid_xy,
            min_depth,
            max_depth,
            n_pts_per_ray,
            self._unit_directions,
            stratified_sampling,
        )

        if n_rays_total:
            # pyre-ignore[61]
            return _pack_ray_bundle(ray_bundle, camera_ids, camera_counts)
        return ray_bundle
+ """ + + def __init__( + self, + *, + image_width: int, + image_height: int, + n_pts_per_ray: int, + min_depth: float, + max_depth: float, + n_rays_per_image: Optional[int] = None, + n_rays_total: Optional[int] = None, + unit_directions: bool = False, + stratified_sampling: bool = False, + ) -> None: + if image_width >= image_height: + range_x = image_width / image_height + range_y = 1.0 + else: + range_x = 1.0 + range_y = image_height / image_width + + half_pix_width = range_x / image_width + half_pix_height = range_y / image_height + super().__init__( + min_x=range_x - half_pix_width, + max_x=-range_x + half_pix_width, + min_y=range_y - half_pix_height, + max_y=-range_y + half_pix_height, + image_width=image_width, + image_height=image_height, + n_pts_per_ray=n_pts_per_ray, + min_depth=min_depth, + max_depth=max_depth, + n_rays_per_image=n_rays_per_image, + n_rays_total=n_rays_total, + unit_directions=unit_directions, + stratified_sampling=stratified_sampling, + ) + + +class MonteCarloRaysampler(torch.nn.Module): + """ + Samples a fixed number of pixels within denoted xy bounds uniformly at random. + For each pixel, a fixed number of points is sampled along its ray at uniformly-spaced + z-coordinates such that the z-coordinates range between a predefined minimum + and maximum depth. + + For practical purposes, this is similar to MultinomialRaysampler without a mask, + however sampling at real-valued locations bypassing replacement checks may be faster. + """ + + def __init__( + self, + min_x: float, + max_x: float, + min_y: float, + max_y: float, + n_rays_per_image: int, + n_pts_per_ray: int, + min_depth: float, + max_depth: float, + *, + n_rays_total: Optional[int] = None, + unit_directions: bool = False, + stratified_sampling: bool = False, + ) -> None: + """ + Args: + min_x: The smallest x-coordinate of each ray's source pixel. + max_x: The largest x-coordinate of each ray's source pixel. + min_y: The smallest y-coordinate of each ray's source pixel. 
+ max_y: The largest y-coordinate of each ray's source pixel. + n_rays_per_image: The number of rays randomly sampled in each camera. + `n_rays_per_image` and `n_rays_total` cannot both be defined. + n_pts_per_ray: The number of points sampled along each ray. + min_depth: The minimum depth of each ray-point. + max_depth: The maximum depth of each ray-point. + n_rays_total: How many rays in total to sample from the cameras provided. The result + is as if `n_rays_total_training` cameras were sampled with replacement from the + cameras provided and for every camera one ray was sampled. If set returns the + HeterogeneousRayBundle with batch_size=n_rays_total. + `n_rays_per_image` and `n_rays_total` cannot both be defined. + unit_directions: whether to normalize direction vectors in ray bundle. + stratified_sampling: if True, performs stratified sampling in n_pts_per_ray + bins for each ray; otherwise takes n_pts_per_ray deterministic points + on each ray with uniform offsets. + """ + super().__init__() + self._min_x = min_x + self._max_x = max_x + self._min_y = min_y + self._max_y = max_y + self._n_rays_per_image = n_rays_per_image + self._n_pts_per_ray = n_pts_per_ray + self._min_depth = min_depth + self._max_depth = max_depth + self._n_rays_total = n_rays_total + self._unit_directions = unit_directions + self._stratified_sampling = stratified_sampling + + def forward( + self, + cameras: CamerasBase, + *, + stratified_sampling: Optional[bool] = None, + **kwargs, + ) -> Union[RayBundle, HeterogeneousRayBundle]: + """ + Args: + cameras: A batch of `batch_size` cameras from which the rays are emitted. + stratified_sampling: if set, overrides stratified_sampling provided + in __init__. + Returns: + A named tuple `RayBundle` or dataclass `HeterogeneousRayBundle` with the + following fields: + + origins: A tensor of shape + `(batch_size, n_rays_per_image, 3)` + denoting the locations of ray origins in the world coordinates. 
+ directions: A tensor of shape + `(batch_size, n_rays_per_image, 3)` + denoting the directions of each ray in the world coordinates. + lengths: A tensor of shape + `(batch_size, n_rays_per_image, n_pts_per_ray)` + containing the z-coordinate (=depth) of each ray in world units. + xys: A tensor of shape + `(batch_size, n_rays_per_image, 2)` + containing the 2D image coordinates of each ray. + If `n_rays_total` is provided `batch_size=n_rays_total`and + `n_rays_per_image=1` and `HeterogeneousRayBundle` is returned else `RayBundle` + is returned. + + `HeterogeneousRayBundle` has additional members: + - camera_ids: tensor of shape (M,), where `M` is the number of unique sampled + cameras. It represents unique ids of sampled cameras. + - camera_counts: tensor of shape (M,), where `M` is the number of unique sampled + cameras. Represents how many times each camera from `camera_ids` was sampled + """ + if ( + sum(x is not None for x in [self._n_rays_total, self._n_rays_per_image]) + != 1 + ): + raise ValueError( + "Exactly one of `self.n_rays_total` and `self.n_rays_per_image` " + "must be given." 
+ ) + + if self._n_rays_total: + ( + cameras, + _, + camera_ids, + camera_counts, + n_rays_per_image, + ) = _sample_cameras_and_masks(self._n_rays_total, cameras, None) + else: + # pyre-ignore[9] + camera_ids: torch.LongTensor = torch.arange(len(cameras), dtype=torch.long) + n_rays_per_image = self._n_rays_per_image + + batch_size = cameras.R.shape[0] + + device = cameras.device + + # get the initial grid of image xy coords + # of shape (batch_size, n_rays_per_image, 2) + rays_xy = torch.cat( + [ + torch.rand( + size=(batch_size, n_rays_per_image, 1), + dtype=torch.float32, + device=device, + ) + * (high - low) + + low + for low, high in ( + (self._min_x, self._max_x), + (self._min_y, self._max_y), + ) + ], + dim=2, + ) + + stratified_sampling = ( + stratified_sampling + if stratified_sampling is not None + else self._stratified_sampling + ) + + ray_bundle = _xy_to_ray_bundle( + cameras, + rays_xy, + self._min_depth, + self._max_depth, + self._n_pts_per_ray, + self._unit_directions, + stratified_sampling, + ) + + return ( + # pyre-ignore[61] + _pack_ray_bundle(ray_bundle, camera_ids, camera_counts) + if self._n_rays_total + else ray_bundle + ) + + +# Settings for backwards compatibility +def GridRaysampler( + min_x: float, + max_x: float, + min_y: float, + max_y: float, + image_width: int, + image_height: int, + n_pts_per_ray: int, + min_depth: float, + max_depth: float, +) -> "MultinomialRaysampler": + """ + GridRaysampler has been DEPRECATED. Use MultinomialRaysampler instead. + Preserving GridRaysampler for backward compatibility. + """ + + warnings.warn( + """GridRaysampler is deprecated, + Use MultinomialRaysampler instead. 
+ GridRaysampler will be removed in future releases.""", + PendingDeprecationWarning, + ) + + return MultinomialRaysampler( + min_x=min_x, + max_x=max_x, + min_y=min_y, + max_y=max_y, + image_width=image_width, + image_height=image_height, + n_pts_per_ray=n_pts_per_ray, + min_depth=min_depth, + max_depth=max_depth, + ) + + +# Settings for backwards compatibility +def NDCGridRaysampler( + image_width: int, + image_height: int, + n_pts_per_ray: int, + min_depth: float, + max_depth: float, +) -> "NDCMultinomialRaysampler": + """ + NDCGridRaysampler has been DEPRECATED. Use NDCMultinomialRaysampler instead. + Preserving NDCGridRaysampler for backward compatibility. + """ + + warnings.warn( + """NDCGridRaysampler is deprecated, + Use NDCMultinomialRaysampler instead. + NDCGridRaysampler will be removed in future releases.""", + PendingDeprecationWarning, + ) + + return NDCMultinomialRaysampler( + image_width=image_width, + image_height=image_height, + n_pts_per_ray=n_pts_per_ray, + min_depth=min_depth, + max_depth=max_depth, + ) + + +def _safe_multinomial(input: torch.Tensor, num_samples: int) -> torch.Tensor: + """ + Wrapper around torch.multinomial that attempts sampling without replacement + when possible, otherwise resorts to sampling with replacement. + + Args: + input: tensor of shape [B, n] containing non-negative values; + rows are interpreted as unnormalized event probabilities + in categorical distributions. + num_samples: number of samples to take. + + Returns: + LongTensor of shape [B, num_samples] containing + values from {0, ..., n - 1} where the elements [i, :] of row i make + (1) if there are num_samples or more non-zero values in input[i], + a random subset of the indices of those values, with + probabilities proportional to the values in input[i, :]. + + (2) if not, a random sample with replacement of the indices of + those values, with probabilities proportional to them. + This sample might not contain all the indices of the + non-zero values. 
+ Behavior undetermined if there are no non-zero values in a whole row + or if there are negative values. + """ + try: + res = torch.multinomial(input, num_samples, replacement=False) + except RuntimeError: + # this is probably rare, so we don't mind sampling twice + res = torch.multinomial(input, num_samples, replacement=True) + no_repl = (input > 0.0).sum(dim=-1) >= num_samples + res[no_repl] = torch.multinomial(input[no_repl], num_samples, replacement=False) + return res + + # in some versions of Pytorch, zero probabilty samples can be drawn without an error + # due to this bug: https://github.com/pytorch/pytorch/issues/50034. Handle this case: + repl = (input > 0.0).sum(dim=-1) < num_samples + if repl.any(): + res[repl] = torch.multinomial(input[repl], num_samples, replacement=True) + + return res + + +def _xy_to_ray_bundle( + cameras: CamerasBase, + xy_grid: torch.Tensor, + min_depth: float, + max_depth: float, + n_pts_per_ray: int, + unit_directions: bool, + stratified_sampling: bool = False, +) -> RayBundle: + """ + Extends the `xy_grid` input of shape `(batch_size, ..., 2)` to rays. + This adds to each xy location in the grid a vector of `n_pts_per_ray` depths + uniformly spaced between `min_depth` and `max_depth`. + + The extended grid is then unprojected with `cameras` to yield + ray origins, directions and depths. + + Args: + cameras: cameras object representing a batch of cameras. + xy_grid: torch.tensor grid of image xy coords. + min_depth: The minimum depth of each ray-point. + max_depth: The maximum depth of each ray-point. + n_pts_per_ray: The number of points sampled along each ray. + unit_directions: whether to normalize direction vectors in ray bundle. + stratified_sampling: if True, performs stratified sampling in n_pts_per_ray + bins for each ray; otherwise takes n_pts_per_ray deterministic points + on each ray with uniform offsets. 
+ """ + batch_size = xy_grid.shape[0] + spatial_size = xy_grid.shape[1:-1] + n_rays_per_image = spatial_size.numel() + + # ray z-coords + rays_zs = xy_grid.new_empty((0,)) + if n_pts_per_ray > 0: + depths = torch.linspace( + min_depth, + max_depth, + n_pts_per_ray, + dtype=xy_grid.dtype, + device=xy_grid.device, + ) + rays_zs = depths[None, None].expand(batch_size, n_rays_per_image, n_pts_per_ray) + + if stratified_sampling: + rays_zs = _jiggle_within_stratas(rays_zs) + + # make two sets of points at a constant depth=1 and 2 + to_unproject = torch.cat( + ( + xy_grid.view(batch_size, 1, n_rays_per_image, 2) + .expand(batch_size, 2, n_rays_per_image, 2) + .reshape(batch_size, n_rays_per_image * 2, 2), + torch.cat( + ( + xy_grid.new_ones(batch_size, n_rays_per_image, 1), + 2.0 * xy_grid.new_ones(batch_size, n_rays_per_image, 1), + ), + dim=1, + ), + ), + dim=-1, + ) + + # unproject the points + unprojected = cameras.unproject_points(to_unproject, from_ndc=True) + + # split the two planes back + rays_plane_1_world = unprojected[:, :n_rays_per_image] + rays_plane_2_world = unprojected[:, n_rays_per_image:] + + # directions are the differences between the two planes of points + rays_directions_world = rays_plane_2_world - rays_plane_1_world + + # origins are given by subtracting the ray directions from the first plane + rays_origins_world = rays_plane_1_world - rays_directions_world + + if unit_directions: + rays_directions_world = F.normalize(rays_directions_world, dim=-1) + + return RayBundle( + rays_origins_world.view(batch_size, *spatial_size, 3), + rays_directions_world.view(batch_size, *spatial_size, 3), + rays_zs.view(batch_size, *spatial_size, n_pts_per_ray), + xy_grid, + ) + + +def _jiggle_within_stratas(bin_centers: torch.Tensor) -> torch.Tensor: + """ + Performs sampling of 1 point per bin given the bin centers. 
+ + More specifically, it replaces each point's value `z` + with a sample from a uniform random distribution on + `[z - delta_-, z + delta_+]`, where `delta_-` is half of the difference + between `z` and the previous point, and `delta_+` is half of the difference + between the next point and `z`. For the first and last items, the + corresponding boundary deltas are assumed zero. + + Args: + `bin_centers`: The input points of size (..., N); the result is broadcast + along all but the last dimension (the rows). Each row should be + sorted in ascending order. + + Returns: + a tensor of size (..., N) with the locations jiggled within stratas/bins. + """ + # Get intervals between bin centers. + mids = 0.5 * (bin_centers[..., 1:] + bin_centers[..., :-1]) + upper = torch.cat((mids, bin_centers[..., -1:]), dim=-1) + lower = torch.cat((bin_centers[..., :1], mids), dim=-1) + # Samples in those intervals. + jiggled = lower + (upper - lower) * torch.rand_like(lower) + return jiggled + + +def _sample_cameras_and_masks( + n_samples: int, cameras: CamerasBase, mask: Optional[torch.Tensor] = None +) -> Tuple[ + CamerasBase, + Optional[torch.Tensor], + torch.LongTensor, + torch.LongTensor, + torch.LongTensor, +]: + """ + Samples n_rays_total cameras and masks and returns them in a form + (camera_idx, count), where count represents number of times the same camera + has been sampled. + + Args: + n_samples: how many camera and mask pairs to sample + cameras: A batch of `batch_size` cameras from which the rays are emitted. + mask: Optional. Should be of size (batch_size, image_height, image_width). 
+ Returns: + tuple of a form (sampled_cameras, sampled_masks, unique_sampled_camera_ids, + number_of_times_each_sampled_camera_has_been_sampled, + max_number_of_times_camera_has_been_sampled, + ) + """ + sampled_ids = torch.randint( + 0, + len(cameras), + size=(n_samples,), + dtype=torch.long, + ) + unique_ids, counts = torch.unique(sampled_ids, return_counts=True) + # pyre-ignore[7] + return ( + cameras[unique_ids], + mask[unique_ids] if mask is not None else None, + unique_ids, + counts, + torch.max(counts), + ) + + +# TODO: this function can be unified with ImplicitronRayBundle.get_padded_xys +def _pack_ray_bundle( + ray_bundle: RayBundle, camera_ids: torch.LongTensor, camera_counts: torch.LongTensor +) -> HeterogeneousRayBundle: + """ + Pack the raybundle from [n_cameras, max(rays_per_camera), ...] to + [total_num_rays, 1, ...] + + Args: + ray_bundle: A ray_bundle to pack + camera_ids: Unique ids of cameras that were sampled + camera_counts: how many of which camera to pack, each count coresponds to + one 'row' of the ray_bundle and says how many rays wll be taken + from it and packed. 
+ Returns: + HeterogeneousRayBundle where batch_size=sum(camera_counts) and n_rays_per_image=1 + """ + # pyre-ignore[9] + camera_counts = camera_counts.to(ray_bundle.origins.device) + cumsum = torch.cumsum(camera_counts, dim=0, dtype=torch.long) + # pyre-ignore[9] + first_idxs: torch.LongTensor = torch.cat( + (camera_counts.new_zeros((1,), dtype=torch.long), cumsum[:-1]) + ) + num_inputs = int(camera_counts.sum()) + + return HeterogeneousRayBundle( + origins=padded_to_packed(ray_bundle.origins, first_idxs, num_inputs)[:, None], + directions=padded_to_packed(ray_bundle.directions, first_idxs, num_inputs)[ + :, None + ], + lengths=padded_to_packed(ray_bundle.lengths, first_idxs, num_inputs)[:, None], + xys=padded_to_packed(ray_bundle.xys, first_idxs, num_inputs)[:, None], + camera_ids=camera_ids, + camera_counts=camera_counts, + ) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/implicit/sample_pdf.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/implicit/sample_pdf.py new file mode 100644 index 0000000000000000000000000000000000000000..7009867d7aa21311fd28986b98108346653fac49 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/implicit/sample_pdf.py @@ -0,0 +1,148 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + + +import torch +from pytorch3d import _C + + +def sample_pdf( + bins: torch.Tensor, + weights: torch.Tensor, + n_samples: int, + det: bool = False, + eps: float = 1e-5, +) -> torch.Tensor: + """ + Samples probability density functions defined by bin edges `bins` and + the non-negative per-bin probabilities `weights`. 
+ + Args: + bins: Tensor of shape `(..., n_bins+1)` denoting the edges of the sampling bins. + weights: Tensor of shape `(..., n_bins)` containing non-negative numbers + representing the probability of sampling the corresponding bin. + n_samples: The number of samples to draw from each set of bins. + det: If `False`, the sampling is random. `True` yields deterministic + uniformly-spaced sampling from the inverse cumulative density function. + eps: A constant preventing division by zero in case empty bins are present. + + Returns: + samples: Tensor of shape `(..., n_samples)` containing `n_samples` samples + drawn from each probability distribution. + + Refs: + [1] https://github.com/bmild/nerf/blob/55d8b00244d7b5178f4d003526ab6667683c9da9/run_nerf_helpers.py#L183 # noqa E501 + """ + if torch.is_grad_enabled() and (bins.requires_grad or weights.requires_grad): + raise NotImplementedError("sample_pdf differentiability.") + if weights.min() <= -eps: + raise ValueError("Negative weights provided.") + batch_shape = bins.shape[:-1] + n_bins = weights.shape[-1] + if n_bins + 1 != bins.shape[-1] or weights.shape[:-1] != batch_shape: + shapes = f"{bins.shape}{weights.shape}" + raise ValueError("Inconsistent shapes of bins and weights: " + shapes) + output_shape = batch_shape + (n_samples,) + + if det: + u = torch.linspace(0.0, 1.0, n_samples, device=bins.device, dtype=torch.float32) + output = u.expand(output_shape).contiguous() + else: + output = torch.rand(output_shape, dtype=torch.float32, device=bins.device) + + # pyre-fixme[16]: Module `pytorch3d` has no attribute `_C`. + _C.sample_pdf( + bins.reshape(-1, n_bins + 1), + weights.reshape(-1, n_bins), + output.reshape(-1, n_samples), + eps, + ) + + return output + + +def sample_pdf_python( + bins: torch.Tensor, + weights: torch.Tensor, + N_samples: int, + det: bool = False, + eps: float = 1e-5, +) -> torch.Tensor: + """ + This is a pure python implementation of the `sample_pdf` function. 
+ It may be faster than sample_pdf when the number of bins is very large, + because it behaves as O(batchsize * [n_bins + log(n_bins) * n_samples] ) + whereas sample_pdf behaves as O(batchsize * n_bins * n_samples). + For 64 bins sample_pdf is much faster. + + Samples probability density functions defined by bin edges `bins` and + the non-negative per-bin probabilities `weights`. + + Note: This is a direct conversion of the TensorFlow function from the original + release [1] to PyTorch. It requires PyTorch 1.6 or greater due to the use of + torch.searchsorted. + + Args: + bins: Tensor of shape `(..., n_bins+1)` denoting the edges of the sampling bins. + weights: Tensor of shape `(..., n_bins)` containing non-negative numbers + representing the probability of sampling the corresponding bin. + N_samples: The number of samples to draw from each set of bins. + det: If `False`, the sampling is random. `True` yields deterministic + uniformly-spaced sampling from the inverse cumulative density function. + eps: A constant preventing division by zero in case empty bins are present. + + Returns: + samples: Tensor of shape `(..., N_samples)` containing `N_samples` samples + drawn from each probability distribution. 
+ + Refs: + [1] https://github.com/bmild/nerf/blob/55d8b00244d7b5178f4d003526ab6667683c9da9/run_nerf_helpers.py#L183 # noqa E501 + """ + + # Get pdf + weights = weights + eps # prevent nans + if weights.min() <= 0: + raise ValueError("Negative weights provided.") + pdf = weights / weights.sum(dim=-1, keepdim=True) + cdf = torch.cumsum(pdf, -1) + cdf = torch.cat([torch.zeros_like(cdf[..., :1]), cdf], -1) + + # Take uniform samples u of shape (..., N_samples) + if det: + u = torch.linspace(0.0, 1.0, N_samples, device=cdf.device, dtype=cdf.dtype) + u = u.expand(list(cdf.shape[:-1]) + [N_samples]).contiguous() + else: + u = torch.rand( + list(cdf.shape[:-1]) + [N_samples], device=cdf.device, dtype=cdf.dtype + ) + + # Invert CDF + inds = torch.searchsorted(cdf, u, right=True) + # inds has shape (..., N_samples) identifying the bin of each sample. + below = (inds - 1).clamp(0) + above = inds.clamp(max=cdf.shape[-1] - 1) + # Below and above are of shape (..., N_samples), identifying the bin + # edges surrounding each sample. + + inds_g = torch.stack([below, above], -1).view( + *below.shape[:-1], below.shape[-1] * 2 + ) + cdf_g = torch.gather(cdf, -1, inds_g).view(*below.shape, 2) + bins_g = torch.gather(bins, -1, inds_g).view(*below.shape, 2) + # cdf_g and bins_g are of shape (..., N_samples, 2) and identify + # the cdf and the index of the two bin edges surrounding each sample. + + denom = cdf_g[..., 1] - cdf_g[..., 0] + denom = torch.where(denom < eps, torch.ones_like(denom), denom) + t = (u - cdf_g[..., 0]) / denom + # t is of shape (..., N_samples) and identifies how far through + # each sample is in its bin. 
+ + samples = bins_g[..., 0] + t * (bins_g[..., 1] - bins_g[..., 0]) + + return samples diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/implicit/utils.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/implicit/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..bd6bcc31f325fa1521a2e8b5c4e4e0dcd866e2c2 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/implicit/utils.py @@ -0,0 +1,173 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +import dataclasses +from typing import NamedTuple, Optional, Union + +import torch + + +class RayBundle(NamedTuple): + """ + Parametrizes points along projection rays by storing: + + origins: A tensor of shape `(..., 3)` denoting the + origins of the sampling rays in world coords. + directions: A tensor of shape `(..., 3)` containing the direction + vectors of sampling rays in world coords. They don't have to be normalized; + they define unit vectors in the respective 1D coordinate systems; see + documentation for :func:`ray_bundle_to_ray_points` for the conversion formula. + lengths: A tensor of shape `(..., num_points_per_ray)` + containing the lengths at which the rays are sampled. + xys: A tensor of shape `(..., 2)`, the xy-locations (`xys`) of the ray pixels + """ + + origins: torch.Tensor + directions: torch.Tensor + lengths: torch.Tensor + xys: torch.Tensor + + +@dataclasses.dataclass +class HeterogeneousRayBundle: + """ + Members: + origins: A tensor of shape `(..., 3)` denoting the + origins of the sampling rays in world coords. 
+ directions: A tensor of shape `(..., 3)` containing the direction + vectors of sampling rays in world coords. They don't have to be normalized; + they define unit vectors in the respective 1D coordinate systems; see + documentation for :func:`ray_bundle_to_ray_points` for the conversion formula. + lengths: A tensor of shape `(..., num_points_per_ray)` + containing the lengths at which the rays are sampled. + xys: A tensor of shape `(..., 2)`, the xy-locations (`xys`) of the ray pixels + camera_ids: A tensor of shape (N, ) which indicates which camera + was used to sample the rays. `N` is the number of unique sampled cameras. + camera_counts: A tensor of shape (N, ) which how many times the + coresponding camera in `camera_ids` was sampled. + `sum(camera_counts)==total_number_of_rays` + + If we sample cameras of ids [0, 3, 5, 3, 1, 0, 0] that would be + stored as camera_ids=[1, 3, 5, 0] and camera_counts=[1, 2, 1, 3]. `camera_ids` is a + set like object with no particular ordering of elements. ith element of + `camera_ids` coresponds to the ith element of `camera_counts`. + """ + + origins: torch.Tensor + directions: torch.Tensor + lengths: torch.Tensor + xys: torch.Tensor + camera_ids: Optional[torch.LongTensor] = None + camera_counts: Optional[torch.LongTensor] = None + + +def ray_bundle_to_ray_points( + ray_bundle: Union[RayBundle, HeterogeneousRayBundle] +) -> torch.Tensor: + """ + Converts rays parametrized with a `ray_bundle` (an instance of the `RayBundle` + named tuple or HeterogeneousRayBundle dataclass) to 3D points by + extending each ray according to the corresponding length. + + E.g. for 2 dimensional tensors `ray_bundle.origins`, `ray_bundle.directions` + and `ray_bundle.lengths`, the ray point at position `[i, j]` is:: + + ray_bundle.points[i, j, :] = ( + ray_bundle.origins[i, :] + + ray_bundle.directions[i, :] * ray_bundle.lengths[i, j] + ) + + Note that both the directions and magnitudes of the vectors in + `ray_bundle.directions` matter. 
+ + Args: + ray_bundle: A `RayBundle` or `HeterogeneousRayBundle` object with fields: + origins: A tensor of shape `(..., 3)` + directions: A tensor of shape `(..., 3)` + lengths: A tensor of shape `(..., num_points_per_ray)` + + Returns: + rays_points: A tensor of shape `(..., num_points_per_ray, 3)` + containing the points sampled along each ray. + """ + return ray_bundle_variables_to_ray_points( + ray_bundle.origins, ray_bundle.directions, ray_bundle.lengths + ) + + +def ray_bundle_variables_to_ray_points( + rays_origins: torch.Tensor, + rays_directions: torch.Tensor, + rays_lengths: torch.Tensor, +) -> torch.Tensor: + """ + Converts rays parametrized with origins and directions + to 3D points by extending each ray according to the corresponding + ray length: + + E.g. for 2 dimensional input tensors `rays_origins`, `rays_directions` + and `rays_lengths`, the ray point at position `[i, j]` is:: + + rays_points[i, j, :] = ( + rays_origins[i, :] + + rays_directions[i, :] * rays_lengths[i, j] + ) + + Note that both the directions and magnitudes of the vectors in + `rays_directions` matter. + + Args: + rays_origins: A tensor of shape `(..., 3)` + rays_directions: A tensor of shape `(..., 3)` + rays_lengths: A tensor of shape `(..., num_points_per_ray)` + + Returns: + rays_points: A tensor of shape `(..., num_points_per_ray, 3)` + containing the points sampled along each ray. + """ + rays_points = ( + rays_origins[..., None, :] + + rays_lengths[..., :, None] * rays_directions[..., None, :] + ) + return rays_points + + +def _validate_ray_bundle_variables( + rays_origins: torch.Tensor, + rays_directions: torch.Tensor, + rays_lengths: torch.Tensor, +) -> None: + """ + Validate the shapes of RayBundle variables + `rays_origins`, `rays_directions`, and `rays_lengths`. 
+ """ + ndim = rays_origins.ndim + if any(r.ndim != ndim for r in (rays_directions, rays_lengths)): + raise ValueError( + "rays_origins, rays_directions and rays_lengths" + + " have to have the same number of dimensions." + ) + + if ndim <= 2: + raise ValueError( + "rays_origins, rays_directions and rays_lengths" + + " have to have at least 3 dimensions." + ) + + spatial_size = rays_origins.shape[:-1] + if any(spatial_size != r.shape[:-1] for r in (rays_directions, rays_lengths)): + raise ValueError( + "The shapes of rays_origins, rays_directions and rays_lengths" + + " may differ only in the last dimension." + ) + + if any(r.shape[-1] != 3 for r in (rays_origins, rays_directions)): + raise ValueError( + "The size of the last dimension of rays_origins/rays_directions" + + "has to be 3." + ) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/lighting.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/lighting.py new file mode 100644 index 0000000000000000000000000000000000000000..4394c3e59020039692a617db49ac74de465414fa --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/lighting.py @@ -0,0 +1,341 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + + +import torch +import torch.nn.functional as F + +from ..common.datatypes import Device +from .utils import convert_to_tensors_and_broadcast, TensorProperties + + +def diffuse(normals, color, direction) -> torch.Tensor: + """ + Calculate the diffuse component of light reflection using Lambert's + cosine law. + + Args: + normals: (N, ..., 3) xyz normal vectors. Normals and points are + expected to have the same shape. 
+ color: (1, 3) or (N, 3) RGB color of the diffuse component of the light. + direction: (x,y,z) direction of the light + + Returns: + colors: (N, ..., 3), same shape as the input points. + + The normals and light direction should be in the same coordinate frame + i.e. if the points have been transformed from world -> view space then + the normals and direction should also be in view space. + + NOTE: to use with the packed vertices (i.e. no batch dimension) reformat the + inputs in the following way. + + .. code-block:: python + + Args: + normals: (P, 3) + color: (N, 3)[batch_idx, :] -> (P, 3) + direction: (N, 3)[batch_idx, :] -> (P, 3) + + Returns: + colors: (P, 3) + + where batch_idx is of shape (P). For meshes, batch_idx can be: + meshes.verts_packed_to_mesh_idx() or meshes.faces_packed_to_mesh_idx() + depending on whether points refers to the vertex coordinates or + average/interpolated face coordinates. + """ + # TODO: handle multiple directional lights per batch element. + # TODO: handle attenuation. + + # Ensure color and location have same batch dimension as normals + normals, color, direction = convert_to_tensors_and_broadcast( + normals, color, direction, device=normals.device + ) + + # Reshape direction and color so they have all the arbitrary intermediate + # dimensions as normals. Assume first dim = batch dim and last dim = 3. + points_dims = normals.shape[1:-1] + expand_dims = (-1,) + (1,) * len(points_dims) + (3,) + if direction.shape != normals.shape: + direction = direction.view(expand_dims) + if color.shape != normals.shape: + color = color.view(expand_dims) + + # Renormalize the normals in case they have been interpolated. + # We tried to replace the following with F.cosine_similarity, but it wasn't faster. 
+ normals = F.normalize(normals, p=2, dim=-1, eps=1e-6) + direction = F.normalize(direction, p=2, dim=-1, eps=1e-6) + angle = F.relu(torch.sum(normals * direction, dim=-1)) + return color * angle[..., None] + + +def specular( + points, normals, direction, color, camera_position, shininess +) -> torch.Tensor: + """ + Calculate the specular component of light reflection. + + Args: + points: (N, ..., 3) xyz coordinates of the points. + normals: (N, ..., 3) xyz normal vectors for each point. + color: (N, 3) RGB color of the specular component of the light. + direction: (N, 3) vector direction of the light. + camera_position: (N, 3) The xyz position of the camera. + shininess: (N) The specular exponent of the material. + + Returns: + colors: (N, ..., 3), same shape as the input points. + + The points, normals, camera_position, and direction should be in the same + coordinate frame i.e. if the points have been transformed from + world -> view space then the normals, camera_position, and light direction + should also be in view space. + + To use with a batch of packed points reindex in the following way. + .. code-block:: python:: + + Args: + points: (P, 3) + normals: (P, 3) + color: (N, 3)[batch_idx] -> (P, 3) + direction: (N, 3)[batch_idx] -> (P, 3) + camera_position: (N, 3)[batch_idx] -> (P, 3) + shininess: (N)[batch_idx] -> (P) + Returns: + colors: (P, 3) + + where batch_idx is of shape (P). For meshes batch_idx can be: + meshes.verts_packed_to_mesh_idx() or meshes.faces_packed_to_mesh_idx(). 
+ """ + # TODO: handle multiple directional lights + # TODO: attenuate based on inverse squared distance to the light source + + if points.shape != normals.shape: + msg = "Expected points and normals to have the same shape: got %r, %r" + raise ValueError(msg % (points.shape, normals.shape)) + + # Ensure all inputs have same batch dimension as points + matched_tensors = convert_to_tensors_and_broadcast( + points, color, direction, camera_position, shininess, device=points.device + ) + _, color, direction, camera_position, shininess = matched_tensors + + # Reshape direction and color so they have all the arbitrary intermediate + # dimensions as points. Assume first dim = batch dim and last dim = 3. + points_dims = points.shape[1:-1] + expand_dims = (-1,) + (1,) * len(points_dims) + if direction.shape != normals.shape: + direction = direction.view(expand_dims + (3,)) + if color.shape != normals.shape: + color = color.view(expand_dims + (3,)) + if camera_position.shape != normals.shape: + camera_position = camera_position.view(expand_dims + (3,)) + if shininess.shape != normals.shape: + shininess = shininess.view(expand_dims) + + # Renormalize the normals in case they have been interpolated. + # We tried a version that uses F.cosine_similarity instead of renormalizing, + # but it was slower. + normals = F.normalize(normals, p=2, dim=-1, eps=1e-6) + direction = F.normalize(direction, p=2, dim=-1, eps=1e-6) + cos_angle = torch.sum(normals * direction, dim=-1) + # No specular highlights if angle is less than 0. + mask = (cos_angle > 0).to(torch.float32) + + # Calculate the specular reflection. 
+ view_direction = camera_position - points + view_direction = F.normalize(view_direction, p=2, dim=-1, eps=1e-6) + reflect_direction = -direction + 2 * (cos_angle[..., None] * normals) + + # Cosine of the angle between the reflected light ray and the viewer + alpha = F.relu(torch.sum(view_direction * reflect_direction, dim=-1)) * mask + return color * torch.pow(alpha, shininess)[..., None] + + +class DirectionalLights(TensorProperties): + def __init__( + self, + ambient_color=((0.5, 0.5, 0.5),), + diffuse_color=((0.3, 0.3, 0.3),), + specular_color=((0.2, 0.2, 0.2),), + direction=((0, 1, 0),), + device: Device = "cpu", + ) -> None: + """ + Args: + ambient_color: RGB color of the ambient component. + diffuse_color: RGB color of the diffuse component. + specular_color: RGB color of the specular component. + direction: (x, y, z) direction vector of the light. + device: Device (as str or torch.device) on which the tensors should be located + + The inputs can each be + - 3 element tuple/list or list of lists + - torch tensor of shape (1, 3) + - torch tensor of shape (N, 3) + The inputs are broadcast against each other so they all have batch + dimension N. + """ + super().__init__( + device=device, + ambient_color=ambient_color, + diffuse_color=diffuse_color, + specular_color=specular_color, + direction=direction, + ) + _validate_light_properties(self) + if self.direction.shape[-1] != 3: + msg = "Expected direction to have shape (N, 3); got %r" + raise ValueError(msg % repr(self.direction.shape)) + + def clone(self): + other = self.__class__(device=self.device) + return super().clone(other) + + def diffuse(self, normals, points=None) -> torch.Tensor: + # NOTE: Points is not used but is kept in the args so that the API is + # the same for directional and point lights. The call sites should not + # need to know the light type. 
+ return diffuse( + normals=normals, + color=self.diffuse_color, + direction=self.direction, + ) + + def specular(self, normals, points, camera_position, shininess) -> torch.Tensor: + return specular( + points=points, + normals=normals, + color=self.specular_color, + direction=self.direction, + camera_position=camera_position, + shininess=shininess, + ) + + +class PointLights(TensorProperties): + def __init__( + self, + ambient_color=((0.5, 0.5, 0.5),), + diffuse_color=((0.3, 0.3, 0.3),), + specular_color=((0.2, 0.2, 0.2),), + location=((0, 1, 0),), + device: Device = "cpu", + ) -> None: + """ + Args: + ambient_color: RGB color of the ambient component + diffuse_color: RGB color of the diffuse component + specular_color: RGB color of the specular component + location: xyz position of the light. + device: Device (as str or torch.device) on which the tensors should be located + + The inputs can each be + - 3 element tuple/list or list of lists + - torch tensor of shape (1, 3) + - torch tensor of shape (N, 3) + The inputs are broadcast against each other so they all have batch + dimension N. + """ + super().__init__( + device=device, + ambient_color=ambient_color, + diffuse_color=diffuse_color, + specular_color=specular_color, + location=location, + ) + _validate_light_properties(self) + if self.location.shape[-1] != 3: + msg = "Expected location to have shape (N, 3); got %r" + raise ValueError(msg % repr(self.location.shape)) + + def clone(self): + other = self.__class__(device=self.device) + return super().clone(other) + + def reshape_location(self, points) -> torch.Tensor: + """ + Reshape the location tensor to have dimensions + compatible with the points which can either be of + shape (P, 3) or (N, H, W, K, 3). 
+ """ + if self.location.ndim == points.ndim: + return self.location + return self.location[:, None, None, None, :] + + def diffuse(self, normals, points) -> torch.Tensor: + location = self.reshape_location(points) + direction = location - points + return diffuse(normals=normals, color=self.diffuse_color, direction=direction) + + def specular(self, normals, points, camera_position, shininess) -> torch.Tensor: + location = self.reshape_location(points) + direction = location - points + return specular( + points=points, + normals=normals, + color=self.specular_color, + direction=direction, + camera_position=camera_position, + shininess=shininess, + ) + + +class AmbientLights(TensorProperties): + """ + A light object representing the same color of light everywhere. + By default, this is white, which effectively means lighting is + not used in rendering. + + Unlike other lights this supports an arbitrary number of channels, not just 3 for RGB. + The ambient_color input determines the number of channels. + """ + + def __init__(self, *, ambient_color=None, device: Device = "cpu") -> None: + """ + If ambient_color is provided, it should be a sequence of + triples of floats. + + Args: + ambient_color: RGB color + device: Device (as str or torch.device) on which the tensors should be located + + The ambient_color if provided, should be + - tuple/list of C-element tuples of floats + - torch tensor of shape (1, C) + - torch tensor of shape (N, C) + where C is the number of channels and N is batch size. + For RGB, C is 3. 
+ """ + if ambient_color is None: + ambient_color = ((1.0, 1.0, 1.0),) + super().__init__(ambient_color=ambient_color, device=device) + + def clone(self): + other = self.__class__(device=self.device) + return super().clone(other) + + def diffuse(self, normals, points) -> torch.Tensor: + return self._zeros_channels(points) + + def specular(self, normals, points, camera_position, shininess) -> torch.Tensor: + return self._zeros_channels(points) + + def _zeros_channels(self, points: torch.Tensor) -> torch.Tensor: + ch = self.ambient_color.shape[-1] + return torch.zeros(*points.shape[:-1], ch, device=points.device) + + +def _validate_light_properties(obj) -> None: + props = ("ambient_color", "diffuse_color", "specular_color") + for n in props: + t = getattr(obj, n) + if t.shape[-1] != 3: + msg = "Expected %s to have shape (N, 3); got %r" + raise ValueError(msg % (n, t.shape)) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/materials.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/materials.py new file mode 100644 index 0000000000000000000000000000000000000000..be85b8ab1442ba0766bd4c2b2c610b8c478fe3c2 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/materials.py @@ -0,0 +1,67 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + + +import torch + +from ..common.datatypes import Device +from .utils import TensorProperties + + +class Materials(TensorProperties): + """ + A class for storing a batch of material properties. Currently only one + material per batch element is supported. 
+ """ + + def __init__( + self, + ambient_color=((1, 1, 1),), + diffuse_color=((1, 1, 1),), + specular_color=((1, 1, 1),), + shininess=64, + device: Device = "cpu", + ) -> None: + """ + Args: + ambient_color: ambient reflectivity of the material + diffuse_color: diffuse reflectivity of the material + specular_color: specular reflectivity of the material + shininess: The specular exponent for the material. This defines + the focus of the specular highlight with a high value + resulting in a concentrated highlight. Shininess values + can range from 0-1000. + device: Device (as str or torch.device) on which the tensors should be located + + ambient_color, diffuse_color and specular_color can be of shape + (1, C) or (N, C) where C is typically 3 (for RGB). shininess can be of shape (1,) + or (N,). + + The colors and shininess are broadcast against each other so need to + have either the same batch dimension or batch dimension = 1. + """ + super().__init__( + device=device, + diffuse_color=diffuse_color, + ambient_color=ambient_color, + specular_color=specular_color, + shininess=shininess, + ) + C = self.ambient_color.shape[-1] + for n in ["ambient_color", "diffuse_color", "specular_color"]: + t = getattr(self, n) + if t.shape[-1] != C: + msg = "Expected %s to have shape (N, %d); got %r" + raise ValueError(msg % (n, C, t.shape)) + if self.shininess.shape != torch.Size([self._N]): + msg = "shininess should have shape (N); got %r" + raise ValueError(msg % repr(self.shininess.shape)) + + def clone(self): + other = Materials(device=self.device) + return super().clone(other) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bf8ced062dafb53a046e673e7e1d1c4bd5a85061 --- /dev/null +++ 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__init__.py @@ -0,0 +1,40 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from .clip import ( + clip_faces, + ClipFrustum, + ClippedFaces, + convert_clipped_rasterization_to_original_faces, +) + +from .rasterize_meshes import rasterize_meshes +from .rasterizer import MeshRasterizer, RasterizationSettings +from .renderer import MeshRenderer, MeshRendererWithFragments +from .shader import ( # DEPRECATED + BlendParams, + HardFlatShader, + HardGouraudShader, + HardPhongShader, + SoftGouraudShader, + SoftPhongShader, + SoftSilhouetteShader, + SplatterPhongShader, + TexturedSoftPhongShader, +) +from .shading import gouraud_shading, phong_shading +from .textures import ( # DEPRECATED + Textures, + TexturesAtlas, + TexturesBase, + TexturesUV, + TexturesVertex, +) + + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/__init__.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c88acb490a53ac7b94fc46d7fbc2e8898559020b Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/__init__.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/clip.cpython-310.pyc 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/clip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0825a5102c8fc189d013abd4190497938b3fcae6 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/clip.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/rasterize_meshes.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/rasterize_meshes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e7eaeda3d7547d10bce887928e64ca379bfbd34 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/rasterize_meshes.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/rasterizer.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/rasterizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9cc65095d95121249a18317a9ffb932cd8eec77 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/rasterizer.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/renderer.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/renderer.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..3695258766411f0d6d2ac96ec11af8a3393275aa Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/renderer.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/shader.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/shader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..798f7496672b4f1f8d9fa1a58e8e2eb7506fd693 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/shader.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/shading.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/shading.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40b96911b09969fa82c5e3e41340e981ec3a8748 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/shading.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/textures.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/textures.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed8f100a06c9137e193ad4548f5a5d06896ae9dc Binary files /dev/null and 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/textures.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/utils.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b12e46c1fdacf0484624cd9e8cdaefccece44113 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/__pycache__/utils.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/clip.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/clip.py new file mode 100644 index 0000000000000000000000000000000000000000..9233159c887ef1bd4ffbc06749227a694eb25bd4 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/clip.py @@ -0,0 +1,728 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + +from typing import Any, List, Optional, Tuple + +import torch + + +""" +Mesh clipping is done before rasterization and is implemented using 4 cases +(these will be referred to throughout the functions below) + +Case 1: the triangle is completely in front of the clipping plane (it is left + unchanged) +Case 2: the triangle is completely behind the clipping plane (it is culled) +Case 3: the triangle has exactly two vertices behind the clipping plane (it is + clipped into a smaller triangle) +Case 4: the triangle has exactly one vertex behind the clipping plane (it is clipped + into a smaller quadrilateral and divided into two triangular faces) + +After rasterization, the Fragments from the clipped/modified triangles +are mapped back to the triangles in the original mesh. The indices, +barycentric coordinates and distances are all relative to original mesh triangles. + +NOTE: It is assumed that all z-coordinates are in world coordinates (not NDC +coordinates), while x and y coordinates may be in NDC/screen coordinates +(i.e after applying a projective transform e.g. cameras.transform_points(points)). +""" + + +class ClippedFaces: + """ + Helper class to store the data for the clipped version of a Meshes object + (face_verts, mesh_to_face_first_idx, num_faces_per_mesh) along with + conversion information (faces_clipped_to_unclipped_idx, barycentric_conversion, + faces_clipped_to_conversion_idx, clipped_faces_neighbor_idx) required to convert + barycentric coordinates from rasterization of the clipped Meshes to barycentric + coordinates in terms of the unclipped Meshes. + + Args: + face_verts: FloatTensor of shape (F_clipped, 3, 3) giving the verts of + each of the clipped faces + mesh_to_face_first_idx: an tensor of shape (N,), where N is the number of meshes + in the batch. The ith element stores the index into face_verts + of the first face of the ith mesh. + num_faces_per_mesh: a tensor of shape (N,) storing the number of faces in each mesh. 
+ faces_clipped_to_unclipped_idx: (F_clipped,) shaped LongTensor mapping each clipped + face back to the face in faces_unclipped (i.e. the faces in the original meshes + obtained using meshes.faces_packed()) + barycentric_conversion: (T, 3, 3) FloatTensor, where barycentric_conversion[i, :, k] + stores the barycentric weights in terms of the world coordinates of the original + (big) unclipped triangle for the kth vertex in the clipped (small) triangle. + If the rasterizer then expresses some NDC coordinate in terms of barycentric + world coordinates for the clipped (small) triangle as alpha_clipped[i,:], + alpha_unclipped[i, :] = barycentric_conversion[i, :, :]*alpha_clipped[i, :] + faces_clipped_to_conversion_idx: (F_clipped,) shaped LongTensor mapping each clipped + face to the applicable row of barycentric_conversion (or set to -1 if conversion is + not needed). + clipped_faces_neighbor_idx: LongTensor of shape (F_clipped,) giving the index of the + neighboring face for each case 4 triangle. e.g. for a case 4 face with f split + into two triangles (t1, t2): clipped_faces_neighbor_idx[t1_idx] = t2_idx. + Faces which are not clipped and subdivided are set to -1 (i.e cases 1/2/3). 
+ """ + + __slots__ = [ + "face_verts", + "mesh_to_face_first_idx", + "num_faces_per_mesh", + "faces_clipped_to_unclipped_idx", + "barycentric_conversion", + "faces_clipped_to_conversion_idx", + "clipped_faces_neighbor_idx", + ] + + def __init__( + self, + face_verts: torch.Tensor, + mesh_to_face_first_idx: torch.Tensor, + num_faces_per_mesh: torch.Tensor, + faces_clipped_to_unclipped_idx: Optional[torch.Tensor] = None, + barycentric_conversion: Optional[torch.Tensor] = None, + faces_clipped_to_conversion_idx: Optional[torch.Tensor] = None, + clipped_faces_neighbor_idx: Optional[torch.Tensor] = None, + ) -> None: + self.face_verts = face_verts + self.mesh_to_face_first_idx = mesh_to_face_first_idx + self.num_faces_per_mesh = num_faces_per_mesh + self.faces_clipped_to_unclipped_idx = faces_clipped_to_unclipped_idx + self.barycentric_conversion = barycentric_conversion + self.faces_clipped_to_conversion_idx = faces_clipped_to_conversion_idx + self.clipped_faces_neighbor_idx = clipped_faces_neighbor_idx + + +class ClipFrustum: + """ + Helper class to store the information needed to represent a view frustum + (left, right, top, bottom, znear, zfar), which is used to clip or cull triangles. + Values left as None mean that culling should not be performed for that axis. + The parameters perspective_correct, cull, and z_clip_value are used to define + behavior for clipping triangles to the frustum. 
+ + Args: + left: NDC coordinate of the left clipping plane (along x axis) + right: NDC coordinate of the right clipping plane (along x axis) + top: NDC coordinate of the top clipping plane (along y axis) + bottom: NDC coordinate of the bottom clipping plane (along y axis) + znear: world space z coordinate of the near clipping plane + zfar: world space z coordinate of the far clipping plane + perspective_correct: should be set to True for a perspective camera + cull: if True, triangles outside the frustum should be culled + z_clip_value: if not None, then triangles should be clipped (possibly into + smaller triangles) such that z >= z_clip_value. This avoids projections + that go to infinity as z->0 + """ + + __slots__ = [ + "left", + "right", + "top", + "bottom", + "znear", + "zfar", + "perspective_correct", + "cull", + "z_clip_value", + ] + + def __init__( + self, + left: Optional[float] = None, + right: Optional[float] = None, + top: Optional[float] = None, + bottom: Optional[float] = None, + znear: Optional[float] = None, + zfar: Optional[float] = None, + perspective_correct: bool = False, + cull: bool = True, + z_clip_value: Optional[float] = None, + ) -> None: + self.left = left + self.right = right + self.top = top + self.bottom = bottom + self.znear = znear + self.zfar = zfar + self.perspective_correct = perspective_correct + self.cull = cull + self.z_clip_value = z_clip_value + + +def _get_culled_faces(face_verts: torch.Tensor, frustum: ClipFrustum) -> torch.Tensor: + """ + Helper function used to find all the faces in Meshes which are + fully outside the view frustum. A face is culled if all 3 vertices are outside + the same axis of the view frustum. + + Args: + face_verts: An (F,3,3) tensor, where F is the number of faces in + the packed representation of Meshes. The 2nd dimension represents the 3 vertices + of a triangle, and the 3rd dimension stores the xyz locations of each + vertex. 
+ frustum: An instance of the ClipFrustum class with the information on the + position of the clipping planes. + + Returns: + faces_culled: An boolean tensor of size F specifying whether or not each face should be + culled. + """ + clipping_planes = ( + (frustum.left, 0, "<"), + (frustum.right, 0, ">"), + (frustum.top, 1, "<"), + (frustum.bottom, 1, ">"), + (frustum.znear, 2, "<"), + (frustum.zfar, 2, ">"), + ) + faces_culled = torch.zeros( + [face_verts.shape[0]], dtype=torch.bool, device=face_verts.device + ) + for plane in clipping_planes: + clip_value, axis, op = plane + # If clip_value is None then don't clip along that plane + if frustum.cull and clip_value is not None: + if op == "<": + verts_clipped = face_verts[:, axis] < clip_value + else: + verts_clipped = face_verts[:, axis] > clip_value + + # If all verts are clipped then face is outside the frustum + faces_culled |= verts_clipped.sum(1) == 3 + + return faces_culled + + +def _find_verts_intersecting_clipping_plane( + face_verts: torch.Tensor, + p1_face_ind: torch.Tensor, + clip_value: float, + perspective_correct: bool, +) -> Tuple[Tuple[Any, Any, Any, Any, Any], List[Any]]: + r""" + Helper function to find the vertices used to form a new triangle for case 3/case 4 faces. + + Given a list of triangles that are already known to intersect the clipping plane, + solve for the two vertices p4 and p5 where the edges of the triangle intersects the + clipping plane. + + p1 + /\ + / \ + / t \ + _____________p4/______\p5__________ clip_value + / \ + /____ \ + p2 ---____\p3 + + Args: + face_verts: An (F,3,3) tensor, where F is the number of faces in + the packed representation of the Meshes, the 2nd dimension represents + the 3 vertices of the face, and the 3rd dimension stores the xyz locations of each + vertex. The z-coordinates must be represented in world coordinates, while + the xy-coordinates may be in NDC/screen coordinates (i.e. after projection). 
+ p1_face_ind: A tensor of shape (N,) with values in the range of 0 to 2. In each + case 3/case 4 triangle, two vertices are on the same side of the + clipping plane and the 3rd is on the other side. p1_face_ind stores the index of + the vertex that is not on the same side as any other vertex in the triangle. + clip_value: Float, the z-value defining where to clip the triangle. + perspective_correct: Bool, Should be set to true if a perspective camera was + used and xy-coordinates of face_verts_unclipped are in NDC/screen coordinates. + + Returns: + A 2-tuple + p: (p1, p2, p3, p4, p5)) + p_barycentric (p1_bary, p2_bary, p3_bary, p4_bary, p5_bary) + + Each of p1...p5 is an (F,3) tensor of the xyz locations of the 5 points in the + diagram above for case 3/case 4 faces. Each p1_bary...p5_bary is an (F, 3) tensor + storing the barycentric weights used to encode p1...p5 in terms of the the original + unclipped triangle. + """ + + # Let T be number of triangles in face_verts (note that these correspond to the subset + # of case 1 or case 2 triangles). p1_face_ind, p2_face_ind, and p3_face_ind are (T) + # tensors with values in the range of 0 to 2. 
p1_face_ind stores the index of the + # vertex that is not on the same side as any other vertex in the triangle, and + # p2_face_ind and p3_face_ind are the indices of the other two vertices preserving + # the same counterclockwise or clockwise ordering + T = face_verts.shape[0] + p2_face_ind = torch.remainder(p1_face_ind + 1, 3) + p3_face_ind = torch.remainder(p1_face_ind + 2, 3) + + # p1, p2, p3 are (T, 3) tensors storing the corresponding (x, y, z) coordinates + # of p1_face_ind, p2_face_ind, p3_face_ind + p1 = face_verts.gather(1, p1_face_ind[:, None, None].expand(-1, -1, 3)).squeeze(1) + p2 = face_verts.gather(1, p2_face_ind[:, None, None].expand(-1, -1, 3)).squeeze(1) + p3 = face_verts.gather(1, p3_face_ind[:, None, None].expand(-1, -1, 3)).squeeze(1) + + ################################## + # Solve for intersection point p4 + ################################## + + # p4 is a (T, 3) tensor is the point on the segment between p1 and p2 that + # intersects the clipping plane. + # Solve for the weight w2 such that p1.z*(1-w2) + p2.z*w2 = clip_value. + # Then interpolate p4 = p1*(1-w2) + p2*w2 where it is assumed that z-coordinates + # are expressed in world coordinates (since we want to clip z in world coordinates). + w2 = (p1[:, 2] - clip_value) / (p1[:, 2] - p2[:, 2]) + p4 = p1 * (1 - w2[:, None]) + p2 * w2[:, None] + if perspective_correct: + # It is assumed that all z-coordinates are in world coordinates (not NDC + # coordinates), while x and y coordinates may be in NDC/screen coordinates. + # If x and y are in NDC/screen coordinates and a projective transform was used + # in a perspective camera, then we effectively want to: + # 1. Convert back to world coordinates (by multiplying by z) + # 2. Interpolate using w2 + # 3. 
Convert back to NDC/screen coordinates (by dividing by the new z=clip_value) + p1_world = p1[:, :2] * p1[:, 2:3] + p2_world = p2[:, :2] * p2[:, 2:3] + p4[:, :2] = (p1_world * (1 - w2[:, None]) + p2_world * w2[:, None]) / clip_value + + ################################## + # Solve for intersection point p5 + ################################## + + # p5 is a (T, 3) tensor representing the point on the segment between p1 and p3 that + # intersects the clipping plane. + # Solve for the weight w3 such that p1.z * (1-w3) + p2.z * w3 = clip_value, + # and then interpolate p5 = p1 * (1-w3) + p3 * w3 + w3 = (p1[:, 2] - clip_value) / (p1[:, 2] - p3[:, 2]) + w3 = w3.detach() + p5 = p1 * (1 - w3[:, None]) + p3 * w3[:, None] + if perspective_correct: + # Again if using a perspective camera, convert back to world coordinates + # interpolate and convert back + p1_world = p1[:, :2] * p1[:, 2:3] + p3_world = p3[:, :2] * p3[:, 2:3] + p5[:, :2] = (p1_world * (1 - w3[:, None]) + p3_world * w3[:, None]) / clip_value + + # Set the barycentric coordinates of p1,p2,p3,p4,p5 in terms of the original + # unclipped triangle in face_verts. + T_idx = torch.arange(T, device=face_verts.device) + p_barycentric = [torch.zeros((T, 3), device=face_verts.device) for i in range(5)] + p_barycentric[0][(T_idx, p1_face_ind)] = 1 + p_barycentric[1][(T_idx, p2_face_ind)] = 1 + p_barycentric[2][(T_idx, p3_face_ind)] = 1 + p_barycentric[3][(T_idx, p1_face_ind)] = 1 - w2 + p_barycentric[3][(T_idx, p2_face_ind)] = w2 + p_barycentric[4][(T_idx, p1_face_ind)] = 1 - w3 + p_barycentric[4][(T_idx, p3_face_ind)] = w3 + + p = (p1, p2, p3, p4, p5) + + return p, p_barycentric + + +################### +# Main Entry point +################### +def clip_faces( + face_verts_unclipped: torch.Tensor, + mesh_to_face_first_idx: torch.Tensor, + num_faces_per_mesh: torch.Tensor, + frustum: ClipFrustum, +) -> ClippedFaces: + """ + Clip a mesh to the portion contained within a view frustum and with z > z_clip_value. 
+ + There are two types of clipping: + 1) Cull triangles that are completely outside the view frustum. This is purely + to save computation by reducing the number of triangles that need to be + rasterized. + 2) Clip triangles into the portion of the triangle where z > z_clip_value. The + clipped region may be a quadrilateral, which results in splitting a triangle + into two triangles. This does not save computation, but is necessary to + correctly rasterize using perspective cameras for triangles that pass through + z <= 0, because NDC/screen coordinates go to infinity at z=0. + + Args: + face_verts_unclipped: An (F, 3, 3) tensor, where F is the number of faces in + the packed representation of Meshes, the 2nd dimension represents the 3 vertices + of the triangle, and the 3rd dimension stores the xyz locations of each + vertex. The z-coordinates must be represented in world coordinates, while + the xy-coordinates may be in NDC/screen coordinates + mesh_to_face_first_idx: an tensor of shape (N,), where N is the number of meshes + in the batch. The ith element stores the index into face_verts_unclipped + of the first face of the ith mesh. + num_faces_per_mesh: a tensor of shape (N,) storing the number of faces in each mesh. + frustum: a ClipFrustum object defining the frustum used to cull faces. + + Returns: + clipped_faces: ClippedFaces object storing a clipped version of the Meshes + along with tensors that can be used to convert barycentric coordinates + returned by rasterization of the clipped meshes into a barycentric + coordinates for the unclipped meshes. 
+ """ + F = face_verts_unclipped.shape[0] + device = face_verts_unclipped.device + + # Triangles completely outside the view frustum will be culled + # faces_culled is of shape (F, ) + faces_culled = _get_culled_faces(face_verts_unclipped, frustum) + + # Triangles that are partially behind the z clipping plane will be clipped to + # smaller triangles + z_clip_value = frustum.z_clip_value + perspective_correct = frustum.perspective_correct + if z_clip_value is not None: + # (F, 3) tensor (where F is the number of triangles) marking whether each vertex + # in a triangle is behind the clipping plane + faces_clipped_verts = face_verts_unclipped[:, :, 2] < z_clip_value + + # (F) dim tensor containing the number of clipped vertices in each triangle + faces_num_clipped_verts = faces_clipped_verts.sum(1) + else: + faces_num_clipped_verts = torch.zeros([F], device=device) + + # If no triangles need to be clipped or culled, avoid unnecessary computation + # and return early + if faces_num_clipped_verts.sum().item() == 0 and faces_culled.sum().item() == 0: + return ClippedFaces( + face_verts=face_verts_unclipped, + mesh_to_face_first_idx=mesh_to_face_first_idx, + num_faces_per_mesh=num_faces_per_mesh, + ) + + ##################################################################################### + # Classify faces into the 4 relevant cases: + # 1) The triangle is completely in front of the clipping plane (it is left + # unchanged) + # 2) The triangle is completely behind the clipping plane (it is culled) + # 3) The triangle has exactly two vertices behind the clipping plane (it is + # clipped into a smaller triangle) + # 4) The triangle has exactly one vertex behind the clipping plane (it is clipped + # into a smaller quadrilateral and split into two triangles) + ##################################################################################### + + faces_unculled = ~faces_culled + # Case 1: no clipped verts or culled faces + cases1_unclipped = (faces_num_clipped_verts == 0) 
& faces_unculled + case1_unclipped_idx = cases1_unclipped.nonzero(as_tuple=True)[0] + # Case 2: all verts clipped + case2_unclipped = (faces_num_clipped_verts == 3) | faces_culled + # Case 3: two verts clipped + case3_unclipped = (faces_num_clipped_verts == 2) & faces_unculled + case3_unclipped_idx = case3_unclipped.nonzero(as_tuple=True)[0] + # Case 4: one vert clipped + case4_unclipped = (faces_num_clipped_verts == 1) & faces_unculled + case4_unclipped_idx = case4_unclipped.nonzero(as_tuple=True)[0] + + # faces_unclipped_to_clipped_idx is an (F) dim tensor storing the index of each + # face to the corresponding face in face_verts_clipped. + # Each case 2 triangle will be culled (deleted from face_verts_clipped), + # while each case 4 triangle will be split into two smaller triangles + # (replaced by two consecutive triangles in face_verts_clipped) + + # case2_unclipped is an (F,) dim 0/1 tensor of all the case2 faces + # case4_unclipped is an (F,) dim 0/1 tensor of all the case4 faces + faces_delta = case4_unclipped.int() - case2_unclipped.int() + # faces_delta_cum gives the per face change in index. Faces which are + # clipped in the original mesh are mapped to the closest non clipped face + # in face_verts_clipped (this doesn't matter as they are not used + # during rasterization anyway). + faces_delta_cum = faces_delta.cumsum(0) - faces_delta + delta = 1 + case4_unclipped.int() - case2_unclipped.int() + faces_unclipped_to_clipped_idx = delta.cumsum(0) - delta + + ########################################### + # Allocate tensors for the output Meshes. + # These will then be filled in for each case. + ########################################### + F_clipped = ( + F + # pyre-fixme[58]: `+` is not supported for operand types `int` and + # `Union[bool, float, int]`. + + faces_delta_cum[-1].item() + # pyre-fixme[58]: `+` is not supported for operand types `int` and + # `Union[bool, float, int]`. 
+ + faces_delta[-1].item() + ) # Total number of faces in the new Meshes + face_verts_clipped = torch.zeros( + (F_clipped, 3, 3), dtype=face_verts_unclipped.dtype, device=device + ) + faces_clipped_to_unclipped_idx = torch.zeros( + [F_clipped], dtype=torch.int64, device=device + ) + + # Update version of mesh_to_face_first_idx and num_faces_per_mesh applicable to + # face_verts_clipped + mesh_to_face_first_idx_clipped = faces_unclipped_to_clipped_idx[ + mesh_to_face_first_idx + ] + F_clipped_t = torch.full([1], F_clipped, dtype=torch.int64, device=device) + num_faces_next = torch.cat((mesh_to_face_first_idx_clipped[1:], F_clipped_t)) + num_faces_per_mesh_clipped = num_faces_next - mesh_to_face_first_idx_clipped + + ################# Start Case 1 ######################################## + + # Case 1: Triangles are fully visible, copy unchanged triangles into the + # appropriate position in the new list of faces + case1_clipped_idx = faces_unclipped_to_clipped_idx[case1_unclipped_idx] + face_verts_clipped[case1_clipped_idx] = face_verts_unclipped[case1_unclipped_idx] + faces_clipped_to_unclipped_idx[case1_clipped_idx] = case1_unclipped_idx + + # If no triangles need to be clipped but some triangles were culled, avoid + # unnecessary clipping computation + if case3_unclipped_idx.shape[0] + case4_unclipped_idx.shape[0] == 0: + return ClippedFaces( + face_verts=face_verts_clipped, + mesh_to_face_first_idx=mesh_to_face_first_idx_clipped, + num_faces_per_mesh=num_faces_per_mesh_clipped, + faces_clipped_to_unclipped_idx=faces_clipped_to_unclipped_idx, + ) + + ################# End Case 1 ########################################## + + ################# Start Case 3 ######################################## + + # Case 3: exactly two vertices are behind the camera, clipping the triangle into a + # triangle. In the diagram below, we clip the bottom part of the triangle, and add + # new vertices p4 and p5 by intersecting with the clipping plane. 
The updated + # triangle is the triangle between p4, p1, p5 + # + # p1 (unclipped vertex) + # /\ + # / \ + # / t \ + # _____________p4/______\p5__________ clip_value + # xxxxxxxxxxxxxx/ \xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + # xxxxxxxxxxxxx/____ \xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + # xxxxxxxxxx p2 xxxx---____\p3 xxxxxxxxxxxxxxxxxxxxxxxxxxx + # xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx + faces_case3 = face_verts_unclipped[case3_unclipped_idx] + + # index (0, 1, or 2) of the vertex in front of the clipping plane + # pyre-fixme[61]: `faces_clipped_verts` is undefined, or not always defined. + p1_face_ind = torch.where(~faces_clipped_verts[case3_unclipped_idx])[1] + + # Solve for the points p4, p5 that intersect the clipping plane + p, p_barycentric = _find_verts_intersecting_clipping_plane( + faces_case3, p1_face_ind, z_clip_value, perspective_correct + ) + + p1, _, _, p4, p5 = p + p1_barycentric, _, _, p4_barycentric, p5_barycentric = p_barycentric + + # Store clipped triangle + case3_clipped_idx = faces_unclipped_to_clipped_idx[case3_unclipped_idx] + t_barycentric = torch.stack((p4_barycentric, p5_barycentric, p1_barycentric), 2) + face_verts_clipped[case3_clipped_idx] = torch.stack((p4, p5, p1), 1) + faces_clipped_to_unclipped_idx[case3_clipped_idx] = case3_unclipped_idx + + ################# End Case 3 ########################################## + + ################# Start Case 4 ######################################## + + # Case 4: exactly one vertex is behind the camera, clip the triangle into a + # quadrilateral. In the diagram below, we clip the bottom part of the triangle, + # and add new vertices p4 and p5 by intersecting with the cliiping plane. 
The + # unclipped region is a quadrilateral, which is split into two triangles: + # t1: p4, p2, p5 + # t2: p5, p2, p3 + # + # p3_____________________p2 + # \ __--/ + # \ t2 __-- / + # \ __-- t1 / + # ______________p5\__--_________/p4_________clip_value + # xxxxxxxxxxxxxxxxx\ /xxxxxxxxxxxxxxxxxx + # xxxxxxxxxxxxxxxxxx\ /xxxxxxxxxxxxxxxxxxx + # xxxxxxxxxxxxxxxxxxx\ /xxxxxxxxxxxxxxxxxxxx + # xxxxxxxxxxxxxxxxxxxx\ /xxxxxxxxxxxxxxxxxxxxx + # xxxxxxxxxxxxxxxxxxxxx\ /xxxxxxxxxxxxxxxxxxxxx + # xxxxxxxxxxxxxxxxxxxxxx\ /xxxxxxxxxxxxxxxxxxxxx + # p1 (clipped vertex) + + faces_case4 = face_verts_unclipped[case4_unclipped_idx] + + # index (0, 1, or 2) of the vertex behind the clipping plane + # pyre-fixme[61]: `faces_clipped_verts` is undefined, or not always defined. + p1_face_ind = torch.where(faces_clipped_verts[case4_unclipped_idx])[1] + + # Solve for the points p4, p5 that intersect the clipping plane + p, p_barycentric = _find_verts_intersecting_clipping_plane( + faces_case4, p1_face_ind, z_clip_value, perspective_correct + ) + _, p2, p3, p4, p5 = p + _, p2_barycentric, p3_barycentric, p4_barycentric, p5_barycentric = p_barycentric + + # Store clipped triangles + case4_clipped_idx = faces_unclipped_to_clipped_idx[case4_unclipped_idx] + face_verts_clipped[case4_clipped_idx] = torch.stack((p4, p2, p5), 1) + face_verts_clipped[case4_clipped_idx + 1] = torch.stack((p5, p2, p3), 1) + t1_barycentric = torch.stack((p4_barycentric, p2_barycentric, p5_barycentric), 2) + t2_barycentric = torch.stack((p5_barycentric, p2_barycentric, p3_barycentric), 2) + faces_clipped_to_unclipped_idx[case4_clipped_idx] = case4_unclipped_idx + faces_clipped_to_unclipped_idx[case4_clipped_idx + 1] = case4_unclipped_idx + + ##################### End Case 4 ######################### + + # Triangles that were clipped (case 3 & case 4) will require conversion of + # barycentric coordinates from being in terms of the smaller clipped triangle to in terms + # of the original big triangle. 
If there are T clipped triangles, + # barycentric_conversion is a (T, 3, 3) tensor, where barycentric_conversion[i, :, k] + # stores the barycentric weights in terms of the world coordinates of the original + # (big) triangle for the kth vertex in the clipped (small) triangle. If our + # rasterizer then expresses some NDC coordinate in terms of barycentric + # world coordinates for the clipped (small) triangle as alpha_clipped[i,:], + # alpha_unclipped[i, :] = barycentric_conversion[i, :, :]*alpha_clipped[i, :] + barycentric_conversion = torch.cat((t_barycentric, t1_barycentric, t2_barycentric)) + + # faces_clipped_to_conversion_idx is an (F_clipped,) shape tensor mapping each output + # face to the applicable row of barycentric_conversion (or set to -1 if conversion is + # not needed) + faces_to_convert_idx = torch.cat( + (case3_clipped_idx, case4_clipped_idx, case4_clipped_idx + 1), 0 + ) + barycentric_idx = torch.arange( + barycentric_conversion.shape[0], dtype=torch.int64, device=device + ) + faces_clipped_to_conversion_idx = torch.full( + [F_clipped], -1, dtype=torch.int64, device=device + ) + faces_clipped_to_conversion_idx[faces_to_convert_idx] = barycentric_idx + + # clipped_faces_neighbor_idx is an (F_clipped) dim tensor + # For case 4 clipped triangles (where a big triangle is split in two smaller triangles), + # store the index of the neighboring clipped triangle. + # This will be needed because if the soft rasterizer includes both + # triangles in the list of top K nearest triangles, we + # should only use the one with the smaller distance. 
+ clipped_faces_neighbor_idx = torch.full( + [F_clipped], -1, dtype=torch.int64, device=device + ) + clipped_faces_neighbor_idx[case4_clipped_idx] = case4_clipped_idx + 1 + clipped_faces_neighbor_idx[case4_clipped_idx + 1] = case4_clipped_idx + + clipped_faces = ClippedFaces( + face_verts=face_verts_clipped, + mesh_to_face_first_idx=mesh_to_face_first_idx_clipped, + num_faces_per_mesh=num_faces_per_mesh_clipped, + faces_clipped_to_unclipped_idx=faces_clipped_to_unclipped_idx, + barycentric_conversion=barycentric_conversion, + faces_clipped_to_conversion_idx=faces_clipped_to_conversion_idx, + clipped_faces_neighbor_idx=clipped_faces_neighbor_idx, + ) + return clipped_faces + + +def convert_clipped_rasterization_to_original_faces( + pix_to_face_clipped, bary_coords_clipped, clipped_faces: ClippedFaces +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Convert rasterization Fragments (expressed as pix_to_face_clipped, + bary_coords_clipped, dists_clipped) of clipped Meshes computed using clip_faces() + to the corresponding rasterization Fragments where barycentric coordinates and + face indices are in terms of the original unclipped Meshes. The distances are + handled in the rasterizer C++/CUDA kernels (i.e. for Cases 1/3 the distance + can be used directly and for Case 4 triangles the distance of the pixel to + the closest of the two subdivided triangles is used). + + Args: + pix_to_face_clipped: LongTensor of shape (N, image_size, image_size, + faces_per_pixel) giving the indices of the nearest faces at each pixel, + sorted in ascending z-order. Concretely + ``pix_to_face_clipped[n, y, x, k] = f`` means that ``faces_verts_clipped[f]`` + is the kth closest face (in the z-direction) to pixel (y, x). Pixels that + are hit by fewer than faces_per_pixel are padded with -1. 
+ bary_coords_clipped: FloatTensor of shape + (N, image_size, image_size, faces_per_pixel, 3) giving the barycentric + coordinates in world coordinates of the nearest faces at each pixel, sorted + in ascending z-order. Concretely, if ``pix_to_face_clipped[n, y, x, k] = f`` + then ``[w0, w1, w2] = bary_coords_clipped[n, y, x, k]`` gives the + barycentric coords for pixel (y, x) relative to the face defined by + ``unproject(face_verts_clipped[f])``. Pixels hit by fewer than + faces_per_pixel are padded with -1. + clipped_faces: an instance of ClippedFaces class giving the auxiliary variables + for converting rasterization outputs from clipped to unclipped Meshes. + + Returns: + 2-tuple: (pix_to_face_unclipped, bary_coords_unclipped) that + have the same definition as (pix_to_face_clipped, bary_coords_clipped) + except that they pertain to faces_verts_unclipped instead of + faces_verts_clipped (i.e the original meshes as opposed to the modified meshes) + """ + faces_clipped_to_unclipped_idx = clipped_faces.faces_clipped_to_unclipped_idx + + # If no clipping then return inputs + if ( + faces_clipped_to_unclipped_idx is None + or faces_clipped_to_unclipped_idx.numel() == 0 + ): + return pix_to_face_clipped, bary_coords_clipped + + device = pix_to_face_clipped.device + + # Convert pix_to_face indices to now refer to the faces in the unclipped Meshes. + # Init empty tensor to fill in all the background values which have pix_to_face=-1. + empty = torch.full(pix_to_face_clipped.shape, -1, device=device, dtype=torch.int64) + pix_to_face_unclipped = torch.where( + pix_to_face_clipped != -1, + faces_clipped_to_unclipped_idx[pix_to_face_clipped], + empty, + ) + + # For triangles that were clipped into smaller triangle(s), convert barycentric + # coordinates from being in terms of the clipped triangle to being in terms of the + # original unclipped triangle. 
+ + # barycentric_conversion is a (T, 3, 3) tensor such that + # alpha_unclipped[i, :] = barycentric_conversion[i, :, :]*alpha_clipped[i, :] + barycentric_conversion = clipped_faces.barycentric_conversion + + # faces_clipped_to_conversion_idx is an (F_clipped,) shape tensor mapping each output + # face to the applicable row of barycentric_conversion (or set to -1 if conversion is + # not needed) + faces_clipped_to_conversion_idx = clipped_faces.faces_clipped_to_conversion_idx + + if barycentric_conversion is not None: + bary_coords_unclipped = bary_coords_clipped.clone() + + # Select the subset of faces that require conversion, where N is the sum + # number of case3/case4 triangles that are in the closest k triangles to some + # rasterized pixel. + pix_to_conversion_idx = torch.where( + pix_to_face_clipped != -1, + faces_clipped_to_conversion_idx[pix_to_face_clipped], + empty, + ) + faces_to_convert_mask = pix_to_conversion_idx != -1 + N = faces_to_convert_mask.sum().item() + + # Expand to (N, H, W, K, 3) to be the same shape as barycentric coordinates + faces_to_convert_mask_expanded = faces_to_convert_mask[:, :, :, :, None].expand( + -1, -1, -1, -1, 3 + ) + + # An (N,) dim tensor of indices into barycentric_conversion + conversion_idx_subset = pix_to_conversion_idx[faces_to_convert_mask] + + # An (N, 3, 1) tensor of barycentric coordinates in terms of the clipped triangles + bary_coords_clipped_subset = bary_coords_clipped[faces_to_convert_mask_expanded] + bary_coords_clipped_subset = bary_coords_clipped_subset.reshape((N, 3, 1)) + + # An (N, 3, 3) tensor storing matrices to convert from clipped to unclipped + # barycentric coordinates + bary_conversion_subset = barycentric_conversion[conversion_idx_subset] + + # An (N, 3, 1) tensor of barycentric coordinates in terms of the unclipped triangle + bary_coords_unclipped_subset = bary_conversion_subset.bmm( + bary_coords_clipped_subset + ) + + bary_coords_unclipped_subset = bary_coords_unclipped_subset.reshape([N * 
3]) + bary_coords_unclipped[faces_to_convert_mask_expanded] = ( + bary_coords_unclipped_subset + ) + + # dists for case 4 faces will be handled in the rasterizer + # so no need to modify them here. + else: + bary_coords_unclipped = bary_coords_clipped + + return pix_to_face_unclipped, bary_coords_unclipped diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/rasterize_meshes.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/rasterize_meshes.py new file mode 100644 index 0000000000000000000000000000000000000000..88c28fd8b064078baad265a0b65ea3a29caee80b --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/rasterize_meshes.py @@ -0,0 +1,767 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + + +from typing import List, Optional, Tuple, Union + +import numpy as np +import torch +from pytorch3d import _C + +from ..utils import parse_image_size + +from .clip import ( + clip_faces, + ClipFrustum, + convert_clipped_rasterization_to_original_faces, +) + + +# TODO make the epsilon user configurable +kEpsilon = 1e-8 + +# Maximum number of faces per bins for +# coarse-to-fine rasterization +kMaxFacesPerBin = 22 + + +def rasterize_meshes( + meshes, + image_size: Union[int, List[int], Tuple[int, int]] = 256, + blur_radius: float = 0.0, + faces_per_pixel: int = 8, + bin_size: Optional[int] = None, + max_faces_per_bin: Optional[int] = None, + perspective_correct: bool = False, + clip_barycentric_coords: bool = False, + cull_backfaces: bool = False, + z_clip_value: Optional[float] = None, + cull_to_frustum: bool = False, +): + """ + Rasterize a batch of meshes given the shape of the desired output image. 
+ Each mesh is rasterized onto a separate image of shape + (H, W) if `image_size` is a tuple or (image_size, image_size) if it + is an int. + + If the desired image size is non square (i.e. a tuple of (H, W) where H != W) + the aspect ratio needs special consideration. There are two aspect ratios + to be aware of: + - the aspect ratio of each pixel + - the aspect ratio of the output image + The camera can be used to set the pixel aspect ratio. In the rasterizer, + we assume square pixels, but variable image aspect ratio (i.e rectangle images). + + In most cases you will want to set the camera aspect ratio to + 1.0 (i.e. square pixels) and only vary the + `image_size` (i.e. the output image dimensions in pixels). + + Args: + meshes: A Meshes object representing a batch of meshes, batch size N. + image_size: Size in pixels of the output image to be rasterized. + Can optionally be a tuple of (H, W) in the case of non square images. + blur_radius: Float distance in the range [0, 2] used to expand the face + bounding boxes for rasterization. Setting blur radius + results in blurred edges around the shape instead of a + hard boundary. Set to 0 for no blur. + faces_per_pixel (Optional): Number of faces to save per pixel, returning + the nearest faces_per_pixel points along the z-axis. + bin_size: Size of bins to use for coarse-to-fine rasterization. Setting + bin_size=0 uses naive rasterization; setting bin_size=None attempts to + set it heuristically based on the shape of the input. This should not + affect the output, but can affect the speed of the forward pass. + max_faces_per_bin: Only applicable when using coarse-to-fine rasterization + (bin_size > 0); this is the maximum number of faces allowed within each + bin. This should not affect the output values, but can affect + the memory usage in the forward pass. + perspective_correct: Bool, Whether to apply perspective correction when computing + barycentric coordinates for pixels. 
This should be set to True if a perspective + camera is used. + clip_barycentric_coords: Whether, after any perspective correction is applied + but before the depth is calculated (e.g. for z clipping), + to "correct" a location outside the face (i.e. with a negative + barycentric coordinate) to a position on the edge of the face. + cull_backfaces: Bool, Whether to only rasterize mesh faces which are + visible to the camera. This assumes that vertices of + front-facing triangles are ordered in an anti-clockwise + fashion, and triangles that face away from the camera are + in a clockwise order relative to the current view + direction. NOTE: This will only work if the mesh faces are + consistently defined with counter-clockwise ordering when + viewed from the outside. + z_clip_value: if not None, then triangles will be clipped (and possibly + subdivided into smaller triangles) such that z >= z_clip_value. + This avoids camera projections that go to infinity as z->0. + Default is None as clipping affects rasterization speed and + should only be turned on if explicitly needed. + See clip.py for all the extra computation that is required. + cull_to_frustum: if True, triangles outside the view frustum will be culled. + Culling involves removing all faces which fall outside view frustum. + Default is False so that it is turned on only when needed. + + Returns: + 4-element tuple containing + + - **pix_to_face**: LongTensor of shape + (N, image_size, image_size, faces_per_pixel) + giving the indices of the nearest faces at each pixel, + sorted in ascending z-order. + Concretely ``pix_to_face[n, y, x, k] = f`` means that + ``faces_verts[f]`` is the kth closest face (in the z-direction) + to pixel (y, x). Pixels that are hit by fewer than + faces_per_pixel are padded with -1. + - **zbuf**: FloatTensor of shape (N, image_size, image_size, faces_per_pixel) + giving the NDC z-coordinates of the nearest faces at each pixel, + sorted in ascending z-order. 
+ Concretely, if ``pix_to_face[n, y, x, k] = f`` then + ``zbuf[n, y, x, k] = face_verts[f, 2]``. Pixels hit by fewer than + faces_per_pixel are padded with -1. + - **barycentric**: FloatTensor of shape + (N, image_size, image_size, faces_per_pixel, 3) + giving the barycentric coordinates in NDC units of the + nearest faces at each pixel, sorted in ascending z-order. + Concretely, if ``pix_to_face[n, y, x, k] = f`` then + ``[w0, w1, w2] = barycentric[n, y, x, k]`` gives + the barycentric coords for pixel (y, x) relative to the face + defined by ``face_verts[f]``. Pixels hit by fewer than + faces_per_pixel are padded with -1. + - **pix_dists**: FloatTensor of shape + (N, image_size, image_size, faces_per_pixel) + giving the signed Euclidean distance (in NDC units) in the + x/y plane of each point closest to the pixel. Concretely if + ``pix_to_face[n, y, x, k] = f`` then ``pix_dists[n, y, x, k]`` is the + squared distance between the pixel (y, x) and the face given + by vertices ``face_verts[f]``. Pixels hit with fewer than + ``faces_per_pixel`` are padded with -1. + + In the case that image_size is a tuple of (H, W) then the outputs + will be of shape `(N, H, W, ...)`. + """ + verts_packed = meshes.verts_packed() + faces_packed = meshes.faces_packed() + face_verts = verts_packed[faces_packed] + mesh_to_face_first_idx = meshes.mesh_to_faces_packed_first_idx() + num_faces_per_mesh = meshes.num_faces_per_mesh() + + # In the case that H != W use the max image size to set the bin_size + # to accommodate the num bins constraint in the coarse rasterizer. + # If the ratio of H:W is large this might cause issues as the smaller + # dimension will have fewer bins. + # TODO: consider a better way of setting the bin size. 
+ im_size = parse_image_size(image_size) + max_image_size = max(*im_size) + + clipped_faces_neighbor_idx = None + + if z_clip_value is not None or cull_to_frustum: + # Cull faces outside the view frustum, and clip faces that are partially + # behind the camera into the portion of the triangle in front of the + # camera. This may change the number of faces + frustum = ClipFrustum( + left=-1, + right=1, + top=-1, + bottom=1, + perspective_correct=perspective_correct, + z_clip_value=z_clip_value, + cull=cull_to_frustum, + ) + clipped_faces = clip_faces( + face_verts, mesh_to_face_first_idx, num_faces_per_mesh, frustum=frustum + ) + face_verts = clipped_faces.face_verts + mesh_to_face_first_idx = clipped_faces.mesh_to_face_first_idx + num_faces_per_mesh = clipped_faces.num_faces_per_mesh + + # For case 4 clipped triangles (where a big triangle is split in two smaller triangles), + # need the index of the neighboring clipped triangle as only one can be in + # in the top K closest faces in the rasterization step. + clipped_faces_neighbor_idx = clipped_faces.clipped_faces_neighbor_idx + + if clipped_faces_neighbor_idx is None: + # Set to the default which is all -1s. + clipped_faces_neighbor_idx = torch.full( + size=(face_verts.shape[0],), + fill_value=-1, + device=meshes.device, + dtype=torch.int64, + ) + + # TODO: Choose naive vs coarse-to-fine based on mesh size and image size. + if bin_size is None: + if not verts_packed.is_cuda: + # Binned CPU rasterization is not supported. + bin_size = 0 + else: + # TODO better heuristics for bin size. 
+ if max_image_size <= 64: + bin_size = 8 + else: + # Heuristic based formula maps max_image_size -> bin_size as follows: + # max_image_size < 64 -> 8 + # 16 < max_image_size < 256 -> 16 + # 256 < max_image_size < 512 -> 32 + # 512 < max_image_size < 1024 -> 64 + # 1024 < max_image_size < 2048 -> 128 + bin_size = int(2 ** max(np.ceil(np.log2(max_image_size)) - 4, 4)) + + if bin_size != 0: + # There is a limit on the number of faces per bin in the cuda kernel. + faces_per_bin = 1 + (max_image_size - 1) // bin_size + if faces_per_bin >= kMaxFacesPerBin: + raise ValueError( + "bin_size too small, number of faces per bin must be less than %d; got %d" + % (kMaxFacesPerBin, faces_per_bin) + ) + + if max_faces_per_bin is None: + max_faces_per_bin = int(max(10000, meshes._F / 5)) + + pix_to_face, zbuf, barycentric_coords, dists = _RasterizeFaceVerts.apply( + face_verts, + mesh_to_face_first_idx, + num_faces_per_mesh, + clipped_faces_neighbor_idx, + im_size, + blur_radius, + faces_per_pixel, + bin_size, + max_faces_per_bin, + perspective_correct, + clip_barycentric_coords, + cull_backfaces, + ) + + if z_clip_value is not None or cull_to_frustum: + # If faces were clipped, map the rasterization result to be in terms of the + # original unclipped faces. This may involve converting barycentric + # coordinates + outputs = convert_clipped_rasterization_to_original_faces( + pix_to_face, + barycentric_coords, + # pyre-fixme[61]: `clipped_faces` may not be initialized here. + clipped_faces, + ) + pix_to_face, barycentric_coords = outputs + + return pix_to_face, zbuf, barycentric_coords, dists + + +class _RasterizeFaceVerts(torch.autograd.Function): + """ + Torch autograd wrapper for forward and backward pass of rasterize_meshes + implemented in C++/CUDA. + + Args: + face_verts: Tensor of shape (F, 3, 3) giving (packed) vertex positions + for faces in all the meshes in the batch. Concretely, + face_verts[f, i] = [x, y, z] gives the coordinates for the + ith vertex of the fth face. 
These vertices are expected to + be in NDC coordinates in the range [-1, 1]. + mesh_to_face_first_idx: LongTensor of shape (N) giving the index in + faces_verts of the first face in each mesh in + the batch. + num_faces_per_mesh: LongTensor of shape (N) giving the number of faces + for each mesh in the batch. + image_size, blur_radius, faces_per_pixel: same as rasterize_meshes. + perspective_correct: same as rasterize_meshes. + cull_backfaces: same as rasterize_meshes. + + Returns: + same as rasterize_meshes function. + """ + + @staticmethod + def forward( + ctx, + face_verts: torch.Tensor, + mesh_to_face_first_idx: torch.Tensor, + num_faces_per_mesh: torch.Tensor, + clipped_faces_neighbor_idx: torch.Tensor, + image_size: Union[List[int], Tuple[int, int]] = (256, 256), + blur_radius: float = 0.01, + faces_per_pixel: int = 0, + bin_size: int = 0, + max_faces_per_bin: int = 0, + perspective_correct: bool = False, + clip_barycentric_coords: bool = False, + cull_backfaces: bool = False, + z_clip_value: Optional[float] = None, + cull_to_frustum: bool = True, + ): + # pyre-fixme[16]: Module `pytorch3d` has no attribute `_C`. 
+ pix_to_face, zbuf, barycentric_coords, dists = _C.rasterize_meshes( + face_verts, + mesh_to_face_first_idx, + num_faces_per_mesh, + clipped_faces_neighbor_idx, + image_size, + blur_radius, + faces_per_pixel, + bin_size, + max_faces_per_bin, + perspective_correct, + clip_barycentric_coords, + cull_backfaces, + ) + + ctx.save_for_backward(face_verts, pix_to_face) + ctx.mark_non_differentiable(pix_to_face) + ctx.perspective_correct = perspective_correct + ctx.clip_barycentric_coords = clip_barycentric_coords + return pix_to_face, zbuf, barycentric_coords, dists + + @staticmethod + def backward(ctx, grad_pix_to_face, grad_zbuf, grad_barycentric_coords, grad_dists): + grad_face_verts = None + grad_mesh_to_face_first_idx = None + grad_num_faces_per_mesh = None + grad_clipped_faces_neighbor_idx = None + grad_image_size = None + grad_radius = None + grad_faces_per_pixel = None + grad_bin_size = None + grad_max_faces_per_bin = None + grad_perspective_correct = None + grad_clip_barycentric_coords = None + grad_cull_backfaces = None + face_verts, pix_to_face = ctx.saved_tensors + grad_face_verts = _C.rasterize_meshes_backward( + face_verts, + pix_to_face, + grad_zbuf, + grad_barycentric_coords, + grad_dists, + ctx.perspective_correct, + ctx.clip_barycentric_coords, + ) + grads = ( + grad_face_verts, + grad_mesh_to_face_first_idx, + grad_num_faces_per_mesh, + grad_clipped_faces_neighbor_idx, + grad_image_size, + grad_radius, + grad_faces_per_pixel, + grad_bin_size, + grad_max_faces_per_bin, + grad_perspective_correct, + grad_clip_barycentric_coords, + grad_cull_backfaces, + ) + return grads + + +def non_square_ndc_range(S1, S2): + """ + In the case of non square images, we scale the NDC range + to maintain the aspect ratio. The smaller dimension has NDC + range of 2.0. 
+ + Args: + S1: dimension along with the NDC range is needed + S2: the other image dimension + + Returns: + ndc_range: NDC range for dimension S1 + """ + ndc_range = 2.0 + if S1 > S2: + ndc_range = (S1 / S2) * ndc_range + return ndc_range + + +def pix_to_non_square_ndc(i, S1, S2): + """ + The default value of the NDC range is [-1, 1]. + However in the case of non square images, we scale the NDC range + to maintain the aspect ratio. The smaller dimension has NDC + range from [-1, 1] and the other dimension is scaled by + the ratio of H:W. + e.g. for image size (H, W) = (64, 128) + Height NDC range: [-1, 1] + Width NDC range: [-2, 2] + + Args: + i: pixel position on axes S1 + S1: dimension along with i is given + S2: the other image dimension + + Returns: + pixel: NDC coordinate of point i for dimension S1 + """ + # NDC: x-offset + (i * pixel_width + half_pixel_width) + ndc_range = non_square_ndc_range(S1, S2) + offset = ndc_range / 2.0 + return -offset + (ndc_range * i + offset) / S1 + + +def rasterize_meshes_python( # noqa: C901 + meshes, + image_size: Union[int, Tuple[int, int]] = 256, + blur_radius: float = 0.0, + faces_per_pixel: int = 8, + perspective_correct: bool = False, + clip_barycentric_coords: bool = False, + cull_backfaces: bool = False, + z_clip_value: Optional[float] = None, + cull_to_frustum: bool = True, + clipped_faces_neighbor_idx: Optional[torch.Tensor] = None, +): + """ + Naive PyTorch implementation of mesh rasterization with the same inputs and + outputs as the rasterize_meshes function. + + This function is not optimized and is implemented as a comparison for the + C++/CUDA implementations. 
+ """ + N = len(meshes) + H, W = image_size if isinstance(image_size, tuple) else (image_size, image_size) + + K = faces_per_pixel + device = meshes.device + + verts_packed = meshes.verts_packed() + faces_packed = meshes.faces_packed() + faces_verts = verts_packed[faces_packed] + mesh_to_face_first_idx = meshes.mesh_to_faces_packed_first_idx() + num_faces_per_mesh = meshes.num_faces_per_mesh() + + if z_clip_value is not None or cull_to_frustum: + # Cull faces outside the view frustum, and clip faces that are partially + # behind the camera into the portion of the triangle in front of the + # camera. This may change the number of faces + frustum = ClipFrustum( + left=-1, + right=1, + top=-1, + bottom=1, + perspective_correct=perspective_correct, + z_clip_value=z_clip_value, + cull=cull_to_frustum, + ) + clipped_faces = clip_faces( + faces_verts, mesh_to_face_first_idx, num_faces_per_mesh, frustum=frustum + ) + faces_verts = clipped_faces.face_verts + mesh_to_face_first_idx = clipped_faces.mesh_to_face_first_idx + num_faces_per_mesh = clipped_faces.num_faces_per_mesh + + # Initialize output tensors. + face_idxs = torch.full( + (N, H, W, K), fill_value=-1, dtype=torch.int64, device=device + ) + zbuf = torch.full((N, H, W, K), fill_value=-1, dtype=torch.float32, device=device) + bary_coords = torch.full( + (N, H, W, K, 3), fill_value=-1, dtype=torch.float32, device=device + ) + pix_dists = torch.full( + (N, H, W, K), fill_value=-1, dtype=torch.float32, device=device + ) + + # Calculate all face bounding boxes. + x_mins = torch.min(faces_verts[:, :, 0], dim=1, keepdim=True).values + x_maxs = torch.max(faces_verts[:, :, 0], dim=1, keepdim=True).values + y_mins = torch.min(faces_verts[:, :, 1], dim=1, keepdim=True).values + y_maxs = torch.max(faces_verts[:, :, 1], dim=1, keepdim=True).values + z_mins = torch.min(faces_verts[:, :, 2], dim=1, keepdim=True).values + + # Expand by blur radius. 
+ x_mins = x_mins - np.sqrt(blur_radius) - kEpsilon + x_maxs = x_maxs + np.sqrt(blur_radius) + kEpsilon + y_mins = y_mins - np.sqrt(blur_radius) - kEpsilon + y_maxs = y_maxs + np.sqrt(blur_radius) + kEpsilon + + # Loop through meshes in the batch. + for n in range(N): + face_start_idx = mesh_to_face_first_idx[n] + face_stop_idx = face_start_idx + num_faces_per_mesh[n] + + # Iterate through the horizontal lines of the image from top to bottom. + for yi in range(H): + # Y coordinate of one end of the image. Reverse the ordering + # of yi so that +Y is pointing up in the image. + yfix = H - 1 - yi + yf = pix_to_non_square_ndc(yfix, H, W) + + # Iterate through pixels on this horizontal line, left to right. + for xi in range(W): + # X coordinate of one end of the image. Reverse the ordering + # of xi so that +X is pointing to the left in the image. + xfix = W - 1 - xi + xf = pix_to_non_square_ndc(xfix, W, H) + top_k_points = [] + + # Check whether each face in the mesh affects this pixel. + for f in range(face_start_idx, face_stop_idx): + face = faces_verts[f].squeeze() + v0, v1, v2 = face.unbind(0) + + face_area = edge_function(v0, v1, v2) + + # Ignore triangles facing away from the camera. + back_face = face_area < 0 + if cull_backfaces and back_face: + continue + + # Ignore faces which have zero area. + if face_area == 0.0: + continue + + outside_bbox = ( + xf < x_mins[f] + or xf > x_maxs[f] + or yf < y_mins[f] + or yf > y_maxs[f] + ) + + # Faces with at least one vertex behind the camera won't + # render correctly and should be removed or clipped before + # calling the rasterizer + if z_mins[f] < kEpsilon: + continue + + # Check if pixel is outside of face bbox. + if outside_bbox: + continue + + # Compute barycentric coordinates and pixel z distance. 
+ pxy = torch.tensor([xf, yf], dtype=torch.float32, device=device) + + bary = barycentric_coordinates(pxy, v0[:2], v1[:2], v2[:2]) + if perspective_correct: + z0, z1, z2 = v0[2], v1[2], v2[2] + l0, l1, l2 = bary[0], bary[1], bary[2] + top0 = l0 * z1 * z2 + top1 = z0 * l1 * z2 + top2 = z0 * z1 * l2 + bot = top0 + top1 + top2 + bary = torch.stack([top0 / bot, top1 / bot, top2 / bot]) + + # Check if inside before clipping + inside = all(x > 0.0 for x in bary) + + # Barycentric clipping + if clip_barycentric_coords: + bary = barycentric_coordinates_clip(bary) + # use clipped barycentric coords to calculate the z value + pz = bary[0] * v0[2] + bary[1] * v1[2] + bary[2] * v2[2] + + # Check if point is behind the image. + if pz < 0: + continue + + # Calculate signed 2D distance from point to face. + # Points inside the triangle have negative distance. + dist = point_triangle_distance(pxy, v0[:2], v1[:2], v2[:2]) + + # Add an epsilon to prevent errors when comparing distance + # to blur radius. + if not inside and dist >= blur_radius: + continue + + # Handle the case where a face (f) partially behind the image plane is + # clipped to a quadrilateral and then split into two faces (t1, t2). + top_k_idx = -1 + if ( + clipped_faces_neighbor_idx is not None + and clipped_faces_neighbor_idx[f] != -1 + ): + neighbor_idx = clipped_faces_neighbor_idx[f] + # See if neighbor_idx is in top_k and find index + top_k_idx = [ + i + for i, val in enumerate(top_k_points) + if val[1] == neighbor_idx + ] + top_k_idx = top_k_idx[0] if len(top_k_idx) > 0 else -1 + + if top_k_idx != -1 and dist < top_k_points[top_k_idx][3]: + # Overwrite the neighbor with current face info + top_k_points[top_k_idx] = (pz, f, bary, dist, inside) + else: + # Handle as a normal face + top_k_points.append((pz, f, bary, dist, inside)) + + top_k_points.sort() + if len(top_k_points) > K: + top_k_points = top_k_points[:K] + + # Save to output tensors. 
+ for k, (pz, f, bary, dist, inside) in enumerate(top_k_points): + zbuf[n, yi, xi, k] = pz + face_idxs[n, yi, xi, k] = f + bary_coords[n, yi, xi, k, 0] = bary[0] + bary_coords[n, yi, xi, k, 1] = bary[1] + bary_coords[n, yi, xi, k, 2] = bary[2] + # Write the signed distance + pix_dists[n, yi, xi, k] = -dist if inside else dist + + if z_clip_value is not None or cull_to_frustum: + # If faces were clipped, map the rasterization result to be in terms of the + # original unclipped faces. This may involve converting barycentric + # coordinates + ( + face_idxs, + bary_coords, + ) = convert_clipped_rasterization_to_original_faces( + face_idxs, + bary_coords, + # pyre-fixme[61]: `clipped_faces` may not be initialized here. + clipped_faces, + ) + + return face_idxs, zbuf, bary_coords, pix_dists + + +def edge_function(p, v0, v1): + r""" + Determines whether a point p is on the right side of a 2D line segment + given by the end points v0, v1. + + Args: + p: (x, y) Coordinates of a point. + v0, v1: (x, y) Coordinates of the end points of the edge. + + Returns: + area: The signed area of the parallelogram given by the vectors + + .. code-block:: python + + B = p - v0 + A = v1 - v0 + + v1 ________ + /\ / + A / \ / + / \ / + v0 /______\/ + B p + + The area can also be interpreted as the cross product A x B. + If the sign of the area is positive, the point p is on the + right side of the edge. Negative area indicates the point is on + the left side of the edge. i.e. for an edge v1 - v0 + + .. code-block:: python + + v1 + / + / + - / + + / + / + v0 + """ + return (p[0] - v0[0]) * (v1[1] - v0[1]) - (p[1] - v0[1]) * (v1[0] - v0[0]) + + +def barycentric_coordinates_clip(bary): + """ + Clip negative barycentric coordinates to 0.0 and renormalize so + the barycentric coordinates for a point sum to 1. When the blur_radius + is greater than 0, a face will still be recorded as overlapping a pixel + if the pixel is outside the face. 
In this case at least one of the + barycentric coordinates for the pixel relative to the face will be negative. + Clipping will ensure that the texture and z buffer are interpolated correctly. + + Args: + bary: tuple of barycentric coordinates + + Returns + bary_clip: (w0, w1, w2) barycentric coordinates with no negative values. + """ + # Only negative values are clamped to 0.0. + w0_clip = torch.clamp(bary[0], min=0.0) + w1_clip = torch.clamp(bary[1], min=0.0) + w2_clip = torch.clamp(bary[2], min=0.0) + bary_sum = torch.clamp(w0_clip + w1_clip + w2_clip, min=1e-5) + w0_clip = w0_clip / bary_sum + w1_clip = w1_clip / bary_sum + w2_clip = w2_clip / bary_sum + + return (w0_clip, w1_clip, w2_clip) + + +def barycentric_coordinates(p, v0, v1, v2): + """ + Compute the barycentric coordinates of a point relative to a triangle. + + Args: + p: Coordinates of a point. + v0, v1, v2: Coordinates of the triangle vertices. + + Returns + bary: (w0, w1, w2) barycentric coordinates in the range [0, 1]. + """ + area = edge_function(v2, v0, v1) + kEpsilon # 2 x face area. + w0 = edge_function(p, v1, v2) / area + w1 = edge_function(p, v2, v0) / area + w2 = edge_function(p, v0, v1) / area + return (w0, w1, w2) + + +def point_line_distance(p, v0, v1): + """ + Return minimum distance between line segment (v1 - v0) and point p. + + Args: + p: Coordinates of a point. + v0, v1: Coordinates of the end points of the line segment. + + Returns: + non-square distance to the boundary of the triangle. + + Consider the line extending the segment - this can be parameterized as + ``v0 + t (v1 - v0)``. + + First find the projection of point p onto the line. It falls where + ``t = [(p - v0) . (v1 - v0)] / |v1 - v0|^2`` + where . is the dot product. + + The parameter t is clamped from [0, 1] to handle points outside the + segment (v1 - v0). + + Once the projection of the point on the segment is known, the distance from + p to the projection gives the minimum distance to the segment. 
+ """ + if p.shape != v0.shape != v1.shape: + raise ValueError("All points must have the same number of coordinates") + + v1v0 = v1 - v0 + l2 = v1v0.dot(v1v0) # |v1 - v0|^2 + if l2 <= kEpsilon: + return (p - v1).dot(p - v1) # v0 == v1 + + t = v1v0.dot(p - v0) / l2 + t = torch.clamp(t, min=0.0, max=1.0) + p_proj = v0 + t * v1v0 + delta_p = p_proj - p + return delta_p.dot(delta_p) + + +def point_triangle_distance(p, v0, v1, v2): + """ + Return shortest distance between a point and a triangle. + + Args: + p: Coordinates of a point. + v0, v1, v2: Coordinates of the three triangle vertices. + + Returns: + shortest absolute distance from the point to the triangle. + """ + if p.shape != v0.shape != v1.shape != v2.shape: + raise ValueError("All points must have the same number of coordinates") + + e01_dist = point_line_distance(p, v0, v1) + e02_dist = point_line_distance(p, v0, v2) + e12_dist = point_line_distance(p, v1, v2) + edge_dists_min = torch.min(torch.min(e01_dist, e02_dist), e12_dist) + + return edge_dists_min diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/rasterizer.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/rasterizer.py new file mode 100644 index 0000000000000000000000000000000000000000..0e5c9f4dd00e96258286c56e28e48585a16a7ab7 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/rasterizer.py @@ -0,0 +1,273 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 

# pyre-unsafe

from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn
from pytorch3d.renderer.cameras import try_get_projection_transform

from .rasterize_meshes import rasterize_meshes


@dataclass(frozen=True)
class Fragments:
    """
    A dataclass representing the outputs of a rasterizer. Can be detached from the
    computational graph in order to stop the gradients from flowing through the
    rasterizer.

    Members:
        pix_to_face:
            LongTensor of shape (N, image_size, image_size, faces_per_pixel) giving
            the indices of the nearest faces at each pixel, sorted in ascending
            z-order. Concretely ``pix_to_face[n, y, x, k] = f`` means that
            ``faces_verts[f]`` is the kth closest face (in the z-direction) to pixel
            (y, x). Pixels that are hit by fewer than faces_per_pixel are padded with
            -1.

        zbuf:
            FloatTensor of shape (N, image_size, image_size, faces_per_pixel) giving
            the NDC z-coordinates of the nearest faces at each pixel, sorted in
            ascending z-order. Concretely, if ``pix_to_face[n, y, x, k] = f`` then
            ``zbuf[n, y, x, k] = face_verts[f, 2]``. Pixels hit by fewer than
            faces_per_pixel are padded with -1.

        bary_coords:
            FloatTensor of shape (N, image_size, image_size, faces_per_pixel, 3)
            giving the barycentric coordinates in NDC units of the nearest faces at
            each pixel, sorted in ascending z-order. Concretely, if ``pix_to_face[n,
            y, x, k] = f`` then ``[w0, w1, w2] = barycentric[n, y, x, k]`` gives the
            barycentric coords for pixel (y, x) relative to the face defined by
            ``face_verts[f]``. Pixels hit by fewer than faces_per_pixel are padded
            with -1.

        dists:
            FloatTensor of shape (N, image_size, image_size, faces_per_pixel) giving
            the signed Euclidean distance (in NDC units) in the x/y plane of each
            point closest to the pixel. Concretely if ``pix_to_face[n, y, x, k] = f``
            then ``pix_dists[n, y, x, k]`` is the squared distance between the pixel
            (y, x) and the face given by vertices ``face_verts[f]``. Pixels hit with
            fewer than ``faces_per_pixel`` are padded with -1.
    """

    # pix_to_face holds integer indices and carries no gradients, so detach()
    # below only detaches the float tensors.
    pix_to_face: torch.Tensor
    zbuf: torch.Tensor
    bary_coords: torch.Tensor
    dists: Optional[torch.Tensor]

    def detach(self) -> "Fragments":
        # Returns a new Fragments whose float members are detached; dists may
        # legitimately be None (e.g. a rasterizer that does not compute it).
        return Fragments(
            pix_to_face=self.pix_to_face,
            zbuf=self.zbuf.detach(),
            bary_coords=self.bary_coords.detach(),
            dists=self.dists.detach() if self.dists is not None else self.dists,
        )


@dataclass
class RasterizationSettings:
    """
    Class to store the mesh rasterization params with defaults

    Members:
        image_size: Either common height and width or (height, width), in pixels.
        blur_radius: Float distance in the range [0, 2] used to expand the face
            bounding boxes for rasterization. Setting blur radius
            results in blurred edges around the shape instead of a
            hard boundary. Set to 0 for no blur.
        faces_per_pixel: (int) Number of faces to keep track of per pixel.
            We return the nearest faces_per_pixel faces along the z-axis.
        bin_size: Size of bins to use for coarse-to-fine rasterization. Setting
            bin_size=0 uses naive rasterization; setting bin_size=None attempts
            to set it heuristically based on the shape of the input. This should
            not affect the output, but can affect the speed of the forward pass.
        max_faces_opengl: Max number of faces in any mesh we will rasterize. Used only by
            MeshRasterizerOpenGL to pre-allocate OpenGL memory.
        max_faces_per_bin: Only applicable when using coarse-to-fine
            rasterization (bin_size != 0); this is the maximum number of faces
            allowed within each bin. This should not affect the output values,
            but can affect the memory usage in the forward pass.
            Setting max_faces_per_bin=None attempts to set with a heuristic.
        perspective_correct: Whether to apply perspective correction when
            computing barycentric coordinates for pixels.
            None (default) means make correction if the camera uses perspective.
        clip_barycentric_coords: Whether, after any perspective correction
            is applied but before the depth is calculated (e.g. for
            z clipping), to "correct" a location outside the face (i.e. with
            a negative barycentric coordinate) to a position on the edge of the
            face. None (default) means clip if blur_radius > 0, which is a condition
            under which such outside-face-points are likely.
        cull_backfaces: Whether to only rasterize mesh faces which are
            visible to the camera.  This assumes that vertices of
            front-facing triangles are ordered in an anti-clockwise
            fashion, and triangles that face away from the camera are
            in a clockwise order relative to the current view
            direction. NOTE: This will only work if the mesh faces are
            consistently defined with counter-clockwise ordering when
            viewed from the outside.
        z_clip_value: if not None, then triangles will be clipped (and possibly
            subdivided into smaller triangles) such that z >= z_clip_value.
            This avoids camera projections that go to infinity as z->0.
            Default is None as clipping affects rasterization speed and
            should only be turned on if explicitly needed.
            See clip.py for all the extra computation that is required.
        cull_to_frustum: Whether to cull triangles outside the view frustum.
            Culling involves removing all faces which fall outside view frustum.
            Default is False for performance as often not needed.
    """

    image_size: Union[int, Tuple[int, int]] = 256
    blur_radius: float = 0.0
    faces_per_pixel: int = 1
    bin_size: Optional[int] = None
    max_faces_opengl: int = 10_000_000
    max_faces_per_bin: Optional[int] = None
    perspective_correct: Optional[bool] = None
    clip_barycentric_coords: Optional[bool] = None
    cull_backfaces: bool = False
    z_clip_value: Optional[float] = None
    cull_to_frustum: bool = False


class MeshRasterizer(nn.Module):
    """
    This class implements methods for rasterizing a batch of heterogeneous
    Meshes.
    """

    def __init__(self, cameras=None, raster_settings=None) -> None:
        """
        Args:
            cameras: A cameras object which has a  `transform_points` method
                which returns the transformed points after applying the
                world-to-view and view-to-ndc transformations.
            raster_settings: the parameters for rasterization. This should be a
                named tuple.

        All these initial settings can be overridden by passing keyword
        arguments to the forward function.
        """
        super().__init__()
        if raster_settings is None:
            raster_settings = RasterizationSettings()

        self.cameras = cameras
        self.raster_settings = raster_settings

    def to(self, device):
        # Manually move to device cameras as it is not a subclass of nn.Module
        if self.cameras is not None:
            self.cameras = self.cameras.to(device)
        return self

    def transform(self, meshes_world, **kwargs) -> torch.Tensor:
        """
        Args:
            meshes_world: a Meshes object representing a batch of meshes with
                vertex coordinates in world space.

        Returns:
            meshes_proj: a Meshes object with the vertex positions projected
            in NDC space

        NOTE: keeping this as a separate function for readability but it could
        be moved into forward.
        """
        cameras = kwargs.get("cameras", self.cameras)
        if cameras is None:
            msg = "Cameras must be specified either at initialization \
                or in the forward pass of MeshRasterizer"
            raise ValueError(msg)

        # Either one shared camera or one camera per mesh is accepted.
        n_cameras = len(cameras)
        if n_cameras != 1 and n_cameras != len(meshes_world):
            msg = "Wrong number (%r) of cameras for %r meshes"
            raise ValueError(msg % (n_cameras, len(meshes_world)))

        verts_world = meshes_world.verts_padded()

        # NOTE: Retaining view space z coordinate for now.
        # TODO: Revisit whether or not to transform z coordinate to [-1, 1] or
        # [0, 1] range.
        eps = kwargs.get("eps", None)
        verts_view = cameras.get_world_to_view_transform(**kwargs).transform_points(
            verts_world, eps=eps
        )
        to_ndc_transform = cameras.get_ndc_camera_transform(**kwargs)
        projection_transform = try_get_projection_transform(cameras, kwargs)
        if projection_transform is not None:
            projection_transform = projection_transform.compose(to_ndc_transform)
            verts_ndc = projection_transform.transform_points(verts_view, eps=eps)
        else:
            # Call transform_points instead of explicitly composing transforms to handle
            # the case, where camera class does not have a projection matrix form.
            verts_proj = cameras.transform_points(verts_world, eps=eps)
            verts_ndc = to_ndc_transform.transform_points(verts_proj, eps=eps)

        # Overwrite NDC z with the (unnormalized) view-space z, per the NOTE above.
        verts_ndc[..., 2] = verts_view[..., 2]
        meshes_ndc = meshes_world.update_padded(new_verts_padded=verts_ndc)
        return meshes_ndc

    def forward(self, meshes_world, **kwargs) -> Fragments:
        """
        Args:
            meshes_world: a Meshes object representing a batch of meshes with
                          coordinates in world space.
        Returns:
            Fragments: Rasterization outputs as a named tuple.
        """
        meshes_proj = self.transform(meshes_world, **kwargs)
        raster_settings = kwargs.get("raster_settings", self.raster_settings)

        # By default, turn on clip_barycentric_coords if blur_radius > 0.
        # When blur_radius > 0, a face can be matched to a pixel that is outside the
        # face, resulting in negative barycentric coordinates.
        clip_barycentric_coords = raster_settings.clip_barycentric_coords
        if clip_barycentric_coords is None:
            clip_barycentric_coords = raster_settings.blur_radius > 0.0

        # If not specified, infer perspective_correct and z_clip_value from the camera
        cameras = kwargs.get("cameras", self.cameras)
        if raster_settings.perspective_correct is not None:
            perspective_correct = raster_settings.perspective_correct
        else:
            perspective_correct = cameras.is_perspective()
        if raster_settings.z_clip_value is not None:
            z_clip = raster_settings.z_clip_value
        else:
            # Heuristic: clip at half the near plane for perspective cameras.
            znear = cameras.get_znear()
            if isinstance(znear, torch.Tensor):
                znear = znear.min().item()
            z_clip = None if not perspective_correct or znear is None else znear / 2

        pix_to_face, zbuf, bary_coords, dists = rasterize_meshes(
            meshes_proj,
            image_size=raster_settings.image_size,
            blur_radius=raster_settings.blur_radius,
            faces_per_pixel=raster_settings.faces_per_pixel,
            bin_size=raster_settings.bin_size,
            max_faces_per_bin=raster_settings.max_faces_per_bin,
            clip_barycentric_coords=clip_barycentric_coords,
            perspective_correct=perspective_correct,
            cull_backfaces=raster_settings.cull_backfaces,
            z_clip_value=z_clip,
            cull_to_frustum=raster_settings.cull_to_frustum,
        )

        return Fragments(
            pix_to_face=pix_to_face,
            zbuf=zbuf,
            bary_coords=bary_coords,
            dists=dists,
        )
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/renderer.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/renderer.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b623243a2a5660505fb6476bb31f5d15615d362
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/renderer.py
@@ -0,0 +1,112 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

from typing import Tuple

import torch
import torch.nn as nn

from ...structures.meshes import Meshes

# A renderer class should be initialized with a
# function for rasterization and a function for shading.
# The rasterizer should:
#     - transform inputs from world -> screen space
#     - rasterize inputs
#     - return fragments
# The shader can take fragments as input along with any other properties of
# the scene and generate images.

# E.g.
rasterize inputs and then shade +# +# fragments = self.rasterize(meshes) +# images = self.shader(fragments, meshes) +# return images + + +class MeshRenderer(nn.Module): + """ + A class for rendering a batch of heterogeneous meshes. The class should + be initialized with a rasterizer (a MeshRasterizer or a MeshRasterizerOpenGL) + and shader class which each have a forward function. + """ + + def __init__(self, rasterizer, shader) -> None: + super().__init__() + self.rasterizer = rasterizer + self.shader = shader + + def to(self, device): + # Rasterizer and shader have submodules which are not of type nn.Module + self.rasterizer.to(device) + self.shader.to(device) + return self + + def forward(self, meshes_world: Meshes, **kwargs) -> torch.Tensor: + """ + Render a batch of images from a batch of meshes by rasterizing and then + shading. + + NOTE: If the blur radius for rasterization is > 0.0, some pixels can + have one or more barycentric coordinates lying outside the range [0, 1]. + For a pixel with out of bounds barycentric coordinates with respect to a + face f, clipping is required before interpolating the texture uv + coordinates and z buffer so that the colors and depths are limited to + the range for the corresponding face. + For this set rasterizer.raster_settings.clip_barycentric_coords=True + """ + fragments = self.rasterizer(meshes_world, **kwargs) + images = self.shader(fragments, meshes_world, **kwargs) + + return images + + +class MeshRendererWithFragments(nn.Module): + """ + A class for rendering a batch of heterogeneous meshes. The class should + be initialized with a rasterizer (a MeshRasterizer or a MeshRasterizerOpenGL) + and shader class which each have a forward function. + + In the forward pass this class returns the `fragments` from which intermediate + values such as the depth map can be easily extracted e.g. + + .. 
code-block:: python + images, fragments = renderer(meshes) + depth = fragments.zbuf + """ + + def __init__(self, rasterizer, shader) -> None: + super().__init__() + self.rasterizer = rasterizer + self.shader = shader + + def to(self, device): + # Rasterizer and shader have submodules which are not of type nn.Module + self.rasterizer.to(device) + self.shader.to(device) + return self + + def forward( + self, meshes_world: Meshes, **kwargs + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Render a batch of images from a batch of meshes by rasterizing and then + shading. + + NOTE: If the blur radius for rasterization is > 0.0, some pixels can + have one or more barycentric coordinates lying outside the range [0, 1]. + For a pixel with out of bounds barycentric coordinates with respect to a + face f, clipping is required before interpolating the texture uv + coordinates and z buffer so that the colors and depths are limited to + the range for the corresponding face. + For this set rasterizer.raster_settings.clip_barycentric_coords=True + """ + fragments = self.rasterizer(meshes_world, **kwargs) + images = self.shader(fragments, meshes_world, **kwargs) + + return images, fragments diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/shader.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/shader.py new file mode 100644 index 0000000000000000000000000000000000000000..77aeba91892b02ff91577eb69b4159854e56053e --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/shader.py @@ -0,0 +1,444 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 

# pyre-unsafe

import warnings
from typing import Optional

import torch
import torch.nn as nn

from ...common.datatypes import Device
from ...structures.meshes import Meshes
from ..blending import (
    BlendParams,
    hard_rgb_blend,
    sigmoid_alpha_blend,
    softmax_rgb_blend,
)
from ..lighting import PointLights
from ..materials import Materials
from ..splatter_blend import SplatterBlender
from ..utils import TensorProperties
from .rasterizer import Fragments
from .shading import (
    _phong_shading_with_pixels,
    flat_shading,
    gouraud_shading,
    phong_shading,
)


# A Shader should take as input fragments from the output of rasterization
# along with scene params and output images. A shader could perform operations
# such as:
#     - interpolate vertex attributes for all the fragments
#     - sample colors from a texture map
#     - apply per pixel lighting
#     - blend colors across top K faces per pixel.
class ShaderBase(nn.Module):
    # Common state and helpers shared by the concrete shaders below; each
    # attribute can be overridden per-call via forward(**kwargs).
    def __init__(
        self,
        device: Device = "cpu",
        cameras: Optional[TensorProperties] = None,
        lights: Optional[TensorProperties] = None,
        materials: Optional[Materials] = None,
        blend_params: Optional[BlendParams] = None,
    ) -> None:
        super().__init__()
        # Fall back to default lights/materials/blend params on the requested
        # device when none are supplied; cameras may legitimately stay None
        # until the forward pass.
        self.lights = lights if lights is not None else PointLights(device=device)
        self.materials = (
            materials if materials is not None else Materials(device=device)
        )
        self.cameras = cameras
        self.blend_params = blend_params if blend_params is not None else BlendParams()

    def _get_cameras(self, **kwargs):
        # Resolve cameras from kwargs first, then the instance; raise if absent.
        cameras = kwargs.get("cameras", self.cameras)
        if cameras is None:
            msg = "Cameras must be specified either at initialization \
                or in the forward pass of the shader."
            raise ValueError(msg)

        return cameras

    # pyre-fixme[14]: `to` overrides method defined in `Module` inconsistently.
    def to(self, device: Device):
        # Manually move to device modules which are not subclasses of nn.Module
        cameras = self.cameras
        if cameras is not None:
            self.cameras = cameras.to(device)
        self.materials = self.materials.to(device)
        self.lights = self.lights.to(device)
        return self


class HardPhongShader(ShaderBase):
    """
    Per pixel lighting - the lighting model is applied using the interpolated
    coordinates and normals for each pixel. The blending function hard assigns
    the color of the closest face for each pixel.

    To use the default values, simply initialize the shader with the desired
    device e.g.

    .. code-block::

        shader = HardPhongShader(device=torch.device("cuda:0"))
    """

    def forward(self, fragments: Fragments, meshes: Meshes, **kwargs) -> torch.Tensor:
        cameras = super()._get_cameras(**kwargs)
        texels = meshes.sample_textures(fragments)
        lights = kwargs.get("lights", self.lights)
        materials = kwargs.get("materials", self.materials)
        blend_params = kwargs.get("blend_params", self.blend_params)
        colors = phong_shading(
            meshes=meshes,
            fragments=fragments,
            texels=texels,
            lights=lights,
            cameras=cameras,
            materials=materials,
        )
        images = hard_rgb_blend(colors, fragments, blend_params)
        return images


class SoftPhongShader(ShaderBase):
    """
    Per pixel lighting - the lighting model is applied using the interpolated
    coordinates and normals for each pixel. The blending function returns the
    soft aggregated color using all the faces per pixel.

    To use the default values, simply initialize the shader with the desired
    device e.g.

    .. code-block::

        shader = SoftPhongShader(device=torch.device("cuda:0"))
    """

    def forward(self, fragments: Fragments, meshes: Meshes, **kwargs) -> torch.Tensor:
        cameras = super()._get_cameras(**kwargs)
        texels = meshes.sample_textures(fragments)
        lights = kwargs.get("lights", self.lights)
        materials = kwargs.get("materials", self.materials)
        blend_params = kwargs.get("blend_params", self.blend_params)
        colors = phong_shading(
            meshes=meshes,
            fragments=fragments,
            texels=texels,
            lights=lights,
            cameras=cameras,
            materials=materials,
        )
        # znear/zfar default to the camera attributes when present.
        znear = kwargs.get("znear", getattr(cameras, "znear", 1.0))
        zfar = kwargs.get("zfar", getattr(cameras, "zfar", 100.0))
        images = softmax_rgb_blend(
            colors, fragments, blend_params, znear=znear, zfar=zfar
        )
        return images


class HardGouraudShader(ShaderBase):
    """
    Per vertex lighting - the lighting model is applied to the vertex colors and
    the colors are then interpolated using the barycentric coordinates to
    obtain the colors for each pixel. The blending function hard assigns
    the color of the closest face for each pixel.

    To use the default values, simply initialize the shader with the desired
    device e.g.

    .. code-block::

        shader = HardGouraudShader(device=torch.device("cuda:0"))
    """

    def forward(self, fragments: Fragments, meshes: Meshes, **kwargs) -> torch.Tensor:
        cameras = super()._get_cameras(**kwargs)
        lights = kwargs.get("lights", self.lights)
        materials = kwargs.get("materials", self.materials)
        blend_params = kwargs.get("blend_params", self.blend_params)

        # As Gouraud shading applies the illumination to the vertex
        # colors, the interpolated pixel texture is calculated in the
        # shading step. In comparison, for Phong shading, the pixel
        # textures are computed first after which the illumination is
        # applied.
+ pixel_colors = gouraud_shading( + meshes=meshes, + fragments=fragments, + lights=lights, + cameras=cameras, + materials=materials, + ) + images = hard_rgb_blend(pixel_colors, fragments, blend_params) + return images + + +class SoftGouraudShader(ShaderBase): + """ + Per vertex lighting - the lighting model is applied to the vertex colors and + the colors are then interpolated using the barycentric coordinates to + obtain the colors for each pixel. The blending function returns the + soft aggregated color using all the faces per pixel. + + To use the default values, simply initialize the shader with the desired + device e.g. + + .. code-block:: + + shader = SoftGouraudShader(device=torch.device("cuda:0")) + """ + + def forward(self, fragments: Fragments, meshes: Meshes, **kwargs) -> torch.Tensor: + cameras = super()._get_cameras(**kwargs) + lights = kwargs.get("lights", self.lights) + materials = kwargs.get("materials", self.materials) + pixel_colors = gouraud_shading( + meshes=meshes, + fragments=fragments, + lights=lights, + cameras=cameras, + materials=materials, + ) + znear = kwargs.get("znear", getattr(cameras, "znear", 1.0)) + zfar = kwargs.get("zfar", getattr(cameras, "zfar", 100.0)) + images = softmax_rgb_blend( + pixel_colors, fragments, self.blend_params, znear=znear, zfar=zfar + ) + return images + + +def TexturedSoftPhongShader( + device: Device = "cpu", + cameras: Optional[TensorProperties] = None, + lights: Optional[TensorProperties] = None, + materials: Optional[Materials] = None, + blend_params: Optional[BlendParams] = None, +) -> SoftPhongShader: + """ + TexturedSoftPhongShader class has been DEPRECATED. Use SoftPhongShader instead. + Preserving TexturedSoftPhongShader as a function for backwards compatibility. 
+ """ + warnings.warn( + """TexturedSoftPhongShader is now deprecated; + use SoftPhongShader instead.""", + PendingDeprecationWarning, + ) + return SoftPhongShader( + device=device, + cameras=cameras, + lights=lights, + materials=materials, + blend_params=blend_params, + ) + + +class HardFlatShader(ShaderBase): + """ + Per face lighting - the lighting model is applied using the average face + position and the face normal. The blending function hard assigns + the color of the closest face for each pixel. + + To use the default values, simply initialize the shader with the desired + device e.g. + + .. code-block:: + + shader = HardFlatShader(device=torch.device("cuda:0")) + """ + + def forward(self, fragments: Fragments, meshes: Meshes, **kwargs) -> torch.Tensor: + cameras = super()._get_cameras(**kwargs) + texels = meshes.sample_textures(fragments) + lights = kwargs.get("lights", self.lights) + materials = kwargs.get("materials", self.materials) + blend_params = kwargs.get("blend_params", self.blend_params) + colors = flat_shading( + meshes=meshes, + fragments=fragments, + texels=texels, + lights=lights, + cameras=cameras, + materials=materials, + ) + images = hard_rgb_blend(colors, fragments, blend_params) + return images + + +class SoftSilhouetteShader(nn.Module): + """ + Calculate the silhouette by blending the top K faces for each pixel based + on the 2d euclidean distance of the center of the pixel to the mesh face. + + Use this shader for generating silhouettes similar to SoftRasterizer [0]. + + .. note:: + + To be consistent with SoftRasterizer, initialize the + RasterizationSettings for the rasterizer with + `blur_radius = np.log(1. / 1e-4 - 1.) 
class SplatterPhongShader(ShaderBase):
    """
    Per pixel lighting - the lighting model is applied using the interpolated
    coordinates and normals for each pixel. The blending function returns the
    color aggregated using splats from surrounding pixels (see [0]).

    To use the default values, simply initialize the shader with the desired
    device e.g.

    .. code-block::

        shader = SplatterPhongShader(device=torch.device("cuda:0"))

    [0] Cole, F. et al., "Differentiable Surface Rendering via Non-differentiable
        Sampling".
    """

    def __init__(self, **kwargs):
        # The blender is created lazily on the first forward() call, once the
        # output shape (N, H, W, K) is known; see forward().
        self.splatter_blender = None
        super().__init__(**kwargs)

    def to(self, device: Device):
        # Keep the lazily-created blender on the same device as the shader.
        if self.splatter_blender:
            self.splatter_blender.to(device)
        return super().to(device)

    def forward(self, fragments: Fragments, meshes: Meshes, **kwargs) -> torch.Tensor:
        """
        Shade fragments with per-pixel Phong lighting and blend them with the
        splatter blender.

        Args:
            fragments: rasterizer output for `meshes`.
            meshes: batch of meshes being rendered.
            **kwargs: optional per-call overrides: cameras, lights,
                materials, blend_params.

        Returns:
            images: blended output of SplatterBlender.
        """
        cameras = super()._get_cameras(**kwargs)
        texels = meshes.sample_textures(fragments)
        lights = kwargs.get("lights", self.lights)
        materials = kwargs.get("materials", self.materials)

        # NOTE(review): fragments is detached before shading — presumably so
        # gradients w.r.t. geometry flow through pixel_coords_cameras in the
        # splatting step rather than through the rasterized barycentrics;
        # confirm against the splatter-rendering reference [0].
        colors, pixel_coords_cameras = _phong_shading_with_pixels(
            meshes=meshes,
            fragments=fragments.detach(),
            texels=texels,
            lights=lights,
            cameras=cameras,
            materials=materials,
        )

        if not self.splatter_blender:
            # Init only once, to avoid re-computing constants.
            N, H, W, K, _ = colors.shape
            self.splatter_blender = SplatterBlender((N, H, W, K), colors.device)

        blend_params = kwargs.get("blend_params", self.blend_params)
        self.check_blend_params(blend_params)

        images = self.splatter_blender(
            colors,
            pixel_coords_cameras,
            cameras,
            fragments.pix_to_face < 0,
            # Fix: reuse the blend_params resolved (and checked) above instead
            # of fetching it from kwargs a second time.
            blend_params,
        )

        return images

    def check_blend_params(self, blend_params):
        # sigma is interpreted in pixel units by the splatter; warn loudly on
        # anything other than the expected 0.5.
        if blend_params.sigma != 0.5:
            warnings.warn(
                f"SplatterPhongShader received sigma={blend_params.sigma}. sigma is "
                "defined in pixel units, and any value other than 0.5 is highly "
                "unexpected. Only use other values if you know what you are doing. "
            )
class SoftDepthShader(ShaderBase):
    """
    Renders the Z distances using an aggregate of the distances of each face
    based off of the point distance. If no face is found it returns the zfar
    value of the camera.

    Output from this shader is [N, H, W, 1] since it's only depth.

    To use the default values, simply initialize the shader with the desired
    device e.g.

    .. code-block::

        shader = SoftDepthShader(device=torch.device("cuda:0"))
    """

    def forward(self, fragments: Fragments, meshes: Meshes, **kwargs) -> torch.Tensor:
        # The pixel-to-face distances are needed to weight per-face depths;
        # fail fast if the rasterizer did not populate them.
        if fragments.dists is None:
            raise ValueError("SoftDepthShader requires Fragments.dists to be present.")

        cameras = super()._get_cameras(**kwargs)

        N, H, W, K = fragments.pix_to_face.shape
        device = fragments.zbuf.device
        # True where a face covers the pixel (pix_to_face < 0 marks empty
        # slots); multiplying by this mask below zeroes their probability.
        mask = fragments.pix_to_face >= 0

        # Per-call zfar override takes precedence over the camera attribute,
        # then falls back to 100.0.
        zfar = kwargs.get("zfar", getattr(cameras, "zfar", 100.0))

        # Sigmoid probability map based on the distance of the pixel to the face.
        prob_map = torch.sigmoid(-fragments.dists / self.blend_params.sigma) * mask

        # append extra face for zfar
        dists = torch.cat(
            (fragments.zbuf, torch.ones((N, H, W, 1), device=device) * zfar), dim=3
        )
        probs = torch.cat((prob_map, torch.ones((N, H, W, 1), device=device)), dim=3)

        # compute weighting based off of probabilities using cumsum:
        # cumsum + clamp(max=1) + diff converts the per-face probabilities
        # into weights that sum to exactly 1 along the K dimension, with any
        # remaining mass falling on the appended zfar entry (probability 1).
        probs = probs.cumsum(dim=3)
        probs = probs.clamp(max=1)
        probs = probs.diff(dim=3, prepend=torch.zeros((N, H, W, 1), device=device))

        # Weighted sum of depths, keeping a trailing channel dim: (N, H, W, 1).
        return (probs * dists).sum(dim=3).unsqueeze(3)
def _phong_shading_with_pixels(
    meshes, fragments, lights, cameras, materials, texels
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Per-pixel Phong shading that also returns the shaded pixel positions.

    Vertex positions and vertex normals are interpolated to each pixel via
    the barycentric coordinates from rasterization; the illumination is then
    evaluated at each pixel. The final color is
    texels * (ambient + diffuse) + specular.

    Args:
        meshes: Batch of meshes
        fragments: Fragments named tuple with the outputs of rasterization
        lights: Lights class containing a batch of lights
        cameras: Cameras class containing a batch of cameras
        materials: Materials class containing a batch of material properties
        texels: texture per pixel of shape (N, H, W, K, 3)

    Returns:
        colors: (N, H, W, K, 3)
        pixel_coords: (N, H, W, K, 3), camera coordinates of each intersection.
    """
    verts_packed = meshes.verts_packed()  # (V, 3)
    faces_packed = meshes.faces_packed()  # (F, 3)
    normals_packed = meshes.verts_normals_packed()  # (V, 3)
    face_verts = verts_packed[faces_packed]
    face_vert_normals = normals_packed[faces_packed]
    pixel_coords_in_camera = interpolate_face_attributes(
        fragments.pix_to_face, fragments.bary_coords, face_verts
    )
    pixel_normals = interpolate_face_attributes(
        fragments.pix_to_face, fragments.bary_coords, face_vert_normals
    )
    ambient, diffuse, specular = _apply_lighting(
        pixel_coords_in_camera, pixel_normals, lights, cameras, materials
    )
    shaded = texels * (ambient + diffuse) + specular
    return shaded, pixel_coords_in_camera


def phong_shading(
    meshes, fragments, lights, cameras, materials, texels
) -> torch.Tensor:
    """
    Per-pixel Phong shading (colors only).

    Thin wrapper around _phong_shading_with_pixels that discards the
    per-pixel camera coordinates.

    Args:
        meshes: Batch of meshes
        fragments: Fragments named tuple with the outputs of rasterization
        lights: Lights class containing a batch of lights
        cameras: Cameras class containing a batch of cameras
        materials: Materials class containing a batch of material properties
        texels: texture per pixel of shape (N, H, W, K, 3)

    Returns:
        colors: (N, H, W, K, 3)
    """
    shaded, _unused_pixel_coords = _phong_shading_with_pixels(
        meshes, fragments, lights, cameras, materials, texels
    )
    return shaded
If vertex color is available, + combine the ambient and diffuse vertex illumination with the vertex color + and add the specular component to determine the vertex shaded color. + Then interpolate the vertex shaded colors using the barycentric coordinates + to get a color per pixel. + + Gouraud shading is only supported for meshes with texture type `TexturesVertex`. + This is because the illumination is applied to the vertex colors. + + Args: + meshes: Batch of meshes + fragments: Fragments named tuple with the outputs of rasterization + lights: Lights class containing a batch of lights parameters + cameras: Cameras class containing a batch of cameras parameters + materials: Materials class containing a batch of material properties + + Returns: + colors: (N, H, W, K, 3) + """ + if not isinstance(meshes.textures, TexturesVertex): + raise ValueError("Mesh textures must be an instance of TexturesVertex") + + faces = meshes.faces_packed() # (F, 3) + verts = meshes.verts_packed() # (V, 3) + verts_normals = meshes.verts_normals_packed() # (V, 3) + verts_colors = meshes.textures.verts_features_packed() # (V, D) + vert_to_mesh_idx = meshes.verts_packed_to_mesh_idx() + + # Format properties of lights and materials so they are compatible + # with the packed representation of the vertices. This transforms + # all tensor properties in the class from shape (N, ...) -> (V, ...) where + # V is the number of packed vertices. If the number of meshes in the + # batch is one then this is not necessary. 
def flat_shading(meshes, fragments, lights, cameras, materials, texels) -> torch.Tensor:
    """
    Apply per face shading. Use the average face position and the face normals
    to compute the ambient, diffuse and specular lighting. Apply the ambient
    and diffuse color to the pixel color and add the specular component to
    determine the final pixel color.

    Args:
        meshes: Batch of meshes
        fragments: Fragments named tuple with the outputs of rasterization
        lights: Lights class containing a batch of lights parameters
        cameras: Cameras class containing a batch of cameras parameters
        materials: Materials class containing a batch of material properties
        texels: texture per pixel of shape (N, H, W, K, 3)

    Returns:
        colors: (N, H, W, K, 3)
    """
    verts = meshes.verts_packed()  # (V, 3)
    faces = meshes.faces_packed()  # (F, 3)
    face_normals = meshes.faces_normals_packed()  # (F, 3) - one normal per face
    faces_verts = verts[faces]
    face_coords = faces_verts.mean(dim=-2)  # (F, 3) mean xyz across the 3 verts

    # Replace empty pixels in pix_to_face with 0 in order to interpolate.
    mask = fragments.pix_to_face == -1
    pix_to_face = fragments.pix_to_face.clone()
    pix_to_face[mask] = 0

    N, H, W, K = pix_to_face.shape
    idx = pix_to_face.view(N * H * W * K, 1).expand(N * H * W * K, 3)

    # gather pixel coords, then zero the entries that came from empty pixels
    pixel_coords = face_coords.gather(0, idx).view(N, H, W, K, 3)
    pixel_coords[mask] = 0.0
    # gather pixel normals, zeroing empty pixels likewise
    pixel_normals = face_normals.gather(0, idx).view(N, H, W, K, 3)
    pixel_normals[mask] = 0.0

    # Calculate the illumination at each face
    ambient, diffuse, specular = _apply_lighting(
        pixel_coords, pixel_normals, lights, cameras, materials
    )
    colors = (ambient + diffuse) * texels + specular
    return colors
def _list_to_padded_wrapper(
    x: List[torch.Tensor],
    pad_size: Union[list, tuple, None] = None,
    pad_value: float = 0.0,
) -> torch.Tensor:
    r"""
    This is a wrapper function for
    pytorch3d.structures.utils.list_to_padded function which only accepts
    3-dimensional inputs.

    For this use case, the input x is of shape (F, 3, ...) where only F
    is different for each element in the list.

    Transforms a list of N tensors each of shape (Mi, ...) into a single tensor
    of shape (N, pad_size, ...), or (N, max(Mi), ...)
    if pad_size is None.

    Args:
        x: list of Tensors
        pad_size: int specifying the size of the first dimension
            of the padded tensor
        pad_value: float value to be used to fill the padded tensor

    Returns:
        x_padded: tensor consisting of padded input tensors

    Raises:
        ValueError: if any tensor in `x` disagrees with the first element's
            rank or trailing shape.
    """
    N = len(x)
    dims = x[0].ndim
    reshape_dims = x[0].shape[1:]
    D = torch.prod(torch.tensor(reshape_dims)).item()
    x_reshaped = []
    for y in x:
        # Fix: use `or`, not `and`. With `and`, the check only fired when
        # BOTH the rank and the trailing shape differed, so same-rank tensors
        # with mismatched trailing dims slipped through and were silently
        # mis-reshaped with the wrong D below.
        if y.ndim != dims or y.shape[1:] != reshape_dims:
            msg = (
                "list_to_padded requires tensors to have the same number of dimensions"
            )
            raise ValueError(msg)
        # pyre-fixme[6]: For 2nd param expected `int` but got `Union[bool, float, int]`.
        x_reshaped.append(y.reshape(-1, D))
    x_padded = list_to_padded(x_reshaped, pad_size=pad_size, pad_value=pad_value)
    # torch.Size is a tuple subclass, so tuple + Size works at runtime.
    # pyre-fixme[58]: `+` is not supported for operand types `Tuple[int, int]` and
    #  `Size`.
    return x_padded.reshape((N, -1) + reshape_dims)


def _padded_to_list_wrapper(
    x: torch.Tensor, split_size: Union[list, tuple, None] = None
) -> List[torch.Tensor]:
    r"""
    This is a wrapper function for pytorch3d.structures.utils.padded_to_list
    which only accepts 3-dimensional inputs.

    For this use case, the input x is of shape (N, F, ...) where F
    is the number of faces which is different for each tensor in the batch.

    This function transforms a padded tensor of shape (N, M, ...) into a
    list of N tensors of shape (Mi, ...) where (Mi) is specified in
    split_size(i), or of shape (M,) if split_size is None.

    Args:
        x: padded Tensor
        split_size: list of ints defining the number of items for each tensor
            in the output list.

    Returns:
        x_list: a list of tensors
    """
    N, M = x.shape[:2]
    reshape_dims = x.shape[2:]
    D = torch.prod(torch.tensor(reshape_dims)).item()
    # pyre-fixme[6]: For 3rd param expected `int` but got `Union[bool, float, int]`.
    x_reshaped = x.reshape(N, M, D)
    x_list = padded_to_list(x_reshaped, split_size=split_size)
    # pyre-fixme[58]: `+` is not supported for operand types `Tuple[typing.Any]` and
    #  `Size`.
    x_list = [xl.reshape((xl.shape[0],) + reshape_dims) for xl in x_list]
    return x_list
+ + Args: + images: list of N tensors of shape (H_i, W_i, C) + align_corners: used for interpolation + + Returns: + tex_maps: Tensor of shape (N, max_H, max_W, C) + """ + tex_maps = [] + max_H = 0 + max_W = 0 + for im in images: + h, w, _C = im.shape + if h > max_H: + max_H = h + if w > max_W: + max_W = w + tex_maps.append(im) + max_shape = (max_H, max_W) + + for i, image in enumerate(tex_maps): + if image.shape[:2] != max_shape: + image_BCHW = image.permute(2, 0, 1)[None] + new_image_BCHW = interpolate( + image_BCHW, + size=max_shape, + mode="bilinear", + align_corners=align_corners, + ) + tex_maps[i] = new_image_BCHW[0].permute(1, 2, 0) + tex_maps = torch.stack(tex_maps, dim=0) # (num_tex_maps, max_H, max_W, C) + return tex_maps + + +def _pad_texture_multiple_maps( + multiple_texture_maps: Union[Tuple[torch.Tensor], List[torch.Tensor]], + align_corners: bool, +) -> torch.Tensor: + """ + Pad all texture images so they have the same height and width. + + Args: + images: list of N tensors of shape (M_i, H_i, W_i, C) + M_i : Number of texture maps:w + + align_corners: used for interpolation + + Returns: + tex_maps: Tensor of shape (N, max_M, max_H, max_W, C) + """ + tex_maps = [] + max_M = 0 + max_H = 0 + max_W = 0 + C = 0 + for im in multiple_texture_maps: + m, h, w, C = im.shape + if m > max_M: + max_M = m + if h > max_H: + max_H = h + if w > max_W: + max_W = w + tex_maps.append(im) + max_shape = (max_M, max_H, max_W, C) + max_im_shape = (max_H, max_W) + for i, tms in enumerate(tex_maps): + new_tex_maps = torch.zeros(max_shape) + for j in range(tms.shape[0]): + im = tms[j] + if im.shape[:2] != max_im_shape: + image_BCHW = im.permute(2, 0, 1)[None] + new_image_BCHW = interpolate( + image_BCHW, + size=max_im_shape, + mode="bilinear", + align_corners=align_corners, + ) + new_tex_maps[j] = new_image_BCHW[0].permute(1, 2, 0) + else: + new_tex_maps[j] = im + tex_maps[i] = new_tex_maps + tex_maps = torch.stack(tex_maps, dim=0) # (num_tex_maps, max_H, max_W, C) + return 
tex_maps + + +# A base class for defining a batch of textures +# with helper methods. +# This is also useful to have so that inside `Meshes` +# we can allow the input textures to be any texture +# type which is an instance of the base class. +class TexturesBase: + def isempty(self): + if self._N is not None and self.valid is not None: + return self._N == 0 or self.valid.eq(False).all() + return False + + def to(self, device): + for k in dir(self): + v = getattr(self, k) + if isinstance(v, (list, tuple)) and all( + torch.is_tensor(elem) for elem in v + ): + v = [elem.to(device) for elem in v] + setattr(self, k, v) + if torch.is_tensor(v) and v.device != device: + setattr(self, k, v.to(device)) + self.device = device + return self + + def _extend(self, N: int, props: List[str]) -> Dict[str, Union[torch.Tensor, List]]: + """ + Create a dict with the specified properties + repeated N times per batch element. + + Args: + N: number of new copies of each texture + in the batch. + props: a List of strings which refer to either + class attributes or class methods which + return tensors or lists. + + Returns: + Dict with the same keys as props. The values are the + extended properties. + """ + if not isinstance(N, int): + raise ValueError("N must be an integer.") + if N <= 0: + raise ValueError("N must be > 0.") + + new_props = {} + for p in props: + t = getattr(self, p) + if callable(t): + t = t() # class method + if t is None: + new_props[p] = None + elif isinstance(t, list): + if not all(isinstance(elem, (int, float)) for elem in t): + raise ValueError("Extend only supports lists of scalars") + t = [[ti] * N for ti in t] + new_props[p] = list(itertools.chain(*t)) + elif torch.is_tensor(t): + new_props[p] = t.repeat_interleave(N, dim=0) + else: + raise ValueError( + f"Property {p} has unsupported type {type(t)}." + "Only tensors and lists are supported." 
+ ) + return new_props + + def _getitem(self, index: Union[int, slice], props: List[str]): + """ + Helper function for __getitem__ + """ + new_props = {} + if isinstance(index, (int, slice)): + for p in props: + t = getattr(self, p) + if callable(t): + t = t() # class method + new_props[p] = t[index] if t is not None else None + elif isinstance(index, list): + index = torch.tensor(index) + if isinstance(index, torch.Tensor): + if index.dtype == torch.bool: + index = index.nonzero() + index = index.squeeze(1) if index.numel() > 0 else index + index = index.tolist() + for p in props: + t = getattr(self, p) + if callable(t): + t = t() # class method + new_props[p] = [t[i] for i in index] if t is not None else None + return new_props + + def sample_textures(self) -> torch.Tensor: + """ + Different texture classes sample textures in different ways + e.g. for vertex textures, the values at each vertex + are interpolated across the face using the barycentric + coordinates. + Each texture class should implement a sample_textures + method to take the `fragments` from rasterization. + Using `fragments.pix_to_face` and `fragments.bary_coords` + this function should return the sampled texture values for + each pixel in the output image. + """ + raise NotImplementedError() + + def submeshes( + self, + vertex_ids_list: List[List[torch.LongTensor]], + faces_ids_list: List[List[torch.LongTensor]], + ) -> "TexturesBase": + """ + Extract sub-textures used for submeshing. + """ + raise NotImplementedError(f"{self.__class__} does not support submeshes") + + def faces_verts_textures_packed(self) -> torch.Tensor: + """ + Returns the texture for each vertex for each face in the mesh. + For N meshes, this function returns sum(Fi)x3xC where Fi is the + number of faces in the i-th mesh and C is the dimensional of + the feature (C = 3 for RGB textures). + You can use the utils function in structures.utils to convert the + packed representation to a list or padded. 
+ """ + raise NotImplementedError() + + def clone(self) -> "TexturesBase": + """ + Each texture class should implement a method + to clone all necessary internal tensors. + """ + raise NotImplementedError() + + def detach(self) -> "TexturesBase": + """ + Each texture class should implement a method + to detach all necessary internal tensors. + """ + raise NotImplementedError() + + def __getitem__(self, index) -> "TexturesBase": + """ + Each texture class should implement a method + to get the texture properties for the + specified elements in the batch. + The TexturesBase._getitem(i) method + can be used as a helper function to retrieve the + class attributes for item i. Then, a new + instance of the child class can be created with + the attributes. + """ + raise NotImplementedError() + + +def Textures( + maps: Optional[Union[List[torch.Tensor], torch.Tensor]] = None, + faces_uvs: Optional[torch.Tensor] = None, + verts_uvs: Optional[torch.Tensor] = None, + verts_rgb: Optional[torch.Tensor] = None, +) -> TexturesBase: + """ + Textures class has been DEPRECATED. + Preserving Textures as a function for backwards compatibility. + + Args: + maps: texture map per mesh. This can either be a list of maps + [(H, W, C)] or a padded tensor of shape (N, H, W, C). + faces_uvs: (N, F, 3) tensor giving the index into verts_uvs for each + vertex in the face. Padding value is assumed to be -1. + verts_uvs: (N, V, 2) tensor giving the uv coordinate per vertex. + verts_rgb: (N, V, C) tensor giving the color per vertex. Padding + value is assumed to be -1. (C=3 for RGB.) + + + Returns: + a Textures class which is an instance of TexturesBase e.g. TexturesUV, + TexturesAtlas, TexturesVertex + + """ + + warnings.warn( + """Textures class is deprecated, + use TexturesUV, TexturesAtlas, TexturesVertex instead. 
+ Textures class will be removed in future releases.""", + PendingDeprecationWarning, + ) + + if faces_uvs is not None and verts_uvs is not None and maps is not None: + return TexturesUV(maps=maps, faces_uvs=faces_uvs, verts_uvs=verts_uvs) + + if verts_rgb is not None: + return TexturesVertex(verts_features=verts_rgb) + + raise ValueError( + "Textures either requires all three of (faces uvs, verts uvs, maps) or verts rgb" + ) + + +class TexturesAtlas(TexturesBase): + def __init__(self, atlas: Union[torch.Tensor, List[torch.Tensor]]) -> None: + """ + A texture representation where each face has a square texture map. + This is based on the implementation from SoftRasterizer [1]. + + Args: + atlas: (N, F, R, R, C) tensor giving the per face texture map. + The atlas can be created during obj loading with the + pytorch3d.io.load_obj function - in the input arguments + set `create_texture_atlas=True`. The atlas will be + returned in aux.texture_atlas. + + + The padded and list representations of the textures are stored + and the packed representations is computed on the fly and + not cached. + + [1] Liu et al, 'Soft Rasterizer: A Differentiable Renderer for Image-based + 3D Reasoning', ICCV 2019 + See also https://github.com/ShichenLiu/SoftRas/issues/21 + """ + if isinstance(atlas, (list, tuple)): + correct_format = all( + ( + torch.is_tensor(elem) + and elem.ndim == 4 + and elem.shape[1] == elem.shape[2] + and elem.shape[1] == atlas[0].shape[1] + ) + for elem in atlas + ) + if not correct_format: + msg = ( + "Expected atlas to be a list of tensors of shape (F, R, R, C) " + "with the same value of R." + ) + raise ValueError(msg) + self._atlas_list = atlas + self._atlas_padded = None + self.device = torch.device("cpu") + + # These values may be overridden when textures is + # passed into the Meshes constructor. For more details + # refer to the __init__ of Meshes. 
+ self._N = len(atlas) + self._num_faces_per_mesh = [len(a) for a in atlas] + + if self._N > 0: + self.device = atlas[0].device + + elif torch.is_tensor(atlas): + if atlas.ndim != 5: + msg = "Expected atlas to be of shape (N, F, R, R, C); got %r" + raise ValueError(msg % repr(atlas.ndim)) + self._atlas_padded = atlas + self._atlas_list = None + self.device = atlas.device + + # These values may be overridden when textures is + # passed into the Meshes constructor. For more details + # refer to the __init__ of Meshes. + self._N = len(atlas) + max_F = atlas.shape[1] + self._num_faces_per_mesh = [max_F] * self._N + else: + raise ValueError("Expected atlas to be a tensor or list") + + # The num_faces_per_mesh, N and valid + # are reset inside the Meshes object when textures is + # passed into the Meshes constructor. For more details + # refer to the __init__ of Meshes. + self.valid = torch.ones((self._N,), dtype=torch.bool, device=self.device) + + def clone(self) -> "TexturesAtlas": + tex = self.__class__(atlas=self.atlas_padded().clone()) + if self._atlas_list is not None: + tex._atlas_list = [atlas.clone() for atlas in self._atlas_list] + num_faces = ( + self._num_faces_per_mesh.clone() + if torch.is_tensor(self._num_faces_per_mesh) + else self._num_faces_per_mesh + ) + tex.valid = self.valid.clone() + tex._num_faces_per_mesh = num_faces + return tex + + def detach(self) -> "TexturesAtlas": + tex = self.__class__(atlas=self.atlas_padded().detach()) + if self._atlas_list is not None: + tex._atlas_list = [atlas.detach() for atlas in self._atlas_list] + num_faces = ( + self._num_faces_per_mesh.detach() + if torch.is_tensor(self._num_faces_per_mesh) + else self._num_faces_per_mesh + ) + tex.valid = self.valid.detach() + tex._num_faces_per_mesh = num_faces + return tex + + def __getitem__(self, index) -> "TexturesAtlas": + props = ["atlas_list", "_num_faces_per_mesh"] + new_props = self._getitem(index, props=props) + atlas = new_props["atlas_list"] + if isinstance(atlas, 
list): + # multiple batch elements + new_tex = self.__class__(atlas=atlas) + elif torch.is_tensor(atlas): + # single element + new_tex = self.__class__(atlas=[atlas]) + else: + raise ValueError("Not all values are provided in the correct format") + new_tex._num_faces_per_mesh = new_props["_num_faces_per_mesh"] + return new_tex + + def atlas_padded(self) -> torch.Tensor: + if self._atlas_padded is None: + if self.isempty(): + self._atlas_padded = torch.zeros( + (self._N, 0, 0, 0, 3), dtype=torch.float32, device=self.device + ) + else: + self._atlas_padded = _list_to_padded_wrapper( + self._atlas_list, pad_value=0.0 + ) + return self._atlas_padded + + def atlas_list(self) -> List[torch.Tensor]: + if self._atlas_list is None: + if self.isempty(): + self._atlas_padded = [ + torch.empty((0, 0, 0, 3), dtype=torch.float32, device=self.device) + ] * self._N + self._atlas_list = _padded_to_list_wrapper( + self._atlas_padded, split_size=self._num_faces_per_mesh + ) + return self._atlas_list + + def atlas_packed(self) -> torch.Tensor: + if self.isempty(): + return torch.zeros( + (self._N, 0, 0, 3), dtype=torch.float32, device=self.device + ) + atlas_list = self.atlas_list() + return list_to_packed(atlas_list)[0] + + def extend(self, N: int) -> "TexturesAtlas": + new_props = self._extend(N, ["atlas_padded", "_num_faces_per_mesh"]) + new_tex = self.__class__(atlas=new_props["atlas_padded"]) + new_tex._num_faces_per_mesh = new_props["_num_faces_per_mesh"] + return new_tex + + # pyre-fixme[14]: `sample_textures` overrides method defined in `TexturesBase` + # inconsistently. + def sample_textures(self, fragments, **kwargs) -> torch.Tensor: + """ + This is similar to a nearest neighbor sampling and involves a + discretization step. The barycentric coordinates from + rasterization are used to find the nearest grid cell in the texture + atlas and the RGB is returned as the color. 
+ This means that this step is differentiable with respect to the RGB + values of the texture atlas but not differentiable with respect to the + barycentric coordinates. + + TODO: Add a different sampling mode which interpolates the barycentric + coordinates to sample the texture and will be differentiable w.r.t + the barycentric coordinates. + + Args: + fragments: + The outputs of rasterization. From this we use + + - pix_to_face: LongTensor of shape (N, H, W, K) specifying the indices + of the faces (in the packed representation) which + overlap each pixel in the image. + - barycentric_coords: FloatTensor of shape (N, H, W, K, 3) specifying + the barycentric coordinates of each pixel + relative to the faces (in the packed + representation) which overlap the pixel. + + Returns: + texels: (N, H, W, K, C) + """ + N, H, W, K = fragments.pix_to_face.shape + atlas_packed = self.atlas_packed() + R = atlas_packed.shape[1] + bary = fragments.bary_coords + pix_to_face = fragments.pix_to_face + + bary_w01 = bary[..., :2] + # pyre-fixme[16]: `bool` has no attribute `__getitem__`. + mask = (pix_to_face < 0)[..., None] + bary_w01 = torch.where(mask, torch.zeros_like(bary_w01), bary_w01) + # If barycentric coordinates are > 1.0 (in the case of + # blur_radius > 0.0), wxy might be > R. We need to clamp this + # index to R-1 to index into the texture atlas. + w_xy = (bary_w01 * R).to(torch.int64).clamp(max=R - 1) # (N, H, W, K, 2) + + below_diag = ( + bary_w01.sum(dim=-1) * R - w_xy.float().sum(dim=-1) + ) <= 1.0 # (N, H, W, K) + w_x, w_y = w_xy.unbind(-1) + w_x = torch.where(below_diag, w_x, (R - 1 - w_x)) + w_y = torch.where(below_diag, w_y, (R - 1 - w_y)) + + texels = atlas_packed[pix_to_face, w_y, w_x] + texels = texels * (pix_to_face >= 0)[..., None].float() + + return texels + + def submeshes( + self, + vertex_ids_list: List[List[torch.LongTensor]], + faces_ids_list: List[List[torch.LongTensor]], + ) -> "TexturesAtlas": + """ + Extract a sub-texture for use in a submesh. 
+ + If the meshes batch corresponding to this TextureAtlas contains + `n = len(faces_ids_list)` meshes, then self.atlas_list() + will be of length n. After submeshing, we obtain a batch of + `k = sum(len(v) for v in atlas_list` submeshes (see Meshes.submeshes). This + function creates a corresponding TexturesAtlas object with `atlas_list` + of length `k`. + """ + if len(faces_ids_list) != len(self.atlas_list()): + raise IndexError( + "faces_ids_list must be of " "the same length as atlas_list." + ) + + sub_features = [] + for atlas, faces_ids in zip(self.atlas_list(), faces_ids_list): + for faces_ids_submesh in faces_ids: + sub_features.append(atlas[faces_ids_submesh]) + + return self.__class__(sub_features) + + def faces_verts_textures_packed(self) -> torch.Tensor: + """ + Samples texture from each vertex for each face in the mesh. + For N meshes with {Fi} number of faces, it returns a + tensor of shape sum(Fi)x3xC (C = 3 for RGB). + You can use the utils function in structures.utils to convert the + packed representation to a list or padded. + """ + atlas_packed = self.atlas_packed() + # assume each face consists of (v0, v1, v2). + # to sample from the atlas we only need the first two barycentric coordinates. + # for details on how this texture sample works refer to the sample_textures function. + t0 = atlas_packed[:, 0, -1] # corresponding to v0 with bary = (1, 0) + t1 = atlas_packed[:, -1, 0] # corresponding to v1 with bary = (0, 1) + t2 = atlas_packed[:, 0, 0] # corresponding to v2 with bary = (0, 0) + return torch.stack((t0, t1, t2), dim=1) + + def join_batch(self, textures: List["TexturesAtlas"]) -> "TexturesAtlas": + """ + Join the list of textures given by `textures` to + self to create a batch of textures. Return a new + TexturesAtlas object with the combined textures. + + Args: + textures: List of TexturesAtlas objects + + Returns: + new_tex: TexturesAtlas object with the combined + textures from self and the list `textures`. 
+ """ + tex_types_same = all(isinstance(tex, TexturesAtlas) for tex in textures) + if not tex_types_same: + raise ValueError("All textures must be of type TexturesAtlas.") + + atlas_list = [] + atlas_list += self.atlas_list() + num_faces_per_mesh = self._num_faces_per_mesh.copy() + for tex in textures: + atlas_list += tex.atlas_list() + num_faces_per_mesh += tex._num_faces_per_mesh + new_tex = self.__class__(atlas=atlas_list) + new_tex._num_faces_per_mesh = num_faces_per_mesh + return new_tex + + def join_scene(self) -> "TexturesAtlas": + """ + Return a new TexturesAtlas amalgamating the batch. + """ + return self.__class__(atlas=[torch.cat(self.atlas_list())]) + + def check_shapes( + self, batch_size: int, max_num_verts: int, max_num_faces: int + ) -> bool: + """ + Check if the dimensions of the atlas match that of the mesh faces + """ + # (N, F) should be the same + return self.atlas_padded().shape[0:2] == (batch_size, max_num_faces) + + +class TexturesUV(TexturesBase): + def __init__( + self, + maps: Union[torch.Tensor, List[torch.Tensor]], + faces_uvs: Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor]], + verts_uvs: Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor]], + *, + maps_ids: Optional[ + Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor]] + ] = None, + padding_mode: str = "border", + align_corners: bool = True, + sampling_mode: str = "bilinear", + ) -> None: + """ + Textures are represented as a per mesh texture map and uv coordinates for each + vertex in each face. NOTE: this class only supports one texture map per mesh. + + Args: + maps: Either (1) a texture map per mesh. This can either be a list of maps + [(H, W, C)] or a padded tensor of shape (N, H, W, C). + For RGB, C = 3. In this case maps_ids must be None. + Or (2) a set of M texture maps per mesh. This can either be a list of sets + [(M, H, W, C)] or a padded tensor of shape (N, M, H, W, C). + For RGB, C = 3. 
In this case maps_ids must be provided to + identify which is relevant to each face. + faces_uvs: (N, F, 3) LongTensor giving the index into verts_uvs + for each face + verts_uvs: (N, V, 2) tensor giving the uv coordinates per vertex + (a FloatTensor with values between 0 and 1). + maps_ids: Used if there are to be multiple maps per face. This can be either a list of map_ids [(F,)] + or a long tensor of shape (N, F) giving the id of the texture map + for each face. If maps_ids is present, the maps has an extra dimension M + (so maps_padded is (N, M, H, W, C) and maps_list has elements of + shape (M, H, W, C)). + Specifically, the color + of a vertex V is given by an average of maps_padded[i, maps_ids[i, f], u, v, :] + over u and v integers adjacent to + _verts_uvs_padded[i, _faces_uvs_padded[i, f, 0], :] . + align_corners: If true, the extreme values 0 and 1 for verts_uvs + indicate the centers of the edge pixels in the maps. + padding_mode: padding mode for outside grid values + ("zeros", "border" or "reflection"). + sampling_mode: type of interpolation used to sample the texture. + Corresponds to the mode parameter in PyTorch's + grid_sample ("nearest" or "bilinear"). + + The align_corners and padding_mode arguments correspond to the arguments + of the `grid_sample` torch function. There is an informative illustration of + the two align_corners options at + https://discuss.pytorch.org/t/22663/9 . + + An example of how the indexing into the maps, with align_corners=True, + works is as follows. + If maps[i] has shape [1001, 101] and the value of verts_uvs[i][j] + is [0.4, 0.3], then a value of j in faces_uvs[i] means a vertex + whose color is given by maps[i][700, 40]. padding_mode affects what + happens if a value in verts_uvs is less than 0 or greater than 1. + Note that increasing a value in verts_uvs[..., 0] increases an index + in maps, whereas increasing a value in verts_uvs[..., 1] _decreases_ + an _earlier_ index in maps. 
+ + If align_corners=False, an example would be as follows. + If maps[i] has shape [1000, 100] and the value of verts_uvs[i][j] + is [0.405, 0.2995], then a value of j in faces_uvs[i] means a vertex + whose color is given by maps[i][700, 40]. + When align_corners=False, padding_mode even matters for values in + verts_uvs slightly above 0 or slightly below 1. In this case, the + padding_mode matters if the first value is outside the interval + [0.0005, 0.9995] or if the second is outside the interval + [0.005, 0.995]. + """ + self.padding_mode = padding_mode + self.align_corners = align_corners + self.sampling_mode = sampling_mode + if isinstance(faces_uvs, (list, tuple)): + for fv in faces_uvs: + if fv.ndim != 2 or fv.shape[-1] != 3: + msg = "Expected faces_uvs to be of shape (F, 3); got %r" + raise ValueError(msg % repr(fv.shape)) + self._faces_uvs_list = faces_uvs + self._faces_uvs_padded = None + self.device = torch.device("cpu") + + # These values may be overridden when textures is + # passed into the Meshes constructor. For more details + # refer to the __init__ of Meshes. + self._N = len(faces_uvs) + self._num_faces_per_mesh = [len(fv) for fv in faces_uvs] + + if self._N > 0: + self.device = faces_uvs[0].device + + elif torch.is_tensor(faces_uvs): + if faces_uvs.ndim != 3 or faces_uvs.shape[-1] != 3: + msg = "Expected faces_uvs to be of shape (N, F, 3); got %r" + raise ValueError(msg % repr(faces_uvs.shape)) + self._faces_uvs_padded = faces_uvs + self._faces_uvs_list = None + self.device = faces_uvs.device + + # These values may be overridden when textures is + # passed into the Meshes constructor. For more details + # refer to the __init__ of Meshes. 
+ self._N = len(faces_uvs) + max_F = faces_uvs.shape[1] + self._num_faces_per_mesh = [max_F] * self._N + else: + raise ValueError("Expected faces_uvs to be a tensor or list") + + if isinstance(verts_uvs, (list, tuple)): + for fv in verts_uvs: + if fv.ndim != 2 or fv.shape[-1] != 2: + msg = "Expected verts_uvs to be of shape (V, 2); got %r" + raise ValueError(msg % repr(fv.shape)) + self._verts_uvs_list = verts_uvs + self._verts_uvs_padded = None + + if len(verts_uvs) != self._N: + raise ValueError( + "verts_uvs and faces_uvs must have the same batch dimension" + ) + if not all(v.device == self.device for v in verts_uvs): + raise ValueError("verts_uvs and faces_uvs must be on the same device") + + elif torch.is_tensor(verts_uvs): + if ( + verts_uvs.ndim != 3 + or verts_uvs.shape[-1] != 2 + or verts_uvs.shape[0] != self._N + ): + msg = "Expected verts_uvs to be of shape (N, V, 2); got %r" + raise ValueError(msg % repr(verts_uvs.shape)) + self._verts_uvs_padded = verts_uvs + self._verts_uvs_list = None + + if verts_uvs.device != self.device: + raise ValueError("verts_uvs and faces_uvs must be on the same device") + else: + raise ValueError("Expected verts_uvs to be a tensor or list") + + self._maps_ids_padded, self._maps_ids_list = self._format_maps_ids(maps_ids) + + if isinstance(maps, (list, tuple)): + self._maps_list = maps + else: + self._maps_list = None + self._maps_padded = self._format_maps_padded(maps) + + if self._maps_padded.device != self.device: + raise ValueError("maps must be on the same device as verts/faces uvs.") + self.valid = torch.ones((self._N,), dtype=torch.bool, device=self.device) + + def _format_maps_ids( + self, + maps_ids: Optional[ + Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor]] + ], + ) -> Tuple[ + Optional[torch.Tensor], Optional[Union[List[torch.Tensor], Tuple[torch.Tensor]]] + ]: + if maps_ids is None: + return None, None + elif isinstance(maps_ids, (list, tuple)): + for mid in maps_ids: + if mid.ndim != 1: + msg = 
"Expected maps_ids to be of shape (F,); got %r" + raise ValueError(msg % repr(mid.shape)) + if len(maps_ids) != self._N: + raise ValueError( + "map_ids, faces_uvs and verts_uvs must have the same batch dimension" + ) + if not all(mid.device == self.device for mid in maps_ids): + raise ValueError( + "maps_ids and verts/faces uvs must be on the same device" + ) + + if not all( + mid.shape[0] == nfm + for mid, nfm in zip(maps_ids, self._num_faces_per_mesh) + ): + raise ValueError( + "map_ids and faces_uvs must have the same number of faces per mesh" + ) + if not all(mid.device == self.device for mid in maps_ids): + raise ValueError( + "maps_ids and verts/faces uvs must be on the same device" + ) + if not self._num_faces_per_mesh: + return torch.Tensor(), maps_ids + return list_to_padded(maps_ids, pad_value=0), maps_ids + elif isinstance(maps_ids, torch.Tensor): + if maps_ids.ndim != 2 or maps_ids.shape[0] != self._N: + msg = "Expected maps_ids to be of shape (N, F); got %r" + raise ValueError(msg % repr(maps_ids.shape)) + maps_ids_padded = maps_ids + max_F = max(self._num_faces_per_mesh) + if not maps_ids.shape[1] == max_F: + raise ValueError( + "map_ids and faces_uvs must have the same number of faces per mesh" + ) + if maps_ids.device != self.device: + raise ValueError( + "maps_ids and verts/faces uvs must be on the same device" + ) + return maps_ids_padded, None + raise ValueError("Expected maps_ids to be a tensor or list") + + def _format_maps_padded( + self, maps: Union[torch.Tensor, List[torch.Tensor]] + ) -> torch.Tensor: + maps_ids_none = self._maps_ids_padded is None + if isinstance(maps, torch.Tensor): + if not maps_ids_none: + if maps.ndim != 5 or maps.shape[0] != self._N: + msg = "Expected maps to be of shape (N, M, H, W, C); got %r" + raise ValueError(msg % repr(maps.shape)) + elif maps.ndim != 4 or maps.shape[0] != self._N: + msg = "Expected maps to be of shape (N, H, W, C); got %r" + raise ValueError(msg % repr(maps.shape)) + return maps + + if 
isinstance(maps, (list, tuple)): + if len(maps) != self._N: + raise ValueError("Expected one texture map per mesh in the batch.") + if self._N > 0: + ndim = 3 if maps_ids_none else 4 + if not all(map.ndim == ndim for map in maps): + raise ValueError("Invalid number of dimensions in texture maps") + if not all(map.shape[-1] == maps[0].shape[-1] for map in maps): + raise ValueError("Inconsistent number of channels in maps") + maps_padded = ( + _pad_texture_maps(maps, align_corners=self.align_corners) + if maps_ids_none + else _pad_texture_multiple_maps( + maps, align_corners=self.align_corners + ) + ) + else: + if maps_ids_none: + maps_padded = torch.empty( + (self._N, 0, 0, 3), dtype=torch.float32, device=self.device + ) + else: + maps_padded = torch.empty( + (self._N, 0, 0, 0, 3), dtype=torch.float32, device=self.device + ) + return maps_padded + + raise ValueError("Expected maps to be a tensor or list of tensors.") + + def clone(self) -> "TexturesUV": + tex = self.__class__( + self.maps_padded().clone(), + self.faces_uvs_padded().clone(), + self.verts_uvs_padded().clone(), + maps_ids=( + self._maps_ids_padded.clone() + if self._maps_ids_padded is not None + else None + ), + align_corners=self.align_corners, + padding_mode=self.padding_mode, + sampling_mode=self.sampling_mode, + ) + if self._maps_list is not None: + tex._maps_list = [m.clone() for m in self._maps_list] + if self._verts_uvs_list is not None: + tex._verts_uvs_list = [v.clone() for v in self._verts_uvs_list] + if self._faces_uvs_list is not None: + tex._faces_uvs_list = [f.clone() for f in self._faces_uvs_list] + if self._maps_ids_list is not None: + tex._maps_ids_list = [f.clone() for f in self._maps_ids_list] + num_faces = ( + self._num_faces_per_mesh.clone() + if torch.is_tensor(self._num_faces_per_mesh) + else self._num_faces_per_mesh + ) + tex._num_faces_per_mesh = num_faces + tex.valid = self.valid.clone() + return tex + + def detach(self) -> "TexturesUV": + tex = self.__class__( + 
self.maps_padded().detach(), + self.faces_uvs_padded().detach(), + self.verts_uvs_padded().detach(), + maps_ids=( + self._maps_ids_padded.detach() + if self._maps_ids_padded is not None + else None + ), + align_corners=self.align_corners, + padding_mode=self.padding_mode, + sampling_mode=self.sampling_mode, + ) + if self._maps_list is not None: + tex._maps_list = [m.detach() for m in self._maps_list] + if self._verts_uvs_list is not None: + tex._verts_uvs_list = [v.detach() for v in self._verts_uvs_list] + if self._faces_uvs_list is not None: + tex._faces_uvs_list = [f.detach() for f in self._faces_uvs_list] + if self._maps_ids_list is not None: + tex._maps_ids_list = [mi.detach() for mi in self._maps_ids_list] + num_faces = ( + self._num_faces_per_mesh.detach() + if torch.is_tensor(self._num_faces_per_mesh) + else self._num_faces_per_mesh + ) + tex._num_faces_per_mesh = num_faces + tex.valid = self.valid.detach() + return tex + + def __getitem__(self, index) -> "TexturesUV": + props = [ + "faces_uvs_list", + "verts_uvs_list", + "maps_list", + "maps_ids_list", + "_num_faces_per_mesh", + ] + new_props = self._getitem(index, props) + faces_uvs = new_props["faces_uvs_list"] + verts_uvs = new_props["verts_uvs_list"] + maps = new_props["maps_list"] + maps_ids = new_props["maps_ids_list"] + + # if index has multiple values then faces/verts/maps may be a list of tensors + if all(isinstance(f, (list, tuple)) for f in [faces_uvs, verts_uvs, maps]): + if maps_ids is not None and not isinstance(maps_ids, (list, tuple)): + raise ValueError( + "Maps ids are not in the correct format expected list or tuple" + ) + new_tex = self.__class__( + faces_uvs=faces_uvs, + verts_uvs=verts_uvs, + maps=maps, + maps_ids=maps_ids, + padding_mode=self.padding_mode, + align_corners=self.align_corners, + sampling_mode=self.sampling_mode, + ) + elif all(torch.is_tensor(f) for f in [faces_uvs, verts_uvs, maps]): + if maps_ids is not None and not torch.is_tensor(maps_ids): + raise ValueError( + 
"Maps ids are not in the correct format expected tensor" + ) + new_tex = self.__class__( + faces_uvs=[faces_uvs], + verts_uvs=[verts_uvs], + maps=[maps], + maps_ids=[maps_ids] if maps_ids is not None else None, + padding_mode=self.padding_mode, + align_corners=self.align_corners, + sampling_mode=self.sampling_mode, + ) + else: + raise ValueError("Not all values are provided in the correct format") + new_tex._num_faces_per_mesh = new_props["_num_faces_per_mesh"] + return new_tex + + def faces_uvs_padded(self) -> torch.Tensor: + if self._faces_uvs_padded is None: + if self.isempty(): + self._faces_uvs_padded = torch.zeros( + (self._N, 0, 3), dtype=torch.float32, device=self.device + ) + else: + self._faces_uvs_padded = list_to_padded( + self._faces_uvs_list, pad_value=0.0 + ) + return self._faces_uvs_padded + + def faces_uvs_list(self) -> List[torch.Tensor]: + if self._faces_uvs_list is None: + if self.isempty(): + self._faces_uvs_list = [ + torch.empty((0, 3), dtype=torch.float32, device=self.device) + ] * self._N + else: + self._faces_uvs_list = padded_to_list( + self._faces_uvs_padded, split_size=self._num_faces_per_mesh + ) + return self._faces_uvs_list + + def verts_uvs_padded(self) -> torch.Tensor: + if self._verts_uvs_padded is None: + if self.isempty(): + self._verts_uvs_padded = torch.zeros( + (self._N, 0, 2), dtype=torch.float32, device=self.device + ) + else: + self._verts_uvs_padded = list_to_padded( + self._verts_uvs_list, pad_value=0.0 + ) + return self._verts_uvs_padded + + def verts_uvs_list(self) -> List[torch.Tensor]: + if self._verts_uvs_list is None: + if self.isempty(): + self._verts_uvs_list = [ + torch.empty((0, 2), dtype=torch.float32, device=self.device) + ] * self._N + else: + # The number of vertices in the mesh and in verts_uvs can differ + # e.g. if a vertex is shared between 3 faces, it can + # have up to 3 different uv coordinates. 
+ self._verts_uvs_list = list(self._verts_uvs_padded.unbind(0)) + return self._verts_uvs_list + + def maps_ids_padded(self) -> Optional[torch.Tensor]: + return self._maps_ids_padded + + def maps_ids_list(self) -> Optional[List[torch.Tensor]]: + if self._maps_ids_list is not None: + return self._maps_ids_list + elif self._maps_ids_padded is not None: + return self._maps_ids_padded.unbind(0) + else: + return None + + # Currently only the padded maps are used. + def maps_padded(self) -> torch.Tensor: + return self._maps_padded + + def maps_list(self) -> List[torch.Tensor]: + if self._maps_list is not None: + return self._maps_list + return self._maps_padded.unbind(0) + + def extend(self, N: int) -> "TexturesUV": + new_props = self._extend( + N, + [ + "maps_padded", + "verts_uvs_padded", + "faces_uvs_padded", + "maps_ids_padded", + "_num_faces_per_mesh", + ], + ) + new_tex = self.__class__( + maps=new_props["maps_padded"], + faces_uvs=new_props["faces_uvs_padded"], + verts_uvs=new_props["verts_uvs_padded"], + maps_ids=new_props["maps_ids_padded"], + padding_mode=self.padding_mode, + align_corners=self.align_corners, + sampling_mode=self.sampling_mode, + ) + + new_tex._num_faces_per_mesh = new_props["_num_faces_per_mesh"] + return new_tex + + # pyre-fixme[14]: `sample_textures` overrides method defined in `TexturesBase` + # inconsistently. + def sample_textures(self, fragments, **kwargs) -> torch.Tensor: + """ + Interpolate a 2D texture map using uv vertex texture coordinates for each + face in the mesh. First interpolate the vertex uvs using barycentric coordinates + for each pixel in the rasterized output. Then interpolate the texture map + using the uv coordinate for each pixel. + + Args: + fragments: + The outputs of rasterization. From this we use + + - pix_to_face: LongTensor of shape (N, H, W, K) specifying the indices + of the faces (in the packed representation) which + overlap each pixel in the image. 
+ - barycentric_coords: FloatTensor of shape (N, H, W, K, 3) specifying + the barycentric coordinates of each pixel + relative to the faces (in the packed + representation) which overlap the pixel. + + Returns: + texels: tensor of shape (N, H, W, K, C) giving the interpolated + texture for each pixel in the rasterized image. + """ + if self.isempty(): + faces_verts_uvs = torch.zeros( + (self._N, 3, 2), dtype=torch.float32, device=self.device + ) + else: + packing_list = [ + i[j] for i, j in zip(self.verts_uvs_list(), self.faces_uvs_list()) + ] + faces_verts_uvs = torch.cat(packing_list) + + # pixel_uvs: (N, H, W, K, 2) + pixel_uvs = interpolate_face_attributes( + fragments.pix_to_face, fragments.bary_coords, faces_verts_uvs + ) + + N, H_out, W_out, K = fragments.pix_to_face.shape + + texture_maps = self.maps_padded() + maps_ids_padded = self.maps_ids_padded() + if maps_ids_padded is None: + # pixel_uvs: (N, H, W, K, 2) -> (N, K, H, W, 2) -> (NK, H, W, 2) + pixel_uvs = pixel_uvs.permute(0, 3, 1, 2, 4).reshape(N * K, H_out, W_out, 2) + N, H_in, W_in, C = texture_maps.shape # 3 for RGB + + # textures.map: + # (N, H, W, C) -> (N, C, H, W) -> (1, N, C, H, W) + # -> expand (K, N, C, H, W) -> reshape (N*K, C, H, W) + texture_maps = ( + texture_maps.permute(0, 3, 1, 2)[None, ...] + .expand(K, -1, -1, -1, -1) + .transpose(0, 1) + .reshape(N * K, C, H_in, W_in) + ) + # Textures: (N*K, C, H, W), pixel_uvs: (N*K, H, W, 2) + # Now need to format the pixel uvs and the texture map correctly! + # From pytorch docs, grid_sample takes `grid` and `input`: + # grid specifies the sampling pixel locations normalized by + # the input spatial dimensions It should have most + # values in the range of [-1, 1]. Values x = -1, y = -1 + # is the left-top pixel of input, and values x = 1, y = 1 is the + # right-bottom pixel of input. 
+ + # map to a range of [-1, 1] and flip the y axis + pixel_uvs = torch.lerp( + pixel_uvs.new_tensor([-1.0, 1.0]), + pixel_uvs.new_tensor([1.0, -1.0]), + pixel_uvs, + ) + + if texture_maps.device != pixel_uvs.device: + texture_maps = texture_maps.to(pixel_uvs.device) + texels = F.grid_sample( + texture_maps, + pixel_uvs, + mode=self.sampling_mode, + align_corners=self.align_corners, + padding_mode=self.padding_mode, + ) + # texels now has shape (NK, C, H_out, W_out) + texels = texels.reshape(N, K, C, H_out, W_out).permute(0, 3, 4, 1, 2) + return texels + else: + # We have maps_ids_padded: (N, F), textures_map: (N, M, Hi, Wi, C),fragmenmts.pix_to_face: (N, Ho, Wo, K) + # Get pixel_to_map_ids: (N, K, Ho, Wo) by indexing pix_to_face into maps_ids + N, M, H_in, W_in, C = texture_maps.shape # 3 for RGB + + mask = fragments.pix_to_face < 0 + pix_to_face = fragments.pix_to_face.clone() + pix_to_face[mask] = 0 + + pixel_to_map_ids = ( + maps_ids_padded.flatten() + .gather(0, pix_to_face.flatten()) + .view(N, K, H_out, W_out) + ) + + # Normalize between -1 and 1 with M (number of maps) + pixel_to_map_ids = (2.0 * pixel_to_map_ids.float() / float(M - 1)) - 1 + pixel_uvs = pixel_uvs.permute(0, 3, 1, 2, 4) + pixel_uvs = torch.lerp( + pixel_uvs.new_tensor([-1.0, 1.0]), + pixel_uvs.new_tensor([1.0, -1.0]), + pixel_uvs, + ) + + # N x H_out x W_out x K x 3 + pixel_uvms = torch.cat((pixel_uvs, pixel_to_map_ids.unsqueeze(4)), dim=4) + # (N, M, H, W, C) -> (N, C, M, H, W) + texture_maps = texture_maps.permute(0, 4, 1, 2, 3) + if texture_maps.device != pixel_uvs.device: + texture_maps = texture_maps.to(pixel_uvs.device) + texels = F.grid_sample( + texture_maps, + pixel_uvms, + mode=self.sampling_mode, + align_corners=self.align_corners, + padding_mode=self.padding_mode, + ) + # (N, C, K, H_out, W_out) -> (N, H_out, W_out, K, C) + texels = texels.permute(0, 3, 4, 2, 1).contiguous() + return texels + + def faces_verts_textures_packed(self) -> torch.Tensor: + """ + Samples texture from 
each vertex and for each face in the mesh. + For N meshes with {Fi} number of faces, it returns a + tensor of shape sum(Fi)x3xC (C = 3 for RGB). + You can use the utils function in structures.utils to convert the + packed representation to a list or padded. + """ + if self.isempty(): + return torch.zeros( + (0, 3, self.maps_padded().shape[-1]), + dtype=torch.float32, + device=self.device, + ) + else: + packing_list = [ + i[j] for i, j in zip(self.verts_uvs_list(), self.faces_uvs_list()) + ] + faces_verts_uvs = _list_to_padded_wrapper( + packing_list, pad_value=0.0 + ) # Nxmax(Fi)x3x2 + # map to a range of [-1, 1] and flip the y axis + faces_verts_uvs = torch.lerp( + faces_verts_uvs.new_tensor([-1.0, 1.0]), + faces_verts_uvs.new_tensor([1.0, -1.0]), + faces_verts_uvs, + ) + texture_maps = self.maps_padded() # NxHxWxC or NxMxHxWxC + maps_ids_padded = self.maps_ids_padded() + if maps_ids_padded is None: + texture_maps = texture_maps.permute(0, 3, 1, 2) # NxCxHxW + else: + M = texture_maps.shape[1] + # (N, M, H, W, C) -> (N, C, M, H, W) + texture_maps = texture_maps.permute(0, 4, 1, 2, 3) + # expand maps_ids to (N, F, 3, 1) + maps_ids_padded = maps_ids_padded[:, :, None, None].expand(-1, -1, 3, -1) + maps_ids_padded = (2.0 * maps_ids_padded.float() / float(M - 1)) - 1.0 + + # (N, F, 3, 2+1) -> (N, 1, F, 3, 3) + faces_verts_uvs = torch.cat( + (faces_verts_uvs, maps_ids_padded), dim=3 + ).unsqueeze(1) + # (N, M, H, W, C) -> (N, C, H, W, M) + # texture_maps = texture_maps.permute(0, 4, 2, 3, 1) + textures = F.grid_sample( + texture_maps, + faces_verts_uvs, + mode=self.sampling_mode, + align_corners=self.align_corners, + padding_mode=self.padding_mode, + ) # (N, C, max(Fi), 3) + if maps_ids_padded is not None: + textures = textures.squeeze(dim=2) + # (N, C, max(Fi), 3) -> (N, max(Fi), 3, C) + textures = textures.permute(0, 2, 3, 1) + textures = _padded_to_list_wrapper( + textures, split_size=self._num_faces_per_mesh + ) # list of N {Fix3xC} tensors + return 
list_to_packed(textures)[0] + + def join_batch(self, textures: List["TexturesUV"]) -> "TexturesUV": + """ + Join the list of textures given by `textures` to + self to create a batch of textures. Return a new + TexturesUV object with the combined textures. + + Args: + textures: List of TexturesUV objects + + Returns: + new_tex: TexturesUV object with the combined + textures from self and the list `textures`. + """ + if self.maps_ids_padded() is not None: + # TODO + raise NotImplementedError( + "join_batch does not support TexturesUV with multiple maps" + ) + tex_types_same = all(isinstance(tex, TexturesUV) for tex in textures) + if not tex_types_same: + raise ValueError("All textures must be of type TexturesUV.") + + padding_modes_same = all( + tex.padding_mode == self.padding_mode for tex in textures + ) + if not padding_modes_same: + raise ValueError("All textures must have the same padding_mode.") + align_corners_same = all( + tex.align_corners == self.align_corners for tex in textures + ) + if not align_corners_same: + raise ValueError("All textures must have the same align_corners value.") + sampling_mode_same = all( + tex.sampling_mode == self.sampling_mode for tex in textures + ) + if not sampling_mode_same: + raise ValueError("All textures must have the same sampling_mode.") + + verts_uvs_list = [] + faces_uvs_list = [] + maps_list = [] + faces_uvs_list += self.faces_uvs_list() + verts_uvs_list += self.verts_uvs_list() + maps_list += self.maps_list() + num_faces_per_mesh = self._num_faces_per_mesh.copy() + for tex in textures: + verts_uvs_list += tex.verts_uvs_list() + faces_uvs_list += tex.faces_uvs_list() + num_faces_per_mesh += tex._num_faces_per_mesh + maps_list += tex.maps_list() + + new_tex = self.__class__( + maps=maps_list, + faces_uvs=faces_uvs_list, + verts_uvs=verts_uvs_list, + padding_mode=self.padding_mode, + align_corners=self.align_corners, + sampling_mode=self.sampling_mode, + ) + new_tex._num_faces_per_mesh = num_faces_per_mesh + return 
new_tex + + def _place_map_into_single_map( + self, single_map: torch.Tensor, map_: torch.Tensor, location: PackedRectangle + ) -> None: + """ + Copy map into a larger tensor single_map at the destination specified by location. + If align_corners is False, we add the needed border around the destination. + + Used by join_scene. + + Args: + single_map: (total_H, total_W, C) + map_: (H, W, C) source data + location: where to place map + """ + do_flip = location.flipped + source = map_.transpose(0, 1) if do_flip else map_ + border_width = 0 if self.align_corners else 1 + lower_u = location.x + border_width + lower_v = location.y + border_width + upper_u = lower_u + source.shape[0] + upper_v = lower_v + source.shape[1] + single_map[lower_u:upper_u, lower_v:upper_v] = source + + if self.padding_mode != "zeros" and not self.align_corners: + single_map[lower_u - 1, lower_v:upper_v] = single_map[ + lower_u, lower_v:upper_v + ] + single_map[upper_u, lower_v:upper_v] = single_map[ + upper_u - 1, lower_v:upper_v + ] + single_map[lower_u:upper_u, lower_v - 1] = single_map[ + lower_u:upper_u, lower_v + ] + single_map[lower_u:upper_u, upper_v] = single_map[ + lower_u:upper_u, upper_v - 1 + ] + single_map[lower_u - 1, lower_v - 1] = single_map[lower_u, lower_v] + single_map[lower_u - 1, upper_v] = single_map[lower_u, upper_v - 1] + single_map[upper_u, lower_v - 1] = single_map[upper_u - 1, lower_v] + single_map[upper_u, upper_v] = single_map[upper_u - 1, upper_v - 1] + + def join_scene(self) -> "TexturesUV": + """ + Return a new TexturesUV amalgamating the batch. + + We calculate a large single map which contains the original maps, + and find verts_uvs to point into it. This will not replicate + behavior of padding for verts_uvs values outside [0,1]. + + If align_corners=False, we need to add an artificial border around + every map. + + We use the function `pack_unique_rectangles` to provide a layout for + the single map. 
    def join_scene(self) -> "TexturesUV":
        """
        Return a new TexturesUV amalgamating the batch.

        We calculate a large single map which contains the original maps,
        and find verts_uvs to point into it. This will not replicate
        behavior of padding for verts_uvs values outside [0,1].

        If align_corners=False, we need to add an artificial border around
        every map.

        We use the function `pack_unique_rectangles` to provide a layout for
        the single map. This means that if self was created with a list of maps,
        and to() has not been called, and there were two maps which were exactly
        the same tensor object, then they will become the same data in the unified map.
        _place_map_into_single_map is used to copy the maps into the single map.
        The merging of verts_uvs and faces_uvs is handled locally in this function.
        """
        if self.maps_ids_padded() is not None:
            # TODO
            raise NotImplementedError("join_scene does not support multiple maps.")
        maps = self.maps_list()
        heights_and_widths = []
        # With align_corners=False, reserve a 1-pixel border on every side
        # (hence +2 per dimension) for edge replication during packing.
        extra_border = 0 if self.align_corners else 2
        for map_ in maps:
            heights_and_widths.append(
                Rectangle(
                    map_.shape[0] + extra_border, map_.shape[1] + extra_border, id(map_)
                )
            )
        # Deduplicate by tensor identity: identical map objects share one slot.
        merging_plan = pack_unique_rectangles(heights_and_widths)
        C = maps[0].shape[-1]
        single_map = maps[0].new_zeros((*merging_plan.total_size, C))
        verts_uvs = self.verts_uvs_list()
        verts_uvs_merged = []

        for map_, loc, uvs in zip(maps, merging_plan.locations, verts_uvs):
            new_uvs = uvs.clone()
            # Only the first occurrence of a shared map copies pixel data.
            if loc.is_first:
                self._place_map_into_single_map(single_map, map_, loc)
            do_flip = loc.flipped
            x_shape = map_.shape[1] if do_flip else map_.shape[0]
            y_shape = map_.shape[0] if do_flip else map_.shape[1]

            if do_flip:
                # Here we have flipped / transposed the map.
                # In uvs, the y values are decreasing from 1 to 0 and the x
                # values increase from 0 to 1. We subtract all values from 1
                # as the x's become y's and the y's become x's.
                new_uvs = 1.0 - new_uvs[:, [1, 0]]
                if TYPE_CHECKING:
                    new_uvs = torch.Tensor(new_uvs)

            # Remap uvs so they hit the same texels at the map's new location
            # inside single_map. The key facts (derived in full below):
            #
            # align_corners=True: index x in the source map is hit by
            #   u = x/(map_.shape[1]-1); in single_map it lives at loc[1]+x, so
            #   u -> { u*(map_.shape[1]-1) + loc[1] } / (total_size[1]-1).
            # Similarly, since v counts from the bottom (v = 1 - y/(H-1)),
            #   v -> { v*(map_.shape[0]-1) + total_size[0] - map.shape[0] - loc[0] }
            #        / (total_size[0]-1)
            #
            # align_corners=False: index x is hit by u = (x+0.5)/map_.shape[1];
            # in single_map it lives at loc[1]+1+x (the +1 skips the border), so
            #   u -> { loc[1] + 1 + u*map_.shape[1] } / total_size[1]
            # and for v (again measured from the bottom):
            #   v -> { total_size[0] - loc[0] - map.shape[0] - 1 + v*map_.shape[0] }
            #        / total_size[0]
            #
            # We change the y's in new_uvs for the scaling of height,
            # and the x's for the scaling of width.
            # That is why the 1's and 0's are mismatched in these lines.
            one_if_align = 1 if self.align_corners else 0
            one_if_not_align = 1 - one_if_align
            denom_x = merging_plan.total_size[0] - one_if_align
            scale_x = x_shape - one_if_align
            denom_y = merging_plan.total_size[1] - one_if_align
            scale_y = y_shape - one_if_align
            new_uvs[:, 1] *= scale_x / denom_x
            new_uvs[:, 1] += (
                merging_plan.total_size[0] - x_shape - loc.x - one_if_not_align
            ) / denom_x
            new_uvs[:, 0] *= scale_y / denom_y
            new_uvs[:, 0] += (loc.y + one_if_not_align) / denom_y

            verts_uvs_merged.append(new_uvs)

        # Faces index into the concatenated verts_uvs, so each mesh's face
        # indices are shifted by the number of uv-vertices that precede it.
        faces_uvs_merged = []
        offset = 0
        for faces_uvs_, verts_uvs_ in zip(self.faces_uvs_list(), verts_uvs):
            faces_uvs_merged.append(offset + faces_uvs_)
            offset += verts_uvs_.shape[0]

        return self.__class__(
            maps=[single_map],
            faces_uvs=[torch.cat(faces_uvs_merged)],
            verts_uvs=[torch.cat(verts_uvs_merged)],
            align_corners=self.align_corners,
            padding_mode=self.padding_mode,
            sampling_mode=self.sampling_mode,
        )
    def centers_for_image(self, index: int) -> torch.Tensor:
        """
        Return the locations in the texture map which correspond to the given
        verts_uvs, for one of the meshes. This is potentially useful for
        visualizing the data. See the texturesuv_image_matplotlib and
        texturesuv_image_PIL functions.

        Args:
            index: batch index of the mesh whose centers to return.

        Returns:
            centers: coordinates of points in the texture image
                    - a FloatTensor of shape (V,2)
        """
        if self.maps_ids_padded() is not None:
            # TODO: invent a visualization for the multiple maps case
            raise NotImplementedError("This function does not support multiple maps.")
        if self._N != 1:
            raise ValueError(
                "This function only supports plotting textures for one mesh."
            )
        texture_image = self.maps_padded()
        verts_uvs = self.verts_uvs_list()[index][None]
        _, H, W, _3 = texture_image.shape
        # Build a (1, 2, H, W) grid of pixel coordinates: channel 0 holds the
        # column (x) index, channel 1 the row (y) index.
        coord1 = torch.arange(W).expand(H, W)
        coord2 = torch.arange(H)[:, None].expand(H, W)
        coords = torch.stack([coord1, coord2])[None]
        with torch.no_grad():
            # Get xy cartesian coordinates based on the uv coordinates.
            # Sampling the coordinate grid with the same settings used for
            # texture lookup yields exactly the pixel each uv would sample.
            # The flip mirrors the vertical axis to match uv convention
            # (v measured from the bottom of the image).
            centers = F.grid_sample(
                torch.flip(coords.to(texture_image), [2]),
                # Convert from [0, 1] -> [-1, 1] range expected by grid sample
                verts_uvs[:, None] * 2.0 - 1,
                mode=self.sampling_mode,
                align_corners=self.align_corners,
                padding_mode=self.padding_mode,
            ).cpu()
        centers = centers[0, :, 0].T
        return centers

    def check_shapes(
        self, batch_size: int, max_num_verts: int, max_num_faces: int
    ) -> bool:
        """
        Check if the dimensions of the verts/faces uvs match that of the mesh.
        """
        # (N, F) should be the same
        # (N, V) is not guaranteed to be the same, so only N is compared
        # for verts_uvs.
        return (self.faces_uvs_padded().shape[0:2] == (batch_size, max_num_faces)) and (
            self.verts_uvs_padded().shape[0] == batch_size
        )
+ + + Returns: + A "TexturesUV in which faces_uvs_padded, verts_uvs_padded, and maps_padded + have length sum(len(faces) for faces in faces_ids_list) + """ + if self.maps_ids_padded() is not None: + # TODO + raise NotImplementedError("This function does not support multiple maps.") + if len(faces_ids_list) != len(self.faces_uvs_padded()): + raise IndexError( + "faces_uvs_padded must be of " "the same length as face_ids_list." + ) + + sub_faces_uvs, sub_verts_uvs, sub_maps = [], [], [] + for faces_ids, faces_uvs, verts_uvs, map_ in zip( + faces_ids_list, + self.faces_uvs_padded(), + self.verts_uvs_padded(), + self.maps_padded(), + ): + for faces_ids_submesh in faces_ids: + sub_faces_uvs.append(faces_uvs[faces_ids_submesh]) + sub_verts_uvs.append(verts_uvs) + sub_maps.append(map_) + + return self.__class__( + maps=sub_maps, + faces_uvs=sub_faces_uvs, + verts_uvs=sub_verts_uvs, + padding_mode=self.padding_mode, + align_corners=self.align_corners, + sampling_mode=self.sampling_mode, + ) + + +class TexturesVertex(TexturesBase): + def __init__( + self, + verts_features: Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor]], + ) -> None: + """ + Batched texture representation where each vertex in a mesh + has a C dimensional feature vector. + + Args: + verts_features: list of (Vi, C) or (N, V, C) tensor giving a feature + vector with arbitrary dimensions for each vertex. + """ + if isinstance(verts_features, (tuple, list)): + correct_shape = all( + (torch.is_tensor(v) and v.ndim == 2) for v in verts_features + ) + if not correct_shape: + raise ValueError( + "Expected verts_features to be a list of tensors of shape (V, C)." + ) + + self._verts_features_list = verts_features + self._verts_features_padded = None + self.device = torch.device("cpu") + + # These values may be overridden when textures is + # passed into the Meshes constructor. For more details + # refer to the __init__ of Meshes. 
    def __init__(
        self,
        verts_features: Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor]],
    ) -> None:
        """
        Batched texture representation where each vertex in a mesh
        has a C dimensional feature vector.

        Args:
            verts_features: list of (Vi, C) or (N, V, C) tensor giving a feature
                vector with arbitrary dimensions for each vertex.

        Raises:
            ValueError: if verts_features is neither a 3D tensor nor a
                list/tuple of 2D tensors.
        """
        if isinstance(verts_features, (tuple, list)):
            correct_shape = all(
                (torch.is_tensor(v) and v.ndim == 2) for v in verts_features
            )
            if not correct_shape:
                raise ValueError(
                    "Expected verts_features to be a list of tensors of shape (V, C)."
                )

            # List input: keep the list form, build the padded form lazily.
            self._verts_features_list = verts_features
            self._verts_features_padded = None
            self.device = torch.device("cpu")

            # These values may be overridden when textures is
            # passed into the Meshes constructor. For more details
            # refer to the __init__ of Meshes.
            self._N = len(verts_features)
            self._num_verts_per_mesh = [len(fv) for fv in verts_features]

            if self._N > 0:
                self.device = verts_features[0].device

        elif torch.is_tensor(verts_features):
            if verts_features.ndim != 3:
                msg = "Expected verts_features to be of shape (N, V, C); got %r"
                raise ValueError(msg % repr(verts_features.shape))
            # Tensor input: keep the padded form, build the list form lazily.
            self._verts_features_padded = verts_features
            self._verts_features_list = None
            self.device = verts_features.device

            # These values may be overridden when textures is
            # passed into the Meshes constructor. For more details
            # refer to the __init__ of Meshes.
            self._N = len(verts_features)
            max_F = verts_features.shape[1]
            self._num_verts_per_mesh = [max_F] * self._N
        else:
            raise ValueError("verts_features must be a tensor or list of tensors")

        # This is set inside the Meshes object when textures is
        # passed into the Meshes constructor. For more details
        # refer to the __init__ of Meshes.
        self.valid = torch.ones((self._N,), dtype=torch.bool, device=self.device)

    def clone(self) -> "TexturesVertex":
        """Return a deep copy; features, bookkeeping and validity are all cloned."""
        tex = self.__class__(self.verts_features_padded().clone())
        if self._verts_features_list is not None:
            tex._verts_features_list = [f.clone() for f in self._verts_features_list]
        tex._num_verts_per_mesh = self._num_verts_per_mesh.copy()
        tex.valid = self.valid.clone()
        return tex

    def detach(self) -> "TexturesVertex":
        """Return a copy whose tensors are detached from the autograd graph."""
        tex = self.__class__(self.verts_features_padded().detach())
        if self._verts_features_list is not None:
            tex._verts_features_list = [f.detach() for f in self._verts_features_list]
        tex._num_verts_per_mesh = self._num_verts_per_mesh.copy()
        tex.valid = self.valid.detach()
        return tex
    def __getitem__(self, index) -> "TexturesVertex":
        """
        Index the batch, returning a new TexturesVertex for the selected
        mesh(es). Selection semantics are delegated to TexturesBase._getitem.
        """
        props = ["verts_features_list", "_num_verts_per_mesh"]
        new_props = self._getitem(index, props)
        verts_features = new_props["verts_features_list"]
        if isinstance(verts_features, list):
            # Handle the case of an empty list: represent it as an empty
            # (0, 0, 3) padded tensor so the constructor's tensor branch is used.
            if len(verts_features) == 0:
                verts_features = torch.empty(
                    size=(0, 0, 3),
                    dtype=torch.float32,
                    device=self.verts_features_padded().device,
                )
            new_tex = self.__class__(verts_features=verts_features)
        elif torch.is_tensor(verts_features):
            # A single tensor means a single selected mesh; wrap in a list.
            new_tex = self.__class__(verts_features=[verts_features])
        else:
            raise ValueError("Not all values are provided in the correct format")
        new_tex._num_verts_per_mesh = new_props["_num_verts_per_mesh"]
        return new_tex

    def verts_features_padded(self) -> torch.Tensor:
        """
        Return the per-vertex features as one padded (N, V_max, C) tensor,
        computing and caching it from the list form on first use.
        """
        if self._verts_features_padded is None:
            if self.isempty():
                # NOTE(review): the empty placeholder shape (N, 0, 3, 0) has an
                # extra trailing dim compared to the documented (N, V, C) —
                # looks inherited from upstream; confirm before relying on it.
                self._verts_features_padded = torch.zeros(
                    (self._N, 0, 3, 0), dtype=torch.float32, device=self.device
                )
            else:
                self._verts_features_padded = list_to_padded(
                    self._verts_features_list, pad_value=0.0
                )
        return self._verts_features_padded

    def verts_features_list(self) -> List[torch.Tensor]:
        """
        Return the per-vertex features as a list of N (Vi, C) tensors,
        computing and caching it from the padded form on first use.
        """
        if self._verts_features_list is None:
            if self.isempty():
                self._verts_features_list = [
                    torch.empty((0, 3), dtype=torch.float32, device=self.device)
                ] * self._N
            else:
                self._verts_features_list = padded_to_list(
                    self._verts_features_padded, split_size=self._num_verts_per_mesh
                )
        return self._verts_features_list

    def verts_features_packed(self) -> torch.Tensor:
        """
        Return the per-vertex features packed into one (sum(Vi), C) tensor.
        """
        if self.isempty():
            # NOTE(review): empty placeholder shape (N, 3, 0) does not match
            # the packed (sum(Vi), C) layout — inherited behavior; confirm.
            return torch.zeros((self._N, 3, 0), dtype=torch.float32, device=self.device)
        verts_features_list = self.verts_features_list()
        return list_to_packed(verts_features_list)[0]

    def extend(self, N: int) -> "TexturesVertex":
        """
        Return a new TexturesVertex with each element of the batch repeated N
        times (batch size becomes self._N * N).
        """
        new_props = self._extend(N, ["verts_features_padded", "_num_verts_per_mesh"])
        new_tex = self.__class__(verts_features=new_props["verts_features_padded"])
        new_tex._num_verts_per_mesh = new_props["_num_verts_per_mesh"]
        return new_tex
    # pyre-fixme[14]: `sample_textures` overrides method defined in `TexturesBase`
    #  inconsistently.
    def sample_textures(self, fragments, faces_packed=None) -> torch.Tensor:
        """
        Determine the color for each rasterized face. Interpolate the colors for
        vertices which form the face using the barycentric coordinates.

        Args:
            fragments:
                The outputs of rasterization. From this we use

                - pix_to_face: LongTensor of shape (N, H, W, K) specifying the indices
                  of the faces (in the packed representation) which
                  overlap each pixel in the image.
                - barycentric_coords: FloatTensor of shape (N, H, W, K, 3) specifying
                  the barycentric coordinates of each pixel
                  relative to the faces (in the packed
                  representation) which overlap the pixel.
            faces_packed: (F, 3) LongTensor of packed vertex indices per face,
                used to gather each face's three per-vertex feature vectors.

        Returns:
            texels: An texture per pixel of shape (N, H, W, K, C).
            There will be one C dimensional value for each element in
            fragments.pix_to_face.
        """
        verts_features_packed = self.verts_features_packed()
        # (F, 3, C): the feature vector of each of the 3 vertices of each face.
        faces_verts_features = verts_features_packed[faces_packed]

        texels = interpolate_face_attributes(
            fragments.pix_to_face, fragments.bary_coords, faces_verts_features
        )
        return texels
+ """ + if len(vertex_ids_list) != len(self.verts_features_list()): + raise IndexError( + "verts_features_list must be of " "the same length as vertex_ids_list." + ) + + sub_features = [] + for vertex_ids, features in zip(vertex_ids_list, self.verts_features_list()): + for vertex_ids_submesh in vertex_ids: + sub_features.append(features[vertex_ids_submesh]) + + return self.__class__(sub_features) + + def faces_verts_textures_packed(self, faces_packed=None) -> torch.Tensor: + """ + Samples texture from each vertex and for each face in the mesh. + For N meshes with {Fi} number of faces, it returns a + tensor of shape sum(Fi)x3xC (C = 3 for RGB). + You can use the utils function in structures.utils to convert the + packed representation to a list or padded. + """ + verts_features_packed = self.verts_features_packed() + faces_verts_features = verts_features_packed[faces_packed] + return faces_verts_features + + def join_batch(self, textures: List["TexturesVertex"]) -> "TexturesVertex": + """ + Join the list of textures given by `textures` to + self to create a batch of textures. Return a new + TexturesVertex object with the combined textures. + + Args: + textures: List of TexturesVertex objects + + Returns: + new_tex: TexturesVertex object with the combined + textures from self and the list `textures`. + """ + tex_types_same = all(isinstance(tex, TexturesVertex) for tex in textures) + if not tex_types_same: + raise ValueError("All textures must be of type TexturesVertex.") + + verts_features_list = [] + verts_features_list += self.verts_features_list() + num_verts_per_mesh = self._num_verts_per_mesh.copy() + for tex in textures: + verts_features_list += tex.verts_features_list() + num_verts_per_mesh += tex._num_verts_per_mesh + + new_tex = self.__class__(verts_features=verts_features_list) + new_tex._num_verts_per_mesh = num_verts_per_mesh + return new_tex + + def join_scene(self) -> "TexturesVertex": + """ + Return a new TexturesVertex amalgamating the batch. 
+ """ + return self.__class__(verts_features=[torch.cat(self.verts_features_list())]) + + def check_shapes( + self, batch_size: int, max_num_verts: int, max_num_faces: int + ) -> bool: + """ + Check if the dimensions of the verts features match that of the mesh verts + """ + # (N, V) should be the same + return self.verts_features_padded().shape[:-1] == (batch_size, max_num_verts) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/utils.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..73b91c495b0b94a8d70184e526d67ca64ad6db64 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/mesh/utils.py @@ -0,0 +1,320 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from typing import List, NamedTuple, Tuple + +import torch +from pytorch3d.ops import interpolate_face_attributes + + +def _clip_barycentric_coordinates(bary) -> torch.Tensor: + """ + Args: + bary: barycentric coordinates of shape (...., 3) where `...` represents + an arbitrary number of dimensions + + Returns: + bary: Barycentric coordinates clipped (i.e any values < 0 are set to 0) + and renormalized. We only clip the negative values. Values > 1 will fall + into the [0, 1] range after renormalization. + The output is the same shape as the input. 
+ """ + if bary.shape[-1] != 3: + msg = "Expected barycentric coords to have last dim = 3; got %r" + raise ValueError(msg % (bary.shape,)) + ndims = bary.ndim - 1 + mask = bary.eq(-1).all(dim=-1, keepdim=True).expand(*((-1,) * ndims + (3,))) + clipped = bary.clamp(min=0.0) + clipped[mask] = 0.0 + clipped_sum = torch.clamp(clipped.sum(dim=-1, keepdim=True), min=1e-5) + clipped = clipped / clipped_sum + clipped[mask] = -1.0 + return clipped + + +def _interpolate_zbuf( + pix_to_face: torch.Tensor, barycentric_coords: torch.Tensor, meshes +) -> torch.Tensor: + """ + A helper function to calculate the z buffer for each pixel in the + rasterized output. + + Args: + pix_to_face: LongTensor of shape (N, H, W, K) specifying the indices + of the faces (in the packed representation) which + overlap each pixel in the image. + barycentric_coords: FloatTensor of shape (N, H, W, K, 3) specifying + the barycentric coordinates of each pixel + relative to the faces (in the packed + representation) which overlap the pixel. + meshes: Meshes object representing a batch of meshes. + + Returns: + zbuffer: (N, H, W, K) FloatTensor + """ + verts = meshes.verts_packed() + faces = meshes.faces_packed() + faces_verts_z = verts[faces][..., 2][..., None] # (F, 3, 1) + zbuf = interpolate_face_attributes(pix_to_face, barycentric_coords, faces_verts_z)[ + ..., 0 + ] # (1, H, W, K) + zbuf[pix_to_face == -1] = -1 + return zbuf + + +# ----------- Rectangle Packing -------------------- # + + +class Rectangle(NamedTuple): + xsize: int + ysize: int + identifier: int + + +class PackedRectangle(NamedTuple): + x: int + y: int + flipped: bool + is_first: bool + + +class PackedRectangles(NamedTuple): + total_size: Tuple[int, int] + locations: List[PackedRectangle] + + +# Note the order of members matters here because it determines the queue order. +# We want to place longer rectangles first. 
# Note the order of members matters here because it determines the queue order.
# We want to place longer rectangles first.
class _UnplacedRectangle(NamedTuple):
    size: Tuple[int, int]
    ind: int
    flipped: bool


def _try_place_rectangle(
    rect: _UnplacedRectangle,
    placed_so_far: List[PackedRectangle],
    occupied: List[Tuple[int, int]],
) -> bool:
    """
    Try to place rect within the current bounding box.
    Part of the implementation of pack_rectangles.

    Note that the arguments `placed_so_far` and `occupied` are modified.

    Args:
        rect: rectangle to place
        placed_so_far: the locations decided upon so far - a list of
                    (x, y, whether flipped). The nth element is the
                    location of the nth rectangle if it has been decided.
                    (modified in place)
        occupied: the nodes of the graph of extents of rightmost placed
                    rectangles - (modified in place)

    Returns:
        True on success.

    Example:
        (We always have placed the first rectangle horizontally and other
        rectangles above it.)
        Let's say the placed boxes 1-4 are laid out like this.
        The coordinates of the points marked X are stored in occupied.
        It is to the right of the X's that we seek to place rect.

                +-----------------------X
                |2                      |
                |                   +---X
                |                   |4  |
                |                   |   |
                |                   +---+X
                |                   |3   |
                |                   |    |
                +-----------------------------------+----+------X
           y    |1                                  |
           ^    |   --->x                           |
           |    +-----------------------------------+

        We want to place this rectangle.

                +-+
                |5|
                | |
                | |  = rect
                | |
                | |
                | |
                +-+

        The call will succeed, returning True, leaving us with

                +-----------------------X
                |2                      |    +-X
                |                   +---+    |5|
                |                   |4  |    | |
                |                   |   |    | |
                |                   +---++   | |
                |                   |3   |   | |
                |                   |    |   | |
                +-----------------------------------+----+-+----X
                |1                                  |
                |                                   |
                +-----------------------------------+ .

    """
    # occupied[0] always spans the full bounding-box width, so this is the
    # width budget a new rectangle must fit inside.
    total_width = occupied[0][0]
    needed_height = rect.size[1]
    # Scan the occupied intervals from the top, accumulating a contiguous run
    # whose free space (to the right of each interval) is wide enough for rect.
    current_start_idx = None
    current_max_width = 0
    previous_height = 0
    currently_packed = 0
    for idx, interval in enumerate(occupied):
        if interval[0] <= total_width - rect.size[0]:
            # This interval leaves enough horizontal room; extend the run.
            currently_packed += interval[1] - previous_height
            current_max_width = max(interval[0], current_max_width)
            if current_start_idx is None:
                current_start_idx = idx
            if currently_packed >= needed_height:
                # The run is tall enough: place rect flush against the widest
                # interval in the run, starting at the top of the run.
                current_max_width = max(interval[0], current_max_width)
                placed_so_far[rect.ind] = PackedRectangle(
                    current_max_width,
                    occupied[current_start_idx - 1][1],
                    rect.flipped,
                    True,
                )
                # Replace the consumed intervals with rect's new right edge.
                new_occupied = (
                    current_max_width + rect.size[0],
                    occupied[current_start_idx - 1][1] + needed_height,
                )
                if currently_packed == needed_height:
                    # Exact fit: the whole run is consumed.
                    occupied[idx] = new_occupied
                    del occupied[current_start_idx:idx]
                elif idx > current_start_idx:
                    # Partial use of the last interval: keep its remainder.
                    occupied[idx - 1] = new_occupied
                    del occupied[current_start_idx : (idx - 1)]
                else:
                    # rect fits inside a single interval: split it.
                    occupied.insert(idx, new_occupied)
                return True
        else:
            # Too narrow here; restart the run below this interval.
            current_start_idx = None
            current_max_width = 0
            currently_packed = 0
        previous_height = interval[1]
    return False
+ """ + + if len(sizes) < 2: + raise ValueError("Cannot pack less than two boxes") + + queue = [] + for i, size in enumerate(sizes): + if size[0] < size[1]: + queue.append(_UnplacedRectangle((size[1], size[0]), i, True)) + else: + queue.append(_UnplacedRectangle((size[0], size[1]), i, False)) + queue.sort() + placed_so_far = [PackedRectangle(-1, -1, False, False)] * len(sizes) + + biggest = queue.pop() + total_width, current_height = biggest.size + placed_so_far[biggest.ind] = PackedRectangle(0, 0, biggest.flipped, True) + + second = queue.pop() + placed_so_far[second.ind] = PackedRectangle(0, current_height, second.flipped, True) + current_height += second.size[1] + occupied = [biggest.size, (second.size[0], current_height)] + + for rect in reversed(queue): + if _try_place_rectangle(rect, placed_so_far, occupied): + continue + + rotated = _UnplacedRectangle( + (rect.size[1], rect.size[0]), rect.ind, not rect.flipped + ) + if _try_place_rectangle(rotated, placed_so_far, occupied): + continue + + # rect wasn't placed in the current bounding box, + # so we add extra space to fit it in. + placed_so_far[rect.ind] = PackedRectangle(0, current_height, rect.flipped, True) + current_height += rect.size[1] + occupied.append((rect.size[0], current_height)) + + return PackedRectangles((total_width, current_height), placed_so_far) + + +def pack_unique_rectangles(rectangles: List[Rectangle]) -> PackedRectangles: + """ + Naive rectangle packing in to a large rectangle. Flipping (i.e. rotating + a rectangle by 90 degrees) is allowed. Inputs are deduplicated by their + identifier. + + This is a wrapper around pack_rectangles, where inputs come with an + identifier. In particular, it calls pack_rectangles for the deduplicated inputs, + then returns the values for all the inputs. The output for all rectangles with + the same identifier will be the same, except that only the first one will have + the is_first field True. 
def pack_unique_rectangles(rectangles: List[Rectangle]) -> PackedRectangles:
    """
    Naive rectangle packing in to a large rectangle. Flipping (i.e. rotating
    a rectangle by 90 degrees) is allowed. Inputs are deduplicated by their
    identifier: rectangles sharing an identifier are packed once and all
    receive the same location, with only the first marked is_first=True.

    This is a wrapper around pack_rectangles and is used to join several uv
    maps into a single scene, see TexturesUV.join_scene.

    Args:
        rectangles: List of sizes of rectangles to pack, each with an identifier.

    Returns:
        total_size: size of total large rectangle
        rectangles: location for each of the input rectangles.
                    This includes whether they are flipped.
                    The is_first field is true for the first rectangle
                    with each identifier.

    Raises:
        ValueError: if fewer than two rectangles are given.
    """

    if len(rectangles) < 2:
        raise ValueError("Cannot pack less than two boxes")

    # Deduplicate: record, for each input, the index of its unique size and
    # whether this input is the first occurrence of its identifier.
    unique_sizes: List[Tuple[int, int]] = []
    index_by_id = {}
    dedup_refs: List[Tuple[int, bool]] = []
    for rectangle in rectangles:
        already_seen = rectangle.identifier in index_by_id
        if not already_seen:
            index_by_id[rectangle.identifier] = len(unique_sizes)
            unique_sizes.append((rectangle.xsize, rectangle.ysize))
        dedup_refs.append((index_by_id[rectangle.identifier], not already_seen))

    # All inputs share one identifier: no packing needed, everything at origin.
    if len(unique_sizes) == 1:
        locations = [
            PackedRectangle(0, 0, False, is_first) for _, is_first in dedup_refs
        ]
        return PackedRectangles(unique_sizes[0], locations)

    total_size, unique_locations = pack_rectangles(unique_sizes)
    full_locations = [
        unique_locations[unique_index]._replace(is_first=is_first)
        for unique_index, is_first in dedup_refs
    ]
    return PackedRectangles(total_size, full_locations)
Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +import torch + +from .compositor import AlphaCompositor, NormWeightedCompositor + +# Pulsar not enabled on amd. +if not torch.version.hip: + from .pulsar.unified import PulsarPointsRenderer + +from .rasterize_points import rasterize_points +from .rasterizer import PointsRasterizationSettings, PointsRasterizer +from .renderer import PointsRenderer + + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/points/compositor.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/points/compositor.py new file mode 100644 index 0000000000000000000000000000000000000000..14187eee279c330ce5291cfd6ddcc7db8a598841 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/points/compositor.py @@ -0,0 +1,116 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn as nn + +from ..compositing import alpha_composite, norm_weighted_sum + + +# A compositor should take as input 3D points and some corresponding information. +# Given this information, the compositor can: +# - blend colors across the top K vertices at a pixel + + +class AlphaCompositor(nn.Module): + """ + Accumulate points using alpha compositing. 
+ """ + + def __init__( + self, background_color: Optional[Union[Tuple, List, torch.Tensor]] = None + ) -> None: + super().__init__() + self.background_color = background_color + + def forward(self, fragments, alphas, ptclds, **kwargs) -> torch.Tensor: + background_color = kwargs.get("background_color", self.background_color) + images = alpha_composite(fragments, alphas, ptclds) + + # images are of shape (N, C, H, W) + # check for background color & feature size C (C=4 indicates rgba) + if background_color is not None: + return _add_background_color_to_images(fragments, images, background_color) + return images + + +class NormWeightedCompositor(nn.Module): + """ + Accumulate points using a normalized weighted sum. + """ + + def __init__( + self, background_color: Optional[Union[Tuple, List, torch.Tensor]] = None + ) -> None: + super().__init__() + self.background_color = background_color + + def forward(self, fragments, alphas, ptclds, **kwargs) -> torch.Tensor: + background_color = kwargs.get("background_color", self.background_color) + images = norm_weighted_sum(fragments, alphas, ptclds) + + # images are of shape (N, C, H, W) + # check for background color & feature size C (C=4 indicates rgba) + if background_color is not None: + return _add_background_color_to_images(fragments, images, background_color) + return images + + +def _add_background_color_to_images(pix_idxs, images, background_color): + """ + Mask pixels in images without corresponding points with a given background_color. + + Args: + pix_idxs: int32 Tensor of shape (N, points_per_pixel, image_size, image_size) + giving the indices of the nearest points at each pixel, sorted in z-order. + images: Tensor of shape (N, 4, image_size, image_size) giving the + accumulated features at each point, where 4 refers to a rgba feature. + background_color: Tensor, list, or tuple with 3 or 4 values indicating the rgb/rgba + value for the new background. Values should be in the interval [0,1]. 
+ Returns: + images: Tensor of shape (N, 4, image_size, image_size), where pixels with + no nearest points have features set to the background color, and other + pixels with accumulated features have unchanged values. + """ + # Initialize background mask + background_mask = pix_idxs[:, 0] < 0 # (N, H, W) + + # Convert background_color to an appropriate tensor and check shape + if not torch.is_tensor(background_color): + background_color = images.new_tensor(background_color) + + if background_color.ndim == 0: + background_color = background_color.expand(images.shape[1]) + + if background_color.ndim > 1: + raise ValueError("Wrong shape of background_color") + + background_color = background_color.to(images) + + # add alpha channel if needed + if background_color.shape[0] + 1 == images.shape[1]: + alpha = images.new_ones(1) + background_color = torch.cat([background_color, alpha]) + + if images.shape[1] != background_color.shape[0]: + raise ValueError( + "Background color has %s channels not %s" + % (background_color.shape[0], images.shape[1]) + ) + + num_background_pixels = background_mask.sum() + + # permute so that features are the last dimension for masked_scatter to work + masked_images = images.permute(0, 2, 3, 1).masked_scatter( + background_mask[..., None], + background_color[None, :].expand(num_background_pixels, -1), + ) + + return masked_images.permute(0, 3, 1, 2) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/points/rasterize_points.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/points/rasterize_points.py new file mode 100644 index 0000000000000000000000000000000000000000..3d2c6b7ca59326e8f743ee164a8832bd6ede5c1d --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/points/rasterize_points.py @@ -0,0 +1,322 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. 
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

from typing import List, Optional, Tuple, Union

import numpy as np
import torch
from pytorch3d import _C

from pytorch3d.renderer.mesh.rasterize_meshes import pix_to_non_square_ndc

from ..utils import parse_image_size


# Maximum number of points per bin for
# coarse-to-fine rasterization
kMaxPointsPerBin = 22


def rasterize_points(
    pointclouds,
    image_size: Union[int, List[int], Tuple[int, int]] = 256,
    radius: Union[float, List, Tuple, torch.Tensor] = 0.01,
    points_per_pixel: int = 8,
    bin_size: Optional[int] = None,
    max_points_per_bin: Optional[int] = None,
):
    """
    Each pointcloud is rasterized onto a separate image of shape
    (H, W) if `image_size` is a tuple or (image_size, image_size) if it
    is an int.

    If the desired image size is non square (i.e. a tuple of (H, W) where H != W)
    the aspect ratio needs special consideration. There are two aspect ratios
    to be aware of:
        - the aspect ratio of each pixel
        - the aspect ratio of the output image
    The camera can be used to set the pixel aspect ratio. In the rasterizer,
    we assume square pixels, but variable image aspect ratio (i.e rectangle images).

    In most cases you will want to set the camera aspect ratio to
    1.0 (i.e. square pixels) and only vary the
    `image_size` (i.e. the output image dimensions in pixels).

    Args:
        pointclouds: A Pointclouds object representing a batch of point clouds to be
            rasterized. This is a batch of N pointclouds, where each point cloud
            can have a different number of points; the coordinates of each point
            are (x, y, z). The coordinates are expected to
            be in normalized device coordinates (NDC): [-1, 1]^3 with the camera at
            (0, 0, 0); In the camera coordinate frame the x-axis goes from right-to-left,
            the y-axis goes from bottom-to-top, and the z-axis goes from back-to-front.
        image_size: Size in pixels of the output image to be rasterized.
            Can optionally be a tuple of (H, W) in the case of non square images.
        radius (Optional): The radius (in NDC units) of the disk to
            be rasterized. This can either be a float in which case the same radius is used
            for each point, or a torch.Tensor of shape (N, P) giving a radius per point
            in the batch.
        points_per_pixel (Optional): We will keep track of this many points per
            pixel, returning the nearest points_per_pixel points along the z-axis
        bin_size: Size of bins to use for coarse-to-fine rasterization. Setting
            bin_size=0 uses naive rasterization; setting bin_size=None attempts to
            set it heuristically based on the shape of the input. This should not
            affect the output, but can affect the speed of the forward pass.
        max_points_per_bin: Only applicable when using coarse-to-fine rasterization
            (bin_size > 0); this is the maximum number of points allowed within each
            bin. This should not affect the output values, but can affect
            the memory usage in the forward pass.

    Returns:
        3-element tuple containing

        - **idx**: int32 Tensor of shape (N, image_size, image_size, points_per_pixel)
          giving the indices of the nearest points at each pixel, in ascending
          z-order. Concretely `idx[n, y, x, k] = p` means that `points[p]` is the kth
          closest point (along the z-direction) to pixel (y, x) - note that points
          represents the packed points of shape (P, 3).
          Pixels that are hit by fewer than points_per_pixel are padded with -1.
        - **zbuf**: Tensor of shape (N, image_size, image_size, points_per_pixel)
          giving the z-coordinates of the nearest points at each pixel, sorted in
          z-order. Concretely, if `idx[n, y, x, k] = p` then
          `zbuf[n, y, x, k] = points[n, p, 2]`. Pixels hit by fewer than
          points_per_pixel are padded with -1
        - **dists2**: Tensor of shape (N, image_size, image_size, points_per_pixel)
          giving the squared Euclidean distance (in NDC units) in the x/y plane
          for each point closest to the pixel. Concretely if `idx[n, y, x, k] = p`
          then `dists[n, y, x, k]` is the squared distance between the pixel (y, x)
          and the point `(points[n, p, 0], points[n, p, 1])`. Pixels hit with fewer
          than points_per_pixel are padded with -1.

        In the case that image_size is a tuple of (H, W) then the outputs
        will be of shape `(N, H, W, ...)`.
    """
    points_packed = pointclouds.points_packed()
    cloud_to_packed_first_idx = pointclouds.cloud_to_packed_first_idx()
    num_points_per_cloud = pointclouds.num_points_per_cloud()

    # Normalize radius to a per-point tensor of shape (P_packed,).
    radius = _format_radius(radius, pointclouds)

    # In the case that H != W use the max image size to set the bin_size
    # to accommodate the num bins constraint in the coarse rasterizer.
    # If the ratio of H:W is large this might cause issues as the smaller
    # dimension will have fewer bins.
    # TODO: consider a better way of setting the bin size.
    im_size = parse_image_size(image_size)
    max_image_size = max(*im_size)

    if bin_size is None:
        if not points_packed.is_cuda:
            # Binned CPU rasterization not fully implemented
            bin_size = 0
        else:
            # Heuristic: bin size grows with image size, floor of 16 (2**4).
            bin_size = int(2 ** max(np.ceil(np.log2(max_image_size)) - 4, 4))

    if bin_size != 0:
        # There is a limit on the number of points per bin in the cuda kernel.
        points_per_bin = 1 + (max_image_size - 1) // bin_size
        if points_per_bin >= kMaxPointsPerBin:
            raise ValueError(
                "bin_size too small, number of points per bin must be less than %d; got %d"
                % (kMaxPointsPerBin, points_per_bin)
            )

    if max_points_per_bin is None:
        # Heuristic default based on the total number of packed points.
        max_points_per_bin = int(max(10000, pointclouds._P / 5))

    # Function.apply cannot take keyword args, so we handle defaults in this
    # wrapper and call apply with positional args only
    return _RasterizePoints.apply(
        points_packed,
        cloud_to_packed_first_idx,
        num_points_per_cloud,
        im_size,
        radius,
        points_per_pixel,
        bin_size,
        max_points_per_bin,
    )


def _format_radius(
    radius: Union[float, List, Tuple, torch.Tensor], pointclouds
) -> torch.Tensor:
    """
    Format the radius as a torch tensor of shape (P_packed,)
    where P_packed is the total number of points in the
    batch (i.e. pointclouds.points_packed().shape[0]).

    This will enable support for a different size radius
    for each point in the batch.

    Args:
        radius: can be a float, List, Tuple or tensor of
            shape (N, P_padded) where P_padded is the
            maximum number of points for each pointcloud
            in the batch.

    Returns:
        radius: torch.Tensor of shape (P_packed)
    """
    N, P_padded = pointclouds._N, pointclouds._P
    points_packed = pointclouds.points_packed()
    P_packed = points_packed.shape[0]
    if isinstance(radius, (list, tuple)):
        radius = torch.tensor(radius).type_as(points_packed)
    if isinstance(radius, torch.Tensor):
        # Allow a 1D radius for a single cloud; lift it to (1, P_padded).
        if N == 1 and radius.ndim == 1:
            radius = radius[None, ...]
        if radius.shape != (N, P_padded):
            msg = "radius must be of shape (N, P): got %s"
            raise ValueError(msg % (repr(radius.shape)))
        else:
            # Convert padded layout to packed layout so radius aligns with
            # points_packed().
            padded_to_packed_idx = pointclouds.padded_to_packed_idx()
            radius = radius.view(-1)[padded_to_packed_idx]
    elif isinstance(radius, float):
        radius = torch.full((P_packed,), fill_value=radius).type_as(points_packed)
    else:
        msg = "radius must be a float, list, tuple or tensor; got %s"
        raise ValueError(msg % type(radius))
    return radius


class _RasterizePoints(torch.autograd.Function):
    # Thin autograd wrapper over the C++/CUDA point rasterizer. Gradients
    # flow to `points` only (via zbuf/dists); `idx` is marked
    # non-differentiable.
    @staticmethod
    def forward(
        ctx,
        points,  # (P, 3)
        cloud_to_packed_first_idx,
        num_points_per_cloud,
        image_size: Union[List[int], Tuple[int, int]] = (256, 256),
        radius: Union[float, torch.Tensor] = 0.01,
        points_per_pixel: int = 8,
        bin_size: int = 0,
        max_points_per_bin: int = 0,
    ):
        # TODO: Add better error handling for when there are more than
        # max_points_per_bin in any bin.
        args = (
            points,
            cloud_to_packed_first_idx,
            num_points_per_cloud,
            image_size,
            radius,
            points_per_pixel,
            bin_size,
            max_points_per_bin,
        )
        # pyre-fixme[16]: Module `pytorch3d` has no attribute `_C`.
        idx, zbuf, dists = _C.rasterize_points(*args)
        ctx.save_for_backward(points, idx)
        ctx.mark_non_differentiable(idx)
        return idx, zbuf, dists

    @staticmethod
    def backward(ctx, grad_idx, grad_zbuf, grad_dists):
        # Only `points` receives a gradient; all other forward inputs are
        # non-tensor configuration, so their grads are None.
        grad_points = None
        grad_cloud_to_packed_first_idx = None
        grad_num_points_per_cloud = None
        grad_image_size = None
        grad_radius = None
        grad_points_per_pixel = None
        grad_bin_size = None
        grad_max_points_per_bin = None
        points, idx = ctx.saved_tensors
        args = (points, idx, grad_zbuf, grad_dists)
        grad_points = _C.rasterize_points_backward(*args)
        grads = (
            grad_points,
            grad_cloud_to_packed_first_idx,
            grad_num_points_per_cloud,
            grad_image_size,
            grad_radius,
            grad_points_per_pixel,
            grad_bin_size,
            grad_max_points_per_bin,
        )
        return grads


def rasterize_points_python(
    pointclouds,
    image_size: Union[int, Tuple[int, int]] = 256,
    radius: Union[float, torch.Tensor] = 0.01,
    points_per_pixel: int = 8,
):
    """
    Naive pure PyTorch implementation of pointcloud rasterization.

    Inputs / Outputs: Same as above
    """
    N = len(pointclouds)
    H, W = (
        image_size
        if isinstance(image_size, (tuple, list))
        else (image_size, image_size)
    )
    K = points_per_pixel
    device = pointclouds.device

    points_packed = pointclouds.points_packed()
    cloud_to_packed_first_idx = pointclouds.cloud_to_packed_first_idx()
    num_points_per_cloud = pointclouds.num_points_per_cloud()

    # Support variable size radius for each point in the batch
    radius = _format_radius(radius, pointclouds)

    # Initialize output tensors (-1 marks "no point hit this slot").
    point_idxs = torch.full(
        (N, H, W, K), fill_value=-1, dtype=torch.int32, device=device
    )
    zbuf = torch.full((N, H, W, K), fill_value=-1, dtype=torch.float32, device=device)
    pix_dists = torch.full(
        (N, H, W, K), fill_value=-1, dtype=torch.float32, device=device
    )

    # NDC is from [-1, 1]. Get pixel size using specified image size.
+ radius2 = radius * radius + + # Iterate through the batch of point clouds. + for n in range(N): + point_start_idx = cloud_to_packed_first_idx[n] + point_stop_idx = point_start_idx + num_points_per_cloud[n] + + # Iterate through the horizontal lines of the image from top to bottom. + for yi in range(H): + # Y coordinate of one end of the image. Reverse the ordering + # of yi so that +Y is pointing up in the image. + yfix = H - 1 - yi + yf = pix_to_non_square_ndc(yfix, H, W) + + # Iterate through pixels on this horizontal line, left to right. + for xi in range(W): + # X coordinate of one end of the image. Reverse the ordering + # of xi so that +X is pointing to the left in the image. + xfix = W - 1 - xi + xf = pix_to_non_square_ndc(xfix, W, H) + + top_k_points = [] + # Check whether each point in the batch affects this pixel. + for p in range(point_start_idx, point_stop_idx): + px, py, pz = points_packed[p, :] + r = radius2[p] + if pz < 0: + continue + dx = px - xf + dy = py - yf + dist2 = dx * dx + dy * dy + if dist2 < r: + top_k_points.append((pz, p, dist2)) + top_k_points.sort() + if len(top_k_points) > K: + top_k_points = top_k_points[:K] + for k, (pz, p, dist2) in enumerate(top_k_points): + zbuf[n, yi, xi, k] = pz + point_idxs[n, yi, xi, k] = p + pix_dists[n, yi, xi, k] = dist2 + return point_idxs, zbuf, pix_dists diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/points/rasterizer.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/points/rasterizer.py new file mode 100644 index 0000000000000000000000000000000000000000..8c141b9012be0b36301f33210596a980d49db0dc --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/points/rasterizer.py @@ -0,0 +1,169 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

from dataclasses import dataclass
from typing import NamedTuple, Optional, Tuple, Union

import torch
import torch.nn as nn
from pytorch3d.renderer.cameras import try_get_projection_transform
from pytorch3d.structures import Pointclouds

from .rasterize_points import rasterize_points


class PointFragments(NamedTuple):
    """
    Class to store the outputs of point rasterization

    Members:
        idx: int32 Tensor of shape (N, image_size, image_size, points_per_pixel)
            giving the indices of the nearest points at each pixel, in ascending
            z-order. Concretely `idx[n, y, x, k] = p` means that `points[p]` is the kth
            closest point (along the z-direction) to pixel (y, x) - note that points
            represents the packed points of shape (P, 3).
            Pixels that are hit by fewer than points_per_pixel are padded with -1.
        zbuf: Tensor of shape (N, image_size, image_size, points_per_pixel)
            giving the z-coordinates of the nearest points at each pixel, sorted in
            z-order. Concretely, if `idx[n, y, x, k] = p` then
            `zbuf[n, y, x, k] = points[n, p, 2]`. Pixels hit by fewer than
            points_per_pixel are padded with -1.
        dists: Tensor of shape (N, image_size, image_size, points_per_pixel)
            giving the squared Euclidean distance (in NDC units) in the x/y plane
            for each point closest to the pixel. Concretely if `idx[n, y, x, k] = p`
            then `dists[n, y, x, k]` is the squared distance between the pixel (y, x)
            and the point `(points[n, p, 0], points[n, p, 1])`. Pixels hit with fewer
            than points_per_pixel are padded with -1.
    """

    idx: torch.Tensor
    zbuf: torch.Tensor
    dists: torch.Tensor


@dataclass
class PointsRasterizationSettings:
    """
    Class to store the point rasterization params with defaults

    Members:
        image_size: Either common height and width or (height, width), in pixels.
        radius: The radius (in NDC units) of each disk to be rasterized.
            This can either be a float in which case the same radius is used
            for each point, or a torch.Tensor of shape (N, P) giving a radius
            per point in the batch.
        points_per_pixel: (int) Number of points to keep track of per pixel.
            We return the nearest points_per_pixel points along the z-axis.
        bin_size: Size of bins to use for coarse-to-fine rasterization. Setting
            bin_size=0 uses naive rasterization; setting bin_size=None attempts
            to set it heuristically based on the shape of the input. This should
            not affect the output, but can affect the speed of the forward pass.
        max_points_per_bin: Only applicable when using coarse-to-fine
            rasterization (bin_size != 0); this is the maximum number of points
            allowed within each bin. This should not affect the output values,
            but can affect the memory usage in the forward pass.
            Setting max_points_per_bin=None attempts to set with a heuristic.
    """

    image_size: Union[int, Tuple[int, int]] = 256
    radius: Union[float, torch.Tensor] = 0.01
    points_per_pixel: int = 8
    bin_size: Optional[int] = None
    max_points_per_bin: Optional[int] = None


class PointsRasterizer(nn.Module):
    """
    This class implements methods for rasterizing a batch of pointclouds.
    """

    def __init__(self, cameras=None, raster_settings=None) -> None:
        """
        cameras: A cameras object which has a `transform_points` method
            which returns the transformed points after applying the
            world-to-view and view-to-ndc transformations.
        raster_settings: the parameters for rasterization. This should be a
            named tuple.

        All these initial settings can be overridden by passing keyword
        arguments to the forward function.
        """
        super().__init__()
        if raster_settings is None:
            raster_settings = PointsRasterizationSettings()

        # NOTE: cameras is stored as a plain attribute (not an nn.Module),
        # hence the manual device handling in `to` below.
        self.cameras = cameras
        self.raster_settings = raster_settings

    def transform(self, point_clouds, **kwargs) -> Pointclouds:
        """
        Args:
            point_clouds: a set of point clouds

        Returns:
            points_proj: the points with positions projected
            in NDC space

        NOTE: keeping this as a separate function for readability but it could
        be moved into forward.
        """
        # A per-call `cameras` kwarg overrides the one set at construction.
        cameras = kwargs.get("cameras", self.cameras)
        if cameras is None:
            msg = "Cameras must be specified either at initialization \
                or in the forward pass of PointsRasterizer"
            raise ValueError(msg)

        pts_world = point_clouds.points_padded()
        # NOTE: Retaining view space z coordinate for now.
        # TODO: Remove this line when the convention for the z coordinate in
        # the rasterizer is decided. i.e. retain z in view space or transform
        # to a different range.
        eps = kwargs.get("eps", None)
        pts_view = cameras.get_world_to_view_transform(**kwargs).transform_points(
            pts_world, eps=eps
        )
        to_ndc_transform = cameras.get_ndc_camera_transform(**kwargs)
        projection_transform = try_get_projection_transform(cameras, kwargs)
        if projection_transform is not None:
            projection_transform = projection_transform.compose(to_ndc_transform)
            pts_ndc = projection_transform.transform_points(pts_view, eps=eps)
        else:
            # Call transform_points instead of explicitly composing transforms to handle
            # the case, where camera class does not have a projection matrix form.
+ pts_proj = cameras.transform_points(pts_world, eps=eps) + pts_ndc = to_ndc_transform.transform_points(pts_proj, eps=eps) + + pts_ndc[..., 2] = pts_view[..., 2] + point_clouds = point_clouds.update_padded(pts_ndc) + return point_clouds + + def to(self, device): + # Manually move to device cameras as it is not a subclass of nn.Module + if self.cameras is not None: + self.cameras = self.cameras.to(device) + return self + + def forward(self, point_clouds, **kwargs) -> PointFragments: + """ + Args: + point_clouds: a set of point clouds with coordinates in world space. + Returns: + PointFragments: Rasterization outputs as a named tuple. + """ + points_proj = self.transform(point_clouds, **kwargs) + raster_settings = kwargs.get("raster_settings", self.raster_settings) + idx, zbuf, dists2 = rasterize_points( + points_proj, + image_size=raster_settings.image_size, + radius=raster_settings.radius, + points_per_pixel=raster_settings.points_per_pixel, + bin_size=raster_settings.bin_size, + max_points_per_bin=raster_settings.max_points_per_bin, + ) + return PointFragments(idx=idx, zbuf=zbuf, dists=dists2) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/points/renderer.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/points/renderer.py new file mode 100644 index 0000000000000000000000000000000000000000..4ce5360514ea52155c6518d132e9dd236fe03443 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/points/renderer.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 

# pyre-unsafe

import torch
import torch.nn as nn


# A renderer class should be initialized with a
# function for rasterization and a function for compositing.
# The rasterizer should:
#     - transform inputs from world -> screen space
#     - rasterize inputs
#     - return fragments
# The compositor can take fragments as input along with any other properties of
# the scene and generate images.

# E.g. rasterize inputs and then shade
#
# fragments = self.rasterize(point_clouds)
# images = self.compositor(fragments, point_clouds)
# return images


class PointsRenderer(nn.Module):
    """
    A class for rendering a batch of points. The class should
    be initialized with a rasterizer and compositor class which each have a forward
    function.

    The points are rendered with varying alpha (weights) values depending on
    the distance of the pixel center to the true point in the xy plane. The purpose
    of this is to soften the hard decision boundary, for differentiability.
    See Section 3.2 of "SynSin: End-to-end View Synthesis from a Single Image"
    (https://arxiv.org/pdf/1912.08804.pdf) for more details.
    """

    def __init__(self, rasterizer, compositor) -> None:
        super().__init__()
        # rasterizer: produces PointFragments from point clouds.
        # compositor: blends per-pixel point features into images.
        self.rasterizer = rasterizer
        self.compositor = compositor

    def to(self, device):
        # Manually move to device rasterizer as the cameras
        # within the class are not of type nn.Module
        self.rasterizer = self.rasterizer.to(device)
        self.compositor = self.compositor.to(device)
        return self

    def forward(self, point_clouds, **kwargs) -> torch.Tensor:
        fragments = self.rasterizer(point_clouds, **kwargs)

        # Construct weights based on the distance of a point to the true point.
        # However, this could be done differently: e.g. predicted as opposed
        # to a function of the weights.
+ r = self.rasterizer.raster_settings.radius + + dists2 = fragments.dists.permute(0, 3, 1, 2) + weights = 1 - dists2 / (r * r) + images = self.compositor( + fragments.idx.long().permute(0, 3, 1, 2), + weights, + point_clouds.features_packed().permute(1, 0), + **kwargs, + ) + + # permute so image comes at the end + images = images.permute(0, 2, 3, 1) + + return images diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/splatter_blend.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/splatter_blend.py new file mode 100644 index 0000000000000000000000000000000000000000..e753d29ac45861f5d9c29a30ec63d7f65151304d --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/splatter_blend.py @@ -0,0 +1,568 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +# This file defines SplatterBlender, which is used for blending in SplatterPhongShader. + +import itertools +from typing import Tuple + +import torch +import torch.nn.functional as F +from pytorch3d.common.datatypes import Device +from pytorch3d.renderer import BlendParams +from pytorch3d.renderer.cameras import FoVPerspectiveCameras + +from .blending import _get_background_color + + +def _precompute( + input_shape: Tuple[int, int, int, int], device: Device +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Precompute padding and offset constants that won't change for a given NHWK shape. + + Args: + input_shape: Tuple indicating N (batch size), H, W (image size) and K (number of + intersections) output by the rasterizer. + device: Device to store the tensors on. 

    Returns:
        crop_ids_h: An (N, H, W+2, K, 9, 5) tensor, used during splatting to offset the
            p-pixels (splatting pixels) in one of the 9 splatting directions within a
            call to torch.gather. See comments and offset_splats for details.
        crop_ids_w: An (N, H, W, K, 9, 5) tensor, used similarly to crop_ids_h.
        offsets: A (1, 1, 1, 1, 9, 2) tensor (shaped so for broadcasting) containing va-
            lues [-1, -1], [-1, 0], [-1, 1], [0, -1], ..., [1, 1] which correspond to
            the nine splatting directions.
            NOTE(review): the code below constructs `offsets` with shape (9, 2);
            the broadcast shape in this description looks stale — confirm at
            use sites.
    """
    N, H, W, K = input_shape

    # (N, H, W+2, K, 9, 5) tensor, used to reduce a tensor from (N, H+2, W+2...) to
    # (N, H, W+2, ...) in torch.gather. If only torch.gather broadcasted, we wouldn't
    # need the tiling. But it doesn't.
    crop_ids_h = (
        torch.arange(0, H, device=device).view(1, H, 1, 1, 1, 1)
        + torch.tensor([0, 1, 2, 0, 1, 2, 0, 1, 2], device=device).view(
            1, 1, 1, 1, 9, 1
        )
    ).expand(N, H, W + 2, K, 9, 5)

    # (N, H, W, K, 9, 5) tensor, used to reduce a tensor from (N, H, W+2, ...) to
    # (N, H, W, ...) in torch.gather.
    crop_ids_w = (
        torch.arange(0, W, device=device).view(1, 1, W, 1, 1, 1)
        + torch.tensor([0, 0, 0, 1, 1, 1, 2, 2, 2], device=device).view(
            1, 1, 1, 1, 9, 1
        )
    ).expand(N, H, W, K, 9, 5)

    # The nine (dy, dx) splatting directions, in row-major order.
    offsets = torch.tensor(
        list(itertools.product((-1, 0, 1), repeat=2)),
        dtype=torch.long,
        device=device,
    )

    return crop_ids_h, crop_ids_w, offsets


def _prepare_pixels_and_colors(
    pixel_coords_cameras: torch.Tensor,
    colors: torch.Tensor,
    cameras: FoVPerspectiveCameras,
    background_mask: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Project pixel coords into the un-inverted screen frame of reference, and set
    background pixel z-values to 1.0 and alphas to 0.0.

    Args:
        pixel_coords_cameras: (N, H, W, K, 3) float tensor.
        colors: (N, H, W, K, 3) float tensor.
        cameras: PyTorch3D cameras, for now we assume FoVPerspectiveCameras.
        background_mask: (N, H, W, K) boolean tensor.

    Returns:
        pixel_coords_screen: (N, H, W, K, 3) float tensor. Background pixels have
            x=y=z=1.0.
        colors: (N, H, W, K, 4). Alpha is set to 1 for foreground pixels and 0 for back-
            ground pixels.
    """

    N, H, W, K, C = colors.shape
    # pixel_coords_screen will contain invalid values at background
    # intersections, and [H+0.5, W+0.5, z] at valid intersections. It is important
    # to not flip the xy axes, otherwise the gradients will be inverted when the
    # splatter works with a detached rasterizer.
    pixel_coords_screen = cameras.transform_points_screen(
        pixel_coords_cameras.view([N, -1, 3]), image_size=(H, W), with_xyflip=False
    ).reshape(pixel_coords_cameras.shape)

    # Set colors' alpha to 1 and background to 0.
    colors = torch.cat(
        [colors, torch.ones_like(colors[..., :1])], dim=-1
    )  # (N, H, W, K, 4)

    # The hw values of background don't matter because their alpha is set
    # to 0 in the next step (which means that no matter what their splatting kernel
    # value is, they will not splat as the kernel is multiplied by alpha). However,
    # their z-values need to be at max depth. Otherwise, we could incorrectly compute
    # occlusion layer linkage.
    pixel_coords_screen[background_mask] = 1.0

    # Any background color value with alpha=0 will do, as anything with
    # alpha=0 will have a zero-weight splatting power. Note that neighbors can still
    # splat on zero-alpha pixels: that's the way we get non-zero gradients at the
    # boundary with the background.
    colors[background_mask] = 0.0

    return pixel_coords_screen, colors


def _get_splat_kernel_normalization(
    offsets: torch.Tensor,
    sigma: float = 0.5,
):
    # Normalization constant for the Gaussian splatting kernel over the nine
    # splat offsets; only positive sigma is meaningful.
    if sigma <= 0.0:
        raise ValueError("Only positive standard deviations make sense.")

    epsilon = 0.05
    normalization_constant = torch.exp(
        # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
        -(offsets**2).sum(dim=1)
        / (2 * sigma**2)
    ).sum()

    # We add an epsilon to the normalization constant to ensure the gradient will travel
    # through non-boundary pixels' normalization factor, see Sec. 3.3.1 in "Differentia-
    # ble Surface Rendering via Non-Differentiable Sampling", Cole et al.
    # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`.
    return (1 + epsilon) / normalization_constant


def _compute_occlusion_layers(
    q_depth: torch.Tensor,
) -> torch.Tensor:
    """
    For each splatting pixel, decide whether it splats from a background, surface, or
    foreground depth relative to the splatted pixel. See unit tests in
    test_splatter_blend for some enlightening examples.

    Args:
        q_depth: (N, H, W, K) tensor of z-values of the splatted pixels.

    Returns:
        occlusion_layers: (N, H, W, 9) long tensor. Each of the 9 values corresponds to
            one of the nine splatting directions ([-1, -1], [-1, 0], ..., [1,
            1]). The value at nhwd (where d is the splatting direction) is 0 if
            the splat in direction d is on the same surface level as the pixel at
            hw. The value is negative if the splat is in the background (occluded
            by another splat above it that is at the same surface level as the
            pixel splatted on), and the value is positive if the splat is in the
            foreground.
    """
    N, H, W, K = q_depth.shape

    # q are the "center pixels" and p the pixels splatting onto them. Use `unfold` to
    # create `p_depth`, a tensor with 9 layers, each of which corresponds to the
    # depth of a neighbor of q in one of the 9 directions. For example, p_depth[nk0hw]
    # is the depth of the pixel splatting onto pixel nhwk from the [-1, -1] direction,
    # and p_depth[nk4hw] the depth of q (self-splatting onto itself).
    # More concretely, imagine the pixel depths in a 2x2 image's k-th layer are
    #     .1 .2
    #     .3 .4
    # Then (remembering that we pad with zeros when a pixel has fewer than 9 neighbors):
    #
    # p_depth[n, k, :, 0, 0] = [ 0  0  0  0 .1 .2  0 .3 .4] - neighbors of .1
    # p_depth[n, k, :, 0, 1] = [ 0  0  0 .1 .2  0 .3 .4  0] - neighbors of .2
    # p_depth[n, k, :, 1, 0] = [ 0 .1 .2  0 .3 .4  0  0  0] - neighbors of .3
    # p_depth[n, k, :, 1, 1] = [.1 .2  0 .3 .4  0  0  0  0] - neighbors of .4
    q_depth = q_depth.permute(0, 3, 1, 2)  # (N, K, H, W)
    p_depth = F.unfold(q_depth, kernel_size=3, padding=1)  # (N, 3^2 * K, H * W)
    q_depth = q_depth.view(N, K, 1, H, W)
    p_depth = p_depth.view(N, K, 9, H, W)

    # Take the center pixel q's top rasterization layer. This is the "surface layer"
    # that we're splatting on. For each of the nine splatting directions p, find which
    # of the K splatting rasterization layers is closest in depth to the surface
    # splatted layer.
    qtop_to_p_zdist = torch.abs(p_depth - q_depth[:, 0:1])  # (N, K, 9, H, W)
    qtop_to_p_closest_zdist, qtop_to_p_closest_id = qtop_to_p_zdist.min(dim=1)

    # For each of the nine splatting directions p, take the top of the K rasterization
    # layers. Check which of the K q-layers (that the given direction is splatting on)
    # is closest in depth to the top splatting layer.
    ptop_to_q_zdist = torch.abs(p_depth[:, 0:1] - q_depth)  # (N, K, 9, H, W)
    ptop_to_q_closest_zdist, ptop_to_q_closest_id = ptop_to_q_zdist.min(dim=1)

    # Decide whether each p is on the same level, below, or above the q it is splatting
    # on. See Fig. 4 in [0] for an illustration. Briefly: say we're interested in pixel
    # p_{h, w} = [10, 32] splatting onto its neighbor q_{h, w} = [11, 33]. The splat is
    # coming from direction [-1, -1], which has index 0 in our enumeration of splatting
    # directions. Hence, we are interested in
    #
    # P = p_depth[n, :, d=0, 11, 33] - a vector of K depth values, and
    # Q = q_depth.squeeze()[n, :, 11, 33] - a vector of K depth values.
    #
    # If Q[0] is closest, say, to P[2], then we assume the 0th surface layer of Q is
    # the same surface as P[2] that's splatting onto it, and P[:2] are foreground splats
    # and P[3:] are background splats.
    #
    # If instead say Q[2] is closest to P[0], then all the splats are background splats,
    # because the top splatting layer is the same surface as a non-top splatted layer.
    #
    # Finally, if Q[0] is closest to P[0], then the top-level P is splatting onto top-
    # level Q, and P[1:] are all background splats.
    occlusion_offsets = torch.where(  # noqa
        ptop_to_q_closest_zdist < qtop_to_p_closest_zdist,
        -ptop_to_q_closest_id,
        qtop_to_p_closest_id,
    )  # (N, 9, H, W)

    occlusion_layers = occlusion_offsets.permute((0, 2, 3, 1))  # (N, H, W, 9)
    return occlusion_layers


def _compute_splatting_colors_and_weights(
    pixel_coords_screen: torch.Tensor,
    colors: torch.Tensor,
    sigma: float,
    offsets: torch.Tensor,
) -> torch.Tensor:
    """
    For each center pixel q, compute the splatting weights of its surrounding nine spla-
    tting pixels p, as well as their splatting colors (which are just their colors re-
    weighted by the splatting weights).

    Args:
        pixel_coords_screen: (N, H, W, K, 2) tensor of pixel screen coords.
        colors: (N, H, W, K, 4) RGBA tensor of pixel colors.
        sigma: splatting kernel variance.
        offsets: (9, 2) tensor computed by _precompute, indicating the nine
            splatting directions ([-1, -1], ..., [1, 1]).

    Returns:
        splat_colors_and_weights: (N, H, W, K, 9, 5) tensor.
            splat_colors_and_weights[..., :4] corresponds to the splatting colors, and
            splat_colors_and_weights[..., 4:5] to the splatting weights. The "9" di-
            mension corresponds to the nine splatting directions.
+ """ + N, H, W, K, C = colors.shape + splat_kernel_normalization = _get_splat_kernel_normalization(offsets, sigma) + + # Distance from each barycentric-interpolated triangle vertices' triplet from its + # "ideal" pixel-center location. pixel_coords_screen are in screen coordinates, and + # should be at the "ideal" locations on the forward pass -- e.g. + # pixel_coords_screen[n, 24, 31, k] = [24.5, 31.5]. For this reason, q_to_px_center + # should equal torch.zeros during the forward pass. On the backwards pass, these + # coordinates will be adjusted and non-zero, allowing the gradients to flow back + # to the mesh vertex coordinates. + q_to_px_center = ( + torch.floor(pixel_coords_screen[..., :2]) - pixel_coords_screen[..., :2] + 0.5 + ).view((N, H, W, K, 1, 2)) + + # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. + dist2_p_q = torch.sum((q_to_px_center + offsets) ** 2, dim=5) # (N, H, W, K, 9) + splat_weights = torch.exp(-dist2_p_q / (2 * sigma**2)) + alpha = colors[..., 3:4] + splat_weights = (alpha * splat_kernel_normalization * splat_weights).unsqueeze( + 5 + ) # (N, H, W, K, 9, 1) + + # splat_colors[n, h, w, direction, :] contains the splatting color (weighted by the + # splatting weight) that pixel h, w will splat in one of the nine possible + # directions (e.g. nhw0 corresponds to splatting in [-1, 1] direciton, nhw4 is + # self-splatting). + splat_colors = splat_weights * colors.unsqueeze(4) # (N, H, W, K, 9, 4) + + return torch.cat([splat_colors, splat_weights], dim=5) + + +def _offset_splats( + splat_colors_and_weights: torch.Tensor, + crop_ids_h: torch.Tensor, + crop_ids_w: torch.Tensor, +) -> torch.Tensor: + """ + Pad splatting colors and weights so that tensor locations/coordinates are aligned + with the splatting directions. 
For example, say we have an example input Red channel + splat_colors_and_weights[n, :, :, k, direction=0, channel=0] equal to + .1 .2 .3 + .4 .5 .6 + .7 .8 .9 + the (h, w) entry indicates that pixel n, h, w, k splats the given color in direction + equal to 0, which corresponds to offsets[0] = (-1, -1). Note that this is the x-y + direction, not h-w. This function pads and crops this array to + 0 0 0 + .2 .3 0 + .5 .6 0 + which indicates, for example, that: + * There is no pixel splatting in direction (-1, -1) whose splat lands on pixel + h=w=0. + * There is a pixel splatting in direction (-1, -1) whose splat lands on the pi- + xel h=1, w=0, and that pixel's splatting color is .2. + * There is a pixel splatting in direction (-1, -1) whose splat lands on the pi- + xel h=2, w=1, and that pixel's splatting color is .6. + + Args: + *splat_colors_and_weights*: (N, H, W, K, 9, 5) tensor of colors and weights, + where dim=-2 corresponds to the splatting directions/offsets. + *crop_ids_h*: (N, H, W+2, K, 9, 5) precomputed tensor used for padding within + torch.gather. See _precompute for more info. + *crop_ids_w*: (N, H, W, K, 9, 5) precomputed tensor used for padding within + torch.gather. See _precompute for more info. + + + Returns: + *splat_colors_and_weights*: (N, H, W, K, 9, 5) tensor. + """ + N, H, W, K, _, _ = splat_colors_and_weights.shape + # Transform splat_colors such that each of the 9 layers (corresponding to + # the 9 splat offsets) is padded with 1 and shifted in the appropriate + # direction. E.g. splat_colors[n, :, :, 0] corresponds to the (-1, -1) + # offset, so will be padded with one rows of 1 on the right and have a + # single row clipped at the bottom, and splat_colors[n, :, :, 4] corrsponds + # to offset (0, 0) and will remain unchanged. 
+ splat_colors_and_weights = F.pad( + splat_colors_and_weights, [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0] + ) # N, H+2, W+2, 9, 5 + + # (N, H+2, W+2, K, 9, 5) -> (N, H, W+2, K, 9, 5) + splat_colors_and_weights = torch.gather( + splat_colors_and_weights, dim=1, index=crop_ids_h + ) + + # (N, H, W+2, K, 9, 5) -> (N, H, W, K, 9, 5) + splat_colors_and_weights = torch.gather( + splat_colors_and_weights, dim=2, index=crop_ids_w + ) + + return splat_colors_and_weights + + +def _compute_splatted_colors_and_weights( + occlusion_layers: torch.Tensor, # (N, H, W, 9) + splat_colors_and_weights: torch.Tensor, # (N, H, W, K, 9, 5) +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Accumulate splatted colors in background, surface and foreground occlusion buffers. + + Args: + occlusion_layers: (N, H, W, 9) tensor. See _compute_occlusion_layers. + splat_colors_and_weights: (N, H, W, K, 9, 5) tensor. See _offset_splats. + + Returns: + splatted_colors: (N, H, W, 4, 3) tensor. Last dimension corresponds to back- + ground, surface, and foreground splat colors. + splatted_weights: (N, H, W, 1, 3) tensor. Last dimension corresponds to back- + ground, surface, and foreground splat weights and is used for normalization. + + """ + N, H, W, K, _, _ = splat_colors_and_weights.shape + + # Create an occlusion mask, with the last dimension of length 3, corresponding to + # background/surface/foreground splatting. E.g. occlusion_layer_mask[n,h,w,k,d,0] is + # 1 if the pixel at hw is splatted from direction d such that the splatting pixel p + # is below the splatted pixel q (in the background); otherwise, the value is 0. + # occlusion_layer_mask[n,h,w,k,d,1] is 1 if the splatting pixel is at the same + # surface level as the splatted pixel q, and occlusion_layer_mask[n,h,w,k,d,2] is + # 1 only if the splatting pixel is in the foreground. 
+ layer_ids = torch.arange(K, device=splat_colors_and_weights.device).view( + 1, 1, 1, K, 1 + ) + occlusion_layers = occlusion_layers.view(N, H, W, 1, 9) + occlusion_layer_mask = torch.stack( + [ + occlusion_layers > layer_ids, # (N, H, W, K, 9) + occlusion_layers == layer_ids, # (N, H, W, K, 9) + occlusion_layers < layer_ids, # (N, H, W, K, 9) + ], + dim=5, + ).float() # (N, H, W, K, 9, 3) + + # (N * H * W, 5, 9 * K) x (N * H * W, 9 * K, 3) -> (N * H * W, 5, 3) + splatted_colors_and_weights = torch.bmm( + splat_colors_and_weights.permute(0, 1, 2, 5, 3, 4).reshape( + (N * H * W, 5, K * 9) + ), + occlusion_layer_mask.reshape((N * H * W, K * 9, 3)), + ).reshape((N, H, W, 5, 3)) + + return ( + splatted_colors_and_weights[..., :4, :], + splatted_colors_and_weights[..., 4:5, :], + ) + + +def _normalize_and_compose_all_layers( + background_color: torch.Tensor, + splatted_colors_per_occlusion_layer: torch.Tensor, + splatted_weights_per_occlusion_layer: torch.Tensor, +) -> torch.Tensor: + """ + Normalize each bg/surface/fg buffer by its weight, and compose. + + Args: + background_color: (3) RGB tensor. + splatter_colors_per_occlusion_layer: (N, H, W, 4, 3) RGBA tensor, last dimension + corresponds to foreground, surface, and background splatting. + splatted_weights_per_occlusion_layer: (N, H, W, 1, 3) weight tensor. + + Returns: + output_colors: (N, H, W, 4) RGBA tensor. + """ + device = splatted_colors_per_occlusion_layer.device + + # Normalize each of bg/surface/fg splat layers separately. + normalization_scales = 1.0 / ( + # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`. + torch.maximum( + splatted_weights_per_occlusion_layer, + torch.tensor([1.0], device=device), + ) + ) # (N, H, W, 1, 3) + + normalized_splatted_colors = ( + splatted_colors_per_occlusion_layer * normalization_scales + ) # (N, H, W, 4, 3) + + # Use alpha-compositing to compose the splat layers. 
+ output_colors = torch.cat( + [background_color, torch.tensor([0.0], device=device)] + ) # (4), will broadcast to (N, H, W, 4) below. + + for occlusion_layer_id in (-1, -2, -3): + # Over-compose the bg, surface, and fg occlusion layers. Note that we already + # multiplied each pixel's RGBA by its own alpha as part of self-splatting in + # _compute_splatting_colors_and_weights, so we don't re-multiply by alpha here. + alpha = normalized_splatted_colors[..., 3:4, occlusion_layer_id] # (N, H, W, 1) + output_colors = ( + normalized_splatted_colors[..., occlusion_layer_id] + + (1.0 - alpha) * output_colors + ) + return output_colors + + +class SplatterBlender(torch.nn.Module): + def __init__( + self, + input_shape: Tuple[int, int, int, int], + device, + ): + """ + A splatting blender. See `forward` docs for details of the splatting mechanism. + + Args: + input_shape: Tuple (N, H, W, K) indicating the batch size, image height, + image width, and number of rasterized layers. Used to precompute + constant tensors that do not change as long as this tuple is unchanged. + """ + super().__init__() + self.crop_ids_h, self.crop_ids_w, self.offsets = _precompute( + input_shape, device + ) + + def to(self, device): + self.offsets = self.offsets.to(device) + self.crop_ids_h = self.crop_ids_h.to(device) + self.crop_ids_w = self.crop_ids_w.to(device) + super().to(device) + + def forward( + self, + colors: torch.Tensor, + pixel_coords_cameras: torch.Tensor, + cameras: FoVPerspectiveCameras, + background_mask: torch.Tensor, + blend_params: BlendParams, + ) -> torch.Tensor: + """ + RGB blending using splatting, as proposed in [0]. + + Args: + colors: (N, H, W, K, 3) tensor of RGB colors at each h, w pixel location for + K intersection layers. + pixel_coords_cameras: (N, H, W, K, 3) tensor of pixel coordinates in the + camera frame of reference. 
It is *crucial* that these are computed by + interpolating triangle vertex positions using barycentric coordinates -- + this allows gradients to travel through pixel_coords_camera back to the + vertex positions. + cameras: Cameras object used to project pixel_coords_cameras screen coords. + background_mask: (N, H, W, K, 3) boolean tensor, True for bg pixels. A pixel + is considered "background" if no mesh triangle projects to it. This is + typically computed by the rasterizer. + blend_params: BlendParams, from which we use sigma (splatting kernel + variance) and background_color. + + Returns: + output_colors: (N, H, W, 4) tensor of RGBA values. The alpha layer is set to + fully transparent in the background. + + [0] Cole, F. et al., "Differentiable Surface Rendering via Non-differentiable + Sampling". + """ + + # Our implementation has 6 stages. In the description below, we will call each + # pixel q and the 9 surrounding splatting pixels (including itself) p. + # 1. Use barycentrics to compute the position of each pixel in screen + # coordinates. These should exactly correspond to pixel centers during the + # forward pass, but can be shifted on backwards. This step allows gradients to + # travel to vertex coordinates, even if the rasterizer is non-differentiable. + # 2a. For each center pixel q, take each splatting p and decide whether it + # is on the same surface level as q, or in the background or foreground. + # 2b. For each center pixel q, compute the splatting weight of surrounding + # pixels p, and their splatting colors (which are just the original colors + # weighted by the splatting weights). + # 3. As a vectorization technicality, offset the tensors corresponding to + # the splatting p values in the nine directions, by padding each of nine + # splatting layers on the bottom/top, left/right. + # 4. Do the actual splatting, by accumulating the splatting colors of the + # surrounding p's for each pixel q. 
The weights get accumulated separately for + # p's that got assigned to the background/surface/foreground in Step 2a. + # 5. Normalize each the splatted bg/surface/fg colors for each q, and + # compose the resulting color maps. + # + # Note that it is crucial that in Step 1 we compute the pixel coordinates by in- + # terpolating triangle vertices using barycentric coords from the rasterizer. In + # our case, these pixel_coords_camera are computed by the shader and passed to + # this function to avoid re-computation. + + pixel_coords_screen, colors = _prepare_pixels_and_colors( + pixel_coords_cameras, colors, cameras, background_mask + ) # (N, H, W, K, 3) and (N, H, W, K, 4) + + occlusion_layers = _compute_occlusion_layers( + pixel_coords_screen[..., 2:3].squeeze(dim=-1) + ) # (N, H, W, 9) + + splat_colors_and_weights = _compute_splatting_colors_and_weights( + pixel_coords_screen, + colors, + blend_params.sigma, + self.offsets, + ) # (N, H, W, K, 9, 5) + + splat_colors_and_weights = _offset_splats( + splat_colors_and_weights, + self.crop_ids_h, + self.crop_ids_w, + ) # (N, H, W, K, 9, 5) + + ( + splatted_colors_per_occlusion_layer, + splatted_weights_per_occlusion_layer, + ) = _compute_splatted_colors_and_weights( + occlusion_layers, splat_colors_and_weights + ) # (N, H, W, 4, 3) and (N, H, W, 1, 3) + + output_colors = _normalize_and_compose_all_layers( + _get_background_color(blend_params, colors.device), + splatted_colors_per_occlusion_layer, + splatted_weights_per_occlusion_layer, + ) # (N, H, W, 4) + + return output_colors diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/utils.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ac7e8c85d2e7c9b8f7d5e27cc1624fb632b90be2 --- /dev/null +++ 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/renderer/utils.py @@ -0,0 +1,462 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + + +import copy +import inspect +import warnings +from typing import Any, List, Optional, Tuple, TypeVar, Union + +import numpy as np +import torch +import torch.nn as nn + +from ..common.datatypes import Device, make_device + + +class TensorAccessor(nn.Module): + """ + A helper class to be used with the __getitem__ method. This can be used for + getting/setting the values for an attribute of a class at one particular + index. This is useful when the attributes of a class are batched tensors + and one element in the batch needs to be modified. + """ + + def __init__(self, class_object, index: Union[int, slice]) -> None: + """ + Args: + class_object: this should be an instance of a class which has + attributes which are tensors representing a batch of + values. + index: int/slice, an index indicating the position in the batch. + In __setattr__ and __getattr__ only the value of class + attributes at this index will be accessed. + """ + self.__dict__["class_object"] = class_object + self.__dict__["index"] = index + + def __setattr__(self, name: str, value: Any): + """ + Update the attribute given by `name` to the value given by `value` + at the index specified by `self.index`. + + Args: + name: str, name of the attribute. + value: value to set the attribute to. + """ + v = getattr(self.class_object, name) + if not torch.is_tensor(v): + msg = "Can only set values on attributes which are tensors; got %r" + raise AttributeError(msg % type(v)) + + # Convert the attribute to a tensor if it is not a tensor. 
+ if not torch.is_tensor(value): + value = torch.tensor( + value, device=v.device, dtype=v.dtype, requires_grad=v.requires_grad + ) + + # Check the shapes match the existing shape and the shape of the index. + if v.dim() > 1 and value.dim() > 1 and value.shape[1:] != v.shape[1:]: + msg = "Expected value to have shape %r; got %r" + raise ValueError(msg % (v.shape, value.shape)) + if ( + v.dim() == 0 + and isinstance(self.index, slice) + and len(value) != len(self.index) + ): + msg = "Expected value to have len %r; got %r" + raise ValueError(msg % (len(self.index), len(value))) + self.class_object.__dict__[name][self.index] = value + + def __getattr__(self, name: str): + """ + Return the value of the attribute given by "name" on self.class_object + at the index specified in self.index. + + Args: + name: string of the attribute name + """ + if hasattr(self.class_object, name): + return self.class_object.__dict__[name][self.index] + else: + msg = "Attribute %s not found on %r" + return AttributeError(msg % (name, self.class_object.__name__)) + + +BROADCAST_TYPES = (float, int, list, tuple, torch.Tensor, np.ndarray) + + +class TensorProperties(nn.Module): + """ + A mix-in class for storing tensors as properties with helper methods. + """ + + def __init__( + self, + dtype: torch.dtype = torch.float32, + device: Device = "cpu", + **kwargs, + ) -> None: + """ + Args: + dtype: data type to set for the inputs + device: Device (as str or torch.device) + kwargs: any number of keyword arguments. Any arguments which are + of type (float/int/list/tuple/tensor/array) are broadcasted and + other keyword arguments are set as attributes. + """ + super().__init__() + self.device = make_device(device) + self._N = 0 + if kwargs is not None: + + # broadcast all inputs which are float/int/list/tuple/tensor/array + # set as attributes anything else e.g. 
strings, bools + args_to_broadcast = {} + for k, v in kwargs.items(): + if v is None or isinstance(v, (str, bool)): + setattr(self, k, v) + elif isinstance(v, BROADCAST_TYPES): + args_to_broadcast[k] = v + else: + msg = "Arg %s with type %r is not broadcastable" + warnings.warn(msg % (k, type(v))) + + names = args_to_broadcast.keys() + # convert from type dict.values to tuple + values = tuple(v for v in args_to_broadcast.values()) + + if len(values) > 0: + broadcasted_values = convert_to_tensors_and_broadcast( + *values, device=device + ) + + # Set broadcasted values as attributes on self. + for i, n in enumerate(names): + setattr(self, n, broadcasted_values[i]) + if self._N == 0: + self._N = broadcasted_values[i].shape[0] + + def __len__(self) -> int: + return self._N + + def isempty(self) -> bool: + return self._N == 0 + + def __getitem__(self, index: Union[int, slice]) -> TensorAccessor: + """ + + Args: + index: an int or slice used to index all the fields. + + Returns: + if `index` is an index int/slice return a TensorAccessor class + with getattribute/setattribute methods which return/update the value + at the index in the original class. + """ + if isinstance(index, (int, slice)): + return TensorAccessor(class_object=self, index=index) + + msg = "Expected index of type int or slice; got %r" + raise ValueError(msg % type(index)) + + # pyre-fixme[14]: `to` overrides method defined in `Module` inconsistently. + def to(self, device: Device = "cpu") -> "TensorProperties": + """ + In place operation to move class properties which are tensors to a + specified device. If self has a property "device", update this as well. 
+ """ + device_ = make_device(device) + for k in dir(self): + v = getattr(self, k) + if k == "device": + setattr(self, k, device_) + if torch.is_tensor(v) and v.device != device_: + setattr(self, k, v.to(device_)) + return self + + def cpu(self) -> "TensorProperties": + return self.to("cpu") + + # pyre-fixme[14]: `cuda` overrides method defined in `Module` inconsistently. + def cuda(self, device: Optional[int] = None) -> "TensorProperties": + return self.to(f"cuda:{device}" if device is not None else "cuda") + + def clone(self, other) -> "TensorProperties": + """ + Update the tensor properties of other with the cloned properties of self. + """ + for k in dir(self): + v = getattr(self, k) + if inspect.ismethod(v) or k.startswith("__") or type(v) is TypeVar: + continue + if torch.is_tensor(v): + v_clone = v.clone() + else: + v_clone = copy.deepcopy(v) + setattr(other, k, v_clone) + return other + + def gather_props(self, batch_idx) -> "TensorProperties": + """ + This is an in place operation to reformat all tensor class attributes + based on a set of given indices using torch.gather. This is useful when + attributes which are batched tensors e.g. shape (N, 3) need to be + multiplied with another tensor which has a different first dimension + e.g. packed vertices of shape (V, 3). + + Example + + .. code-block:: python + + self.specular_color = (N, 3) tensor of specular colors for each mesh + + A lighting calculation may use + + .. code-block:: python + + verts_packed = meshes.verts_packed() # (V, 3) + + To multiply these two tensors the batch dimension needs to be the same. + To achieve this we can do + + .. code-block:: python + + batch_idx = meshes.verts_packed_to_mesh_idx() # (V) + + This gives index of the mesh for each vertex in verts_packed. + + .. code-block:: python + + self.gather_props(batch_idx) + self.specular_color = (V, 3) tensor with the specular color for + each packed vertex. 
+ + torch.gather requires the index tensor to have the same shape as the + input tensor so this method takes care of the reshaping of the index + tensor to use with class attributes with arbitrary dimensions. + + Args: + batch_idx: shape (B, ...) where `...` represents an arbitrary + number of dimensions + + Returns: + self with all properties reshaped. e.g. a property with shape (N, 3) + is transformed to shape (B, 3). + """ + # Iterate through the attributes of the class which are tensors. + for k in dir(self): + v = getattr(self, k) + if torch.is_tensor(v): + if v.shape[0] > 1: + # There are different values for each batch element + # so gather these using the batch_idx. + # First clone the input batch_idx tensor before + # modifying it. + _batch_idx = batch_idx.clone() + idx_dims = _batch_idx.shape + tensor_dims = v.shape + if len(idx_dims) > len(tensor_dims): + msg = "batch_idx cannot have more dimensions than %s. " + msg += "got shape %r and %s has shape %r" + raise ValueError(msg % (k, idx_dims, k, tensor_dims)) + if idx_dims != tensor_dims: + # To use torch.gather the index tensor (_batch_idx) has + # to have the same shape as the input tensor. + new_dims = len(tensor_dims) - len(idx_dims) + new_shape = idx_dims + (1,) * new_dims + # pyre-fixme[58]: `+` is not supported for operand types + # `Tuple[int]` and `torch._C.Size` + expand_dims = (-1,) + tensor_dims[1:] + _batch_idx = _batch_idx.view(*new_shape) + _batch_idx = _batch_idx.expand(*expand_dims) + + v = v.gather(0, _batch_idx) + setattr(self, k, v) + return self + + +def format_tensor( + input, + dtype: torch.dtype = torch.float32, + device: Device = "cpu", +) -> torch.Tensor: + """ + Helper function for converting a scalar value to a tensor. + + Args: + input: Python scalar, Python list/tuple, torch scalar, 1D torch tensor + dtype: data type for the input + device: Device (as str or torch.device) on which the tensor should be placed. 
+ + Returns: + input_vec: torch tensor with optional added batch dimension. + """ + device_ = make_device(device) + if not torch.is_tensor(input): + input = torch.tensor(input, dtype=dtype, device=device_) + + if input.dim() == 0: + input = input.view(1) + + if input.device == device_: + return input + + input = input.to(device=device) + return input + + +def convert_to_tensors_and_broadcast( + *args, + dtype: torch.dtype = torch.float32, + device: Device = "cpu", +): + """ + Helper function to handle parsing an arbitrary number of inputs (*args) + which all need to have the same batch dimension. + The output is a list of tensors. + + Args: + *args: an arbitrary number of inputs + Each of the values in `args` can be one of the following + - Python scalar + - Torch scalar + - Torch tensor of shape (N, K_i) or (1, K_i) where K_i are + an arbitrary number of dimensions which can vary for each + value in args. In this case each input is broadcast to a + tensor of shape (N, K_i) + dtype: data type to use when creating new tensors. + device: torch device on which the tensors should be placed. 
+ + Output: + args: A list of tensors of shape (N, K_i) + """ + # Convert all inputs to tensors with a batch dimension + args_1d = [format_tensor(c, dtype, device) for c in args] + + # Find broadcast size + sizes = [c.shape[0] for c in args_1d] + N = max(sizes) + + args_Nd = [] + for c in args_1d: + if c.shape[0] != 1 and c.shape[0] != N: + msg = "Got non-broadcastable sizes %r" % sizes + raise ValueError(msg) + + # Expand broadcast dim and keep non broadcast dims the same size + expand_sizes = (N,) + (-1,) * len(c.shape[1:]) + args_Nd.append(c.expand(*expand_sizes)) + + return args_Nd + + +def ndc_grid_sample( + input: torch.Tensor, + grid_ndc: torch.Tensor, + *, + align_corners: bool = False, + **grid_sample_kwargs, +) -> torch.Tensor: + """ + Samples a tensor `input` of shape `(B, dim, H, W)` at 2D locations + specified by a tensor `grid_ndc` of shape `(B, ..., 2)` using + the `torch.nn.functional.grid_sample` function. + `grid_ndc` is specified in PyTorch3D NDC coordinate frame. + + Args: + input: The tensor of shape `(B, dim, H, W)` to be sampled. + grid_ndc: A tensor of shape `(B, ..., 2)` denoting the set of + 2D locations at which `input` is sampled. + See [1] for a detailed description of the NDC coordinates. + align_corners: Forwarded to the `torch.nn.functional.grid_sample` + call. See its docstring. + grid_sample_kwargs: Additional arguments forwarded to the + `torch.nn.functional.grid_sample` call. See the corresponding + docstring for a listing of the corresponding arguments. + + Returns: + sampled_input: A tensor of shape `(B, dim, ...)` containing the samples + of `input` at 2D locations `grid_ndc`. 
+ + References: + [1] https://pytorch3d.org/docs/cameras + """ + + batch, *spatial_size, pt_dim = grid_ndc.shape + if batch != input.shape[0]: + raise ValueError("'input' and 'grid_ndc' have to have the same batch size.") + if input.ndim != 4: + raise ValueError("'input' has to be a 4-dimensional Tensor.") + if pt_dim != 2: + raise ValueError("The last dimension of 'grid_ndc' has to be == 2.") + + grid_ndc_flat = grid_ndc.reshape(batch, -1, 1, 2) + + # pyre-fixme[6]: For 2nd param expected `Tuple[int, int]` but got `Size`. + grid_flat = ndc_to_grid_sample_coords(grid_ndc_flat, input.shape[2:]) + + sampled_input_flat = torch.nn.functional.grid_sample( + input, grid_flat, align_corners=align_corners, **grid_sample_kwargs + ) + + sampled_input = sampled_input_flat.reshape([batch, input.shape[1], *spatial_size]) + + return sampled_input + + +def ndc_to_grid_sample_coords( + xy_ndc: torch.Tensor, + image_size_hw: Tuple[int, int], +) -> torch.Tensor: + """ + Convert from the PyTorch3D's NDC coordinates to + `torch.nn.functional.grid_sampler`'s coordinates. + + Args: + xy_ndc: Tensor of shape `(..., 2)` containing 2D points in the + PyTorch3D's NDC coordinates. + image_size_hw: A tuple `(image_height, image_width)` denoting the + height and width of the image tensor to sample. + Returns: + xy_grid_sample: Tensor of shape `(..., 2)` containing 2D points in the + `torch.nn.functional.grid_sample` coordinates. 
+ """ + if len(image_size_hw) != 2 or any(s <= 0 for s in image_size_hw): + raise ValueError("'image_size_hw' has to be a 2-tuple of positive integers") + aspect = min(image_size_hw) / max(image_size_hw) + xy_grid_sample = -xy_ndc # first negate the coords + if image_size_hw[0] >= image_size_hw[1]: + xy_grid_sample[..., 1] *= aspect + else: + xy_grid_sample[..., 0] *= aspect + return xy_grid_sample + + +def parse_image_size( + image_size: Union[List[int], Tuple[int, int], int] +) -> Tuple[int, int]: + """ + Args: + image_size: A single int (for square images) or a tuple/list of two ints. + + Returns: + A tuple of two ints. + + Throws: + ValueError if got more than two ints, any negative numbers or non-ints. + """ + if not isinstance(image_size, (tuple, list)): + return (image_size, image_size) + if len(image_size) != 2: + raise ValueError("Image size can only be a tuple/list of (H, W)") + if not all(i > 0 for i in image_size): + raise ValueError("Image sizes must be greater than 0; got %d, %d" % image_size) + if not all(isinstance(i, int) for i in image_size): + raise ValueError("Image sizes must be integers; got %f, %f" % image_size) + return tuple(image_size) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7ed0194a4fbfa2d68bb113c96e1c0d4e70c4debf --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + +from .meshes import join_meshes_as_batch, join_meshes_as_scene, Meshes +from .pointclouds import ( + join_pointclouds_as_batch, + join_pointclouds_as_scene, + Pointclouds, +) +from .utils import list_to_packed, list_to_padded, packed_to_list, padded_to_list +from .volumes import Volumes + + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/__pycache__/__init__.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0b196c18b29c6c09a37089a1dbaa8caf7096a8e Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/__pycache__/__init__.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/__pycache__/meshes.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/__pycache__/meshes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97f9c90ee9d0ee2403bdd7b73f3dcc3cc12ae45c Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/__pycache__/meshes.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/__pycache__/pointclouds.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/__pycache__/pointclouds.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23f1d152c63ab1992fc20c0ee4b2f5e09eec8278 Binary files /dev/null and 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/__pycache__/pointclouds.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/__pycache__/utils.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7d839d9f114e723a60788aacbe695324f336be2 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/__pycache__/utils.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/__pycache__/volumes.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/__pycache__/volumes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..267f46cf2d92d9a7bfff6a074c3ddd5f1a2d0d05 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/__pycache__/volumes.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/meshes.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/meshes.py new file mode 100644 index 0000000000000000000000000000000000000000..53f3f454aa663ace0246bcecc2a4ff268e2d646a --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/meshes.py @@ -0,0 +1,1752 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
+# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from typing import List, Union + +import torch + +from ..common.datatypes import Device, make_device +from . import utils as struct_utils + + +class Meshes: + """ + This class provides functions for working with batches of triangulated + meshes with varying numbers of faces and vertices, and converting between + representations. + + Within Meshes, there are three different representations of the faces and + verts data: + + List + - only used for input as a starting point to convert to other representations. + Padded + - has specific batch dimension. + Packed + - no batch dimension. + - has auxiliary variables used to index into the padded representation. + + Example: + + Input list of verts V_n = [[V_1], [V_2], ... , [V_N]] + where V_1, ... , V_N are the number of verts in each mesh and N is the + number of meshes. + + Input list of faces F_n = [[F_1], [F_2], ... , [F_N]] + where F_1, ... , F_N are the number of faces in each mesh. + + # SPHINX IGNORE + List | Padded | Packed + ---------------------------|-------------------------|------------------------ + [[V_1], ... 
, [V_N]] | size = (N, max(V_n), 3) | size = (sum(V_n), 3) + | | + Example for verts: | | + | | + V_1 = 3, V_2 = 4, V_3 = 5 | size = (3, 5, 3) | size = (12, 3) + | | + List([ | tensor([ | tensor([ + [ | [ | [0.1, 0.3, 0.5], + [0.1, 0.3, 0.5], | [0.1, 0.3, 0.5], | [0.5, 0.2, 0.1], + [0.5, 0.2, 0.1], | [0.5, 0.2, 0.1], | [0.6, 0.8, 0.7], + [0.6, 0.8, 0.7], | [0.6, 0.8, 0.7], | [0.1, 0.3, 0.3], + ], | [0, 0, 0], | [0.6, 0.7, 0.8], + [ | [0, 0, 0], | [0.2, 0.3, 0.4], + [0.1, 0.3, 0.3], | ], | [0.1, 0.5, 0.3], + [0.6, 0.7, 0.8], | [ | [0.7, 0.3, 0.6], + [0.2, 0.3, 0.4], | [0.1, 0.3, 0.3], | [0.2, 0.4, 0.8], + [0.1, 0.5, 0.3], | [0.6, 0.7, 0.8], | [0.9, 0.5, 0.2], + ], | [0.2, 0.3, 0.4], | [0.2, 0.3, 0.4], + [ | [0.1, 0.5, 0.3], | [0.9, 0.3, 0.8], + [0.7, 0.3, 0.6], | [0, 0, 0], | ]) + [0.2, 0.4, 0.8], | ], | + [0.9, 0.5, 0.2], | [ | + [0.2, 0.3, 0.4], | [0.7, 0.3, 0.6], | + [0.9, 0.3, 0.8], | [0.2, 0.4, 0.8], | + ] | [0.9, 0.5, 0.2], | + ]) | [0.2, 0.3, 0.4], | + | [0.9, 0.3, 0.8], | + | ] | + | ]) | + Example for faces: | | + | | + F_1 = 1, F_2 = 2, F_3 = 7 | size = (3, 7, 3) | size = (10, 3) + | | + List([ | tensor([ | tensor([ + [ | [ | [ 0, 1, 2], + [0, 1, 2], | [0, 1, 2], | [ 3, 4, 5], + ], | [-1, -1, -1], | [ 4, 5, 6], + [ | [-1, -1, -1] | [ 8, 9, 7], + [0, 1, 2], | [-1, -1, -1] | [ 7, 8, 10], + [1, 2, 3], | [-1, -1, -1] | [ 9, 10, 8], + ], | [-1, -1, -1], | [11, 10, 9], + [ | [-1, -1, -1], | [11, 7, 8], + [1, 2, 0], | ], | [11, 10, 8], + [0, 1, 3], | [ | [11, 9, 8], + [2, 3, 1], | [0, 1, 2], | ]) + [4, 3, 2], | [1, 2, 3], | + [4, 0, 1], | [-1, -1, -1], | + [4, 3, 1], | [-1, -1, -1], | + [4, 2, 1], | [-1, -1, -1], | + ], | [-1, -1, -1], | + ]) | [-1, -1, -1], | + | ], | + | [ | + | [1, 2, 0], | + | [0, 1, 3], | + | [2, 3, 1], | + | [4, 3, 2], | + | [4, 0, 1], | + | [4, 3, 1], | + | [4, 2, 1], | + | ] | + | ]) | + ----------------------------------------------------------------------------- + + Auxiliary variables for packed representation + + Name | Size | Example 
from above + -------------------------------|---------------------|----------------------- + | | + verts_packed_to_mesh_idx | size = (sum(V_n)) | tensor([ + | | 0, 0, 0, 1, 1, 1, + | | 1, 2, 2, 2, 2, 2 + | | )] + | | size = (12) + | | + mesh_to_verts_packed_first_idx | size = (N) | tensor([0, 3, 7]) + | | size = (3) + | | + num_verts_per_mesh | size = (N) | tensor([3, 4, 5]) + | | size = (3) + | | + faces_packed_to_mesh_idx | size = (sum(F_n)) | tensor([ + | | 0, 1, 1, 2, 2, 2, + | | 2, 2, 2, 2 + | | )] + | | size = (10) + | | + mesh_to_faces_packed_first_idx | size = (N) | tensor([0, 1, 3]) + | | size = (3) + | | + num_faces_per_mesh | size = (N) | tensor([1, 2, 7]) + | | size = (3) + | | + verts_padded_to_packed_idx | size = (sum(V_n)) | tensor([ + | | 0, 1, 2, 5, 6, 7, + | | 8, 10, 11, 12, 13, + | | 14 + | | )] + | | size = (12) + ----------------------------------------------------------------------------- + # SPHINX IGNORE + + From the faces, edges are computed and have packed and padded + representations with auxiliary variables. + + E_n = [[E_1], ... , [E_N]] + where E_1, ... , E_N are the number of unique edges in each mesh. + Total number of unique edges = sum(E_n) + + # SPHINX IGNORE + Name | Size | Example from above + -------------------------------|-------------------------|---------------------- + | | + edges_packed | size = (sum(E_n), 2) | tensor([ + | | [0, 1], + | | [0, 2], + | | [1, 2], + | | ... + | | [10, 11], + | | )] + | | size = (18, 2) + | | + num_edges_per_mesh | size = (N) | tensor([3, 5, 10]) + | | size = (3) + | | + edges_packed_to_mesh_idx | size = (sum(E_n)) | tensor([ + | | 0, 0, 0, + | | . . . + | | 2, 2, 2 + | | ]) + | | size = (18) + | | + faces_packed_to_edges_packed | size = (sum(F_n), 3) | tensor([ + | | [2, 1, 0], + | | [5, 4, 3], + | | . . . 
                                   |                       |  [12, 14, 16],
                                   |                       |  ])
                                   |                       |  size = (10, 3)
                                   |                       |
    mesh_to_edges_packed_first_idx |  size = (N)           |  tensor([0, 3, 8])
                                   |                       |  size = (3)
    ----------------------------------------------------------------------------
    # SPHINX IGNORE
    """

    # Names of all lazily-computed cached tensors; clone/detach/to iterate this
    # list to copy caches between Meshes instances without recomputation.
    _INTERNAL_TENSORS = [
        "_verts_packed",
        "_verts_packed_to_mesh_idx",
        "_mesh_to_verts_packed_first_idx",
        "_verts_padded",
        "_num_verts_per_mesh",
        "_faces_packed",
        "_faces_packed_to_mesh_idx",
        "_mesh_to_faces_packed_first_idx",
        "_faces_padded",
        "_faces_areas_packed",
        "_verts_normals_packed",
        "_faces_normals_packed",
        "_num_faces_per_mesh",
        "_edges_packed",
        "_edges_packed_to_mesh_idx",
        "_mesh_to_edges_packed_first_idx",
        "_faces_packed_to_edges_packed",
        "_num_edges_per_mesh",
        "_verts_padded_to_packed_idx",
        "_laplacian_packed",
        "valid",
        "equisized",
    ]

    def __init__(
        self,
        verts,
        faces,
        textures=None,
        *,
        verts_normals=None,
    ) -> None:
        """
        Args:
            verts:
                Can be either

                - List where each element is a tensor of shape (num_verts, 3)
                  containing the (x, y, z) coordinates of each vertex.
                - Padded float tensor with shape (num_meshes, max_num_verts, 3).
                  Meshes should be padded with fill value of 0 so they all have
                  the same number of vertices.
            faces:
                Can be either

                - List where each element is a tensor of shape (num_faces, 3)
                  containing the indices of the 3 vertices in the corresponding
                  mesh in verts which form the triangular face.
                - Padded long tensor of shape (num_meshes, max_num_faces, 3).
                  Meshes should be padded with fill value of -1 so they have
                  the same number of faces.
            textures: Optional instance of the Textures class with mesh
                texture properties.
            verts_normals:
                Optional. Can be either

                - List where each element is a tensor of shape (num_verts, 3)
                  containing the normals of each vertex.
                - Padded float tensor with shape (num_meshes, max_num_verts, 3).
                  They should be padded with fill value of 0 so they all have
                  the same number of vertices.
                  Note that modifying the mesh later, e.g. with offset_verts_,
                  can cause these normals to be forgotten and normals to be recalculated
                  based on the new vertex positions.

        Refer to comments above for descriptions of List and Padded representations.
        """
        self.device = torch.device("cpu")
        # Duck-typed check: anything with a sample_textures method is accepted.
        if textures is not None and not hasattr(textures, "sample_textures"):
            msg = "Expected textures to be an instance of type TexturesBase; got %r"
            raise ValueError(msg % type(textures))

        self.textures = textures

        # Indicates whether the meshes in the list/batch have the same number
        # of faces and vertices.
        self.equisized = False

        # Boolean indicator for each mesh in the batch
        # True if mesh has non zero number of verts and face, False otherwise.
        self.valid = None

        self._N = 0  # batch size (number of meshes)
        self._V = 0  # (max) number of vertices per mesh
        self._F = 0  # (max) number of faces per mesh

        # List of Tensors of verts and faces.
        self._verts_list = None
        self._faces_list = None

        # Packed representation for verts.
        self._verts_packed = None  # (sum(V_n), 3)
        self._verts_packed_to_mesh_idx = None  # sum(V_n)

        # Index to convert verts from flattened padded to packed
        self._verts_padded_to_packed_idx = None  # N * max_V

        # Index of each mesh's first vert in the packed verts.
        # Assumes packing is sequential.
        self._mesh_to_verts_packed_first_idx = None  # N

        # Packed representation for faces.
        self._faces_packed = None  # (sum(F_n), 3)
        self._faces_packed_to_mesh_idx = None  # sum(F_n)

        # Index of each mesh's first face in packed faces.
        # Assumes packing is sequential.
        self._mesh_to_faces_packed_first_idx = None  # N

        # Packed representation of edges sorted by index of the first vertex
        # in the edge. Edges can be shared between faces in a mesh.
        self._edges_packed = None  # (sum(E_n), 2)

        # Map from packed edges to corresponding mesh index.
        self._edges_packed_to_mesh_idx = None  # sum(E_n)
        self._num_edges_per_mesh = None  # N
        self._mesh_to_edges_packed_first_idx = None  # N

        # Map from packed faces to packed edges. This represents the index of
        # the edge opposite the vertex for each vertex in the face. E.g.
        #
        #         v0
        #         /\
        #        /  \
        #    e1 /    \ e2
        #      /      \
        #     /________\
        #   v2    e0   v1
        #
        # Face (v0, v1, v2) => Edges (e0, e1, e2)
        self._faces_packed_to_edges_packed = None  # (sum(F_n), 3)

        # Padded representation of verts.
        self._verts_padded = None  # (N, max(V_n), 3)
        self._num_verts_per_mesh = None  # N

        # Padded representation of faces.
        self._faces_padded = None  # (N, max(F_n), 3)
        self._num_faces_per_mesh = None  # N

        # Face areas
        self._faces_areas_packed = None

        # Normals
        self._verts_normals_packed = None
        self._faces_normals_packed = None

        # Packed representation of Laplacian Matrix
        self._laplacian_packed = None

        # Identify type of verts and faces.
        if isinstance(verts, list) and isinstance(faces, list):
            self._verts_list = verts
            # Drop padded (-1) rows so each list entry holds only real faces;
            # cast to int64 so the tensors can be used as indices.
            self._faces_list = [
                f[f.gt(-1).all(1)].to(torch.int64) if len(f) > 0 else f for f in faces
            ]
            self._N = len(self._verts_list)
            self.valid = torch.zeros((self._N,), dtype=torch.bool, device=self.device)
            if self._N > 0:
                # The device of the first verts tensor defines the batch device;
                # all other tensors must agree.
                self.device = self._verts_list[0].device
                if not (
                    all(v.device == self.device for v in verts)
                    and all(f.device == self.device for f in faces)
                ):
                    raise ValueError(
                        "All Verts and Faces tensors should be on same device."
                    )
                self._num_verts_per_mesh = torch.tensor(
                    [len(v) for v in self._verts_list], device=self.device
                )
                self._V = int(self._num_verts_per_mesh.max())
                self._num_faces_per_mesh = torch.tensor(
                    [len(f) for f in self._faces_list], device=self.device
                )
                self._F = int(self._num_faces_per_mesh.max())
                self.valid = torch.tensor(
                    [
                        len(v) > 0 and len(f) > 0
                        for (v, f) in zip(self._verts_list, self._faces_list)
                    ],
                    dtype=torch.bool,
                    device=self.device,
                )
                if (len(self._num_verts_per_mesh.unique()) == 1) and (
                    len(self._num_faces_per_mesh.unique()) == 1
                ):
                    self.equisized = True

        elif torch.is_tensor(verts) and torch.is_tensor(faces):
            if verts.size(2) != 3 or faces.size(2) != 3:
                raise ValueError("Verts or Faces tensors have incorrect dimensions.")
            self._verts_padded = verts
            self._faces_padded = faces.to(torch.int64)
            self._N = self._verts_padded.shape[0]
            self._V = self._verts_padded.shape[1]

            if verts.device != faces.device:
                msg = "Verts and Faces tensors should be on same device. \n Got {} and {}."
                raise ValueError(msg.format(verts.device, faces.device))

            self.device = self._verts_padded.device
            self.valid = torch.zeros((self._N,), dtype=torch.bool, device=self.device)
            if self._N > 0:
                # Check that padded faces - which have value -1 - are at the
                # end of the tensors
                faces_not_padded = self._faces_padded.gt(-1).all(2)
                self._num_faces_per_mesh = faces_not_padded.sum(1)
                if (faces_not_padded[:, :-1] < faces_not_padded[:, 1:]).any():
                    raise ValueError("Padding of faces must be at the end")

                # NOTE that we don't check for the ordering of padded verts
                # as long as the faces index correspond to the right vertices.

                self.valid = self._num_faces_per_mesh > 0
                self._F = int(self._num_faces_per_mesh.max())
                if len(self._num_faces_per_mesh.unique()) == 1:
                    self.equisized = True

                # In the padded path every mesh is treated as having max_V
                # verts; trailing zero-rows are indistinguishable from real ones.
                self._num_verts_per_mesh = torch.full(
                    size=(self._N,),
                    fill_value=self._V,
                    dtype=torch.int64,
                    device=self.device,
                )

        else:
            raise ValueError(
                "Verts and Faces must be either a list or a tensor with \
                    shape (batch_size, N, 3) where N is either the maximum \
                       number of verts or faces respectively."
            )

        if self.isempty():
            self._num_verts_per_mesh = torch.zeros(
                (0,), dtype=torch.int64, device=self.device
            )
            self._num_faces_per_mesh = torch.zeros(
                (0,), dtype=torch.int64, device=self.device
            )

        # Set the num verts/faces on the textures if present.
        if textures is not None:
            shape_ok = self.textures.check_shapes(self._N, self._V, self._F)
            if not shape_ok:
                msg = "Textures do not match the dimensions of Meshes."
                raise ValueError(msg)

            self.textures._num_faces_per_mesh = self._num_faces_per_mesh.tolist()
            self.textures._num_verts_per_mesh = self._num_verts_per_mesh.tolist()
            self.textures.valid = self.valid

        if verts_normals is not None:
            self._set_verts_normals(verts_normals)

    def _set_verts_normals(self, verts_normals) -> None:
        # Validate user-supplied normals and store them directly in packed
        # form; list inputs are concatenated in batch order.
        if isinstance(verts_normals, list):
            if len(verts_normals) != self._N:
                raise ValueError("Invalid verts_normals input")

            for item, n_verts in zip(verts_normals, self._num_verts_per_mesh):
                if (
                    not isinstance(item, torch.Tensor)
                    or item.ndim != 2
                    or item.shape[1] != 3
                    or item.shape[0] != n_verts
                ):
                    raise ValueError("Invalid verts_normals input")
            self._verts_normals_packed = torch.cat(verts_normals, 0)
        elif torch.is_tensor(verts_normals):
            if (
                verts_normals.ndim != 3
                or verts_normals.size(2) != 3
                or verts_normals.size(0) != self._N
            ):
                raise ValueError("Vertex normals tensor has incorrect dimensions.")
            self._verts_normals_packed = struct_utils.padded_to_packed(
                verts_normals, split_size=self._num_verts_per_mesh.tolist()
            )
        else:
            raise ValueError("verts_normals must be a list or tensor")

    def __len__(self) -> int:
        return self._N

    def __getitem__(
        self, index: Union[int, List[int], slice, torch.BoolTensor, torch.LongTensor]
    ) -> "Meshes":
        """
        Args:
            index: Specifying the index of the mesh to retrieve.
                Can be an int, slice, list of ints or a boolean tensor.

        Returns:
            Meshes object with selected meshes. The mesh tensors are not cloned.
        """
        if isinstance(index, (int, slice)):
            verts = self.verts_list()[index]
            faces = self.faces_list()[index]
        elif isinstance(index, list):
            verts = [self.verts_list()[i] for i in index]
            faces = [self.faces_list()[i] for i in index]
        elif isinstance(index, torch.Tensor):
            if index.dim() != 1 or index.dtype.is_floating_point:
                raise IndexError(index)
            # NOTE consider converting index to cpu for efficiency
            if index.dtype == torch.bool:
                # advanced indexing on a single dimension
                index = index.nonzero()
                index = index.squeeze(1) if index.numel() > 0 else index
            index = index.tolist()
            verts = [self.verts_list()[i] for i in index]
            faces = [self.faces_list()[i] for i in index]
        else:
            raise IndexError(index)

        textures = None if self.textures is None else self.textures[index]

        # An int index through verts_list yields a single tensor; re-wrap it
        # in a one-element list so the constructor sees the list form.
        if torch.is_tensor(verts) and torch.is_tensor(faces):
            return self.__class__(verts=[verts], faces=[faces], textures=textures)
        elif isinstance(verts, list) and isinstance(faces, list):
            return self.__class__(verts=verts, faces=faces, textures=textures)
        else:
            raise ValueError("(verts, faces) not defined correctly")

    def isempty(self) -> bool:
        """
        Checks whether any mesh is valid.

        Returns:
            bool indicating whether there is any data.
        """
        return self._N == 0 or self.valid.eq(False).all()

    def verts_list(self):
        """
        Get the list representation of the vertices.

        Returns:
            list of tensors of vertices of shape (V_n, 3).
+ """ + if self._verts_list is None: + assert ( + self._verts_padded is not None + ), "verts_padded is required to compute verts_list." + self._verts_list = struct_utils.padded_to_list( + self._verts_padded, self.num_verts_per_mesh().tolist() + ) + return self._verts_list + + def faces_list(self): + """ + Get the list representation of the faces. + + Returns: + list of tensors of faces of shape (F_n, 3). + """ + if self._faces_list is None: + assert ( + self._faces_padded is not None + ), "faces_padded is required to compute faces_list." + self._faces_list = struct_utils.padded_to_list( + self._faces_padded, self.num_faces_per_mesh().tolist() + ) + return self._faces_list + + def verts_packed(self): + """ + Get the packed representation of the vertices. + + Returns: + tensor of vertices of shape (sum(V_n), 3). + """ + self._compute_packed() + return self._verts_packed + + def verts_packed_to_mesh_idx(self): + """ + Return a 1D tensor with the same first dimension as verts_packed. + verts_packed_to_mesh_idx[i] gives the index of the mesh which contains + verts_packed[i]. + + Returns: + 1D tensor of indices. + """ + self._compute_packed() + return self._verts_packed_to_mesh_idx + + def mesh_to_verts_packed_first_idx(self): + """ + Return a 1D tensor x with length equal to the number of meshes such that + the first vertex of the ith mesh is verts_packed[x[i]]. + + Returns: + 1D tensor of indices of first items. + """ + self._compute_packed() + return self._mesh_to_verts_packed_first_idx + + def num_verts_per_mesh(self): + """ + Return a 1D tensor x with length equal to the number of meshes giving + the number of vertices in each mesh. + + Returns: + 1D tensor of sizes. + """ + return self._num_verts_per_mesh + + def faces_packed(self): + """ + Get the packed representation of the faces. + Faces are given by the indices of the three vertices in verts_packed. + + Returns: + tensor of faces of shape (sum(F_n), 3). 
+ """ + self._compute_packed() + return self._faces_packed + + def faces_packed_to_mesh_idx(self): + """ + Return a 1D tensor with the same first dimension as faces_packed. + faces_packed_to_mesh_idx[i] gives the index of the mesh which contains + faces_packed[i]. + + Returns: + 1D tensor of indices. + """ + self._compute_packed() + return self._faces_packed_to_mesh_idx + + def mesh_to_faces_packed_first_idx(self): + """ + Return a 1D tensor x with length equal to the number of meshes such that + the first face of the ith mesh is faces_packed[x[i]]. + + Returns: + 1D tensor of indices of first items. + """ + self._compute_packed() + return self._mesh_to_faces_packed_first_idx + + def verts_padded(self): + """ + Get the padded representation of the vertices. + + Returns: + tensor of vertices of shape (N, max(V_n), 3). + """ + self._compute_padded() + return self._verts_padded + + def faces_padded(self): + """ + Get the padded representation of the faces. + + Returns: + tensor of faces of shape (N, max(F_n), 3). + """ + self._compute_padded() + return self._faces_padded + + def num_faces_per_mesh(self): + """ + Return a 1D tensor x with length equal to the number of meshes giving + the number of faces in each mesh. + + Returns: + 1D tensor of sizes. + """ + return self._num_faces_per_mesh + + def edges_packed(self): + """ + Get the packed representation of the edges. + + Returns: + tensor of edges of shape (sum(E_n), 2). + """ + self._compute_edges_packed() + return self._edges_packed + + def edges_packed_to_mesh_idx(self): + """ + Return a 1D tensor with the same first dimension as edges_packed. + edges_packed_to_mesh_idx[i] gives the index of the mesh which contains + edges_packed[i]. + + Returns: + 1D tensor of indices. 
+ """ + self._compute_edges_packed() + return self._edges_packed_to_mesh_idx + + def mesh_to_edges_packed_first_idx(self): + """ + Return a 1D tensor x with length equal to the number of meshes such that + the first edge of the ith mesh is edges_packed[x[i]]. + + Returns: + 1D tensor of indices of first items. + """ + self._compute_edges_packed() + return self._mesh_to_edges_packed_first_idx + + def faces_packed_to_edges_packed(self): + """ + Get the packed representation of the faces in terms of edges. + Faces are given by the indices of the three edges in + the packed representation of the edges. + + Returns: + tensor of faces of shape (sum(F_n), 3). + """ + self._compute_edges_packed() + return self._faces_packed_to_edges_packed + + def num_edges_per_mesh(self): + """ + Return a 1D tensor x with length equal to the number of meshes giving + the number of edges in each mesh. + + Returns: + 1D tensor of sizes. + """ + self._compute_edges_packed() + return self._num_edges_per_mesh + + def verts_padded_to_packed_idx(self): + """ + Return a 1D tensor x with length equal to the total number of vertices + such that verts_packed()[i] is element x[i] of the flattened padded + representation. + The packed representation can be calculated as follows. + + .. code-block:: python + + p = verts_padded().reshape(-1, 3) + verts_packed = p[x] + + Returns: + 1D tensor of indices. + """ + if self._verts_padded_to_packed_idx is not None: + return self._verts_padded_to_packed_idx + + self._verts_padded_to_packed_idx = torch.cat( + [ + torch.arange(v, dtype=torch.int64, device=self.device) + i * self._V + for (i, v) in enumerate(self.num_verts_per_mesh()) + ], + dim=0, + ) + return self._verts_padded_to_packed_idx + + def has_verts_normals(self) -> bool: + """ + Check whether vertex normals are already present. + """ + return self._verts_normals_packed is not None + + def verts_normals_packed(self): + """ + Get the packed representation of the vertex normals. 
+ + Returns: + tensor of normals of shape (sum(V_n), 3). + """ + self._compute_vertex_normals() + return self._verts_normals_packed + + def verts_normals_list(self): + """ + Get the list representation of the vertex normals. + + Returns: + list of tensors of normals of shape (V_n, 3). + """ + if self.isempty(): + return [ + torch.empty((0, 3), dtype=torch.float32, device=self.device) + ] * self._N + verts_normals_packed = self.verts_normals_packed() + split_size = self.num_verts_per_mesh().tolist() + return struct_utils.packed_to_list(verts_normals_packed, split_size) + + def verts_normals_padded(self): + """ + Get the padded representation of the vertex normals. + + Returns: + tensor of normals of shape (N, max(V_n), 3). + """ + if self.isempty(): + return torch.zeros((self._N, 0, 3), dtype=torch.float32, device=self.device) + verts_normals_list = self.verts_normals_list() + return struct_utils.list_to_padded( + verts_normals_list, (self._V, 3), pad_value=0.0, equisized=self.equisized + ) + + def faces_normals_packed(self): + """ + Get the packed representation of the face normals. + + Returns: + tensor of normals of shape (sum(F_n), 3). + """ + self._compute_face_areas_normals() + return self._faces_normals_packed + + def faces_normals_list(self): + """ + Get the list representation of the face normals. + + Returns: + list of tensors of normals of shape (F_n, 3). + """ + if self.isempty(): + return [ + torch.empty((0, 3), dtype=torch.float32, device=self.device) + ] * self._N + faces_normals_packed = self.faces_normals_packed() + split_size = self.num_faces_per_mesh().tolist() + return struct_utils.packed_to_list(faces_normals_packed, split_size) + + def faces_normals_padded(self): + """ + Get the padded representation of the face normals. + + Returns: + tensor of normals of shape (N, max(F_n), 3). 
+ """ + if self.isempty(): + return torch.zeros((self._N, 0, 3), dtype=torch.float32, device=self.device) + faces_normals_list = self.faces_normals_list() + return struct_utils.list_to_padded( + faces_normals_list, (self._F, 3), pad_value=0.0, equisized=self.equisized + ) + + def faces_areas_packed(self): + """ + Get the packed representation of the face areas. + + Returns: + tensor of areas of shape (sum(F_n),). + """ + self._compute_face_areas_normals() + return self._faces_areas_packed + + def laplacian_packed(self): + self._compute_laplacian_packed() + return self._laplacian_packed + + def _compute_face_areas_normals(self, refresh: bool = False): + """ + Compute the area and normal of each face in faces_packed. + The convention of a normal for a face consisting of verts [v0, v1, v2] + is normal = (v1 - v0) x (v2 - v0) + + Args: + refresh: Set to True to force recomputation of face areas. + Default: False. + """ + from ..ops.mesh_face_areas_normals import mesh_face_areas_normals + + if not ( + refresh + or any( + v is None + for v in [self._faces_areas_packed, self._faces_normals_packed] + ) + ): + return + faces_packed = self.faces_packed() + verts_packed = self.verts_packed() + face_areas, face_normals = mesh_face_areas_normals(verts_packed, faces_packed) + self._faces_areas_packed = face_areas + self._faces_normals_packed = face_normals + + def _compute_vertex_normals(self, refresh: bool = False): + """Computes the packed version of vertex normals from the packed verts + and faces. This assumes verts are shared between faces. The normal for + a vertex is computed as the sum of the normals of all the faces it is + part of weighed by the face areas. + + Args: + refresh: Set to True to force recomputation of vertex normals. + Default: False. 
+ """ + if not (refresh or any(v is None for v in [self._verts_normals_packed])): + return + + if self.isempty(): + self._verts_normals_packed = torch.zeros( + (self._N, 3), dtype=torch.int64, device=self.device + ) + else: + faces_packed = self.faces_packed() + verts_packed = self.verts_packed() + verts_normals = torch.zeros_like(verts_packed) + vertices_faces = verts_packed[faces_packed] + + faces_normals = torch.cross( + vertices_faces[:, 2] - vertices_faces[:, 1], + vertices_faces[:, 0] - vertices_faces[:, 1], + dim=1, + ) + + # NOTE: this is already applying the area weighting as the magnitude + # of the cross product is 2 x area of the triangle. + verts_normals = verts_normals.index_add( + 0, faces_packed[:, 0], faces_normals + ) + verts_normals = verts_normals.index_add( + 0, faces_packed[:, 1], faces_normals + ) + verts_normals = verts_normals.index_add( + 0, faces_packed[:, 2], faces_normals + ) + + self._verts_normals_packed = torch.nn.functional.normalize( + verts_normals, eps=1e-6, dim=1 + ) + + def _compute_padded(self, refresh: bool = False): + """ + Computes the padded version of meshes from verts_list and faces_list. + """ + if not ( + refresh or any(v is None for v in [self._verts_padded, self._faces_padded]) + ): + return + + verts_list = self.verts_list() + faces_list = self.faces_list() + assert ( + faces_list is not None and verts_list is not None + ), "faces_list and verts_list arguments are required" + + if self.isempty(): + self._faces_padded = torch.zeros( + (self._N, 0, 3), dtype=torch.int64, device=self.device + ) + self._verts_padded = torch.zeros( + (self._N, 0, 3), dtype=torch.float32, device=self.device + ) + else: + self._faces_padded = struct_utils.list_to_padded( + faces_list, (self._F, 3), pad_value=-1.0, equisized=self.equisized + ) + self._verts_padded = struct_utils.list_to_padded( + verts_list, (self._V, 3), pad_value=0.0, equisized=self.equisized + ) + + # TODO(nikhilar) Improve performance of _compute_packed. 
    def _compute_packed(self, refresh: bool = False):
        """
        Computes the packed version of the meshes from verts_list and faces_list
        and sets the values of auxiliary tensors.

        Args:
            refresh: Set to True to force recomputation of packed representations.
                Default: False.
        """

        if not (
            refresh
            or any(
                v is None
                for v in [
                    self._verts_packed,
                    self._verts_packed_to_mesh_idx,
                    self._mesh_to_verts_packed_first_idx,
                    self._faces_packed,
                    self._faces_packed_to_mesh_idx,
                    self._mesh_to_faces_packed_first_idx,
                ]
            )
        ):
            return

        # Packed can be calculated from padded or list, so can call the
        # accessor function for verts_list and faces_list.
        verts_list = self.verts_list()
        faces_list = self.faces_list()
        if self.isempty():
            self._verts_packed = torch.zeros(
                (0, 3), dtype=torch.float32, device=self.device
            )
            self._verts_packed_to_mesh_idx = torch.zeros(
                (0,), dtype=torch.int64, device=self.device
            )
            self._mesh_to_verts_packed_first_idx = torch.zeros(
                (0,), dtype=torch.int64, device=self.device
            )
            self._num_verts_per_mesh = torch.zeros(
                (0,), dtype=torch.int64, device=self.device
            )
            self._faces_packed = -(
                torch.ones((0, 3), dtype=torch.int64, device=self.device)
            )
            self._faces_packed_to_mesh_idx = torch.zeros(
                (0,), dtype=torch.int64, device=self.device
            )
            self._mesh_to_faces_packed_first_idx = torch.zeros(
                (0,), dtype=torch.int64, device=self.device
            )
            self._num_faces_per_mesh = torch.zeros(
                (0,), dtype=torch.int64, device=self.device
            )
            return

        # list_to_packed returns (packed, num_items, first_idx, item_to_mesh_idx).
        verts_list_to_packed = struct_utils.list_to_packed(verts_list)
        self._verts_packed = verts_list_to_packed[0]
        if not torch.allclose(self.num_verts_per_mesh(), verts_list_to_packed[1]):
            raise ValueError("The number of verts per mesh should be consistent.")
        self._mesh_to_verts_packed_first_idx = verts_list_to_packed[2]
        self._verts_packed_to_mesh_idx = verts_list_to_packed[3]

        faces_list_to_packed = struct_utils.list_to_packed(faces_list)
        faces_packed = faces_list_to_packed[0]
        if not torch.allclose(self.num_faces_per_mesh(), faces_list_to_packed[1]):
            raise ValueError("The number of faces per mesh should be consistent.")
        self._mesh_to_faces_packed_first_idx = faces_list_to_packed[2]
        self._faces_packed_to_mesh_idx = faces_list_to_packed[3]

        # Shift each mesh's per-mesh vertex indices by that mesh's first
        # vertex offset so faces index directly into verts_packed.
        faces_packed_offset = self._mesh_to_verts_packed_first_idx[
            self._faces_packed_to_mesh_idx
        ]
        self._faces_packed = faces_packed + faces_packed_offset.view(-1, 1)

    def _compute_edges_packed(self, refresh: bool = False):
        """
        Computes edges in packed form from the packed version of faces and verts.
        """
        if not (
            refresh
            or any(
                v is None
                for v in [
                    self._edges_packed,
                    self._faces_packed_to_mesh_idx,
                    self._edges_packed_to_mesh_idx,
                    self._num_edges_per_mesh,
                    self._mesh_to_edges_packed_first_idx,
                ]
            )
        ):
            return

        if self.isempty():
            # NOTE(review): the empty case only fills _edges_packed and
            # _edges_packed_to_mesh_idx; _num_edges_per_mesh,
            # _mesh_to_edges_packed_first_idx and
            # _faces_packed_to_edges_packed stay None here.
            self._edges_packed = torch.full(
                (0, 2), fill_value=-1, dtype=torch.int64, device=self.device
            )
            self._edges_packed_to_mesh_idx = torch.zeros(
                (0,), dtype=torch.int64, device=self.device
            )
            return

        faces = self.faces_packed()
        F = faces.shape[0]
        v0, v1, v2 = faces.chunk(3, dim=1)
        e01 = torch.cat([v0, v1], dim=1)  # (sum(F_n), 2)
        e12 = torch.cat([v1, v2], dim=1)  # (sum(F_n), 2)
        e20 = torch.cat([v2, v0], dim=1)  # (sum(F_n), 2)

        # All edges including duplicates. Concatenation order [e12, e20, e01]
        # puts the edge opposite v0 first, then opposite v1, then opposite v2;
        # the reshape(3, F).t() below relies on this ordering.
        edges = torch.cat([e12, e20, e01], dim=0)  # (sum(F_n)*3, 2)
        edge_to_mesh = torch.cat(
            [
                self._faces_packed_to_mesh_idx,
                self._faces_packed_to_mesh_idx,
                self._faces_packed_to_mesh_idx,
            ],
            dim=0,
        )  # sum(F_n)*3

        # Sort the edges in increasing vertex order to remove duplicates as
        # the same edge may appear in different orientations in different faces.
        # i.e. rows in edges after sorting will be of the form (v0, v1) where v1 > v0.
        # This sorting does not change the order in dim=0.
        edges, _ = edges.sort(dim=1)

        # Remove duplicate edges: convert each edge (v0, v1) into an
        # integer hash = V * v0 + v1; this allows us to use the scalar version of
        # unique which is much faster than edges.unique(dim=1) which is very slow.
        # After finding the unique elements reconstruct the vertex indices as:
        # (v0, v1) = (hash / V, hash % V)
        # The inverse maps from unique_edges back to edges:
        # unique_edges[inverse_idxs] == edges
        # i.e. inverse_idxs[i] == j means that edges[i] == unique_edges[j]

        V = self._verts_packed.shape[0]
        edges_hash = V * edges[:, 0] + edges[:, 1]
        u, inverse_idxs = torch.unique(edges_hash, return_inverse=True)

        # Find indices of unique elements.
        # TODO (nikhilar) remove following 4 lines when torch.unique has support
        # for returning unique indices
        sorted_hash, sort_idx = torch.sort(edges_hash, dim=0)
        unique_mask = torch.ones(
            edges_hash.shape[0], dtype=torch.bool, device=self.device
        )
        unique_mask[1:] = sorted_hash[1:] != sorted_hash[:-1]
        unique_idx = sort_idx[unique_mask]

        self._edges_packed = torch.stack([u // V, u % V], dim=1)
        self._edges_packed_to_mesh_idx = edge_to_mesh[unique_idx]

        # Row f gives, for face f, the unique-edge index opposite each of its
        # three vertices (see the ordering note above).
        self._faces_packed_to_edges_packed = inverse_idxs.reshape(3, F).t()

        # Compute number of edges per mesh
        # (int32 here, while the first-idx tensor below is int64)
        num_edges_per_mesh = torch.zeros(self._N, dtype=torch.int32, device=self.device)
        ones = torch.ones(1, dtype=torch.int32, device=self.device).expand(
            self._edges_packed_to_mesh_idx.shape
        )
        num_edges_per_mesh = num_edges_per_mesh.scatter_add_(
            0, self._edges_packed_to_mesh_idx, ones
        )
        self._num_edges_per_mesh = num_edges_per_mesh

        # Compute first idx for each mesh in edges_packed
        mesh_to_edges_packed_first_idx = torch.zeros(
            self._N, dtype=torch.int64, device=self.device
        )
        num_edges_cumsum = num_edges_per_mesh.cumsum(dim=0)
        mesh_to_edges_packed_first_idx[1:] = num_edges_cumsum[:-1].clone()

        self._mesh_to_edges_packed_first_idx = mesh_to_edges_packed_first_idx

    def _compute_laplacian_packed(self, refresh: bool = False):
        """
        Computes the laplacian in packed form.
        The definition of the laplacian is
        L[i, j] =    -1       , if i == j
        L[i, j] = 1 / deg(i)  , if (i, j) is an edge
        L[i, j] =    0        , otherwise
        where deg(i) is the degree of the i-th vertex in the graph

        Returns:
            Sparse FloatTensor of shape (V, V) where V = sum(V_n)

        """
        from ..ops import laplacian

        if not (refresh or self._laplacian_packed is None):
            return

        if self.isempty():
            self._laplacian_packed = torch.zeros(
                (0, 0), dtype=torch.float32, device=self.device
            ).to_sparse()
            return

        verts_packed = self.verts_packed()  # (sum(V_n), 3)
        edges_packed = self.edges_packed()  # (sum(E_n), 3)

        self._laplacian_packed = laplacian(verts_packed, edges_packed)

    def clone(self):
        """
        Deep copy of Meshes object. All internal tensors are cloned individually.

        Returns:
            new Meshes object.
        """
        verts_list = self.verts_list()
        faces_list = self.faces_list()
        new_verts_list = [v.clone() for v in verts_list]
        new_faces_list = [f.clone() for f in faces_list]
        other = self.__class__(verts=new_verts_list, faces=new_faces_list)
        # Copy cached derived tensors so the clone does not recompute them.
        for k in self._INTERNAL_TENSORS:
            v = getattr(self, k)
            if torch.is_tensor(v):
                setattr(other, k, v.clone())

        # Textures is not a tensor but has a clone method
        if self.textures is not None:
            other.textures = self.textures.clone()
        return other

    def detach(self):
        """
        Detach Meshes object. All internal tensors are detached individually.

        Returns:
            new Meshes object.
+ """ + verts_list = self.verts_list() + faces_list = self.faces_list() + new_verts_list = [v.detach() for v in verts_list] + new_faces_list = [f.detach() for f in faces_list] + other = self.__class__(verts=new_verts_list, faces=new_faces_list) + for k in self._INTERNAL_TENSORS: + v = getattr(self, k) + if torch.is_tensor(v): + setattr(other, k, v.detach()) + + # Textures is not a tensor but has a detach method + if self.textures is not None: + other.textures = self.textures.detach() + return other + + def to(self, device: Device, copy: bool = False): + """ + Match functionality of torch.Tensor.to() + If copy = True or the self Tensor is on a different device, the + returned tensor is a copy of self with the desired torch.device. + If copy = False and the self Tensor already has the correct torch.device, + then self is returned. + + Args: + device: Device (as str or torch.device) for the new tensor. + copy: Boolean indicator whether or not to clone self. Default False. + + Returns: + Meshes object. + """ + device_ = make_device(device) + if not copy and self.device == device_: + return self + + other = self.clone() + if self.device == device_: + return other + + other.device = device_ + if other._N > 0: + other._verts_list = [v.to(device_) for v in other._verts_list] + other._faces_list = [f.to(device_) for f in other._faces_list] + for k in self._INTERNAL_TENSORS: + v = getattr(self, k) + if torch.is_tensor(v): + setattr(other, k, v.to(device_)) + if self.textures is not None: + other.textures = other.textures.to(device_) + return other + + def cpu(self): + return self.to("cpu") + + def cuda(self): + return self.to("cuda") + + def get_mesh_verts_faces(self, index: int): + """ + Get tensors for a single mesh from the list representation. + + Args: + index: Integer in the range [0, N). + + Returns: + verts: Tensor of shape (V, 3). + faces: LongTensor of shape (F, 3). 
+ """ + if not isinstance(index, int): + raise ValueError("Mesh index must be an integer.") + if index < 0 or index > self._N: + raise ValueError( + "Mesh index must be in the range [0, N) where \ + N is the number of meshes in the batch." + ) + verts = self.verts_list() + faces = self.faces_list() + return verts[index], faces[index] + + # TODO(nikhilar) Move function to a utils file. + def split(self, split_sizes: list): + """ + Splits Meshes object of size N into a list of Meshes objects of + size len(split_sizes), where the i-th Meshes object is of size split_sizes[i]. + Similar to torch.split(). + + Args: + split_sizes: List of integer sizes of Meshes objects to be returned. + + Returns: + list[Meshes]. + """ + if not all(isinstance(x, int) for x in split_sizes): + raise ValueError("Value of split_sizes must be a list of integers.") + meshlist = [] + curi = 0 + for i in split_sizes: + meshlist.append(self[curi : curi + i]) + curi += i + return meshlist + + def offset_verts_(self, vert_offsets_packed): + """ + Add an offset to the vertices of this Meshes. In place operation. + If normals are present they may be recalculated. + + Args: + vert_offsets_packed: A Tensor of shape (3,) or the same shape as + self.verts_packed, giving offsets to be added + to all vertices. + Returns: + self. + """ + verts_packed = self.verts_packed() + if vert_offsets_packed.shape == (3,): + update_normals = False + vert_offsets_packed = vert_offsets_packed.expand_as(verts_packed) + else: + update_normals = True + if vert_offsets_packed.shape != verts_packed.shape: + raise ValueError("Verts offsets must have dimension (all_v, 3).") + # update verts packed + self._verts_packed = verts_packed + vert_offsets_packed + new_verts_list = list( + self._verts_packed.split(self.num_verts_per_mesh().tolist(), 0) + ) + # update verts list + # Note that since _compute_packed() has been executed, verts_list + # cannot be None even if not provided during construction. 
+ self._verts_list = new_verts_list + + # update verts padded + if self._verts_padded is not None: + for i, verts in enumerate(new_verts_list): + if len(verts) > 0: + self._verts_padded[i, : verts.shape[0], :] = verts + + # update face areas and normals and vertex normals + # only if the original attributes are present + if update_normals and any( + v is not None + for v in [self._faces_areas_packed, self._faces_normals_packed] + ): + self._compute_face_areas_normals(refresh=True) + if update_normals and self._verts_normals_packed is not None: + self._compute_vertex_normals(refresh=True) + + return self + + # TODO(nikhilar) Move out of place operator to a utils file. + def offset_verts(self, vert_offsets_packed): + """ + Out of place offset_verts. + + Args: + vert_offsets_packed: A Tensor of the same shape as self.verts_packed + giving offsets to be added to all vertices. + Returns: + new Meshes object. + """ + new_mesh = self.clone() + return new_mesh.offset_verts_(vert_offsets_packed) + + def scale_verts_(self, scale): + """ + Multiply the vertices of this Meshes object by a scalar value. + In place operation. + + Args: + scale: A scalar, or a Tensor of shape (N,). + + Returns: + self. 
+ """ + if not torch.is_tensor(scale): + scale = torch.full((len(self),), scale, device=self.device) + new_verts_list = [] + verts_list = self.verts_list() + for i, old_verts in enumerate(verts_list): + new_verts_list.append(scale[i] * old_verts) + # update list + self._verts_list = new_verts_list + # update packed + if self._verts_packed is not None: + self._verts_packed = torch.cat(new_verts_list, dim=0) + # update padded + if self._verts_padded is not None: + for i, verts in enumerate(self._verts_list): + if len(verts) > 0: + self._verts_padded[i, : verts.shape[0], :] = verts + + # update face areas and normals + # only if the original attributes are computed + if any( + v is not None + for v in [self._faces_areas_packed, self._faces_normals_packed] + ): + self._compute_face_areas_normals(refresh=True) + return self + + def scale_verts(self, scale): + """ + Out of place scale_verts. + + Args: + scale: A scalar, or a Tensor of shape (N,). + + Returns: + new Meshes object. + """ + new_mesh = self.clone() + return new_mesh.scale_verts_(scale) + + def update_padded(self, new_verts_padded): + """ + This function allows for an update of verts_padded without having to + explicitly convert it to the list representation for heterogeneous batches. + Returns a Meshes structure with updated padded tensors and copies of the + auxiliary tensors at construction time. + It updates self._verts_padded with new_verts_padded, and does a + shallow copy of (faces_padded, faces_list, num_verts_per_mesh, num_faces_per_mesh). + If packed representations are computed in self, they are updated as well. 
+ + Args: + new_points_padded: FloatTensor of shape (N, V, 3) + + Returns: + Meshes with updated padded representations + """ + + def check_shapes(x, size): + if x.shape[0] != size[0]: + raise ValueError("new values must have the same batch dimension.") + if x.shape[1] != size[1]: + raise ValueError("new values must have the same number of points.") + if x.shape[2] != size[2]: + raise ValueError("new values must have the same dimension.") + + check_shapes(new_verts_padded, [self._N, self._V, 3]) + + new = self.__class__(verts=new_verts_padded, faces=self.faces_padded()) + + if new._N != self._N or new._V != self._V or new._F != self._F: + raise ValueError("Inconsistent sizes after construction.") + + # overwrite the equisized flag + new.equisized = self.equisized + + # overwrite textures if any + new.textures = self.textures + + # copy auxiliary tensors + copy_tensors = ["_num_verts_per_mesh", "_num_faces_per_mesh", "valid"] + + for k in copy_tensors: + v = getattr(self, k) + if torch.is_tensor(v): + setattr(new, k, v) # shallow copy + + # shallow copy of faces_list if any, st new.faces_list() + # does not re-compute from _faces_padded + new._faces_list = self._faces_list + + # update verts/faces packed if they are computed in self + if self._verts_packed is not None: + copy_tensors = [ + "_faces_packed", + "_verts_packed_to_mesh_idx", + "_faces_packed_to_mesh_idx", + "_mesh_to_verts_packed_first_idx", + "_mesh_to_faces_packed_first_idx", + ] + for k in copy_tensors: + v = getattr(self, k) + assert torch.is_tensor(v) + setattr(new, k, v) # shallow copy + # update verts_packed + pad_to_packed = self.verts_padded_to_packed_idx() + new_verts_packed = new_verts_padded.reshape(-1, 3)[pad_to_packed, :] + new._verts_packed = new_verts_packed + new._verts_padded_to_packed_idx = pad_to_packed + + # update edges packed if they are computed in self + if self._edges_packed is not None: + copy_tensors = [ + "_edges_packed", + "_edges_packed_to_mesh_idx", + 
"_mesh_to_edges_packed_first_idx", + "_faces_packed_to_edges_packed", + "_num_edges_per_mesh", + ] + for k in copy_tensors: + v = getattr(self, k) + assert torch.is_tensor(v) + setattr(new, k, v) # shallow copy + + # update laplacian if it is compute in self + if self._laplacian_packed is not None: + new._laplacian_packed = self._laplacian_packed + + assert new._verts_list is None + assert new._verts_normals_packed is None + assert new._faces_normals_packed is None + assert new._faces_areas_packed is None + + return new + + # TODO(nikhilar) Move function to utils file. + def get_bounding_boxes(self): + """ + Compute an axis-aligned bounding box for each mesh in this Meshes object. + + Returns: + bboxes: Tensor of shape (N, 3, 2) where bbox[i, j] gives the + min and max values of mesh i along the jth coordinate axis. + """ + all_mins, all_maxes = [], [] + for verts in self.verts_list(): + cur_mins = verts.min(dim=0)[0] # (3,) + cur_maxes = verts.max(dim=0)[0] # (3,) + all_mins.append(cur_mins) + all_maxes.append(cur_maxes) + all_mins = torch.stack(all_mins, dim=0) # (N, 3) + all_maxes = torch.stack(all_maxes, dim=0) # (N, 3) + bboxes = torch.stack([all_mins, all_maxes], dim=2) + return bboxes + + def extend(self, N: int): + """ + Create new Meshes class which contains each input mesh N times + + Args: + N: number of new copies of each mesh. + + Returns: + new Meshes object. 
+ """ + if not isinstance(N, int): + raise ValueError("N must be an integer.") + if N <= 0: + raise ValueError("N must be > 0.") + new_verts_list, new_faces_list = [], [] + for verts, faces in zip(self.verts_list(), self.faces_list()): + new_verts_list.extend(verts.clone() for _ in range(N)) + new_faces_list.extend(faces.clone() for _ in range(N)) + + tex = None + if self.textures is not None: + tex = self.textures.extend(N) + + return self.__class__(verts=new_verts_list, faces=new_faces_list, textures=tex) + + def sample_textures(self, fragments): + if self.textures is not None: + + # Check dimensions of textures match that of meshes + shape_ok = self.textures.check_shapes(self._N, self._V, self._F) + if not shape_ok: + msg = "Textures do not match the dimensions of Meshes." + raise ValueError(msg) + + # Pass in faces packed. If the textures are defined per + # vertex, the face indices are needed in order to interpolate + # the vertex attributes across the face. + return self.textures.sample_textures( + fragments, faces_packed=self.faces_packed() + ) + else: + raise ValueError("Meshes does not have textures") + + def submeshes( + self, + face_indices: Union[ + List[List[torch.LongTensor]], List[torch.LongTensor], torch.LongTensor + ], + ) -> "Meshes": + """ + Split a batch of meshes into a batch of submeshes. + + The return value is a Meshes object representing + [mesh restricted to only faces indexed by selected_faces + for mesh, selected_faces_list in zip(self, face_indices) + for faces in selected_faces_list] + + Args: + face_indices: + Let the original mesh have verts_list() of length N. + Can be either + - List of lists of LongTensors. The n-th element is a list of length + num_submeshes_n (empty lists are allowed). The k-th element of the n-th + sublist is a LongTensor of length num_faces_submesh_n_k. + - List of LongTensors. The n-th element is a (possibly empty) LongTensor + of shape (num_submeshes_n, num_faces_n). 
+ - A LongTensor of shape (N, num_submeshes_per_mesh, num_faces_per_submesh) + where all meshes in the batch will have the same number of submeshes. + This will result in an output Meshes object with batch size equal to + N * num_submeshes_per_mesh. + + Returns: + Meshes object of length `sum(len(ids) for ids in face_indices)`. + + Example 1: + + If `meshes` has batch size 1, and `face_indices` is a 1D LongTensor, + then `meshes.submeshes([[face_indices]]) and + `meshes.submeshes(face_indices[None, None])` both produce a Meshes of length 1, + containing a single submesh with a subset of `meshes`' faces, whose indices are + specified by `face_indices`. + + Example 2: + + Take a Meshes object `cubes` with 4 meshes, each a translated cube. Then: + * len(cubes) is 4, len(cubes.verts_list()) is 4, len(cubes.faces_list()) 4, + * [cube_verts.size for cube_verts in cubes.verts_list()] is [8, 8, 8, 8], + * [cube_faces.size for cube_faces in cubes.faces_list()] if [6, 6, 6, 6], + + Now let front_facet, top_and_bottom, all_facets be LongTensors of + sizes (2), (4), and (12), each picking up a number of facets of a cube by + specifying the appropriate triangular faces. + + Then let `subcubes = cubes.submeshes([[front_facet, top_and_bottom], [], + [all_facets], []])`. + * len(subcubes) is 3. + * subcubes[0] is the front facet of the cube contained in cubes[0]. + * subcubes[1] is a mesh containing the (disconnected) top and bottom facets + of cubes[0]. + * subcubes[2] is cubes[2]. + * There are no submeshes of cubes[1] and cubes[3] in subcubes. + * subcubes[0] and subcubes[1] are not watertight. subcubes[2] is. + """ + if len(face_indices) != len(self): + raise ValueError( + "You must specify exactly one set of submeshes" + " for each mesh in this Meshes object." 
+ ) + + sub_verts = [] + sub_verts_ids = [] + sub_faces = [] + + for face_ids_per_mesh, faces, verts in zip( + face_indices, self.faces_list(), self.verts_list() + ): + sub_verts_ids.append([]) + for submesh_face_ids in face_ids_per_mesh: + faces_to_keep = faces[submesh_face_ids] + + # Say we are keeping two faces from a mesh with six vertices: + # faces_to_keep = [[0, 6, 4], + # [0, 2, 6]] + # Then we want verts_to_keep to contain only vertices [0, 2, 4, 6]: + vertex_ids_to_keep = torch.unique(faces_to_keep, sorted=True) + sub_verts.append(verts[vertex_ids_to_keep]) + sub_verts_ids[-1].append(vertex_ids_to_keep) + + # Now, convert faces_to_keep to use the new vertex ids. + # In our example, instead of + # [[0, 6, 4], + # [0, 2, 6]] + # we want faces_to_keep to be + # [[0, 3, 2], + # [0, 1, 3]], + # as each point id got reduced to its sort rank. + _, ids_of_unique_ids_in_sorted = torch.unique( + faces_to_keep, return_inverse=True + ) + sub_faces.append(ids_of_unique_ids_in_sorted) + + return self.__class__( + verts=sub_verts, + faces=sub_faces, + textures=( + self.textures.submeshes(sub_verts_ids, face_indices) + if self.textures + else None + ), + ) + + +def join_meshes_as_batch(meshes: List[Meshes], include_textures: bool = True) -> Meshes: + """ + Merge multiple Meshes objects, i.e. concatenate the meshes objects. They + must all be on the same device. If include_textures is true, they must all + be compatible, either all or none having textures, and all the Textures + objects being the same type. If include_textures is False, textures are + ignored. + + If the textures are TexturesAtlas then being the same type includes having + the same resolution. If they are TexturesUV then it includes having the same + align_corners and padding_mode. + + Args: + meshes: list of meshes. + include_textures: (bool) whether to try to join the textures. + + Returns: + new Meshes object containing all the meshes from all the inputs. 
+ """ + if isinstance(meshes, Meshes): + # Meshes objects can be iterated and produce single Meshes. We avoid + # letting join_meshes_as_batch(mesh1, mesh2) silently do the wrong thing. + raise ValueError("Wrong first argument to join_meshes_as_batch.") + verts = [v for mesh in meshes for v in mesh.verts_list()] + faces = [f for mesh in meshes for f in mesh.faces_list()] + if len(meshes) == 0 or not include_textures: + return Meshes(verts=verts, faces=faces) + + if meshes[0].textures is None: + if any(mesh.textures is not None for mesh in meshes): + raise ValueError("Inconsistent textures in join_meshes_as_batch.") + return Meshes(verts=verts, faces=faces) + + if any(mesh.textures is None for mesh in meshes): + raise ValueError("Inconsistent textures in join_meshes_as_batch.") + + # Now we know there are multiple meshes and they have textures to merge. + all_textures = [mesh.textures for mesh in meshes] + first = all_textures[0] + tex_types_same = all(type(tex) == type(first) for tex in all_textures) # noqa: E721 + + if not tex_types_same: + raise ValueError("All meshes in the batch must have the same type of texture.") + + tex = first.join_batch(all_textures[1:]) + return Meshes(verts=verts, faces=faces, textures=tex) + + +def join_meshes_as_scene( + meshes: Union[Meshes, List[Meshes]], include_textures: bool = True +) -> Meshes: + """ + Joins a batch of meshes in the form of a Meshes object or a list of Meshes + objects as a single mesh. If the input is a list, the Meshes objects in the + list must all be on the same device. Unless include_textures is False, the + meshes must all have the same type of texture or must all not have textures. + + If textures are included, then the textures are joined as a single scene in + addition to the meshes. For this, texture types have an appropriate method + called join_scene which joins mesh textures into a single texture. + If the textures are TexturesAtlas then they must have the same resolution. 
+ If they are TexturesUV then they must have the same align_corners and + padding_mode. Values in verts_uvs outside [0, 1] will not + be respected. + + Args: + meshes: Meshes object that contains a batch of meshes, or a list of + Meshes objects. + include_textures: (bool) whether to try to join the textures. + + Returns: + new Meshes object containing a single mesh + """ + if not isinstance(include_textures, (bool, int)): + # We want to avoid letting join_meshes_as_scene(mesh1, mesh2) silently + # do the wrong thing. + raise ValueError( + f"include_textures argument cannot be {type(include_textures)}" + ) + if isinstance(meshes, List): + meshes = join_meshes_as_batch(meshes, include_textures=include_textures) + + if len(meshes) == 1: + return meshes + verts = meshes.verts_packed() # (sum(V_n), 3) + # Offset automatically done by faces_packed + faces = meshes.faces_packed() # (sum(F_n), 3) + textures = None + + if include_textures and meshes.textures is not None: + textures = meshes.textures.join_scene() + + mesh = Meshes(verts=verts.unsqueeze(0), faces=faces.unsqueeze(0), textures=textures) + return mesh diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/pointclouds.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/pointclouds.py new file mode 100644 index 0000000000000000000000000000000000000000..f699881c5b41fed04442b59611857949331e93f9 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/pointclouds.py @@ -0,0 +1,1305 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + +from itertools import zip_longest +from typing import List, Optional, Sequence, Tuple, Union + +import numpy as np +import torch + +from ..common.datatypes import Device, make_device +from . import utils as struct_utils + + +class Pointclouds: + """ + This class provides functions for working with batches of 3d point clouds, + and converting between representations. + + Within Pointclouds, there are three different representations of the data. + + List + - only used for input as a starting point to convert to other representations. + Padded + - has specific batch dimension. + Packed + - no batch dimension. + - has auxiliary variables used to index into the padded representation. + + Example + + Input list of points = [[P_1], [P_2], ... , [P_N]] + where P_1, ... , P_N are the number of points in each cloud and N is the + number of clouds. + + # SPHINX IGNORE + List | Padded | Packed + ---------------------------|-------------------------|------------------------ + [[P_1], ... 
, [P_N]] | size = (N, max(P_n), 3) | size = (sum(P_n), 3) + | | + Example for locations | | + or colors: | | + | | + P_1 = 3, P_2 = 4, P_3 = 5 | size = (3, 5, 3) | size = (12, 3) + | | + List([ | tensor([ | tensor([ + [ | [ | [0.1, 0.3, 0.5], + [0.1, 0.3, 0.5], | [0.1, 0.3, 0.5], | [0.5, 0.2, 0.1], + [0.5, 0.2, 0.1], | [0.5, 0.2, 0.1], | [0.6, 0.8, 0.7], + [0.6, 0.8, 0.7] | [0.6, 0.8, 0.7], | [0.1, 0.3, 0.3], + ], | [0, 0, 0], | [0.6, 0.7, 0.8], + [ | [0, 0, 0] | [0.2, 0.3, 0.4], + [0.1, 0.3, 0.3], | ], | [0.1, 0.5, 0.3], + [0.6, 0.7, 0.8], | [ | [0.7, 0.3, 0.6], + [0.2, 0.3, 0.4], | [0.1, 0.3, 0.3], | [0.2, 0.4, 0.8], + [0.1, 0.5, 0.3] | [0.6, 0.7, 0.8], | [0.9, 0.5, 0.2], + ], | [0.2, 0.3, 0.4], | [0.2, 0.3, 0.4], + [ | [0.1, 0.5, 0.3], | [0.9, 0.3, 0.8], + [0.7, 0.3, 0.6], | [0, 0, 0] | ]) + [0.2, 0.4, 0.8], | ], | + [0.9, 0.5, 0.2], | [ | + [0.2, 0.3, 0.4], | [0.7, 0.3, 0.6], | + [0.9, 0.3, 0.8], | [0.2, 0.4, 0.8], | + ] | [0.9, 0.5, 0.2], | + ]) | [0.2, 0.3, 0.4], | + | [0.9, 0.3, 0.8] | + | ] | + | ]) | + ----------------------------------------------------------------------------- + + Auxiliary variables for packed representation + + Name | Size | Example from above + -------------------------------|---------------------|----------------------- + | | + packed_to_cloud_idx | size = (sum(P_n)) | tensor([ + | | 0, 0, 0, 1, 1, 1, + | | 1, 2, 2, 2, 2, 2 + | | )] + | | size = (12) + | | + cloud_to_packed_first_idx | size = (N) | tensor([0, 3, 7]) + | | size = (3) + | | + num_points_per_cloud | size = (N) | tensor([3, 4, 5]) + | | size = (3) + | | + padded_to_packed_idx | size = (sum(P_n)) | tensor([ + | | 0, 1, 2, 5, 6, 7, + | | 8, 10, 11, 12, 13, + | | 14 + | | )] + | | size = (12) + ----------------------------------------------------------------------------- + # SPHINX IGNORE + """ + + _INTERNAL_TENSORS = [ + "_points_packed", + "_points_padded", + "_normals_packed", + "_normals_padded", + "_features_packed", + "_features_padded", + "_packed_to_cloud_idx", + 
"_cloud_to_packed_first_idx", + "_num_points_per_cloud", + "_padded_to_packed_idx", + "valid", + "equisized", + ] + + def __init__(self, points, normals=None, features=None) -> None: + """ + Args: + points: + Can be either + + - List where each element is a tensor of shape (num_points, 3) + containing the (x, y, z) coordinates of each point. + - Padded float tensor with shape (num_clouds, num_points, 3). + normals: + Can be either + + - None + - List where each element is a tensor of shape (num_points, 3) + containing the normal vector for each point. + - Padded float tensor of shape (num_clouds, num_points, 3). + features: + Can be either + + - None + - List where each element is a tensor of shape (num_points, C) + containing the features for the points in the cloud. + - Padded float tensor of shape (num_clouds, num_points, C). + where C is the number of channels in the features. + For example 3 for RGB color. + + Refer to comments above for descriptions of List and Padded + representations. + """ + self.device = torch.device("cpu") + + # Indicates whether the clouds in the list/batch have the same number + # of points. + self.equisized = False + + # Boolean indicator for each cloud in the batch. + # True if cloud has non zero number of points, False otherwise. + self.valid = None + + self._N = 0 # batch size (number of clouds) + self._P = 0 # (max) number of points per cloud + self._C = None # number of channels in the features + + # List of Tensors of points and features. + self._points_list = None + self._normals_list = None + self._features_list = None + + # Number of points per cloud. + self._num_points_per_cloud = None # N + + # Packed representation. + self._points_packed = None # (sum(P_n), 3) + self._normals_packed = None # (sum(P_n), 3) + self._features_packed = None # (sum(P_n), C) + + self._packed_to_cloud_idx = None # sum(P_n) + + # Index of each cloud's first point in the packed points. + # Assumes packing is sequential. 
+ self._cloud_to_packed_first_idx = None # N + + # Padded representation. + self._points_padded = None # (N, max(P_n), 3) + self._normals_padded = None # (N, max(P_n), 3) + self._features_padded = None # (N, max(P_n), C) + + # Index to convert points from flattened padded to packed. + self._padded_to_packed_idx = None # N * max_P + + # Identify type of points. + if isinstance(points, list): + self._points_list = points + self._N = len(self._points_list) + self.valid = torch.zeros((self._N,), dtype=torch.bool, device=self.device) + + if self._N > 0: + self.device = self._points_list[0].device + for p in self._points_list: + if len(p) > 0 and (p.dim() != 2 or p.shape[1] != 3): + raise ValueError("Clouds in list must be of shape Px3 or empty") + if p.device != self.device: + raise ValueError("All points must be on the same device") + + num_points_per_cloud = torch.tensor( + [len(p) for p in self._points_list], device=self.device + ) + self._P = int(num_points_per_cloud.max()) + self.valid = torch.tensor( + [len(p) > 0 for p in self._points_list], + dtype=torch.bool, + device=self.device, + ) + + if len(num_points_per_cloud.unique()) == 1: + self.equisized = True + self._num_points_per_cloud = num_points_per_cloud + else: + self._num_points_per_cloud = torch.tensor([], dtype=torch.int64) + + elif torch.is_tensor(points): + if points.dim() != 3 or points.shape[2] != 3: + raise ValueError("Points tensor has incorrect dimensions.") + self._points_padded = points + self._N = self._points_padded.shape[0] + self._P = self._points_padded.shape[1] + self.device = self._points_padded.device + self.valid = torch.ones((self._N,), dtype=torch.bool, device=self.device) + self._num_points_per_cloud = torch.tensor( + [self._P] * self._N, device=self.device + ) + self.equisized = True + else: + raise ValueError( + "Points must be either a list or a tensor with \ + shape (batch_size, P, 3) where P is the maximum number of \ + points in a cloud." 
+ ) + + # parse normals + normals_parsed = self._parse_auxiliary_input(normals) + self._normals_list, self._normals_padded, normals_C = normals_parsed + if normals_C is not None and normals_C != 3: + raise ValueError("Normals are expected to be 3-dimensional") + + # parse features + features_parsed = self._parse_auxiliary_input(features) + self._features_list, self._features_padded, features_C = features_parsed + if features_C is not None: + self._C = features_C + + def _parse_auxiliary_input( + self, aux_input + ) -> Tuple[Optional[List[torch.Tensor]], Optional[torch.Tensor], Optional[int]]: + """ + Interpret the auxiliary inputs (normals, features) given to __init__. + + Args: + aux_input: + Can be either + + - List where each element is a tensor of shape (num_points, C) + containing the features for the points in the cloud. + - Padded float tensor of shape (num_clouds, num_points, C). + For normals, C = 3 + + Returns: + 3-element tuple of list, padded, num_channels. + If aux_input is list, then padded is None. If aux_input is a tensor, + then list is None. + """ + if aux_input is None or self._N == 0: + return None, None, None + + aux_input_C = None + + if isinstance(aux_input, list): + return self._parse_auxiliary_input_list(aux_input) + if torch.is_tensor(aux_input): + if aux_input.dim() != 3: + raise ValueError("Auxiliary input tensor has incorrect dimensions.") + if self._N != aux_input.shape[0]: + raise ValueError("Points and inputs must be the same length.") + if self._P != aux_input.shape[1]: + raise ValueError( + "Inputs tensor must have the right maximum \ + number of points in each cloud." + ) + if aux_input.device != self.device: + raise ValueError( + "All auxiliary inputs must be on the same device as the points." 
+ ) + aux_input_C = aux_input.shape[2] + return None, aux_input, aux_input_C + else: + raise ValueError( + "Auxiliary input must be either a list or a tensor with \ + shape (batch_size, P, C) where P is the maximum number of \ + points in a cloud." + ) + + def _parse_auxiliary_input_list( + self, aux_input: list + ) -> Tuple[Optional[List[torch.Tensor]], None, Optional[int]]: + """ + Interpret the auxiliary inputs (normals, features) given to __init__, + if a list. + + Args: + aux_input: + - List where each element is a tensor of shape (num_points, C) + containing the features for the points in the cloud. + For normals, C = 3 + + Returns: + 3-element tuple of list, padded=None, num_channels. + If aux_input is list, then padded is None. If aux_input is a tensor, + then list is None. + """ + aux_input_C = None + good_empty = None + needs_fixing = False + + if len(aux_input) != self._N: + raise ValueError("Points and auxiliary input must be the same length.") + for p, d in zip(self._num_points_per_cloud, aux_input): + valid_but_empty = p == 0 and d is not None and d.ndim == 2 + if p > 0 or valid_but_empty: + if p != d.shape[0]: + raise ValueError( + "A cloud has mismatched numbers of points and inputs" + ) + if d.dim() != 2: + raise ValueError( + "A cloud auxiliary input must be of shape PxC or empty" + ) + if aux_input_C is None: + aux_input_C = d.shape[1] + elif aux_input_C != d.shape[1]: + raise ValueError("The clouds must have the same number of channels") + if d.device != self.device: + raise ValueError( + "All auxiliary inputs must be on the same device as the points." + ) + else: + needs_fixing = True + + if aux_input_C is None: + # We found nothing useful + return None, None, None + + # If we have empty but "wrong" inputs we want to store "fixed" versions. 
+ if needs_fixing: + if good_empty is None: + good_empty = torch.zeros((0, aux_input_C), device=self.device) + aux_input_out = [] + for p, d in zip(self._num_points_per_cloud, aux_input): + valid_but_empty = p == 0 and d is not None and d.ndim == 2 + if p > 0 or valid_but_empty: + aux_input_out.append(d) + else: + aux_input_out.append(good_empty) + else: + aux_input_out = aux_input + + return aux_input_out, None, aux_input_C + + def __len__(self) -> int: + return self._N + + def __getitem__( + self, + index: Union[int, List[int], slice, torch.BoolTensor, torch.LongTensor], + ) -> "Pointclouds": + """ + Args: + index: Specifying the index of the cloud to retrieve. + Can be an int, slice, list of ints or a boolean tensor. + + Returns: + Pointclouds object with selected clouds. The tensors are not cloned. + """ + normals, features = None, None + normals_list = self.normals_list() + features_list = self.features_list() + if isinstance(index, int): + points = [self.points_list()[index]] + if normals_list is not None: + normals = [normals_list[index]] + if features_list is not None: + features = [features_list[index]] + elif isinstance(index, slice): + points = self.points_list()[index] + if normals_list is not None: + normals = normals_list[index] + if features_list is not None: + features = features_list[index] + elif isinstance(index, list): + points = [self.points_list()[i] for i in index] + if normals_list is not None: + normals = [normals_list[i] for i in index] + if features_list is not None: + features = [features_list[i] for i in index] + elif isinstance(index, torch.Tensor): + if index.dim() != 1 or index.dtype.is_floating_point: + raise IndexError(index) + # NOTE consider converting index to cpu for efficiency + if index.dtype == torch.bool: + # advanced indexing on a single dimension + index = index.nonzero() + index = index.squeeze(1) if index.numel() > 0 else index + index = index.tolist() + points = [self.points_list()[i] for i in index] + if normals_list 
is not None: + normals = [normals_list[i] for i in index] + if features_list is not None: + features = [features_list[i] for i in index] + else: + raise IndexError(index) + + return self.__class__(points=points, normals=normals, features=features) + + def isempty(self) -> bool: + """ + Checks whether any cloud is valid. + + Returns: + bool indicating whether there is any data. + """ + return self._N == 0 or self.valid.eq(False).all() + + def points_list(self) -> List[torch.Tensor]: + """ + Get the list representation of the points. + + Returns: + list of tensors of points of shape (P_n, 3). + """ + if self._points_list is None: + assert ( + self._points_padded is not None + ), "points_padded is required to compute points_list." + points_list = [] + for i in range(self._N): + points_list.append( + self._points_padded[i, : self.num_points_per_cloud()[i]] + ) + self._points_list = points_list + return self._points_list + + def normals_list(self) -> Optional[List[torch.Tensor]]: + """ + Get the list representation of the normals, + or None if there are no normals. + + Returns: + list of tensors of normals of shape (P_n, 3). + """ + if self._normals_list is None: + if self._normals_padded is None: + # No normals provided so return None + return None + self._normals_list = struct_utils.padded_to_list( + self._normals_padded, self.num_points_per_cloud().tolist() + ) + return self._normals_list + + def features_list(self) -> Optional[List[torch.Tensor]]: + """ + Get the list representation of the features, + or None if there are no features. + + Returns: + list of tensors of features of shape (P_n, C). + """ + if self._features_list is None: + if self._features_padded is None: + # No features provided so return None + return None + self._features_list = struct_utils.padded_to_list( + self._features_padded, self.num_points_per_cloud().tolist() + ) + return self._features_list + + def points_packed(self) -> torch.Tensor: + """ + Get the packed representation of the points. 

        Returns:
            tensor of points of shape (sum(P_n), 3).
        """
        self._compute_packed()
        return self._points_packed

    def normals_packed(self) -> Optional[torch.Tensor]:
        """
        Get the packed representation of the normals.

        Returns:
            tensor of normals of shape (sum(P_n), 3),
            or None if there are no normals.
        """
        self._compute_packed()
        return self._normals_packed

    def features_packed(self) -> Optional[torch.Tensor]:
        """
        Get the packed representation of the features.

        Returns:
            tensor of features of shape (sum(P_n), C),
            or None if there are no features
        """
        self._compute_packed()
        return self._features_packed

    def packed_to_cloud_idx(self):
        """
        Return a 1D tensor x with length equal to the total number of points.
        packed_to_cloud_idx()[i] gives the index of the cloud which contains
        points_packed()[i].

        Returns:
            1D tensor of indices.
        """
        self._compute_packed()
        return self._packed_to_cloud_idx

    def cloud_to_packed_first_idx(self):
        """
        Return a 1D tensor x with length equal to the number of clouds such that
        the first point of the ith cloud is points_packed[x[i]].

        Returns:
            1D tensor of indices of first items.
        """
        self._compute_packed()
        return self._cloud_to_packed_first_idx

    def num_points_per_cloud(self) -> torch.Tensor:
        """
        Return a 1D tensor x with length equal to the number of clouds giving
        the number of points in each cloud.

        Returns:
            1D tensor of sizes.
        """
        return self._num_points_per_cloud

    def points_padded(self) -> torch.Tensor:
        """
        Get the padded representation of the points.

        Returns:
            tensor of points of shape (N, max(P_n), 3).
        """
        self._compute_padded()
        return self._points_padded

    def normals_padded(self) -> Optional[torch.Tensor]:
        """
        Get the padded representation of the normals,
        or None if there are no normals.

        Returns:
            tensor of normals of shape (N, max(P_n), 3).
        """
        self._compute_padded()
        return self._normals_padded

    def features_padded(self) -> Optional[torch.Tensor]:
        """
        Get the padded representation of the features,
        or None if there are no features.

        Returns:
            tensor of features of shape (N, max(P_n), C).
        """
        self._compute_padded()
        return self._features_padded

    def padded_to_packed_idx(self):
        """
        Return a 1D tensor x with length equal to the total number of points
        such that points_packed()[i] is element x[i] of the flattened padded
        representation.
        The packed representation can be calculated as follows.

        .. code-block:: python

            p = points_padded().reshape(-1, 3)
            points_packed = p[x]

        Returns:
            1D tensor of indices.
        """
        if self._padded_to_packed_idx is not None:
            return self._padded_to_packed_idx
        if self._N == 0:
            # NOTE(review): this caches a plain Python list [] rather than an
            # empty int64 tensor — inconsistent with the non-empty branch;
            # confirm callers tolerate the list return for empty batches.
            self._padded_to_packed_idx = []
        else:
            # For cloud i, row i of the padded tensor starts at flat offset
            # i * max(P_n); keep only the first P_i entries of each row.
            self._padded_to_packed_idx = torch.cat(
                [
                    torch.arange(v, dtype=torch.int64, device=self.device) + i * self._P
                    for (i, v) in enumerate(self.num_points_per_cloud())
                ],
                dim=0,
            )
        return self._padded_to_packed_idx

    def _compute_padded(self, refresh: bool = False):
        """
        Computes the padded version from points_list, normals_list and features_list.

        Args:
            refresh: whether to force the recalculation.
+ """ + if not (refresh or self._points_padded is None): + return + + self._normals_padded, self._features_padded = None, None + if self.isempty(): + self._points_padded = torch.zeros((self._N, 0, 3), device=self.device) + else: + self._points_padded = struct_utils.list_to_padded( + self.points_list(), + (self._P, 3), + pad_value=0.0, + equisized=self.equisized, + ) + normals_list = self.normals_list() + if normals_list is not None: + self._normals_padded = struct_utils.list_to_padded( + normals_list, + (self._P, 3), + pad_value=0.0, + equisized=self.equisized, + ) + features_list = self.features_list() + if features_list is not None: + self._features_padded = struct_utils.list_to_padded( + features_list, + (self._P, self._C), + pad_value=0.0, + equisized=self.equisized, + ) + + # TODO(nikhilar) Improve performance of _compute_packed. + def _compute_packed(self, refresh: bool = False): + """ + Computes the packed version from points_list, normals_list and + features_list and sets the values of auxiliary tensors. + + Args: + refresh: Set to True to force recomputation of packed + representations. Default: False. + """ + + if not ( + refresh + or any( + v is None + for v in [ + self._points_packed, + self._packed_to_cloud_idx, + self._cloud_to_packed_first_idx, + ] + ) + ): + return + + # Packed can be calculated from padded or list, so can call the + # accessor function for the lists. 
+ points_list = self.points_list() + normals_list = self.normals_list() + features_list = self.features_list() + if self.isempty(): + self._points_packed = torch.zeros( + (0, 3), dtype=torch.float32, device=self.device + ) + self._packed_to_cloud_idx = torch.zeros( + (0,), dtype=torch.int64, device=self.device + ) + self._cloud_to_packed_first_idx = torch.zeros( + (0,), dtype=torch.int64, device=self.device + ) + self._normals_packed = None + self._features_packed = None + return + + points_list_to_packed = struct_utils.list_to_packed(points_list) + self._points_packed = points_list_to_packed[0] + if not torch.allclose(self._num_points_per_cloud, points_list_to_packed[1]): + raise ValueError("Inconsistent list to packed conversion") + self._cloud_to_packed_first_idx = points_list_to_packed[2] + self._packed_to_cloud_idx = points_list_to_packed[3] + + self._normals_packed, self._features_packed = None, None + if normals_list is not None: + normals_list_to_packed = struct_utils.list_to_packed(normals_list) + self._normals_packed = normals_list_to_packed[0] + + if features_list is not None: + features_list_to_packed = struct_utils.list_to_packed(features_list) + self._features_packed = features_list_to_packed[0] + + def clone(self): + """ + Deep copy of Pointclouds object. All internal tensors are cloned + individually. + + Returns: + new Pointclouds object. + """ + # instantiate new pointcloud with the representation which is not None + # (either list or tensor) to save compute. 
        new_points, new_normals, new_features = None, None, None
        if self._points_list is not None:
            # Clone from the list representation.
            new_points = [v.clone() for v in self.points_list()]
            normals_list = self.normals_list()
            features_list = self.features_list()
            if normals_list is not None:
                new_normals = [n.clone() for n in normals_list]
            if features_list is not None:
                new_features = [f.clone() for f in features_list]
        elif self._points_padded is not None:
            # Clone from the padded representation.
            new_points = self.points_padded().clone()
            normals_padded = self.normals_padded()
            features_padded = self.features_padded()
            if normals_padded is not None:
                new_normals = self.normals_padded().clone()
            if features_padded is not None:
                new_features = self.features_padded().clone()
        other = self.__class__(
            points=new_points, normals=new_normals, features=new_features
        )
        # Copy any cached auxiliary tensors so they need not be recomputed.
        for k in self._INTERNAL_TENSORS:
            v = getattr(self, k)
            if torch.is_tensor(v):
                setattr(other, k, v.clone())
        return other

    def detach(self):
        """
        Detach Pointclouds object. All internal tensors are detached
        individually.

        Returns:
            new Pointclouds object.
        """
        # instantiate new pointcloud with the representation which is not None
        # (either list or tensor) to save compute.
        new_points, new_normals, new_features = None, None, None
        if self._points_list is not None:
            new_points = [v.detach() for v in self.points_list()]
            normals_list = self.normals_list()
            features_list = self.features_list()
            if normals_list is not None:
                new_normals = [n.detach() for n in normals_list]
            if features_list is not None:
                new_features = [f.detach() for f in features_list]
        elif self._points_padded is not None:
            new_points = self.points_padded().detach()
            normals_padded = self.normals_padded()
            features_padded = self.features_padded()
            if normals_padded is not None:
                new_normals = self.normals_padded().detach()
            if features_padded is not None:
                new_features = self.features_padded().detach()
        other = self.__class__(
            points=new_points, normals=new_normals, features=new_features
        )
        # Detached views of the cached auxiliary tensors.
        for k in self._INTERNAL_TENSORS:
            v = getattr(self, k)
            if torch.is_tensor(v):
                setattr(other, k, v.detach())
        return other

    def to(self, device: Device, copy: bool = False):
        """
        Match functionality of torch.Tensor.to()
        If copy = True or the self Tensor is on a different device, the
        returned tensor is a copy of self with the desired torch.device.
        If copy = False and the self Tensor already has the correct torch.device,
        then self is returned.

        Args:
            device: Device (as str or torch.device) for the new tensor.
            copy: Boolean indicator whether or not to clone self.  Default False.

        Returns:
            Pointclouds object.
+ """ + device_ = make_device(device) + + if not copy and self.device == device_: + return self + + other = self.clone() + if self.device == device_: + return other + + other.device = device_ + if other._N > 0: + other._points_list = [v.to(device_) for v in other.points_list()] + if other._normals_list is not None: + other._normals_list = [n.to(device_) for n in other.normals_list()] + if other._features_list is not None: + other._features_list = [f.to(device_) for f in other.features_list()] + for k in self._INTERNAL_TENSORS: + v = getattr(self, k) + if torch.is_tensor(v): + setattr(other, k, v.to(device_)) + return other + + def cpu(self): + return self.to("cpu") + + def cuda(self): + return self.to("cuda") + + def get_cloud(self, index: int): + """ + Get tensors for a single cloud from the list representation. + + Args: + index: Integer in the range [0, N). + + Returns: + points: Tensor of shape (P, 3). + normals: Tensor of shape (P, 3) + features: LongTensor of shape (P, C). + """ + if not isinstance(index, int): + raise ValueError("Cloud index must be an integer.") + if index < 0 or index > self._N: + raise ValueError( + "Cloud index must be in the range [0, N) where \ + N is the number of clouds in the batch." + ) + points = self.points_list()[index] + normals, features = None, None + normals_list = self.normals_list() + if normals_list is not None: + normals = normals_list[index] + features_list = self.features_list() + if features_list is not None: + features = features_list[index] + return points, normals, features + + # TODO(nikhilar) Move function to a utils file. + def split(self, split_sizes: list): + """ + Splits Pointclouds object of size N into a list of Pointclouds objects + of size len(split_sizes), where the i-th Pointclouds object is of size + split_sizes[i]. Similar to torch.split(). + + Args: + split_sizes: List of integer sizes of Pointclouds objects to be + returned. + + Returns: + list[Pointclouds]. 
+ """ + if not all(isinstance(x, int) for x in split_sizes): + raise ValueError("Value of split_sizes must be a list of integers.") + cloudlist = [] + curi = 0 + for i in split_sizes: + cloudlist.append(self[curi : curi + i]) + curi += i + return cloudlist + + def offset_(self, offsets_packed): + """ + Translate the point clouds by an offset. In place operation. + + Args: + offsets_packed: A Tensor of shape (3,) or the same shape + as self.points_packed giving offsets to be added to + all points. + + Returns: + self. + """ + points_packed = self.points_packed() + if offsets_packed.shape == (3,): + offsets_packed = offsets_packed.expand_as(points_packed) + if offsets_packed.shape != points_packed.shape: + raise ValueError("Offsets must have dimension (all_p, 3).") + self._points_packed = points_packed + offsets_packed + new_points_list = list( + self._points_packed.split(self.num_points_per_cloud().tolist(), 0) + ) + # Note that since _compute_packed() has been executed, points_list + # cannot be None even if not provided during construction. + self._points_list = new_points_list + if self._points_padded is not None: + for i, points in enumerate(new_points_list): + if len(points) > 0: + self._points_padded[i, : points.shape[0], :] = points + return self + + # TODO(nikhilar) Move out of place operator to a utils file. + def offset(self, offsets_packed): + """ + Out of place offset. + + Args: + offsets_packed: A Tensor of the same shape as self.points_packed + giving offsets to be added to all points. + Returns: + new Pointclouds object. + """ + new_clouds = self.clone() + return new_clouds.offset_(offsets_packed) + + def subsample(self, max_points: Union[int, Sequence[int]]) -> "Pointclouds": + """ + Subsample each cloud so that it has at most max_points points. + + Args: + max_points: maximum number of points in each cloud. + + Returns: + new Pointclouds object, or self if nothing to be done. 
+ """ + if isinstance(max_points, int): + max_points = [max_points] * len(self) + elif len(max_points) != len(self): + raise ValueError("wrong number of max_points supplied") + if all( + int(n_points) <= int(max_) + for n_points, max_ in zip(self.num_points_per_cloud(), max_points) + ): + return self + + points_list = [] + features_list = [] + normals_list = [] + for max_, n_points, points, features, normals in zip_longest( + map(int, max_points), + map(int, self.num_points_per_cloud()), + self.points_list(), + self.features_list() or (), + self.normals_list() or (), + ): + if n_points > max_: + keep_np = np.random.choice(n_points, max_, replace=False) + keep = torch.tensor(keep_np, device=points.device, dtype=torch.int64) + points = points[keep] + if features is not None: + features = features[keep] + if normals is not None: + normals = normals[keep] + points_list.append(points) + features_list.append(features) + normals_list.append(normals) + + return Pointclouds( + points=points_list, + normals=self.normals_list() and normals_list, + features=self.features_list() and features_list, + ) + + def scale_(self, scale): + """ + Multiply the coordinates of this object by a scalar value. + - i.e. enlarge/dilate + In place operation. + + Args: + scale: A scalar, or a Tensor of shape (N,). + + Returns: + self. + """ + if not torch.is_tensor(scale): + scale = torch.full((len(self),), scale, device=self.device) + new_points_list = [] + points_list = self.points_list() + for i, old_points in enumerate(points_list): + new_points_list.append(scale[i] * old_points) + self._points_list = new_points_list + if self._points_packed is not None: + self._points_packed = torch.cat(new_points_list, dim=0) + if self._points_padded is not None: + for i, points in enumerate(new_points_list): + if len(points) > 0: + self._points_padded[i, : points.shape[0], :] = points + return self + + def scale(self, scale): + """ + Out of place scale_. 

        Args:
            scale: A scalar, or a Tensor of shape (N,).

        Returns:
            new Pointclouds object.
        """
        new_clouds = self.clone()
        return new_clouds.scale_(scale)

    # TODO(nikhilar) Move function to utils file.
    def get_bounding_boxes(self):
        """
        Compute an axis-aligned bounding box for each cloud.

        Returns:
            bboxes: Tensor of shape (N, 3, 2) where bbox[i, j] gives the
            min and max values of cloud i along the jth coordinate axis.
        """
        all_mins, all_maxes = [], []
        for points in self.points_list():
            cur_mins = points.min(dim=0)[0]  # (3,)
            cur_maxes = points.max(dim=0)[0]  # (3,)
            all_mins.append(cur_mins)
            all_maxes.append(cur_maxes)
        all_mins = torch.stack(all_mins, dim=0)  # (N, 3)
        all_maxes = torch.stack(all_maxes, dim=0)  # (N, 3)
        # Stack along a new last dim so bboxes[i, j] = (min_j, max_j).
        bboxes = torch.stack([all_mins, all_maxes], dim=2)
        return bboxes

    def estimate_normals(
        self,
        neighborhood_size: int = 50,
        disambiguate_directions: bool = True,
        assign_to_self: bool = False,
    ):
        """
        Estimates the normals of each point in each cloud and assigns
        them to the internal tensors `self._normals_list` and `self._normals_padded`

        The function uses `ops.estimate_pointcloud_local_coord_frames`
        to estimate the normals. Please refer to that function for more
        detailed information about the implemented algorithm.

        Args:
          **neighborhood_size**: The size of the neighborhood used to estimate the
            geometry around each point.
          **disambiguate_directions**: If `True`, uses the algorithm from [1] to
            ensure sign consistency of the normals of neighboring points.
          **normals**: A tensor of normals for each input point
            of shape `(minibatch, num_point, 3)`.
            If `pointclouds` are of `Pointclouds` class, returns a padded tensor.
          **assign_to_self**: If `True`, assigns the computed normals to the
            internal buffers overwriting any previously stored normals.

        References:
          [1] Tombari, Salti, Di Stefano: Unique Signatures of Histograms for
          Local Surface Description, ECCV 2010.
        """
        # Deferred import to avoid a circular dependency with the ops package.
        from .. import ops

        # estimate the normals
        normals_est = ops.estimate_pointcloud_normals(
            self,
            neighborhood_size=neighborhood_size,
            disambiguate_directions=disambiguate_directions,
        )

        # assign to self
        if assign_to_self:
            _, self._normals_padded, _ = self._parse_auxiliary_input(normals_est)
            # Invalidate the other normals caches so they are rebuilt lazily.
            self._normals_list, self._normals_packed = None, None
            if self._points_list is not None:
                # update self._normals_list
                self.normals_list()
            if self._points_packed is not None:
                # update self._normals_packed
                self._normals_packed = torch.cat(self._normals_list, dim=0)

        return normals_est

    def extend(self, N: int):
        """
        Create new Pointclouds which contains each cloud N times.

        Args:
            N: number of new copies of each cloud.

        Returns:
            new Pointclouds object.
        """
        if not isinstance(N, int):
            raise ValueError("N must be an integer.")
        if N <= 0:
            raise ValueError("N must be > 0.")

        new_points_list, new_normals_list, new_features_list = [], None, None
        # Each cloud is repeated N consecutive times (cloned, not aliased).
        for points in self.points_list():
            new_points_list.extend(points.clone() for _ in range(N))
        normals_list = self.normals_list()
        if normals_list is not None:
            new_normals_list = []
            for normals in normals_list:
                new_normals_list.extend(normals.clone() for _ in range(N))
        features_list = self.features_list()
        if features_list is not None:
            new_features_list = []
            for features in features_list:
                new_features_list.extend(features.clone() for _ in range(N))
        return self.__class__(
            points=new_points_list, normals=new_normals_list, features=new_features_list
        )

    def update_padded(
        self, new_points_padded, new_normals_padded=None, new_features_padded=None
    ):
        """
        Returns a Pointcloud structure with updated padded tensors and copies of
        the auxiliary tensors. This function allows for an update of
        points_padded (and normals and features) without having to explicitly
        convert it to the list representation for heterogeneous batches.

        Args:
            new_points_padded: FloatTensor of shape (N, P, 3)
            new_normals_padded: (optional) FloatTensor of shape (N, P, 3)
            new_features_padded: (optional) FloatTensor of shape (N, P, C)

        Returns:
            Pointcloud with updated padded representations
        """

        def check_shapes(x, size):
            # Validate (batch, num_points, channels) against the expected size;
            # size[2] may be None when the channel count is unconstrained.
            if x.shape[0] != size[0]:
                raise ValueError("new values must have the same batch dimension.")
            if x.shape[1] != size[1]:
                raise ValueError("new values must have the same number of points.")
            if size[2] is not None:
                if x.shape[2] != size[2]:
                    raise ValueError(
                        "new values must have the same number of channels."
                    )

        check_shapes(new_points_padded, [self._N, self._P, 3])
        if new_normals_padded is not None:
            check_shapes(new_normals_padded, [self._N, self._P, 3])
        if new_features_padded is not None:
            check_shapes(new_features_padded, [self._N, self._P, self._C])

        new = self.__class__(
            points=new_points_padded,
            normals=new_normals_padded,
            features=new_features_padded,
        )

        # overwrite the equisized flag
        new.equisized = self.equisized

        # copy normals
        if new_normals_padded is None:
            # If no normals are provided, keep old ones (shallow copy)
            new._normals_list = self._normals_list
            new._normals_padded = self._normals_padded
            new._normals_packed = self._normals_packed

        # copy features
        if new_features_padded is None:
            # If no features are provided, keep old ones (shallow copy)
            new._features_list = self._features_list
            new._features_padded = self._features_padded
            new._features_packed = self._features_packed

        # copy auxiliary tensors
        copy_tensors = [
            "_packed_to_cloud_idx",
            "_cloud_to_packed_first_idx",
            "_num_points_per_cloud",
            "_padded_to_packed_idx",
            "valid",
        ]
        for k in copy_tensors:
            v = getattr(self, k)
            if torch.is_tensor(v):
                setattr(new, k, v)  # shallow copy

        # update points
        new._points_padded = new_points_padded
        assert new._points_list is None
        assert new._points_packed is None

        # update normals and features if provided
        if new_normals_padded is not None:
            new._normals_padded = new_normals_padded
            new._normals_list = None
            new._normals_packed = None
        if new_features_padded is not None:
            new._features_padded = new_features_padded
            new._features_list = None
            new._features_packed = None
        return new

    def inside_box(self, box):
        """
        Finds the points inside a 3D box.

        Args:
            box: FloatTensor of shape (2, 3) or (N, 2, 3) where N is the number
                of clouds.
                box[..., 0, :] gives the min x, y & z.
                box[..., 1, :] gives the max x, y & z.
        Returns:
            idx: BoolTensor of length sum(P_i) indicating whether the packed points are
                within the input box.
        """
        if box.dim() > 3 or box.dim() < 2:
            raise ValueError("Input box must be of shape (2, 3) or (N, 2, 3).")

        if box.dim() == 3 and box.shape[0] != 1 and box.shape[0] != self._N:
            raise ValueError(
                "Input box dimension is incompatible with pointcloud size."
            )

        if box.dim() == 2:
            box = box[None]

        if (box[..., 0, :] > box[..., 1, :]).any():
            raise ValueError("Input box is invalid: min values larger than max values.")

        points_packed = self.points_packed()
        sumP = points_packed.shape[0]

        if box.shape[0] == 1:
            # A single box is shared by all packed points.
            box = box.expand(sumP, 2, 3)
        elif box.shape[0] == self._N:
            # One box per cloud: expand each box to cover that cloud's points.
            box = box.unbind(0)
            box = [
                b.expand(p, 2, 3) for (b, p) in zip(box, self.num_points_per_cloud())
            ]
            box = torch.cat(box, 0)

        # Elementwise AND via multiplication of boolean masks; a point is
        # inside only if all three coordinates fall within [min, max].
        coord_inside = (points_packed >= box[:, 0]) * (points_packed <= box[:, 1])
        return coord_inside.all(dim=-1)


def join_pointclouds_as_batch(pointclouds: Sequence[Pointclouds]) -> Pointclouds:
    """
    Merge a list of Pointclouds objects into a single batched Pointclouds
    object. All pointclouds must be on the same device.
+ + Args: + batch: List of Pointclouds objects each with batch dim [b1, b2, ..., bN] + Returns: + pointcloud: Poinclouds object with all input pointclouds collated into + a single object with batch dim = sum(b1, b2, ..., bN) + """ + if isinstance(pointclouds, Pointclouds) or not isinstance(pointclouds, Sequence): + raise ValueError("Wrong first argument to join_points_as_batch.") + + device = pointclouds[0].device + if not all(p.device == device for p in pointclouds): + raise ValueError("Pointclouds must all be on the same device") + + kwargs = {} + for field in ("points", "normals", "features"): + field_list = [getattr(p, field + "_list")() for p in pointclouds] + if None in field_list: + if field == "points": + raise ValueError("Pointclouds cannot have their points set to None!") + if not all(f is None for f in field_list): + raise ValueError( + f"Pointclouds in the batch have some fields '{field}'" + + " defined and some set to None." + ) + field_list = None + else: + field_list = [p for points in field_list for p in points] + if field == "features" and any( + p.shape[1] != field_list[0].shape[1] for p in field_list[1:] + ): + raise ValueError("Pointclouds must have the same number of features") + kwargs[field] = field_list + + return Pointclouds(**kwargs) + + +def join_pointclouds_as_scene( + pointclouds: Union[Pointclouds, List[Pointclouds]] +) -> Pointclouds: + """ + Joins a batch of point cloud in the form of a Pointclouds object or a list of Pointclouds + objects as a single point cloud. If the input is a list, the Pointclouds objects in the + list must all be on the same device, and they must either all or none have features and + all or none have normals. + + Args: + Pointclouds: Pointclouds object that contains a batch of point clouds, or a list of + Pointclouds objects. 
+ + Returns: + new Pointclouds object containing a single point cloud + """ + if isinstance(pointclouds, list): + pointclouds = join_pointclouds_as_batch(pointclouds) + + if len(pointclouds) == 1: + return pointclouds + points = pointclouds.points_packed() + features = pointclouds.features_packed() + normals = pointclouds.normals_packed() + pointcloud = Pointclouds( + points=points[None], + features=None if features is None else features[None], + normals=None if normals is None else normals[None], + ) + return pointcloud diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/utils.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6c0c4f73012d4b3b9601f69623d8d105e50a0db0 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/utils.py @@ -0,0 +1,242 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from typing import List, Sequence, Tuple, Union + +import torch + + +""" +Util functions for points/verts/faces/volumes. +""" + + +def list_to_padded( + x: Union[List[torch.Tensor], Tuple[torch.Tensor]], + pad_size: Union[Sequence[int], None] = None, + pad_value: float = 0.0, + equisized: bool = False, +) -> torch.Tensor: + r""" + Transforms a list of N tensors each of shape (Si_0, Si_1, ... Si_D) + into: + - a single tensor of shape (N, pad_size(0), pad_size(1), ..., pad_size(D)) + if pad_size is provided + - or a tensor of shape (N, max(Si_0), max(Si_1), ..., max(Si_D)) if pad_size is None. + + Args: + x: list of Tensors + pad_size: list(int) specifying the size of the padded tensor. 
        If `None` (default), the largest size of each dimension
        is set as the `pad_size`.
      pad_value: float value to be used to fill the padded tensor
      equisized: bool indicating whether the items in x are of equal size
        (sometimes this is known and if provided saves computation)

    Returns:
      x_padded: tensor consisting of padded input tensors stored
        over the newly allocated memory.
    """
    if equisized:
        # All tensors share a shape — a plain stack is the padded form.
        return torch.stack(x, 0)

    if not all(torch.is_tensor(y) for y in x):
        raise ValueError("All items have to be instances of a torch.Tensor.")

    # we set the common number of dimensions to the maximum
    # of the dimensionalities of the tensors in the list
    element_ndim = max(y.ndim for y in x)

    # replace empty 1D tensors with empty tensors with a correct number of dimensions
    x = [
        (y.new_zeros([0] * element_ndim) if (y.ndim == 1 and y.nelement() == 0) else y)
        for y in x
    ]

    if any(y.ndim != x[0].ndim for y in x):
        raise ValueError("All items have to have the same number of dimensions!")

    if pad_size is None:
        # Infer the pad size per dimension from the largest non-empty tensor.
        pad_dims = [
            max(y.shape[dim] for y in x if len(y) > 0) for dim in range(x[0].ndim)
        ]
    else:
        if any(len(pad_size) != y.ndim for y in x):
            raise ValueError("Pad size must contain target size for all dimensions.")
        pad_dims = pad_size

    N = len(x)
    x_padded = x[0].new_full((N, *pad_dims), pad_value)
    for i, y in enumerate(x):
        if len(y) > 0:
            # Write each tensor into the top-left corner of its padded slot.
            slices = (i, *(slice(0, y.shape[dim]) for dim in range(y.ndim)))
            x_padded[slices] = y
    return x_padded


def padded_to_list(
    x: torch.Tensor,
    split_size: Union[Sequence[int], Sequence[Sequence[int]], None] = None,
):
    r"""
    Transforms a padded tensor of shape (N, S_1, S_2, ..., S_D) into a list
    of N tensors of shape:
    - (Si_1, Si_2, ..., Si_D) where (Si_1, Si_2, ..., Si_D) is specified in split_size(i)
    - or (S_1, S_2, ..., S_D) if split_size is None
    - or (Si_1, S_2, ..., S_D) if split_size(i) is an integer.

    Args:
      x: tensor
      split_size: optional 1D or 2D list/tuple of ints defining the number of
        items for each tensor.

    Returns:
      x_list: a list of tensors sharing the memory with the input.
    """
    # unbind yields views, not copies — the list shares memory with x.
    x_list = list(x.unbind(0))

    if split_size is None:
        return x_list

    N = len(split_size)
    if x.shape[0] != N:
        raise ValueError("Split size must be of same length as inputs first dimension")

    for i in range(N):
        if isinstance(split_size[i], int):
            # An int trims only the first dimension.
            x_list[i] = x_list[i][: split_size[i]]
        else:
            # A sequence trims one size per dimension.
            slices = tuple(slice(0, s) for s in split_size[i])  # pyre-ignore
            x_list[i] = x_list[i][slices]
    return x_list


def list_to_packed(x: List[torch.Tensor]):
    r"""
    Transforms a list of N tensors each of shape (Mi, K, ...) into a single
    tensor of shape (sum(Mi), K, ...).

    Args:
      x: list of tensors.

    Returns:
        4-element tuple containing

        - **x_packed**: tensor consisting of packed input tensors along the
          1st dimension.
        - **num_items**: tensor of shape N containing Mi for each element in x.
        - **item_packed_first_idx**: tensor of shape N indicating the index of
          the first item belonging to the same element in the original list.
        - **item_packed_to_list_idx**: tensor of shape sum(Mi) containing the
          index of the element in the list the item belongs to.
+ """ + if not x: + raise ValueError("Input list is empty") + device = x[0].device + sizes = [xi.shape[0] for xi in x] + sizes_total = sum(sizes) + num_items = torch.tensor(sizes, dtype=torch.int64, device=device) + item_packed_first_idx = torch.zeros_like(num_items) + item_packed_first_idx[1:] = torch.cumsum(num_items[:-1], dim=0) + item_packed_to_list_idx = torch.arange( + sizes_total, dtype=torch.int64, device=device + ) + item_packed_to_list_idx = ( + torch.bucketize(item_packed_to_list_idx, item_packed_first_idx, right=True) - 1 + ) + x_packed = torch.cat(x, dim=0) + + return x_packed, num_items, item_packed_first_idx, item_packed_to_list_idx + + +def packed_to_list(x: torch.Tensor, split_size: Union[list, int]): + r""" + Transforms a tensor of shape (sum(Mi), K, L, ...) to N set of tensors of + shape (Mi, K, L, ...) where Mi's are defined in split_size + + Args: + x: tensor + split_size: list, tuple or int defining the number of items for each tensor + in the output list. + + Returns: + x_list: A list of Tensors + """ + return x.split(split_size, dim=0) + + +def padded_to_packed( + x: torch.Tensor, + split_size: Union[list, tuple, None] = None, + pad_value: Union[float, int, None] = None, +): + r""" + Transforms a padded tensor of shape (N, M, K) into a packed tensor + of shape: + - (sum(Mi), K) where (Mi, K) are the dimensions of + each of the tensors in the batch and Mi is specified by split_size(i) + - (N*M, K) if split_size is None + + Support only for 3-dimensional input tensor and 1-dimensional split size. + + Args: + x: tensor + split_size: list, tuple or int defining the number of items for each tensor + in the output list. + pad_value: optional value to use to filter the padded values in the input + tensor. + + Only one of split_size or pad_value should be provided, or both can be None. + + Returns: + x_packed: a packed tensor. 
+ """ + if x.ndim != 3: + raise ValueError("Supports only 3-dimensional input tensors") + + N, M, D = x.shape + + if split_size is not None and pad_value is not None: + raise ValueError("Only one of split_size or pad_value should be provided.") + + x_packed = x.reshape(-1, D) # flatten padded + + if pad_value is None and split_size is None: + return x_packed + + # Convert to packed using pad value + if pad_value is not None: + mask = x_packed.ne(pad_value).any(-1) + x_packed = x_packed[mask] + return x_packed + + # Convert to packed using split sizes + # pyre-fixme[6]: Expected `Sized` for 1st param but got `Union[None, + # List[typing.Any], typing.Tuple[typing.Any, ...]]`. + N = len(split_size) + if x.shape[0] != N: + raise ValueError("Split size must be of same length as inputs first dimension") + + # pyre-fixme[16]: `None` has no attribute `__iter__`. + if not all(isinstance(i, int) for i in split_size): + raise ValueError( + "Support only 1-dimensional unbinded tensor. \ + Split size for more dimensions provided" + ) + + padded_to_packed_idx = torch.cat( + [ + torch.arange(v, dtype=torch.int64, device=x.device) + i * M + # pyre-fixme[6]: Expected `Iterable[Variable[_T]]` for 1st param but got + # `Union[None, List[typing.Any], typing.Tuple[typing.Any, ...]]`. + for (i, v) in enumerate(split_size) + ], + dim=0, + ) + + return x_packed[padded_to_packed_idx] diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/volumes.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/volumes.py new file mode 100644 index 0000000000000000000000000000000000000000..8bf069b5f9e2b46c8203cf23666bb2fc8e7dcc45 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/structures/volumes.py @@ -0,0 +1,1137 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
+# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +import copy +from typing import List, Optional, Tuple, Union + +import torch +from pytorch3d.common.compat import meshgrid_ij +from pytorch3d.common.datatypes import Device, make_device +from pytorch3d.transforms import Scale, Transform3d + +from . import utils as struct_utils + + +_Scalar = Union[int, float] +_Vector = Union[torch.Tensor, Tuple[_Scalar, ...], List[_Scalar]] +_ScalarOrVector = Union[_Scalar, _Vector] + +_VoxelSize = _ScalarOrVector +_Translation = _Vector + +_TensorBatch = Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor]] +_ALL_CONTENT: slice = slice(0, None) + + +class Volumes: + """ + This class provides functions for working with batches of volumetric grids + of possibly varying spatial sizes. + + VOLUME DENSITIES + + The Volumes class can be either constructed from a 5D tensor of + `densities` of size `batch x density_dim x depth x height x width` or + from a list of differently-sized 4D tensors `[D_1, ..., D_batch]`, + where each `D_i` is of size `[density_dim x depth_i x height_i x width_i]`. + + In case the `Volumes` object is initialized from the list of `densities`, + the list of tensors is internally converted to a single 5D tensor by + zero-padding the relevant dimensions. Both list and padded representations can be + accessed with the `Volumes.densities()` or `Volumes.densities_list()` getters. + The sizes of the individual volumes in the structure can be retrieved + with the `Volumes.get_grid_sizes()` getter. + + The `Volumes` class is immutable. I.e. after generating a `Volumes` object, + one cannot change its properties, such as `self._densities` or `self._features` + anymore. 
+ + + VOLUME FEATURES + + While the `densities` field is intended to represent various measures of the + "density" of the volume cells (opacity, signed/unsigned distances + from the nearest surface, ...), one can additionally initialize the + object with the `features` argument. `features` are either a 5D tensor + of shape `batch x feature_dim x depth x height x width` or a list of + of differently-sized 4D tensors `[F_1, ..., F_batch]`, + where each `F_i` is of size `[feature_dim x depth_i x height_i x width_i]`. + `features` are intended to describe other properties of volume cells, + such as per-voxel 3D vectors of RGB colors that can be later used + for rendering the volume. + + + VOLUME COORDINATES + + Additionally, using the `VolumeLocator` class the `Volumes` class keeps track + of the locations of the centers of the volume cells in the local volume + coordinates as well as in the world coordinates. + + Local coordinates: + - Represent the locations of the volume cells in the local coordinate + frame of the volume. + - The center of the voxel indexed with `[·, ·, 0, 0, 0]` in the volume + has its 3D local coordinate set to `[-1, -1, -1]`, while the voxel + at index `[·, ·, depth_i-1, height_i-1, width_i-1]` has its + 3D local coordinate set to `[1, 1, 1]`. + - The first/second/third coordinate of each of the 3D per-voxel + XYZ vector denotes the horizontal/vertical/depth-wise position + respectively. I.e the order of the coordinate dimensions in the + volume is reversed w.r.t. the order of the 3D coordinate vectors. + - The intermediate coordinates between `[-1, -1, -1]` and `[1, 1, 1]`. + are linearly interpolated over the spatial dimensions of the volume. + - Note that the convention is the same as for the 5D version of the + `torch.nn.functional.grid_sample` function called with + the same value of `align_corners` argument. 
+ - Note that the local coordinate convention of `Volumes` + (+X = left to right, +Y = top to bottom, +Z = away from the user) + is *different* from the world coordinate convention of the + renderer for `Meshes` or `Pointclouds` + (+X = right to left, +Y = bottom to top, +Z = away from the user). + + World coordinates: + - These define the locations of the centers of the volume cells + in the world coordinates. + - They are specified with the following mapping that converts + points `x_local` in the local coordinates to points `x_world` + in the world coordinates:: + + x_world = ( + x_local * (volume_size - 1) * 0.5 * voxel_size + ) - volume_translation, + + here `voxel_size` specifies the size of each voxel of the volume, + and `volume_translation` is the 3D offset of the central voxel of + the volume w.r.t. the origin of the world coordinate frame. + Both `voxel_size` and `volume_translation` are specified in + the world coordinate units. `volume_size` is the spatial size of + the volume in form of a 3D vector `[width, height, depth]`. + - Given the above definition of `x_world`, one can derive the + inverse mapping from `x_world` to `x_local` as follows:: + + x_local = ( + (x_world + volume_translation) / (0.5 * voxel_size) + ) / (volume_size - 1) + + - For a trivial volume with `volume_translation==[0, 0, 0]` + with `voxel_size=-1`, `x_world` would range + from -(volume_size-1)/2` to `+(volume_size-1)/2`. + + Coordinate tensors that denote the locations of each of the volume cells in + local / world coordinates (with shape `(depth x height x width x 3)`) + can be retrieved by calling the `Volumes.get_coord_grid()` getter with the + appropriate `world_coordinates` argument. + + Internally, the mapping between `x_local` and `x_world` is represented + as a `Transform3d` object `Volumes.VolumeLocator._local_to_world_transform`. 
+ Users can access the relevant transformations with the + `Volumes.get_world_to_local_coords_transform()` and + `Volumes.get_local_to_world_coords_transform()` + functions. + + Example coordinate conversion: + - For a "trivial" volume with `voxel_size = 1.`, + `volume_translation=[0., 0., 0.]`, and the spatial size of + `DxHxW = 5x5x5`, the point `x_world = (-2, 0, 2)` gets mapped + to `x_local=(-1, 0, 1)`. + - For a "trivial" volume `v` with `voxel_size = 1.`, + `volume_translation=[0., 0., 0.]`, the following holds: + + torch.nn.functional.grid_sample( + v.densities(), + v.get_coord_grid(world_coordinates=False), + align_corners=align_corners, + ) == v.densities(), + + i.e. sampling the volume at trivial local coordinates + (no scaling with `voxel_size`` or shift with `volume_translation`) + results in the same volume. + """ + + def __init__( + self, + densities: _TensorBatch, + features: Optional[_TensorBatch] = None, + voxel_size: _VoxelSize = 1.0, + volume_translation: _Translation = (0.0, 0.0, 0.0), + align_corners: bool = True, + ) -> None: + """ + Args: + **densities**: Batch of input feature volume occupancies of shape + `(minibatch, density_dim, depth, height, width)`, or a list + of 4D tensors `[D_1, ..., D_minibatch]` where each `D_i` has + shape `(density_dim, depth_i, height_i, width_i)`. + Typically, each voxel contains a non-negative number + corresponding to its opaqueness. + **features**: Batch of input feature volumes of shape: + `(minibatch, feature_dim, depth, height, width)` or a list + of 4D tensors `[F_1, ..., F_minibatch]` where each `F_i` has + shape `(feature_dim, depth_i, height_i, width_i)`. + The field is optional and can be set to `None` in case features are + not required. + **voxel_size**: Denotes the size of each volume voxel in world units. 
+ Has to be one of: + a) A scalar (square voxels) + b) 3-tuple or a 3-list of scalars + c) a Tensor of shape (3,) + d) a Tensor of shape (minibatch, 3) + e) a Tensor of shape (minibatch, 1) + f) a Tensor of shape (1,) (square voxels) + **volume_translation**: Denotes the 3D translation of the center + of the volume in world units. Has to be one of: + a) 3-tuple or a 3-list of scalars + b) a Tensor of shape (3,) + c) a Tensor of shape (minibatch, 3) + d) a Tensor of shape (1,) (square voxels) + **align_corners**: If set (default), the coordinates of the corner voxels are + exactly −1 or +1 in the local coordinate system. Otherwise, the coordinates + correspond to the centers of the corner voxels. Cf. the namesake argument to + `torch.nn.functional.grid_sample`. + """ + + # handle densities + densities_, grid_sizes = self._convert_densities_features_to_tensor( + densities, "densities" + ) + + # take device from densities + self.device = densities_.device + + # assign to the internal buffers + self._densities = densities_ + + # assign a coordinate transformation member + self.locator = VolumeLocator( + batch_size=len(self), + grid_sizes=grid_sizes, + voxel_size=voxel_size, + volume_translation=volume_translation, + device=self.device, + align_corners=align_corners, + ) + + # handle features + self._features = None + if features is not None: + self._set_features(features) + + def _convert_densities_features_to_tensor( + self, x: _TensorBatch, var_name: str + ) -> Tuple[torch.Tensor, torch.LongTensor]: + """ + Handle the `densities` or `features` arguments to the constructor. 
+ """ + if isinstance(x, (list, tuple)): + x_tensor = struct_utils.list_to_padded(x) + if any(x_.ndim != 4 for x_ in x): + raise ValueError( + f"`{var_name}` has to be a list of 4-dim tensors of shape: " + f"({var_name}_dim, height, width, depth)" + ) + if any(x_.shape[0] != x[0].shape[0] for x_ in x): + raise ValueError( + f"Each entry in the list of `{var_name}` has to have the " + "same number of channels (first dimension in the tensor)." + ) + x_shapes = torch.stack( + [ + torch.tensor( + list(x_.shape[1:]), dtype=torch.long, device=x_tensor.device + ) + for x_ in x + ], + dim=0, + ) + elif torch.is_tensor(x): + if x.ndim != 5: + raise ValueError( + f"`{var_name}` has to be a 5-dim tensor of shape: " + f"(minibatch, {var_name}_dim, height, width, depth)" + ) + x_tensor = x + x_shapes = torch.tensor( + list(x.shape[2:]), dtype=torch.long, device=x.device + )[None].repeat(x.shape[0], 1) + else: + raise ValueError( + f"{var_name} must be either a list or a tensor with " + f"shape (batch_size, {var_name}_dim, H, W, D)." + ) + # pyre-ignore[7] + return x_tensor, x_shapes + + def __len__(self) -> int: + return self._densities.shape[0] + + def __getitem__( + self, + index: Union[ + int, List[int], Tuple[int], slice, torch.BoolTensor, torch.LongTensor + ], + ) -> "Volumes": + """ + Args: + index: Specifying the index of the volume to retrieve. + Can be an int, slice, list of ints or a boolean or a long tensor. + + Returns: + Volumes object with selected volumes. The tensors are not cloned. + """ + if isinstance(index, int): + index = torch.LongTensor([index]) + elif isinstance(index, (slice, list, tuple)): + pass + elif torch.is_tensor(index): + if index.dim() != 1 or index.dtype.is_floating_point: + raise IndexError(index) + else: + raise IndexError(index) + + new = self.__class__( + # pyre-fixme[16]: `Optional` has no attribute `__getitem__`. 
+ features=self.features()[index] if self._features is not None else None, + densities=self.densities()[index], + ) + # dont forget to update grid_sizes! + self.locator._copy_transform_and_sizes(new.locator, index=index) + return new + + def features(self) -> Optional[torch.Tensor]: + """ + Returns the features of the volume. + + Returns: + **features**: The tensor of volume features. + """ + return self._features + + def densities(self) -> torch.Tensor: + """ + Returns the densities of the volume. + + Returns: + **densities**: The tensor of volume densities. + """ + return self._densities + + def densities_list(self) -> List[torch.Tensor]: + """ + Get the list representation of the densities. + + Returns: + list of tensors of densities of shape (dim_i, D_i, H_i, W_i). + """ + return self._features_densities_list(self.densities()) + + def features_list(self) -> List[torch.Tensor]: + """ + Get the list representation of the features. + + Returns: + list of tensors of features of shape (dim_i, D_i, H_i, W_i) + or `None` for feature-less volumes. + """ + features_ = self.features() + if features_ is None: + # No features provided so return None + # pyre-fixme[7]: Expected `List[torch.Tensor]` but got `None`. + return None + return self._features_densities_list(features_) + + def get_align_corners(self) -> bool: + """ + Return whether the corners of the voxels should be aligned with the + image pixels. + """ + return self.locator._align_corners + + def _features_densities_list(self, x: torch.Tensor) -> List[torch.Tensor]: + """ + Retrieve the list representation of features/densities. + + Args: + x: self.features() or self.densities() + + Returns: + list of tensors of features/densities of shape (dim_i, D_i, H_i, W_i). 
+ """ + x_dim = x.shape[1] + pad_sizes = torch.nn.functional.pad( + self.get_grid_sizes(), [1, 0], mode="constant", value=x_dim + ) + x_list = struct_utils.padded_to_list(x, pad_sizes.tolist()) + return x_list + + def update_padded( + self, new_densities: torch.Tensor, new_features: Optional[torch.Tensor] = None + ) -> "Volumes": + """ + Returns a Volumes structure with updated padded tensors and copies of + the auxiliary tensors `self._local_to_world_transform`, + `device` and `self._grid_sizes`. This function allows for an update of + densities (and features) without having to explicitly + convert it to the list representation for heterogeneous batches. + + Args: + new_densities: FloatTensor of shape (N, dim_density, D, H, W) + new_features: (optional) FloatTensor of shape (N, dim_feature, D, H, W) + + Returns: + Volumes with updated features and densities + """ + new = copy.copy(self) + new._set_densities(new_densities) + if new_features is None: + new._features = None + else: + new._set_features(new_features) + return new + + def _set_features(self, features: _TensorBatch) -> None: + self._set_densities_features("features", features) + + def _set_densities(self, densities: _TensorBatch) -> None: + self._set_densities_features("densities", densities) + + def _set_densities_features(self, var_name: str, x: _TensorBatch) -> None: + x_tensor, grid_sizes = self._convert_densities_features_to_tensor(x, var_name) + if x_tensor.device != self.device: + raise ValueError( + f"`{var_name}` have to be on the same device as `self.densities`." + ) + if len(x_tensor.shape) != 5: + raise ValueError( + f"{var_name} has to be a 5-dim tensor of shape: " + f"(minibatch, {var_name}_dim, height, width, depth)" + ) + + if not ( + (self.get_grid_sizes().shape == grid_sizes.shape) + and torch.allclose(self.get_grid_sizes(), grid_sizes) + ): + raise ValueError( + f"The size of every grid in `{var_name}` has to match the size of" + "the corresponding `densities` grid." 
+ ) + setattr(self, "_" + var_name, x_tensor) + + def clone(self) -> "Volumes": + """ + Deep copy of Volumes object. All internal tensors are cloned + individually. + + Returns: + new Volumes object. + """ + return copy.deepcopy(self) + + def to(self, device: Device, copy: bool = False) -> "Volumes": + """ + Match the functionality of torch.Tensor.to() + If copy = True or the self Tensor is on a different device, the + returned tensor is a copy of self with the desired torch.device. + If copy = False and the self Tensor already has the correct torch.device, + then self is returned. + + Args: + device: Device (as str or torch.device) for the new tensor. + copy: Boolean indicator whether or not to clone self. Default False. + + Returns: + Volumes object. + """ + device_ = make_device(device) + if not copy and self.device == device_: + return self + + other = self.clone() + if self.device == device_: + return other + + other.device = device_ + other._densities = self._densities.to(device_) + if self._features is not None: + # pyre-fixme[16]: `Optional` has no attribute `to`. + other._features = self.features().to(device_) + self.locator._copy_transform_and_sizes(other.locator, device=device_) + other.locator = other.locator.to(device, copy) + return other + + def cpu(self) -> "Volumes": + return self.to("cpu") + + def cuda(self) -> "Volumes": + return self.to("cuda") + + def get_grid_sizes(self) -> torch.LongTensor: + """ + Returns the sizes of individual volumetric grids in the structure. + + Returns: + **grid_sizes**: Tensor of spatial sizes of each of the volumes + of size (batchsize, 3), where i-th row holds (D_i, H_i, W_i). + """ + return self.locator.get_grid_sizes() + + def get_local_to_world_coords_transform(self) -> Transform3d: + """ + Return a Transform3d object that converts points in the + the local coordinate frame of the volume to world coordinates. + Local volume coordinates are scaled s.t. 
the coordinates along one + side of the volume are in range [-1, 1]. + + Returns: + **local_to_world_transform**: A Transform3d object converting + points from local coordinates to the world coordinates. + """ + return self.locator.get_local_to_world_coords_transform() + + def get_world_to_local_coords_transform(self) -> Transform3d: + """ + Return a Transform3d object that converts points in the + world coordinates to the local coordinate frame of the volume. + Local volume coordinates are scaled s.t. the coordinates along one + side of the volume are in range [-1, 1]. + + Returns: + **world_to_local_transform**: A Transform3d object converting + points from world coordinates to local coordinates. + """ + return self.get_local_to_world_coords_transform().inverse() + + def world_to_local_coords(self, points_3d_world: torch.Tensor) -> torch.Tensor: + """ + Convert a batch of 3D point coordinates `points_3d_world` of shape + (minibatch, ..., dim) in the world coordinates to + the local coordinate frame of the volume. Local volume + coordinates are scaled s.t. the coordinates along one side of the volume + are in range [-1, 1]. + + Args: + **points_3d_world**: A tensor of shape `(minibatch, ..., 3)` + containing the 3D coordinates of a set of points that will + be converted from the local volume coordinates (ranging + within [-1, 1]) to the world coordinates + defined by the `self.center` and `self.voxel_size` parameters. + + Returns: + **points_3d_local**: `points_3d_world` converted to the local + volume coordinates of shape `(minibatch, ..., 3)`. + """ + return self.locator.world_to_local_coords(points_3d_world) + + def local_to_world_coords(self, points_3d_local: torch.Tensor) -> torch.Tensor: + """ + Convert a batch of 3D point coordinates `points_3d_local` of shape + (minibatch, ..., dim) in the local coordinate frame of the volume + to the world coordinates. 
+ + Args: + **points_3d_local**: A tensor of shape `(minibatch, ..., 3)` + containing the 3D coordinates of a set of points that will + be converted from the local volume coordinates (ranging + within [-1, 1]) to the world coordinates + defined by the `self.center` and `self.voxel_size` parameters. + + Returns: + **points_3d_world**: `points_3d_local` converted to the world + coordinates of the volume of shape `(minibatch, ..., 3)`. + """ + return self.locator.local_to_world_coords(points_3d_local) + + def get_coord_grid(self, world_coordinates: bool = True) -> torch.Tensor: + """ + Return the 3D coordinate grid of the volumetric grid + in local (`world_coordinates=False`) or world coordinates + (`world_coordinates=True`). + + The grid records location of each center of the corresponding volume voxel. + + Local coordinates are scaled s.t. the values along one side of the + volume are in range [-1, 1]. + + Args: + **world_coordinates**: if `True`, the method + returns the grid in the world coordinates, + otherwise, in local coordinates. + + Returns: + **coordinate_grid**: The grid of coordinates of shape + `(minibatch, depth, height, width, 3)`, where `minibatch`, + `height`, `width` and `depth` are the batch size, height, width + and depth of the volume `features` or `densities`. + """ + return self.locator.get_coord_grid(world_coordinates) + + +class VolumeLocator: + """ + The `VolumeLocator` class keeps track of the locations of the + centers of the volume cells in the local volume coordinates as well as in + the world coordinates for a voxel grid structure in 3D. + + Local coordinates: + - Represent the locations of the volume cells in the local coordinate + frame of the volume. + - The center of the voxel indexed with `[·, ·, 0, 0, 0]` in the volume + has its 3D local coordinate set to `[-1, -1, -1]`, while the voxel + at index `[·, ·, depth_i-1, height_i-1, width_i-1]` has its + 3D local coordinate set to `[1, 1, 1]`. 
+ - The first/second/third coordinate of each of the 3D per-voxel + XYZ vector denotes the horizontal/vertical/depth-wise position + respectively. I.e the order of the coordinate dimensions in the + volume is reversed w.r.t. the order of the 3D coordinate vectors. + - The intermediate coordinates between `[-1, -1, -1]` and `[1, 1, 1]`. + are linearly interpolated over the spatial dimensions of the volume. + - Note that the convention is the same as for the 5D version of the + `torch.nn.functional.grid_sample` function called with + the same value of `align_corners` argument. + - Note that the local coordinate convention of `VolumeLocator` + (+X = left to right, +Y = top to bottom, +Z = away from the user) + is *different* from the world coordinate convention of the + renderer for `Meshes` or `Pointclouds` + (+X = right to left, +Y = bottom to top, +Z = away from the user). + + World coordinates: + - These define the locations of the centers of the volume cells + in the world coordinates. + - They are specified with the following mapping that converts + points `x_local` in the local coordinates to points `x_world` + in the world coordinates:: + + x_world = ( + x_local * (volume_size - 1) * 0.5 * voxel_size + ) - volume_translation, + + here `voxel_size` specifies the size of each voxel of the volume, + and `volume_translation` is the 3D offset of the central voxel of + the volume w.r.t. the origin of the world coordinate frame. + Both `voxel_size` and `volume_translation` are specified in + the world coordinate units. `volume_size` is the spatial size of + the volume in form of a 3D vector `[width, height, depth]`. 
+ - Given the above definition of `x_world`, one can derive the + inverse mapping from `x_world` to `x_local` as follows:: + + x_local = ( + (x_world + volume_translation) / (0.5 * voxel_size) + ) / (volume_size - 1) + + - For a trivial volume with `volume_translation==[0, 0, 0]` + with `voxel_size=-1`, `x_world` would range + from -(volume_size-1)/2` to `+(volume_size-1)/2`. + + Coordinate tensors that denote the locations of each of the volume cells in + local / world coordinates (with shape `(depth x height x width x 3)`) + can be retrieved by calling the `VolumeLocator.get_coord_grid()` getter with the + appropriate `world_coordinates` argument. + + Internally, the mapping between `x_local` and `x_world` is represented + as a `Transform3d` object `VolumeLocator._local_to_world_transform`. + Users can access the relevant transformations with the + `VolumeLocator.get_world_to_local_coords_transform()` and + `VolumeLocator.get_local_to_world_coords_transform()` + functions. + + Example coordinate conversion: + - For a "trivial" volume with `voxel_size = 1.`, + `volume_translation=[0., 0., 0.]`, and the spatial size of + `DxHxW = 5x5x5`, the point `x_world = (-2, 0, 2)` gets mapped + to `x_local=(-1, 0, 1)`. + - For a "trivial" volume `v` with `voxel_size = 1.`, + `volume_translation=[0., 0., 0.]`, the following holds:: + + torch.nn.functional.grid_sample( + v.densities(), + v.get_coord_grid(world_coordinates=False), + align_corners=align_corners, + ) == v.densities(), + + i.e. sampling the volume at trivial local coordinates + (no scaling with `voxel_size`` or shift with `volume_translation`) + results in the same volume. 
+ """ + + def __init__( + self, + batch_size: int, + grid_sizes: Union[ + torch.LongTensor, Tuple[int, int, int], List[torch.LongTensor] + ], + device: torch.device, + voxel_size: _VoxelSize = 1.0, + volume_translation: _Translation = (0.0, 0.0, 0.0), + align_corners: bool = True, + ): + """ + **batch_size** : Batch size of the underlying grids + **grid_sizes** : Represents the resolutions of different grids in the batch. Can be + a) tuple of form (H, W, D) + b) list/tuple of length batch_size of lists/tuples of form (H, W, D) + c) torch.Tensor of shape (batch_size, H, W, D) + H, W, D are height, width, depth respectively. If `grid_sizes` is a tuple than + all the grids in the batch have the same resolution. + **voxel_size**: Denotes the size of each volume voxel in world units. + Has to be one of: + a) A scalar (square voxels) + b) 3-tuple or a 3-list of scalars + c) a Tensor of shape (3,) + d) a Tensor of shape (minibatch, 3) + e) a Tensor of shape (minibatch, 1) + f) a Tensor of shape (1,) (square voxels) + **volume_translation**: Denotes the 3D translation of the center + of the volume in world units. Has to be one of: + a) 3-tuple or a 3-list of scalars + b) a Tensor of shape (3,) + c) a Tensor of shape (minibatch, 3) + d) a Tensor of shape (1,) (square voxels) + **align_corners**: If set (default), the coordinates of the corner voxels are + exactly −1 or +1 in the local coordinate system. Otherwise, the coordinates + correspond to the centers of the corner voxels. Cf. the namesake argument to + `torch.nn.functional.grid_sample`. 
+ """ + self.device = device + self._batch_size = batch_size + self._grid_sizes = self._convert_grid_sizes2tensor(grid_sizes) + self._resolution = tuple(torch.max(self._grid_sizes.cpu(), dim=0).values) + self._align_corners = align_corners + + # set the local_to_world transform + self._set_local_to_world_transform( + voxel_size=voxel_size, + volume_translation=volume_translation, + ) + + def _convert_grid_sizes2tensor( + self, x: Union[torch.LongTensor, List[torch.LongTensor], Tuple[int, int, int]] + ) -> torch.LongTensor: + """ + Handle the grid_sizes argument to the constructor. + """ + if isinstance(x, (list, tuple)): + if isinstance(x[0], (torch.LongTensor, list, tuple)): + if self._batch_size != len(x): + raise ValueError("x should have a batch size of 'batch_size'") + # pyre-ignore[6] + if any(len(x_) != 3 for x_ in x): + raise ValueError( + "`grid_sizes` has to be a list of 3-dim tensors of shape: " + "(height, width, depth)" + ) + x_shapes = torch.stack( + [ + torch.tensor( + # pyre-ignore[6] + list(x_), + dtype=torch.long, + device=self.device, + ) + for x_ in x + ], + dim=0, + ) + elif isinstance(x[0], int): + x_shapes = torch.stack( + [ + torch.tensor(list(x), dtype=torch.long, device=self.device) + for _ in range(self._batch_size) + ], + dim=0, + ) + else: + raise ValueError( + "`grid_sizes` can be a list/tuple of int or torch.Tensor not of " + + "{type(x[0])}." + ) + + elif torch.is_tensor(x): + if x.ndim != 2: + raise ValueError( + "`grid_sizes` has to be a 2-dim tensor of shape: (minibatch, 3)" + ) + x_shapes = x.to(self.device) + else: + raise ValueError( + "grid_sizes must be either a list of tensors with shape (H, W, D), tensor with" + "shape (batch_size, H, W, D) or a tuple of (H, W, D)." 
+ ) + # pyre-ignore[7] + return x_shapes + + def _voxel_size_translation_to_transform( + self, + voxel_size: torch.Tensor, + volume_translation: torch.Tensor, + batch_size: int, + ) -> Transform3d: + """ + Converts the `voxel_size` and `volume_translation` constructor arguments + to the internal `Transform3d` object `local_to_world_transform`. + """ + volume_size_zyx = self.get_grid_sizes().float() + volume_size_xyz = volume_size_zyx[:, [2, 1, 0]] + + # x_local = ( + # (x_world + volume_translation) / (0.5 * voxel_size) + # ) / (volume_size - 1) + + # x_world = ( + # x_local * (volume_size - 1) * 0.5 * voxel_size + # ) - volume_translation + + local_to_world_transform = Scale( + (volume_size_xyz - 1) * voxel_size * 0.5, device=self.device + ).translate(-volume_translation) + + return local_to_world_transform + + def get_coord_grid(self, world_coordinates: bool = True) -> torch.Tensor: + """ + Return the 3D coordinate grid of the volumetric grid + in local (`world_coordinates=False`) or world coordinates + (`world_coordinates=True`). + + The grid records location of each center of the corresponding volume voxel. + + Local coordinates are scaled s.t. the values along one side of the + volume are in range [-1, 1]. + + Args: + **world_coordinates**: if `True`, the method + returns the grid in the world coordinates, + otherwise, in local coordinates. + + Returns: + **coordinate_grid**: The grid of coordinates of shape + `(minibatch, depth, height, width, 3)`, where `minibatch`, + `height`, `width` and `depth` are the batch size, height, width + and depth of the volume `features` or `densities`. + """ + # TODO(dnovotny): Implement caching of the coordinate grid. 
# NOTE(review): the line below is the tail of `get_coord_grid`, whose `def`
# lies before this chunk; kept for reference only:
#     return self._calculate_coordinate_grid(world_coordinates=world_coordinates)

def _calculate_coordinate_grid(
    self, world_coordinates: bool = True
) -> torch.Tensor:
    """
    Compute the 3D coordinate grid of the volumetric grid, either in the
    local ([-1, 1]-normalized, `world_coordinates=False`) frame or in
    world coordinates (`world_coordinates=True`).
    """
    batch, (depth, height, width) = self._batch_size, self._resolution
    grid_sizes = self.get_grid_sizes()

    # Half-voxel inset of the extreme grid points unless corners are aligned.
    def _edge_offset(n_pts):
        return 0.0 if self._align_corners else 1.0 / n_pts

    axes = [
        torch.linspace(
            _edge_offset(n_pts) - 1.0,
            1.0 - _edge_offset(n_pts),
            n_pts,
            dtype=torch.float32,
            device=self.device,
        )
        for n_pts in (depth, height, width)
    ]

    # "ij"-indexed meshgrids for the z, y, x axes.
    grid_z, grid_y, grid_x = meshgrid_ij(axes)

    # (x, y, z) channel order matches the convention of torch.nn.grid_sample.
    coords_local = torch.stack((grid_x, grid_y, grid_z), dim=3)[None].repeat(
        batch, 1, 1, 1, 1
    )

    # Per-volume grid sizes relative to the maximal volume size.
    rel_sizes = (
        torch.tensor(
            [[depth, height, width]], device=grid_sizes.device, dtype=torch.float32
        )
        - 1
    ) / (grid_sizes - 1).float()

    if (rel_sizes != 1.0).any():
        # Rescale/shift grids that are smaller than the maximal resolution.
        rel_xyz = rel_sizes[:, [2, 1, 0]][:, None, None, None]
        coords_local *= rel_xyz
        coords_local += rel_xyz - 1

    if world_coordinates:
        return self.local_to_world_coords(coords_local)
    return coords_local

def get_local_to_world_coords_transform(self) -> Transform3d:
    """
    Return the Transform3d mapping local volume coordinates (scaled so
    that one side of the volume spans [-1, 1]) to world coordinates.
    """
    return self._local_to_world_transform

def get_world_to_local_coords_transform(self) -> Transform3d:
    """
    Return the Transform3d mapping world coordinates to the local volume
    frame — the inverse of `get_local_to_world_coords_transform`.
    """
    return self.get_local_to_world_coords_transform().inverse()

def world_to_local_coords(self, points_3d_world: torch.Tensor) -> torch.Tensor:
    """
    Map points of shape (minibatch, ..., 3) from world coordinates to the
    local [-1, 1] coordinate frame of the volume.

    Args:
        points_3d_world: (minibatch, ..., 3) tensor of world-space points.

    Returns:
        (minibatch, ..., 3) tensor of points in local volume coordinates.
    """
    shape = points_3d_world.shape
    flat = points_3d_world.view(shape[0], -1, 3)
    transform = self.get_world_to_local_coords_transform()
    return transform.transform_points(flat).view(shape)

def local_to_world_coords(self, points_3d_local: torch.Tensor) -> torch.Tensor:
    """
    Map points of shape (minibatch, ..., 3) from the local [-1, 1] frame
    of the volume to world coordinates (defined by `self.center` and
    `self.voxel_size`).

    Args:
        points_3d_local: (minibatch, ..., 3) tensor of local-frame points.

    Returns:
        (minibatch, ..., 3) tensor of points in world coordinates.
    """
    shape = points_3d_local.shape
    flat = points_3d_local.view(shape[0], -1, 3)
    transform = self.get_local_to_world_coords_transform()
    return transform.transform_points(flat).view(shape)

def get_grid_sizes(self) -> torch.LongTensor:
    """
    Return the spatial sizes of the individual grids as a (batchsize, 3)
    tensor whose i-th row holds (D_i, H_i, W_i).
    """
    return self._grid_sizes

def _set_local_to_world_transform(
    self,
    voxel_size: _VoxelSize = 1.0,
    volume_translation: _Translation = (0.0, 0.0, 0.0),
):
    """
    Build and store the local-to-world transform from `voxel_size` and
    `volume_translation`.

    Args:
        voxel_size: scalar (cubical voxels), 3-tuple/list, or tensor of
            shape (3,), (1,) or (minibatch, 3).
        volume_translation: 3-tuple/list, or tensor of shape (3,), (1,)
            or (minibatch, 3) — world-space translation of the volume
            center.
    """
    # Normalize both arguments to (N, 3) tensors on self.device.
    n = len(self)
    size_t = self._handle_voxel_size(voxel_size, n)
    shift_t = self._handle_volume_translation(volume_translation, n)
    self._local_to_world_transform = self._voxel_size_translation_to_transform(
        size_t, shift_t, n
    )
# NOTE(review): the docstring/body fragment opening this chunk belongs to
# `_set_local_to_world_transform`, whose `def` lies in the preceding chunk.

def _copy_transform_and_sizes(
    self,
    other: "VolumeLocator",
    device: Optional[torch.device] = None,
    index: Optional[
        Union[int, List[int], Tuple[int], slice, torch.Tensor]
    ] = _ALL_CONTENT,
) -> None:
    """
    Copy the local-to-world transform and grid sizes into `other` (in
    place), moving them to the specified device.

    Args:
        other: VolumeLocator object to which to copy.
        device: torch.device on which to put the result; defaults to
            self.device.
        index: Which batch elements to copy — an int, slice, list of
            ints, or a boolean/long tensor. Defaults to all items (`:`).
    """
    device = device if device is not None else self.device
    other._grid_sizes = self._grid_sizes[index].to(device)
    other._local_to_world_transform = self.get_local_to_world_coords_transform()[
        # pyre-fixme[6]: For 1st param expected `Union[List[int], int, slice,
        # BoolTensor, LongTensor]` but got `Union[None, List[int], Tuple[int],
        # int, slice, Tensor]`.
        index
    ].to(device)

def _handle_voxel_size(
    self, voxel_size: _VoxelSize, batch_size: int
) -> torch.Tensor:
    """
    Normalize the `voxel_size` constructor argument of `VolumeLocator`
    to a (batch_size, 3) float32 tensor on self.device.

    Raises:
        ValueError: if `voxel_size` has an unsupported type or shape.
    """
    err_msg = (
        "voxel_size has to be either a 3-tuple of scalars, or a scalar, or"
        " a torch.Tensor of shape (3,) or (1,) or (minibatch, 3) or (minibatch, 1)."
    )
    if isinstance(voxel_size, (float, int)):
        # A scalar means cubical voxels: broadcast to a (1, 3) tensor.
        voxel_size = torch.full(
            (1, 3), voxel_size, device=self.device, dtype=torch.float32
        )
    elif isinstance(voxel_size, torch.Tensor):
        if voxel_size.numel() == 1:
            # Single-element tensor -> 3-element tensor.
            voxel_size = voxel_size.view(-1).repeat(3)
        elif len(voxel_size.shape) == 2 and (
            voxel_size.shape[0] == batch_size and voxel_size.shape[1] == 1
        ):
            # (minibatch, 1) -> (minibatch, 3).
            voxel_size = voxel_size.repeat(1, 3)
    return self._convert_volume_property_to_tensor(voxel_size, batch_size, err_msg)

def _handle_volume_translation(
    self, translation: _Translation, batch_size: int
) -> torch.Tensor:
    """
    Normalize the `volume_translation` constructor argument of
    `VolumeLocator` to a (batch_size, 3) tensor on self.device.

    Raises:
        ValueError: if `translation` has an unsupported type or shape.
    """
    err_msg = (
        "`volume_translation` has to be either a 3-tuple of scalars, or"
        " a Tensor of shape (1,3) or (minibatch, 3) or (3,)`."
    )
    return self._convert_volume_property_to_tensor(translation, batch_size, err_msg)

def __len__(self) -> int:
    # Batch size of the locator.
    return self._batch_size

def _convert_volume_property_to_tensor(
    self, x: _Vector, batch_size: int, err_msg: str
) -> torch.Tensor:
    """
    Shared normalization for `volume_translation` / `voxel_size`: turn a
    3-sequence or a tensor of shape (3,), (1, 3) or (batch_size, 3) into
    a (batch_size, 3) tensor on self.device.

    Raises:
        ValueError: with `err_msg` when `x` cannot be normalized.
    """
    if isinstance(x, (list, tuple)):
        if len(x) != 3:
            raise ValueError(err_msg)
        x = torch.tensor(x, device=self.device, dtype=torch.float32)[None]
        x = x.repeat((batch_size, 1))
    elif isinstance(x, torch.Tensor):
        ok = (
            (x.shape[0] == 1 and x.shape[1] == 3)
            or (x.shape[0] == 3 and len(x.shape) == 1)
            or (x.shape[0] == batch_size and x.shape[1] == 3)
        )
        if not ok:
            raise ValueError(err_msg)
        if x.device != self.device:
            x = x.to(self.device)
        if x.shape[0] == 3 and len(x.shape) == 1:
            x = x[None]
        if x.shape[0] == 1:
            x = x.repeat((batch_size, 1))
    else:
        raise ValueError(err_msg)

    return x

def to(self, device: Device, copy: bool = False) -> "VolumeLocator":
    """
    Match the functionality of torch.Tensor.to().
    If copy = True or the self Tensor is on a different device, the
    returned tensor is a copy of self with the desired torch.device.
    If copy = False and the self Tensor already has the correct
    torch.device, then self is returned.

    Args:
        device: Device (as str or torch.device) for the new tensor.
        copy: Boolean indicator whether or not to clone self. Default False.

    Returns:
        VolumeLocator object.
    """
    device_ = make_device(device)
    if not copy and self.device == device_:
        return self

    other = self.clone()
    if self.device == device_:
        return other

    other.device = device_
    other._grid_sizes = self._grid_sizes.to(device_)
    # FIX: use the normalized `device_` (the raw `device` argument may be a
    # plain string) consistently with the `_grid_sizes` move above.
    other._local_to_world_transform = self.get_local_to_world_coords_transform().to(
        device_
    )
    return other

def clone(self) -> "VolumeLocator":
    """
    Deep copy of the VolumeLocator object. All internal tensors are
    cloned individually.  (Fixes the previously garbled docstring.)

    Returns:
        new VolumeLocator object.
    """
    return copy.deepcopy(self)
+ """ + return copy.deepcopy(self) + + def cpu(self) -> "VolumeLocator": + return self.to("cpu") + + def cuda(self) -> "VolumeLocator": + return self.to("cuda") diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..96b241562c7675153c2e4ce67e0a10e6e6b071c1 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/__init__.py @@ -0,0 +1,41 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from .math import acos_linear_extrapolation +from .rotation_conversions import ( + axis_angle_to_matrix, + axis_angle_to_quaternion, + euler_angles_to_matrix, + matrix_to_axis_angle, + matrix_to_euler_angles, + matrix_to_quaternion, + matrix_to_rotation_6d, + quaternion_apply, + quaternion_invert, + quaternion_multiply, + quaternion_raw_multiply, + quaternion_to_axis_angle, + quaternion_to_matrix, + random_quaternions, + random_rotation, + random_rotations, + rotation_6d_to_matrix, + standardize_quaternion, +) +from .se3 import se3_exp_map, se3_log_map +from .so3 import ( + so3_exp_map, + so3_exponential_map, + so3_log_map, + so3_relative_angle, + so3_rotation_angle, +) +from .transform3d import Rotate, RotateAxisAngle, Scale, Transform3d, Translate + + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/__pycache__/__init__.cpython-310.pyc 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aea9ded9da88ae12e1edc531778c9be3ade7e79f Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/__pycache__/__init__.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/__pycache__/math.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/__pycache__/math.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4a3b7c95c888e0f5d13b59e70aa0b9a344e2118 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/__pycache__/math.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/__pycache__/rotation_conversions.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/__pycache__/rotation_conversions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96b159f6161ddc031e66f95174f1d3ad946c98c7 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/__pycache__/rotation_conversions.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/__pycache__/se3.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/__pycache__/se3.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..9d6d465f9ba3a4aa5c7e072181489268b52efd5d Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/__pycache__/se3.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/__pycache__/so3.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/__pycache__/so3.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c44af6f8304894ea9cd27ae967bba4a11e2d956 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/__pycache__/so3.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/__pycache__/transform3d.cpython-310.pyc b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/__pycache__/transform3d.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8461023d32a999d37e2163ae33e6a645d53ee38 Binary files /dev/null and b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/__pycache__/transform3d.cpython-310.pyc differ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/math.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/math.py new file mode 100644 index 0000000000000000000000000000000000000000..5008bd589f3fbf744ad203228d38d8a106203226 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/math.py @@ -0,0 +1,87 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

import math
from typing import Tuple

import torch


# Default clamp: acos is evaluated exactly on [-(1 - 1e-4), 1 - 1e-4] and
# linearly extrapolated outside, keeping gradients finite near +-1.
DEFAULT_ACOS_BOUND: float = 1.0 - 1e-4


def acos_linear_extrapolation(
    x: torch.Tensor,
    bounds: Tuple[float, float] = (-DEFAULT_ACOS_BOUND, DEFAULT_ACOS_BOUND),
) -> torch.Tensor:
    """
    `arccos(x)` extended to the whole real line by first-order Taylor
    extrapolation outside `bounds`; this keeps backpropagation stable when
    `x` is not guaranteed to lie strictly inside (-1, 1).

    More specifically::

        bounds=(lower_bound, upper_bound)
        if lower_bound <= x <= upper_bound:
            acos_linear_extrapolation(x) = acos(x)
        elif x <= lower_bound:  # 1st order Taylor approximation
            acos_linear_extrapolation(x)
            = acos(lower_bound) + dacos/dx(lower_bound) * (x - lower_bound)
        else:  # x >= upper_bound
            acos_linear_extrapolation(x)
            = acos(upper_bound) + dacos/dx(upper_bound) * (x - upper_bound)

    Args:
        x: Input `Tensor`.
        bounds: (lower, upper) limits of the exact-acos region; both must
            lie inside (-1, 1).

    Returns:
        `Tensor` of the same shape as `x` with the extrapolated `arccos(x)`.

    Raises:
        ValueError: if the bounds are inverted or outside (-1, 1).
    """
    lower, upper = bounds

    if lower > upper:
        raise ValueError("lower bound has to be smaller or equal to upper bound.")
    if lower <= -1.0 or upper >= 1.0:
        raise ValueError("Both lower bound and upper bound have to be within (-1, 1).")

    result = torch.empty_like(x)
    above = x >= upper
    below = x <= lower
    inside = (~above) & (~below)

    # Exact arccos in the interior, tangent lines outside.
    result[inside] = torch.acos(x[inside])
    result[above] = _acos_linear_approximation(x[above], upper)
    result[below] = _acos_linear_approximation(x[below], lower)

    return result


def _acos_linear_approximation(x: torch.Tensor, x0: float) -> torch.Tensor:
    """First-order Taylor expansion of `arccos(x)` around `x0`."""
    return (x - x0) * _dacos_dx(x0) + math.acos(x0)


def _dacos_dx(x: float) -> float:
    """Derivative of `arccos(x)` with respect to `x`."""
    return (-1.0) / math.sqrt(1.0 - x * x)
# pyre-unsafe

from typing import Optional

import torch
import torch.nn.functional as F

from ..common.datatypes import Device


"""
The transformation matrices returned from the functions in this file assume
the points on which the transformation will be applied are column vectors.
i.e. the R matrix is structured as

    R = [
            [Rxx, Rxy, Rxz],
            [Ryx, Ryy, Ryz],
            [Rzx, Rzy, Rzz],
        ]  # (3, 3)

This matrix can be applied to column vectors by post multiplication
by the points e.g.

    points = [[0], [1], [2]]  # (3 x 1) xyz coordinates of a point
    transformed_points = R * points

To apply the same matrix to points which are row vectors, the R matrix
can be transposed and pre multiplied by the points:

e.g.
    points = [[0, 1, 2]]  # (1 x 3) xyz coordinates of a point
    transformed_points = points * R.transpose(1, 0)
"""


def quaternion_to_matrix(quaternions: torch.Tensor) -> torch.Tensor:
    """
    Convert rotations given as quaternions (real part first, shape
    (..., 4)) to rotation matrices of shape (..., 3, 3).
    """
    w, x, y, z = torch.unbind(quaternions, -1)
    # Normalization factor 2 / |q|^2, so non-unit quaternions still map to
    # proper rotations.
    scale = 2.0 / (quaternions * quaternions).sum(-1)

    rot = torch.stack(
        (
            1 - scale * (y * y + z * z),
            scale * (x * y - z * w),
            scale * (x * z + y * w),
            scale * (x * y + z * w),
            1 - scale * (x * x + z * z),
            scale * (y * z - x * w),
            scale * (x * z - y * w),
            scale * (y * z + x * w),
            1 - scale * (x * x + y * y),
        ),
        -1,
    )
    return rot.reshape(quaternions.shape[:-1] + (3, 3))


def _copysign(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:
    """
    Elementwise magnitude of `a` with the sign of `b` (same shape as `a`).
    Unlike the standard copysign operation, -0 and NaN get no special
    treatment.
    """
    flip = (a < 0) != (b < 0)
    return torch.where(flip, -a, a)
def _sqrt_positive_part(x: torch.Tensor) -> torch.Tensor:
    """
    torch.sqrt(torch.max(0, x)), but with a zero subgradient at x == 0.
    """
    ret = torch.zeros_like(x)
    positive = x > 0
    if torch.is_grad_enabled():
        # Indexed assignment keeps the gradient exactly zero where x <= 0.
        ret[positive] = torch.sqrt(x[positive])
    else:
        ret = torch.where(positive, torch.sqrt(x), ret)
    return ret


def matrix_to_quaternion(matrix: torch.Tensor) -> torch.Tensor:
    """
    Convert rotation matrices of shape (..., 3, 3) to quaternions with the
    real part first, as a tensor of shape (..., 4).
    """
    if matrix.size(-1) != 3 or matrix.size(-2) != 3:
        raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.")

    batch_dim = matrix.shape[:-2]
    m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.unbind(
        matrix.reshape(batch_dim + (9,)), dim=-1
    )

    # |q_r|, |q_i|, |q_j|, |q_k| up to scale, from the matrix trace terms.
    q_abs = _sqrt_positive_part(
        torch.stack(
            [
                1.0 + m00 + m11 + m22,
                1.0 + m00 - m11 - m22,
                1.0 - m00 + m11 - m22,
                1.0 - m00 - m11 + m22,
            ],
            dim=-1,
        )
    )

    # The desired quaternion multiplied by each of r, i, j, k respectively.
    quat_by_rijk = torch.stack(
        [
            torch.stack([q_abs[..., 0] ** 2, m21 - m12, m02 - m20, m10 - m01], dim=-1),
            torch.stack([m21 - m12, q_abs[..., 1] ** 2, m10 + m01, m02 + m20], dim=-1),
            torch.stack([m02 - m20, m10 + m01, q_abs[..., 2] ** 2, m12 + m21], dim=-1),
            torch.stack([m10 - m01, m20 + m02, m21 + m12, q_abs[..., 3] ** 2], dim=-1),
        ],
        dim=-2,
    )

    # Clamp the denominator away from zero; the exact floor (0.1) does not
    # matter because a candidate with small q_abs is never selected below.
    flr = torch.tensor(0.1).to(dtype=q_abs.dtype, device=q_abs.device)
    quat_candidates = quat_by_rijk / (2.0 * q_abs[..., None].max(flr))

    # Up to numerical noise all candidates agree (up to sign); pick the
    # best-conditioned one (largest denominator) per batch element.
    out = quat_candidates[
        F.one_hot(q_abs.argmax(dim=-1), num_classes=4) > 0.5, :
    ].reshape(batch_dim + (4,))
    return standardize_quaternion(out)


def _axis_angle_rotation(axis: str, angle: torch.Tensor) -> torch.Tensor:
    """
    Rotation matrices (..., 3, 3) for rotations of `angle` radians about a
    single coordinate axis ("X", "Y" or "Z").
    """
    cos = torch.cos(angle)
    sin = torch.sin(angle)
    one = torch.ones_like(angle)
    zero = torch.zeros_like(angle)

    if axis == "X":
        flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos)
    elif axis == "Y":
        flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos)
    elif axis == "Z":
        flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one)
    else:
        raise ValueError("letter must be either X, Y or Z.")

    return torch.stack(flat, -1).reshape(angle.shape + (3, 3))


def _check_euler_convention(convention: str) -> None:
    """Validate a 3-letter Euler convention string ("XYZ", "ZXZ", ...)."""
    if len(convention) != 3:
        raise ValueError("Convention must have 3 letters.")
    if convention[1] in (convention[0], convention[2]):
        raise ValueError(f"Invalid convention {convention}.")
    for letter in convention:
        if letter not in ("X", "Y", "Z"):
            raise ValueError(f"Invalid letter {letter} in convention string.")


def euler_angles_to_matrix(euler_angles: torch.Tensor, convention: str) -> torch.Tensor:
    """
    Convert Euler angles in radians, shape (..., 3), to rotation matrices
    of shape (..., 3, 3) following `convention` (e.g. "XYZ").
    """
    if euler_angles.dim() == 0 or euler_angles.shape[-1] != 3:
        raise ValueError("Invalid input euler angles.")
    _check_euler_convention(convention)
    matrices = [
        _axis_angle_rotation(axis, angle)
        for axis, angle in zip(convention, torch.unbind(euler_angles, -1))
    ]
    # Equivalent to functools.reduce(torch.matmul, matrices).
    return torch.matmul(torch.matmul(matrices[0], matrices[1]), matrices[2])


def _angle_from_tan(
    axis: str, other_axis: str, data, horizontal: bool, tait_bryan: bool
) -> torch.Tensor:
    """
    Recover the first or third Euler angle from the two matrix entries
    that are positive constant multiples of its sine and cosine.

    Args:
        axis: "X", "Y" or "Z" — the axis of the angle being extracted.
        other_axis: axis letter of the middle rotation in the convention.
        data: relevant slice of the rotation matrices, shape (..., 3, 3)
            sub-view.
        horizontal: True when extracting the third angle (the relevant
            entries share a row of the matrix); False for the first angle
            (same column).
        tait_bryan: True when the first and third convention axes differ.

    Returns:
        Euler angles in radians, one per matrix, shape (...).
    """
    i1, i2 = {"X": (2, 1), "Y": (0, 2), "Z": (1, 0)}[axis]
    if horizontal:
        i2, i1 = i1, i2
    even = (axis + other_axis) in ["XY", "YZ", "ZX"]
    if horizontal == even:
        return torch.atan2(data[..., i1], data[..., i2])
    if tait_bryan:
        return torch.atan2(-data[..., i2], data[..., i1])
    return torch.atan2(data[..., i2], -data[..., i1])


def _index_from_letter(letter: str) -> int:
    """Map an axis letter to its coordinate index (X->0, Y->1, Z->2)."""
    mapping = {"X": 0, "Y": 1, "Z": 2}
    if letter not in mapping:
        raise ValueError("letter must be either X, Y or Z.")
    return mapping[letter]


def matrix_to_euler_angles(matrix: torch.Tensor, convention: str) -> torch.Tensor:
    """
    Convert rotation matrices of shape (..., 3, 3) to Euler angles in
    radians, shape (..., 3), following `convention`.
    """
    _check_euler_convention(convention)
    if matrix.size(-1) != 3 or matrix.size(-2) != 3:
        raise ValueError(f"Invalid rotation matrix shape {matrix.shape}.")
    i0 = _index_from_letter(convention[0])
    i2 = _index_from_letter(convention[2])
    tait_bryan = i0 != i2
    if tait_bryan:
        central_angle = torch.asin(
            matrix[..., i0, i2] * (-1.0 if i0 - i2 in [-1, 2] else 1.0)
        )
    else:
        central_angle = torch.acos(matrix[..., i0, i0])

    angles = (
        _angle_from_tan(
            convention[0], convention[1], matrix[..., i2], False, tait_bryan
        ),
        central_angle,
        _angle_from_tan(
            convention[2], convention[1], matrix[..., i0, :], True, tait_bryan
        ),
    )
    return torch.stack(angles, -1)
torch.Tensor: + """ + Generate random quaternions representing rotations, + i.e. versors with nonnegative real part. + + Args: + n: Number of quaternions in a batch to return. + dtype: Type to return. + device: Desired device of returned tensor. Default: + uses the current device for the default tensor type. + + Returns: + Quaternions as tensor of shape (N, 4). + """ + if isinstance(device, str): + device = torch.device(device) + o = torch.randn((n, 4), dtype=dtype, device=device) + s = (o * o).sum(1) + o = o / _copysign(torch.sqrt(s), o[:, 0])[:, None] + return o + + +def random_rotations( + n: int, dtype: Optional[torch.dtype] = None, device: Optional[Device] = None +) -> torch.Tensor: + """ + Generate random rotations as 3x3 rotation matrices. + + Args: + n: Number of rotation matrices in a batch to return. + dtype: Type to return. + device: Device of returned tensor. Default: if None, + uses the current device for the default tensor type. + + Returns: + Rotation matrices as tensor of shape (n, 3, 3). + """ + quaternions = random_quaternions(n, dtype=dtype, device=device) + return quaternion_to_matrix(quaternions) + + +def random_rotation( + dtype: Optional[torch.dtype] = None, device: Optional[Device] = None +) -> torch.Tensor: + """ + Generate a single random 3x3 rotation matrix. + + Args: + dtype: Type to return + device: Device of returned tensor. Default: if None, + uses the current device for the default tensor type + + Returns: + Rotation matrix as tensor of shape (3, 3). + """ + return random_rotations(1, dtype, device)[0] + + +def standardize_quaternion(quaternions: torch.Tensor) -> torch.Tensor: + """ + Convert a unit quaternion to a standard form: one in which the real + part is non negative. + + Args: + quaternions: Quaternions with real part first, + as tensor of shape (..., 4). + + Returns: + Standardized quaternions as tensor of shape (..., 4). 
+ """ + return torch.where(quaternions[..., 0:1] < 0, -quaternions, quaternions) + + +def quaternion_raw_multiply(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor: + """ + Multiply two quaternions. + Usual torch rules for broadcasting apply. + + Args: + a: Quaternions as tensor of shape (..., 4), real part first. + b: Quaternions as tensor of shape (..., 4), real part first. + + Returns: + The product of a and b, a tensor of quaternions shape (..., 4). + """ + aw, ax, ay, az = torch.unbind(a, -1) + bw, bx, by, bz = torch.unbind(b, -1) + ow = aw * bw - ax * bx - ay * by - az * bz + ox = aw * bx + ax * bw + ay * bz - az * by + oy = aw * by - ax * bz + ay * bw + az * bx + oz = aw * bz + ax * by - ay * bx + az * bw + return torch.stack((ow, ox, oy, oz), -1) + + +def quaternion_multiply(a: torch.Tensor, b: torch.Tensor) -> torch.Tensor: + """ + Multiply two quaternions representing rotations, returning the quaternion + representing their composition, i.e. the versor with nonnegative real part. + Usual torch rules for broadcasting apply. + + Args: + a: Quaternions as tensor of shape (..., 4), real part first. + b: Quaternions as tensor of shape (..., 4), real part first. + + Returns: + The product of a and b, a tensor of quaternions of shape (..., 4). + """ + ab = quaternion_raw_multiply(a, b) + return standardize_quaternion(ab) + + +def quaternion_invert(quaternion: torch.Tensor) -> torch.Tensor: + """ + Given a quaternion representing rotation, get the quaternion representing + its inverse. + + Args: + quaternion: Quaternions as tensor of shape (..., 4), with real part + first, which must be versors (unit quaternions). + + Returns: + The inverse, a tensor of quaternions of shape (..., 4). + """ + + scaling = torch.tensor([1, -1, -1, -1], device=quaternion.device) + return quaternion * scaling + + +def quaternion_apply(quaternion: torch.Tensor, point: torch.Tensor) -> torch.Tensor: + """ + Apply the rotation given by a quaternion to a 3D point. 
+ Usual torch rules for broadcasting apply. + + Args: + quaternion: Tensor of quaternions, real part first, of shape (..., 4). + point: Tensor of 3D points of shape (..., 3). + + Returns: + Tensor of rotated points of shape (..., 3). + """ + if point.size(-1) != 3: + raise ValueError(f"Points are not in 3D, {point.shape}.") + real_parts = point.new_zeros(point.shape[:-1] + (1,)) + point_as_quaternion = torch.cat((real_parts, point), -1) + out = quaternion_raw_multiply( + quaternion_raw_multiply(quaternion, point_as_quaternion), + quaternion_invert(quaternion), + ) + return out[..., 1:] + + +def axis_angle_to_matrix(axis_angle: torch.Tensor) -> torch.Tensor: + """ + Convert rotations given as axis/angle to rotation matrices. + + Args: + axis_angle: Rotations given as a vector in axis angle form, + as a tensor of shape (..., 3), where the magnitude is + the angle turned anticlockwise in radians around the + vector's direction. + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + return quaternion_to_matrix(axis_angle_to_quaternion(axis_angle)) + + +def matrix_to_axis_angle(matrix: torch.Tensor) -> torch.Tensor: + """ + Convert rotations given as rotation matrices to axis/angle. + + Args: + matrix: Rotation matrices as tensor of shape (..., 3, 3). + + Returns: + Rotations given as a vector in axis angle form, as a tensor + of shape (..., 3), where the magnitude is the angle + turned anticlockwise in radians around the vector's + direction. + """ + return quaternion_to_axis_angle(matrix_to_quaternion(matrix)) + + +def axis_angle_to_quaternion(axis_angle: torch.Tensor) -> torch.Tensor: + """ + Convert rotations given as axis/angle to quaternions. + + Args: + axis_angle: Rotations given as a vector in axis angle form, + as a tensor of shape (..., 3), where the magnitude is + the angle turned anticlockwise in radians around the + vector's direction. + + Returns: + quaternions with real part first, as tensor of shape (..., 4). 
+ """ + angles = torch.norm(axis_angle, p=2, dim=-1, keepdim=True) + half_angles = angles * 0.5 + eps = 1e-6 + small_angles = angles.abs() < eps + sin_half_angles_over_angles = torch.empty_like(angles) + sin_half_angles_over_angles[~small_angles] = ( + torch.sin(half_angles[~small_angles]) / angles[~small_angles] + ) + # for x small, sin(x/2) is about x/2 - (x/2)^3/6 + # so sin(x/2)/x is about 1/2 - (x*x)/48 + sin_half_angles_over_angles[small_angles] = ( + 0.5 - (angles[small_angles] * angles[small_angles]) / 48 + ) + quaternions = torch.cat( + [torch.cos(half_angles), axis_angle * sin_half_angles_over_angles], dim=-1 + ) + return quaternions + + +def quaternion_to_axis_angle(quaternions: torch.Tensor) -> torch.Tensor: + """ + Convert rotations given as quaternions to axis/angle. + + Args: + quaternions: quaternions with real part first, + as tensor of shape (..., 4). + + Returns: + Rotations given as a vector in axis angle form, as a tensor + of shape (..., 3), where the magnitude is the angle + turned anticlockwise in radians around the vector's + direction. + """ + norms = torch.norm(quaternions[..., 1:], p=2, dim=-1, keepdim=True) + half_angles = torch.atan2(norms, quaternions[..., :1]) + angles = 2 * half_angles + eps = 1e-6 + small_angles = angles.abs() < eps + sin_half_angles_over_angles = torch.empty_like(angles) + sin_half_angles_over_angles[~small_angles] = ( + torch.sin(half_angles[~small_angles]) / angles[~small_angles] + ) + # for x small, sin(x/2) is about x/2 - (x/2)^3/6 + # so sin(x/2)/x is about 1/2 - (x*x)/48 + sin_half_angles_over_angles[small_angles] = ( + 0.5 - (angles[small_angles] * angles[small_angles]) / 48 + ) + return quaternions[..., 1:] / sin_half_angles_over_angles + + +def rotation_6d_to_matrix(d6: torch.Tensor) -> torch.Tensor: + """ + Converts 6D rotation representation by Zhou et al. [1] to rotation matrix + using Gram--Schmidt orthogonalization per Section B of [1]. 
+ Args: + d6: 6D rotation representation, of size (*, 6) + + Returns: + batch of rotation matrices of size (*, 3, 3) + + [1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H. + On the Continuity of Rotation Representations in Neural Networks. + IEEE Conference on Computer Vision and Pattern Recognition, 2019. + Retrieved from http://arxiv.org/abs/1812.07035 + """ + + a1, a2 = d6[..., :3], d6[..., 3:] + b1 = F.normalize(a1, dim=-1) + b2 = a2 - (b1 * a2).sum(-1, keepdim=True) * b1 + b2 = F.normalize(b2, dim=-1) + b3 = torch.cross(b1, b2, dim=-1) + return torch.stack((b1, b2, b3), dim=-2) + + +def matrix_to_rotation_6d(matrix: torch.Tensor) -> torch.Tensor: + """ + Converts rotation matrices to 6D rotation representation by Zhou et al. [1] + by dropping the last row. Note that 6D representation is not unique. + Args: + matrix: batch of rotation matrices of size (*, 3, 3) + + Returns: + 6D rotation representation, of size (*, 6) + + [1] Zhou, Y., Barnes, C., Lu, J., Yang, J., & Li, H. + On the Continuity of Rotation Representations in Neural Networks. + IEEE Conference on Computer Vision and Pattern Recognition, 2019. + Retrieved from http://arxiv.org/abs/1812.07035 + """ + batch_dim = matrix.size()[:-2] + return matrix[..., :2, :].clone().reshape(batch_dim + (6,)) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/se3.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/se3.py new file mode 100644 index 0000000000000000000000000000000000000000..693b84c22ac56208b3b8b37374d7f078dc54241a --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/se3.py @@ -0,0 +1,223 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + +import torch + +from .so3 import _so3_exp_map, hat, so3_log_map + + +def se3_exp_map(log_transform: torch.Tensor, eps: float = 1e-4) -> torch.Tensor: + """ + Convert a batch of logarithmic representations of SE(3) matrices `log_transform` + to a batch of 4x4 SE(3) matrices using the exponential map. + See e.g. [1], Sec 9.4.2. for more detailed description. + + A SE(3) matrix has the following form: + ``` + [ R 0 ] + [ T 1 ] , + ``` + where `R` is a 3x3 rotation matrix and `T` is a 3-D translation vector. + SE(3) matrices are commonly used to represent rigid motions or camera extrinsics. + + In the SE(3) logarithmic representation SE(3) matrices are + represented as 6-dimensional vectors `[log_translation | log_rotation]`, + i.e. a concatenation of two 3D vectors `log_translation` and `log_rotation`. + + The conversion from the 6D representation to a 4x4 SE(3) matrix `transform` + is done as follows: + ``` + transform = exp( [ hat(log_rotation) 0 ] + [ log_translation 1 ] ) , + ``` + where `exp` is the matrix exponential and `hat` is the Hat operator [2]. + + Note that for any `log_transform` with `0 <= ||log_rotation|| < 2pi` + (i.e. the rotation angle is between 0 and 2pi), the following identity holds: + ``` + se3_log_map(se3_exponential_map(log_transform)) == log_transform + ``` + + The conversion has a singularity around `||log(transform)|| = 0` + which is handled by clamping controlled with the `eps` argument. + + Args: + log_transform: Batch of vectors of shape `(minibatch, 6)`. + eps: A threshold for clipping the squared norm of the rotation logarithm + to avoid unstable gradients in the singular case. + + Returns: + Batch of transformation matrices of shape `(minibatch, 4, 4)`. + + Raises: + ValueError if `log_transform` is of incorrect shape. 
+ + [1] https://jinyongjeong.github.io/Download/SE3/jlblanco2010geometry3d_techrep.pdf + [2] https://en.wikipedia.org/wiki/Hat_operator + """ + + if log_transform.ndim != 2 or log_transform.shape[1] != 6: + raise ValueError("Expected input to be of shape (N, 6).") + + N, _ = log_transform.shape + + log_translation = log_transform[..., :3] + log_rotation = log_transform[..., 3:] + + # rotation is an exponential map of log_rotation + ( + R, + rotation_angles, + log_rotation_hat, + log_rotation_hat_square, + ) = _so3_exp_map(log_rotation, eps=eps) + + # translation is V @ T + V = _se3_V_matrix( + log_rotation, + log_rotation_hat, + log_rotation_hat_square, + rotation_angles, + eps=eps, + ) + T = torch.bmm(V, log_translation[:, :, None])[:, :, 0] + + transform = torch.zeros( + N, 4, 4, dtype=log_transform.dtype, device=log_transform.device + ) + + transform[:, :3, :3] = R + transform[:, :3, 3] = T + transform[:, 3, 3] = 1.0 + + return transform.permute(0, 2, 1) + + +def se3_log_map( + transform: torch.Tensor, eps: float = 1e-4, cos_bound: float = 1e-4 +) -> torch.Tensor: + """ + Convert a batch of 4x4 transformation matrices `transform` + to a batch of 6-dimensional SE(3) logarithms of the SE(3) matrices. + See e.g. [1], Sec 9.4.2. for more detailed description. + + A SE(3) matrix has the following form: + ``` + [ R 0 ] + [ T 1 ] , + ``` + where `R` is an orthonormal 3x3 rotation matrix and `T` is a 3-D translation vector. + SE(3) matrices are commonly used to represent rigid motions or camera extrinsics. + + In the SE(3) logarithmic representation SE(3) matrices are + represented as 6-dimensional vectors `[log_translation | log_rotation]`, + i.e. a concatenation of two 3D vectors `log_translation` and `log_rotation`. 
+ + The conversion from the 4x4 SE(3) matrix `transform` to the + 6D representation `log_transform = [log_translation | log_rotation]` + is done as follows: + ``` + log_transform = log(transform) + log_translation = log_transform[3, :3] + log_rotation = inv_hat(log_transform[:3, :3]) + ``` + where `log` is the matrix logarithm + and `inv_hat` is the inverse of the Hat operator [2]. + + Note that for any valid 4x4 `transform` matrix, the following identity holds: + ``` + se3_exp_map(se3_log_map(transform)) == transform + ``` + + The conversion has a singularity around `(transform=I)` which is handled + by clamping controlled with the `eps` and `cos_bound` arguments. + + Args: + transform: batch of SE(3) matrices of shape `(minibatch, 4, 4)`. + eps: A threshold for clipping the squared norm of the rotation logarithm + to avoid division by zero in the singular case. + cos_bound: Clamps the cosine of the rotation angle to + [-1 + cos_bound, 3 - cos_bound] to avoid non-finite outputs. + The non-finite outputs can be caused by passing small rotation angles + to the `acos` function in `so3_rotation_angle` of `so3_log_map`. + + Returns: + Batch of logarithms of input SE(3) matrices + of shape `(minibatch, 6)`. + + Raises: + ValueError if `transform` is of incorrect shape. + ValueError if `R` has an unexpected trace. 
+ + [1] https://jinyongjeong.github.io/Download/SE3/jlblanco2010geometry3d_techrep.pdf + [2] https://en.wikipedia.org/wiki/Hat_operator + """ + + if transform.ndim != 3: + raise ValueError("Input tensor shape has to be (N, 4, 4).") + + N, dim1, dim2 = transform.shape + if dim1 != 4 or dim2 != 4: + raise ValueError("Input tensor shape has to be (N, 4, 4).") + + if not torch.allclose(transform[:, :3, 3], torch.zeros_like(transform[:, :3, 3])): + raise ValueError("All elements of `transform[:, :3, 3]` should be 0.") + + # log_rot is just so3_log_map of the upper left 3x3 block + R = transform[:, :3, :3].permute(0, 2, 1) + log_rotation = so3_log_map(R, eps=eps, cos_bound=cos_bound) + + # log_translation is V^-1 @ T + T = transform[:, 3, :3] + V = _se3_V_matrix(*_get_se3_V_input(log_rotation), eps=eps) + log_translation = torch.linalg.solve(V, T[:, :, None])[:, :, 0] + + return torch.cat((log_translation, log_rotation), dim=1) + + +def _se3_V_matrix( + log_rotation: torch.Tensor, + log_rotation_hat: torch.Tensor, + log_rotation_hat_square: torch.Tensor, + rotation_angles: torch.Tensor, + eps: float = 1e-4, +) -> torch.Tensor: + """ + A helper function that computes the "V" matrix from [1], Sec 9.4.2. + [1] https://jinyongjeong.github.io/Download/SE3/jlblanco2010geometry3d_techrep.pdf + """ + + V = ( + torch.eye(3, dtype=log_rotation.dtype, device=log_rotation.device)[None] + + log_rotation_hat + # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. + * ((1 - torch.cos(rotation_angles)) / (rotation_angles**2))[:, None, None] + + ( + log_rotation_hat_square + # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and + # `int`. + * ((rotation_angles - torch.sin(rotation_angles)) / (rotation_angles**3))[ + :, None, None + ] + ) + ) + + return V + + +def _get_se3_V_input(log_rotation: torch.Tensor, eps: float = 1e-4): + """ + A helper function that computes the input variables to the `_se3_V_matrix` + function. 
+ """ + # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. + nrms = (log_rotation**2).sum(-1) + rotation_angles = torch.clamp(nrms, eps).sqrt() + log_rotation_hat = hat(log_rotation) + log_rotation_hat_square = torch.bmm(log_rotation_hat, log_rotation_hat) + return log_rotation, log_rotation_hat, log_rotation_hat_square, rotation_angles diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/so3.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/so3.py new file mode 100644 index 0000000000000000000000000000000000000000..169e1df0b8783e754d46a5a85043811a6ae081f0 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/so3.py @@ -0,0 +1,270 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +import warnings +from typing import Tuple + +import torch +from pytorch3d.transforms import rotation_conversions + +from ..transforms import acos_linear_extrapolation + + +def so3_relative_angle( + R1: torch.Tensor, + R2: torch.Tensor, + cos_angle: bool = False, + cos_bound: float = 1e-4, + eps: float = 1e-4, +) -> torch.Tensor: + """ + Calculates the relative angle (in radians) between pairs of + rotation matrices `R1` and `R2` with `angle = acos(0.5 * (Trace(R1 R2^T)-1))` + + .. note:: + This corresponds to a geodesic distance on the 3D manifold of rotation + matrices. + + Args: + R1: Batch of rotation matrices of shape `(minibatch, 3, 3)`. + R2: Batch of rotation matrices of shape `(minibatch, 3, 3)`. + cos_angle: If==True return cosine of the relative angle rather than + the angle itself. This can avoid the unstable calculation of `acos`. 
+ cos_bound: Clamps the cosine of the relative rotation angle to + [-1 + cos_bound, 1 - cos_bound] to avoid non-finite outputs/gradients + of the `acos` call. Note that the non-finite outputs/gradients + are returned when the angle is requested (i.e. `cos_angle==False`) + and the rotation angle is close to 0 or π. + eps: Tolerance for the valid trace check of the relative rotation matrix + in `so3_rotation_angle`. + Returns: + Corresponding rotation angles of shape `(minibatch,)`. + If `cos_angle==True`, returns the cosine of the angles. + + Raises: + ValueError if `R1` or `R2` is of incorrect shape. + ValueError if `R1` or `R2` has an unexpected trace. + """ + R12 = torch.bmm(R1, R2.permute(0, 2, 1)) + return so3_rotation_angle(R12, cos_angle=cos_angle, cos_bound=cos_bound, eps=eps) + + +def so3_rotation_angle( + R: torch.Tensor, + eps: float = 1e-4, + cos_angle: bool = False, + cos_bound: float = 1e-4, +) -> torch.Tensor: + """ + Calculates angles (in radians) of a batch of rotation matrices `R` with + `angle = acos(0.5 * (Trace(R)-1))`. The trace of the + input matrices is checked to be in the valid range `[-1-eps,3+eps]`. + The `eps` argument is a small constant that allows for small errors + caused by limited machine precision. + + Args: + R: Batch of rotation matrices of shape `(minibatch, 3, 3)`. + eps: Tolerance for the valid trace check. + cos_angle: If==True return cosine of the rotation angles rather than + the angle itself. This can avoid the unstable + calculation of `acos`. + cos_bound: Clamps the cosine of the rotation angle to + [-1 + cos_bound, 1 - cos_bound] to avoid non-finite outputs/gradients + of the `acos` call. Note that the non-finite outputs/gradients + are returned when the angle is requested (i.e. `cos_angle==False`) + and the rotation angle is close to 0 or π. + + Returns: + Corresponding rotation angles of shape `(minibatch,)`. + If `cos_angle==True`, returns the cosine of the angles. 
+ + Raises: + ValueError if `R` is of incorrect shape. + ValueError if `R` has an unexpected trace. + """ + + N, dim1, dim2 = R.shape + if dim1 != 3 or dim2 != 3: + raise ValueError("Input has to be a batch of 3x3 Tensors.") + + rot_trace = R[:, 0, 0] + R[:, 1, 1] + R[:, 2, 2] + + if ((rot_trace < -1.0 - eps) + (rot_trace > 3.0 + eps)).any(): + raise ValueError("A matrix has trace outside valid range [-1-eps,3+eps].") + + # phi ... rotation angle + phi_cos = (rot_trace - 1.0) * 0.5 + + if cos_angle: + return phi_cos + else: + if cos_bound > 0.0: + bound = 1.0 - cos_bound + return acos_linear_extrapolation(phi_cos, (-bound, bound)) + else: + return torch.acos(phi_cos) + + +def so3_exp_map(log_rot: torch.Tensor, eps: float = 0.0001) -> torch.Tensor: + """ + Convert a batch of logarithmic representations of rotation matrices `log_rot` + to a batch of 3x3 rotation matrices using Rodrigues formula [1]. + + In the logarithmic representation, each rotation matrix is represented as + a 3-dimensional vector (`log_rot`) who's l2-norm and direction correspond + to the magnitude of the rotation angle and the axis of rotation respectively. + + The conversion has a singularity around `log(R) = 0` + which is handled by clamping controlled with the `eps` argument. + + Args: + log_rot: Batch of vectors of shape `(minibatch, 3)`. + eps: A float constant handling the conversion singularity. + + Returns: + Batch of rotation matrices of shape `(minibatch, 3, 3)`. + + Raises: + ValueError if `log_rot` is of incorrect shape. + + [1] https://en.wikipedia.org/wiki/Rodrigues%27_rotation_formula + """ + return _so3_exp_map(log_rot, eps=eps)[0] + + +def so3_exponential_map(log_rot: torch.Tensor, eps: float = 0.0001) -> torch.Tensor: + warnings.warn( + """so3_exponential_map is deprecated, + Use so3_exp_map instead. 
+ so3_exponential_map will be removed in future releases.""", + PendingDeprecationWarning, + ) + + return so3_exp_map(log_rot, eps) + + +def _so3_exp_map( + log_rot: torch.Tensor, eps: float = 0.0001 +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + """ + A helper function that computes the so3 exponential map and, + apart from the rotation matrix, also returns intermediate variables + that can be re-used in other functions. + """ + _, dim = log_rot.shape + if dim != 3: + raise ValueError("Input tensor shape has to be Nx3.") + + nrms = (log_rot * log_rot).sum(1) + # phis ... rotation angles + rot_angles = torch.clamp(nrms, eps).sqrt() + skews = hat(log_rot) + skews_square = torch.bmm(skews, skews) + + R = rotation_conversions.axis_angle_to_matrix(log_rot) + + return R, rot_angles, skews, skews_square + + +def so3_log_map( + R: torch.Tensor, eps: float = 0.0001, cos_bound: float = 1e-4 +) -> torch.Tensor: + """ + Convert a batch of 3x3 rotation matrices `R` + to a batch of 3-dimensional matrix logarithms of rotation matrices + The conversion has a singularity around `(R=I)`. + + Args: + R: batch of rotation matrices of shape `(minibatch, 3, 3)`. + eps: (unused, for backward compatibility) + cos_bound: (unused, for backward compatibility) + + Returns: + Batch of logarithms of input rotation matrices + of shape `(minibatch, 3)`. + """ + + N, dim1, dim2 = R.shape + if dim1 != 3 or dim2 != 3: + raise ValueError("Input has to be a batch of 3x3 Tensors.") + + return rotation_conversions.matrix_to_axis_angle(R) + + +def hat_inv(h: torch.Tensor) -> torch.Tensor: + """ + Compute the inverse Hat operator [1] of a batch of 3x3 matrices. + + Args: + h: Batch of skew-symmetric matrices of shape `(minibatch, 3, 3)`. + + Returns: + Batch of 3d vectors of shape `(minibatch, 3, 3)`. + + Raises: + ValueError if `h` is of incorrect shape. + ValueError if `h` not skew-symmetric. 
+ + [1] https://en.wikipedia.org/wiki/Hat_operator + """ + + N, dim1, dim2 = h.shape + if dim1 != 3 or dim2 != 3: + raise ValueError("Input has to be a batch of 3x3 Tensors.") + + ss_diff = torch.abs(h + h.permute(0, 2, 1)).max() + + HAT_INV_SKEW_SYMMETRIC_TOL = 1e-5 + if float(ss_diff) > HAT_INV_SKEW_SYMMETRIC_TOL: + raise ValueError("One of input matrices is not skew-symmetric.") + + x = h[:, 2, 1] + y = h[:, 0, 2] + z = h[:, 1, 0] + + v = torch.stack((x, y, z), dim=1) + + return v + + +def hat(v: torch.Tensor) -> torch.Tensor: + """ + Compute the Hat operator [1] of a batch of 3D vectors. + + Args: + v: Batch of vectors of shape `(minibatch , 3)`. + + Returns: + Batch of skew-symmetric matrices of shape + `(minibatch, 3 , 3)` where each matrix is of the form: + `[ 0 -v_z v_y ] + [ v_z 0 -v_x ] + [ -v_y v_x 0 ]` + + Raises: + ValueError if `v` is of incorrect shape. + + [1] https://en.wikipedia.org/wiki/Hat_operator + """ + + N, dim = v.shape + if dim != 3: + raise ValueError("Input vectors have to be 3-dimensional.") + + h = torch.zeros((N, 3, 3), dtype=v.dtype, device=v.device) + + x, y, z = v.unbind(1) + + h[:, 0, 1] = -z + h[:, 0, 2] = y + h[:, 1, 0] = z + h[:, 1, 2] = -x + h[:, 2, 0] = -y + h[:, 2, 1] = x + + return h diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/transform3d.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/transform3d.py new file mode 100644 index 0000000000000000000000000000000000000000..fef2984552f197419cff533eb9d4d3f02cff81ca --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/transforms/transform3d.py @@ -0,0 +1,909 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + +import math +import os +import warnings +from typing import List, Optional, Union + +import torch + +from ..common.datatypes import Device, get_device, make_device +from ..common.workaround import _safe_det_3x3 +from .rotation_conversions import _axis_angle_rotation +from .se3 import se3_log_map + + +class Transform3d: + """ + A Transform3d object encapsulates a batch of N 3D transformations, and knows + how to transform points and normal vectors. Suppose that t is a Transform3d; + then we can do the following: + + .. code-block:: python + + N = len(t) + points = torch.randn(N, P, 3) + normals = torch.randn(N, P, 3) + points_transformed = t.transform_points(points) # => (N, P, 3) + normals_transformed = t.transform_normals(normals) # => (N, P, 3) + + + BROADCASTING + Transform3d objects supports broadcasting. Suppose that t1 and tN are + Transform3d objects with len(t1) == 1 and len(tN) == N respectively. Then we + can broadcast transforms like this: + + .. code-block:: python + + t1.transform_points(torch.randn(P, 3)) # => (P, 3) + t1.transform_points(torch.randn(1, P, 3)) # => (1, P, 3) + t1.transform_points(torch.randn(M, P, 3)) # => (M, P, 3) + tN.transform_points(torch.randn(P, 3)) # => (N, P, 3) + tN.transform_points(torch.randn(1, P, 3)) # => (N, P, 3) + + + COMBINING TRANSFORMS + Transform3d objects can be combined in two ways: composing and stacking. + Composing is function composition. Given Transform3d objects t1, t2, t3, + the following all compute the same thing: + + .. code-block:: python + + y1 = t3.transform_points(t2.transform_points(t1.transform_points(x))) + y2 = t1.compose(t2).compose(t3).transform_points(x) + y3 = t1.compose(t2, t3).transform_points(x) + + + Composing transforms should broadcast. + + .. code-block:: python + + if len(t1) == 1 and len(t2) == N, then len(t1.compose(t2)) == N. 
+ + We can also stack a sequence of Transform3d objects, which represents + composition along the batch dimension; then the following should compute the + same thing. + + .. code-block:: python + + N, M = len(tN), len(tM) + xN = torch.randn(N, P, 3) + xM = torch.randn(M, P, 3) + y1 = torch.cat([tN.transform_points(xN), tM.transform_points(xM)], dim=0) + y2 = tN.stack(tM).transform_points(torch.cat([xN, xM], dim=0)) + + BUILDING TRANSFORMS + We provide convenience methods for easily building Transform3d objects + as compositions of basic transforms. + + .. code-block:: python + + # Scale by 0.5, then translate by (1, 2, 3) + t1 = Transform3d().scale(0.5).translate(1, 2, 3) + + # Scale each axis by a different amount, then translate, then scale + t2 = Transform3d().scale(1, 3, 3).translate(2, 3, 1).scale(2.0) + + t3 = t1.compose(t2) + tN = t1.stack(t3, t3) + + + BACKPROP THROUGH TRANSFORMS + When building transforms, we can also parameterize them by Torch tensors; + in this case we can backprop through the construction and application of + Transform objects, so they could be learned via gradient descent or + predicted by a neural network. + + .. code-block:: python + + s1_params = torch.randn(N, requires_grad=True) + t_params = torch.randn(N, 3, requires_grad=True) + s2_params = torch.randn(N, 3, requires_grad=True) + + t = Transform3d().scale(s1_params).translate(t_params).scale(s2_params) + x = torch.randn(N, 3) + y = t.transform_points(x) + loss = compute_loss(y) + loss.backward() + + with torch.no_grad(): + s1_params -= lr * s1_params.grad + t_params -= lr * t_params.grad + s2_params -= lr * s2_params.grad + + CONVENTIONS + We adopt a right-hand coordinate system, meaning that rotation about an axis + with a positive angle results in a counter clockwise rotation. + + This class assumes that transformations are applied on inputs which + are row vectors. The internal representation of the Nx4x4 transformation + matrix is of the form: + + .. 
code-block:: python + + M = [ + [Rxx, Ryx, Rzx, 0], + [Rxy, Ryy, Rzy, 0], + [Rxz, Ryz, Rzz, 0], + [Tx, Ty, Tz, 1], + ] + + To apply the transformation to points, which are row vectors, the latter are + converted to homogeneous (4D) coordinates and right-multiplied by the M matrix: + + .. code-block:: python + + points = [[0, 1, 2]] # (1 x 3) xyz coordinates of a point + [transformed_points, 1] ∝ [points, 1] @ M + + """ + + def __init__( + self, + dtype: torch.dtype = torch.float32, + device: Device = "cpu", + matrix: Optional[torch.Tensor] = None, + ) -> None: + """ + Args: + dtype: The data type of the transformation matrix. + to be used if `matrix = None`. + device: The device for storing the implemented transformation. + If `matrix != None`, uses the device of input `matrix`. + matrix: A tensor of shape (4, 4) or of shape (minibatch, 4, 4) + representing the 4x4 3D transformation matrix. + If `None`, initializes with identity using + the specified `device` and `dtype`. + """ + + if matrix is None: + self._matrix = torch.eye(4, dtype=dtype, device=device).view(1, 4, 4) + else: + if matrix.ndim not in (2, 3): + raise ValueError('"matrix" has to be a 2- or a 3-dimensional tensor.') + if matrix.shape[-2] != 4 or matrix.shape[-1] != 4: + raise ValueError( + '"matrix" has to be a tensor of shape (minibatch, 4, 4) or (4, 4).' + ) + # set dtype and device from matrix + dtype = matrix.dtype + device = matrix.device + self._matrix = matrix.view(-1, 4, 4) + + self._transforms = [] # store transforms to compose + self._lu = None + self.device = make_device(device) + self.dtype = dtype + + def __len__(self) -> int: + return self.get_matrix().shape[0] + + def __getitem__( + self, index: Union[int, List[int], slice, torch.BoolTensor, torch.LongTensor] + ) -> "Transform3d": + """ + Args: + index: Specifying the index of the transform to retrieve. + Can be an int, slice, list of ints, boolean, long tensor. + Supports negative indices. 
+ + Returns: + Transform3d object with selected transforms. The tensors are not cloned. + """ + if isinstance(index, int): + index = [index] + return self.__class__(matrix=self.get_matrix()[index]) + + def compose(self, *others: "Transform3d") -> "Transform3d": + """ + Return a new Transform3d representing the composition of self with the + given other transforms, which will be stored as an internal list. + + Args: + *others: Any number of Transform3d objects + + Returns: + A new Transform3d with the stored transforms + """ + out = Transform3d(dtype=self.dtype, device=self.device) + out._matrix = self._matrix.clone() + for other in others: + if not isinstance(other, Transform3d): + msg = "Only possible to compose Transform3d objects; got %s" + raise ValueError(msg % type(other)) + out._transforms = self._transforms + list(others) + return out + + def get_matrix(self) -> torch.Tensor: + """ + Returns a 4×4 matrix corresponding to each transform in the batch. + + If the transform was composed from others, the matrix for the composite + transform will be returned. + For example, if self.transforms contains transforms t1, t2, and t3, and + given a set of points x, the following should be true: + + .. code-block:: python + + y1 = t1.compose(t2, t3).transform(x) + y2 = t3.transform(t2.transform(t1.transform(x))) + y1.get_matrix() == y2.get_matrix() + + Where necessary, those transforms are broadcast against each other. + + Returns: + A (N, 4, 4) batch of transformation matrices representing + the stored transforms. See the class documentation for the conventions. + """ + composed_matrix = self._matrix.clone() + if len(self._transforms) > 0: + for other in self._transforms: + other_matrix = other.get_matrix() + composed_matrix = _broadcast_bmm(composed_matrix, other_matrix) + return composed_matrix + + def get_se3_log(self, eps: float = 1e-4, cos_bound: float = 1e-4) -> torch.Tensor: + """ + Returns a 6D SE(3) log vector corresponding to each transform in the batch. 
+ + In the SE(3) logarithmic representation SE(3) matrices are + represented as 6-dimensional vectors `[log_translation | log_rotation]`, + i.e. a concatenation of two 3D vectors `log_translation` and `log_rotation`. + + The conversion from the 4x4 SE(3) matrix `transform` to the + 6D representation `log_transform = [log_translation | log_rotation]` + is done as follows:: + + log_transform = log(transform.get_matrix()) + log_translation = log_transform[3, :3] + log_rotation = inv_hat(log_transform[:3, :3]) + + where `log` is the matrix logarithm + and `inv_hat` is the inverse of the Hat operator [2]. + + See the docstring for `se3.se3_log_map` and [1], Sec 9.4.2. for more + detailed description. + + Args: + eps: A threshold for clipping the squared norm of the rotation logarithm + to avoid division by zero in the singular case. + cos_bound: Clamps the cosine of the rotation angle to + [-1 + cos_bound, 3 - cos_bound] to avoid non-finite outputs. + The non-finite outputs can be caused by passing small rotation angles + to the `acos` function in `so3_rotation_angle` of `so3_log_map`. + + Returns: + A (N, 6) tensor, rows of which represent the individual transforms + stored in the object as SE(3) logarithms. + + Raises: + ValueError if the stored transform is not Euclidean (e.g. R is not a rotation + matrix or the last column has non-zeros in the first three places). + + [1] https://jinyongjeong.github.io/Download/SE3/jlblanco2010geometry3d_techrep.pdf + [2] https://en.wikipedia.org/wiki/Hat_operator + """ + return se3_log_map(self.get_matrix(), eps, cos_bound) + + def _get_matrix_inverse(self) -> torch.Tensor: + """ + Return the inverse of self._matrix. + """ + return torch.inverse(self._matrix) + + def inverse(self, invert_composed: bool = False) -> "Transform3d": + """ + Returns a new Transform3d object that represents an inverse of the + current transformation. 
+ + Args: + invert_composed: + - True: First compose the list of stored transformations + and then apply inverse to the result. This is + potentially slower for classes of transformations + with inverses that can be computed efficiently + (e.g. rotations and translations). + - False: Invert the individual stored transformations + independently without composing them. + + Returns: + A new Transform3d object containing the inverse of the original + transformation. + """ + + tinv = Transform3d(dtype=self.dtype, device=self.device) + + if invert_composed: + # first compose then invert + tinv._matrix = torch.inverse(self.get_matrix()) + else: + # self._get_matrix_inverse() implements efficient inverse + # of self._matrix + i_matrix = self._get_matrix_inverse() + + # 2 cases: + if len(self._transforms) > 0: + # a) Either we have a non-empty list of transforms: + # Here we take self._matrix and append its inverse at the + # end of the reverted _transforms list. After composing + # the transformations with get_matrix(), this correctly + # right-multiplies by the inverse of self._matrix + # at the end of the composition. + tinv._transforms = [t.inverse() for t in reversed(self._transforms)] + last = Transform3d(dtype=self.dtype, device=self.device) + last._matrix = i_matrix + tinv._transforms.append(last) + else: + # b) Or there are no stored transformations + # we just set inverted matrix + tinv._matrix = i_matrix + + return tinv + + def stack(self, *others: "Transform3d") -> "Transform3d": + """ + Return a new batched Transform3d representing the batch elements from + self and all the given other transforms all batched together. + + Args: + *others: Any number of Transform3d objects + + Returns: + A new Transform3d. 
+ """ + transforms = [self] + list(others) + matrix = torch.cat([t.get_matrix() for t in transforms], dim=0) + out = Transform3d(dtype=self.dtype, device=self.device) + out._matrix = matrix + return out + + def transform_points(self, points, eps: Optional[float] = None) -> torch.Tensor: + """ + Use this transform to transform a set of 3D points. Assumes row major + ordering of the input points. + + Args: + points: Tensor of shape (P, 3) or (N, P, 3) + eps: If eps!=None, the argument is used to clamp the + last coordinate before performing the final division. + The clamping corresponds to: + last_coord := (last_coord.sign() + (last_coord==0)) * + torch.clamp(last_coord.abs(), eps), + i.e. the last coordinates that are exactly 0 will + be clamped to +eps. + + Returns: + points_out: points of shape (N, P, 3) or (P, 3) depending + on the dimensions of the transform + """ + points_batch = points.clone() + if points_batch.dim() == 2: + points_batch = points_batch[None] # (P, 3) -> (1, P, 3) + if points_batch.dim() != 3: + msg = "Expected points to have dim = 2 or dim = 3: got shape %r" + raise ValueError(msg % repr(points.shape)) + + N, P, _3 = points_batch.shape + ones = torch.ones(N, P, 1, dtype=points.dtype, device=points.device) + points_batch = torch.cat([points_batch, ones], dim=2) + + composed_matrix = self.get_matrix() + points_out = _broadcast_bmm(points_batch, composed_matrix) + denom = points_out[..., 3:] # denominator + if eps is not None: + denom_sign = denom.sign() + (denom == 0.0).type_as(denom) + denom = denom_sign * torch.clamp(denom.abs(), eps) + points_out = points_out[..., :3] / denom + + # When transform is (1, 4, 4) and points is (P, 3) return + # points_out of shape (P, 3) + if points_out.shape[0] == 1 and points.dim() == 2: + points_out = points_out.reshape(points.shape) + + return points_out + + def transform_normals(self, normals) -> torch.Tensor: + """ + Use this transform to transform a set of normal vectors. 
+ + Args: + normals: Tensor of shape (P, 3) or (N, P, 3) + + Returns: + normals_out: Tensor of shape (P, 3) or (N, P, 3) depending + on the dimensions of the transform + """ + if normals.dim() not in [2, 3]: + msg = "Expected normals to have dim = 2 or dim = 3: got shape %r" + raise ValueError(msg % (normals.shape,)) + composed_matrix = self.get_matrix() + + # TODO: inverse is bad! Solve a linear system instead + mat = composed_matrix[:, :3, :3] + normals_out = _broadcast_bmm(normals, mat.transpose(1, 2).inverse()) + + # This doesn't pass unit tests. TODO investigate further + # if self._lu is None: + # self._lu = self._matrix[:, :3, :3].transpose(1, 2).lu() + # normals_out = normals.lu_solve(*self._lu) + + # When transform is (1, 4, 4) and normals is (P, 3) return + # normals_out of shape (P, 3) + if normals_out.shape[0] == 1 and normals.dim() == 2: + normals_out = normals_out.reshape(normals.shape) + + return normals_out + + def translate(self, *args, **kwargs) -> "Transform3d": + return self.compose( + Translate(*args, device=self.device, dtype=self.dtype, **kwargs) + ) + + def scale(self, *args, **kwargs) -> "Transform3d": + return self.compose( + Scale(*args, device=self.device, dtype=self.dtype, **kwargs) + ) + + def rotate(self, *args, **kwargs) -> "Transform3d": + return self.compose( + Rotate(*args, device=self.device, dtype=self.dtype, **kwargs) + ) + + def rotate_axis_angle(self, *args, **kwargs) -> "Transform3d": + return self.compose( + RotateAxisAngle(*args, device=self.device, dtype=self.dtype, **kwargs) + ) + + def clone(self) -> "Transform3d": + """ + Deep copy of Transforms object. All internal tensors are cloned + individually. + + Returns: + new Transforms object. 
+ """ + other = Transform3d(dtype=self.dtype, device=self.device) + if self._lu is not None: + other._lu = [elem.clone() for elem in self._lu] + other._matrix = self._matrix.clone() + other._transforms = [t.clone() for t in self._transforms] + return other + + def to( + self, + device: Device, + copy: bool = False, + dtype: Optional[torch.dtype] = None, + ) -> "Transform3d": + """ + Match functionality of torch.Tensor.to() + If copy = True or the self Tensor is on a different device, the + returned tensor is a copy of self with the desired torch.device. + If copy = False and the self Tensor already has the correct torch.device, + then self is returned. + + Args: + device: Device (as str or torch.device) for the new tensor. + copy: Boolean indicator whether or not to clone self. Default False. + dtype: If not None, casts the internal tensor variables + to a given torch.dtype. + + Returns: + Transform3d object. + """ + device_ = make_device(device) + dtype_ = self.dtype if dtype is None else dtype + skip_to = self.device == device_ and self.dtype == dtype_ + + if not copy and skip_to: + return self + + other = self.clone() + + if skip_to: + return other + + other.device = device_ + other.dtype = dtype_ + other._matrix = other._matrix.to(device=device_, dtype=dtype_) + other._transforms = [ + t.to(device_, copy=copy, dtype=dtype_) for t in other._transforms + ] + return other + + def cpu(self) -> "Transform3d": + return self.to("cpu") + + def cuda(self) -> "Transform3d": + return self.to("cuda") + + +class Translate(Transform3d): + def __init__( + self, + x, + y=None, + z=None, + dtype: torch.dtype = torch.float32, + device: Optional[Device] = None, + ) -> None: + """ + Create a new Transform3d representing 3D translations. 

        Option I: Translate(xyz, dtype=torch.float32, device='cpu')
            xyz should be a tensor of shape (N, 3)

        Option II: Translate(x, y, z, dtype=torch.float32, device='cpu')
            Here x, y, and z will be broadcast against each other and
            concatenated to form the translation. Each can be:
                - A python scalar
                - A torch scalar
                - A 1D torch tensor
        """
        xyz = _handle_input(x, y, z, dtype, device, "Translate")
        super().__init__(device=xyz.device, dtype=dtype)
        N = xyz.shape[0]

        mat = torch.eye(4, dtype=dtype, device=self.device)
        mat = mat.view(1, 4, 4).repeat(N, 1, 1)
        # Translation lives in the bottom row because this class treats points
        # as row vectors (p' = p @ M), see the note in RotateAxisAngle below.
        mat[:, 3, :3] = xyz
        self._matrix = mat

    def _get_matrix_inverse(self) -> torch.Tensor:
        """
        Return the inverse of self._matrix.
        """
        # Inverting a pure translation just negates the translation row;
        # implemented as an elementwise multiply by a +/-1 mask.
        inv_mask = self._matrix.new_ones([1, 4, 4])
        inv_mask[0, 3, :3] = -1.0
        i_matrix = self._matrix * inv_mask
        return i_matrix

    def __getitem__(
        self, index: Union[int, List[int], slice, torch.BoolTensor, torch.LongTensor]
    ) -> "Transform3d":
        """
        Args:
            index: Specifying the index of the transform to retrieve.
                Can be an int, slice, list of ints, boolean, long tensor.
                Supports negative indices.

        Returns:
            Transform3d object with selected transforms. The tensors are not cloned.
        """
        if isinstance(index, int):
            index = [index]
        # Rebuild a Translate from the selected translation rows.
        return self.__class__(self.get_matrix()[index, 3, :3])


class Scale(Transform3d):
    def __init__(
        self,
        x,
        y=None,
        z=None,
        dtype: torch.dtype = torch.float32,
        device: Optional[Device] = None,
    ) -> None:
        """
        A Transform3d representing a scaling operation, with different scale
        factors along each coordinate axis.

        Option I: Scale(s, dtype=torch.float32, device='cpu')
            s can be one of
                - Python scalar or torch scalar: Single uniform scale
                - 1D torch tensor of shape (N,): A batch of uniform scale
                - 2D torch tensor of shape (N, 3): Scale differently along each axis

        Option II: Scale(x, y, z, dtype=torch.float32, device='cpu')
            Each of x, y, and z can be one of
                - python scalar
                - torch scalar
                - 1D torch tensor
        """
        xyz = _handle_input(x, y, z, dtype, device, "scale", allow_singleton=True)
        super().__init__(device=xyz.device, dtype=dtype)
        N = xyz.shape[0]

        # TODO: Can we do this all in one go somehow?
        mat = torch.eye(4, dtype=dtype, device=self.device)
        mat = mat.view(1, 4, 4).repeat(N, 1, 1)
        mat[:, 0, 0] = xyz[:, 0]
        mat[:, 1, 1] = xyz[:, 1]
        mat[:, 2, 2] = xyz[:, 2]
        self._matrix = mat

    def _get_matrix_inverse(self) -> torch.Tensor:
        """
        Return the inverse of self._matrix.
        """
        # A pure scale matrix is diagonal, so its inverse is the elementwise
        # reciprocal of the diagonal (the homogeneous 1 stays 1).
        xyz = torch.stack([self._matrix[:, i, i] for i in range(4)], dim=1)
        # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`.
        ixyz = 1.0 / xyz
        # pyre-fixme[6]: For 1st param expected `Tensor` but got `float`.
        imat = torch.diag_embed(ixyz, dim1=1, dim2=2)
        return imat

    def __getitem__(
        self, index: Union[int, List[int], slice, torch.BoolTensor, torch.LongTensor]
    ) -> "Transform3d":
        """
        Args:
            index: Specifying the index of the transform to retrieve.
                Can be an int, slice, list of ints, boolean, long tensor.
                Supports negative indices.

        Returns:
            Transform3d object with selected transforms. The tensors are not cloned.
+ """ + if isinstance(index, int): + index = [index] + mat = self.get_matrix()[index] + x = mat[:, 0, 0] + y = mat[:, 1, 1] + z = mat[:, 2, 2] + return self.__class__(x, y, z) + + +class Rotate(Transform3d): + def __init__( + self, + R: torch.Tensor, + dtype: torch.dtype = torch.float32, + device: Optional[Device] = None, + orthogonal_tol: float = 1e-5, + ) -> None: + """ + Create a new Transform3d representing 3D rotation using a rotation + matrix as the input. + + Args: + R: a tensor of shape (3, 3) or (N, 3, 3) + orthogonal_tol: tolerance for the test of the orthogonality of R + + """ + device_ = get_device(R, device) + super().__init__(device=device_, dtype=dtype) + if R.dim() == 2: + R = R[None] + if R.shape[-2:] != (3, 3): + msg = "R must have shape (3, 3) or (N, 3, 3); got %s" + raise ValueError(msg % repr(R.shape)) + R = R.to(device=device_, dtype=dtype) + if os.environ.get("PYTORCH3D_CHECK_ROTATION_MATRICES", "0") == "1": + # Note: aten::all_close in the check is computationally slow, so we + # only run the check when PYTORCH3D_CHECK_ROTATION_MATRICES is on. + _check_valid_rotation_matrix(R, tol=orthogonal_tol) + N = R.shape[0] + mat = torch.eye(4, dtype=dtype, device=device_) + mat = mat.view(1, 4, 4).repeat(N, 1, 1) + mat[:, :3, :3] = R + self._matrix = mat + + def _get_matrix_inverse(self) -> torch.Tensor: + """ + Return the inverse of self._matrix. + """ + return self._matrix.permute(0, 2, 1).contiguous() + + def __getitem__( + self, index: Union[int, List[int], slice, torch.BoolTensor, torch.LongTensor] + ) -> "Transform3d": + """ + Args: + index: Specifying the index of the transform to retrieve. + Can be an int, slice, list of ints, boolean, long tensor. + Supports negative indices. + + Returns: + Transform3d object with selected transforms. The tensors are not cloned. 
+ """ + if isinstance(index, int): + index = [index] + return self.__class__(self.get_matrix()[index, :3, :3]) + + +class RotateAxisAngle(Rotate): + def __init__( + self, + angle, + axis: str = "X", + degrees: bool = True, + dtype: torch.dtype = torch.float32, + device: Optional[Device] = None, + ) -> None: + """ + Create a new Transform3d representing 3D rotation about an axis + by an angle. + + Assuming a right-hand coordinate system, positive rotation angles result + in a counter clockwise rotation. + + Args: + angle: + - A torch tensor of shape (N,) + - A python scalar + - A torch scalar + axis: + string: one of ["X", "Y", "Z"] indicating the axis about which + to rotate. + NOTE: All batch elements are rotated about the same axis. + """ + axis = axis.upper() + if axis not in ["X", "Y", "Z"]: + msg = "Expected axis to be one of ['X', 'Y', 'Z']; got %s" + raise ValueError(msg % axis) + angle = _handle_angle_input(angle, dtype, device, "RotateAxisAngle") + angle = (angle / 180.0 * math.pi) if degrees else angle + # We assume the points on which this transformation will be applied + # are row vectors. The rotation matrix returned from _axis_angle_rotation + # is for transforming column vectors. Therefore we transpose this matrix. + # R will always be of shape (N, 3, 3) + R = _axis_angle_rotation(axis, angle).transpose(1, 2) + super().__init__(device=angle.device, R=R, dtype=dtype) + + +def _handle_coord(c, dtype: torch.dtype, device: torch.device) -> torch.Tensor: + """ + Helper function for _handle_input. 

    Args:
        c: Python scalar, torch scalar, or 1D torch tensor

    Returns:
        c_vec: 1D torch tensor
    """
    if not torch.is_tensor(c):
        c = torch.tensor(c, dtype=dtype, device=device)
    if c.dim() == 0:
        # Promote a 0-dim (scalar) tensor to a 1-element 1D tensor.
        c = c.view(1)
    if c.device != device or c.dtype != dtype:
        c = c.to(device=device, dtype=dtype)
    return c


def _handle_input(
    x,
    y,
    z,
    dtype: torch.dtype,
    device: Optional[Device],
    name: str,
    allow_singleton: bool = False,
) -> torch.Tensor:
    """
    Helper function to handle parsing logic for building transforms. The output
    is always a tensor of shape (N, 3), but there are several types of allowed
    input.

    Case I: Single Matrix
        In this case x is a tensor of shape (N, 3), and y and z are None. Here just
        return x.

    Case II: Vectors and Scalars
        In this case each of x, y, and z can be one of the following
            - Python scalar
            - Torch scalar
            - Torch tensor of shape (N, 1) or (1, 1)
        In this case x, y and z are broadcast to tensors of shape (N, 1)
        and concatenated to a tensor of shape (N, 3)

    Case III: Singleton (only if allow_singleton=True)
        In this case y and z are None, and x can be one of the following:
            - Python scalar
            - Torch scalar
            - Torch tensor of shape (N, 1) or (1, 1)
        Here x will be duplicated 3 times, and we return a tensor of shape (N, 3)

    Returns:
        xyz: Tensor of shape (N, 3)
    """
    device_ = get_device(x, device)
    # If x is actually a tensor of shape (N, 3) then just return it
    if torch.is_tensor(x) and x.dim() == 2:
        if x.shape[1] != 3:
            msg = "Expected tensor of shape (N, 3); got %r (in %s)"
            raise ValueError(msg % (x.shape, name))
        if y is not None or z is not None:
            msg = "Expected y and z to be None (in %s)" % name
            raise ValueError(msg)
        return x.to(device=device_, dtype=dtype)

    if allow_singleton and y is None and z is None:
        # Case III: duplicate the single value across all three axes.
        y = x
        z = x

    # Convert all to 1D tensors
    xyz = [_handle_coord(c, dtype, device_) for c in [x, y, z]]

    # Broadcast and concatenate
    sizes = [c.shape[0] for c in xyz]
    N = max(sizes)
    for c in xyz:
        if c.shape[0] != 1 and c.shape[0] != N:
            msg = "Got non-broadcastable sizes %r (in %s)" % (sizes, name)
            raise ValueError(msg)
    xyz = [c.expand(N) for c in xyz]
    xyz = torch.stack(xyz, dim=1)
    return xyz


def _handle_angle_input(
    x, dtype: torch.dtype, device: Optional[Device], name: str
) -> torch.Tensor:
    """
    Helper function for building a rotation function using angles.
    The output is always of shape (N,).

    The input can be one of:
        - Torch tensor of shape (N,)
        - Python scalar
        - Torch scalar
    """
    device_ = get_device(x, device)
    if torch.is_tensor(x) and x.dim() > 1:
        msg = "Expected tensor of shape (N,); got %r (in %s)"
        raise ValueError(msg % (x.shape, name))
    else:
        # Scalars are promoted to 1-element 1D tensors by _handle_coord.
        return _handle_coord(x, dtype, device_)


def _broadcast_bmm(a, b) -> torch.Tensor:
    """
    Batch multiply two matrices and broadcast if necessary.

    Args:
        a: torch tensor of shape (P, K) or (M, P, K)
        b: torch tensor of shape (N, K, K)

    Returns:
        a and b broadcast multiplied. The output batch dimension is max(N, M).

    To broadcast transforms across a batch dimension if M != N then
    expect that either M = 1 or N = 1. The tensor with batch dimension 1 is
    expanded to have shape N or M.
    """
    if a.dim() == 2:
        a = a[None]
    if len(a) != len(b):
        if not ((len(a) == 1) or (len(b) == 1)):
            msg = "Expected batch dim for bmm to be equal or 1; got %r, %r"
            raise ValueError(msg % (a.shape, b.shape))
        # expand() creates a broadcast view, so no data is copied here.
        if len(a) == 1:
            a = a.expand(len(b), -1, -1)
        if len(b) == 1:
            b = b.expand(len(a), -1, -1)
    return a.bmm(b)


@torch.no_grad()
def _check_valid_rotation_matrix(R, tol: float = 1e-7) -> None:
    """
    Determine if R is a valid rotation matrix by checking it satisfies the
    following conditions:

    ``RR^T = I and det(R) = 1``

    Args:
        R: an (N, 3, 3) matrix

    Returns:
        None

    Emits a warning if R is an invalid rotation matrix.
+ """ + N = R.shape[0] + eye = torch.eye(3, dtype=R.dtype, device=R.device) + eye = eye.view(1, 3, 3).expand(N, -1, -1) + orthogonal = torch.allclose(R.bmm(R.transpose(1, 2)), eye, atol=tol) + det_R = _safe_det_3x3(R) + no_distortion = torch.allclose(det_R, torch.ones_like(det_R)) + if not (orthogonal and no_distortion): + msg = "R is not a valid rotation matrix" + warnings.warn(msg) + return diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/utils/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..504dace63d92a8e4a964fc68e5e7f3e42c027b13 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/utils/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 

# pyre-unsafe

from .camera_conversions import (
    cameras_from_opencv_projection,
    opencv_from_cameras_projection,
    pulsar_from_cameras_projection,
    pulsar_from_opencv_projection,
)

from .checkerboard import checkerboard

from .ico_sphere import ico_sphere

from .torus import torus


# Export every public (non-underscore) name imported above.
__all__ = [k for k in globals().keys() if not k.startswith("_")]
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/utils/camera_conversions.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/utils/camera_conversions.py
new file mode 100644
index 0000000000000000000000000000000000000000..b9fa43be089c64f8924cd8335b5a1c2d4b793b4b
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/utils/camera_conversions.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

from typing import Tuple

import torch

from ..renderer import PerspectiveCameras
from ..renderer.camera_conversions import (
    _cameras_from_opencv_projection,
    _opencv_from_cameras_projection,
    _pulsar_from_cameras_projection,
    _pulsar_from_opencv_projection,
)


def cameras_from_opencv_projection(
    R: torch.Tensor,
    tvec: torch.Tensor,
    camera_matrix: torch.Tensor,
    image_size: torch.Tensor,
) -> PerspectiveCameras:
    """
    Converts a batch of OpenCV-conventioned cameras parametrized with the
    rotation matrices `R`, translation vectors `tvec`, and the camera
    calibration matrices `camera_matrix` to `PerspectiveCameras` in PyTorch3D
    convention.

    More specifically, the conversion is carried out such that a projection
    of a 3D shape to the OpenCV-conventioned screen of size `image_size` results
    in the same image as a projection with the corresponding PyTorch3D camera
    to the NDC screen convention of PyTorch3D.

    More specifically, the OpenCV convention projects points to the OpenCV screen
    space as follows::

        x_screen_opencv = camera_matrix @ (R @ x_world + tvec)

    followed by the homogenization of `x_screen_opencv`.

    Note:
        The parameters `R, tvec, camera_matrix` correspond to the inputs of
        `cv2.projectPoints(x_world, rvec, tvec, camera_matrix, [])`,
        where `rvec` is an axis-angle vector that can be obtained from
        the rotation matrix `R` expected here by calling the `so3_log_map` function.
        Correspondingly, `R` can be obtained from `rvec` by calling `so3_exp_map`.

    Args:
        R: A batch of rotation matrices of shape `(N, 3, 3)`.
        tvec: A batch of translation vectors of shape `(N, 3)`.
        camera_matrix: A batch of camera calibration matrices of shape `(N, 3, 3)`.
        image_size: A tensor of shape `(N, 2)` containing the sizes of the images
            (height, width) attached to each camera.

    Returns:
        cameras_pytorch3d: A batch of `N` cameras in the PyTorch3D convention.
    """
    return _cameras_from_opencv_projection(R, tvec, camera_matrix, image_size)


def opencv_from_cameras_projection(
    cameras: PerspectiveCameras,
    image_size: torch.Tensor,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Converts a batch of `PerspectiveCameras` into OpenCV-convention
    rotation matrices `R`, translation vectors `tvec`, and the camera
    calibration matrices `camera_matrix`. This operation is exactly the inverse
    of `cameras_from_opencv_projection`.

    Note:
        The outputs `R, tvec, camera_matrix` correspond to the inputs of
        `cv2.projectPoints(x_world, rvec, tvec, camera_matrix, [])`,
        where `rvec` is an axis-angle vector that can be obtained from
        the rotation matrix `R` output here by calling the `so3_log_map` function.
        Correspondingly, `R` can be obtained from `rvec` by calling `so3_exp_map`.

    Args:
        cameras: A batch of `N` cameras in the PyTorch3D convention.
        image_size: A tensor of shape `(N, 2)` containing the sizes of the images
            (height, width) attached to each camera.

    Returns:
        R: A batch of rotation matrices of shape `(N, 3, 3)`.
        tvec: A batch of translation vectors of shape `(N, 3)`.
        camera_matrix: A batch of camera calibration matrices of shape `(N, 3, 3)`.
    """
    return _opencv_from_cameras_projection(cameras, image_size)


def pulsar_from_opencv_projection(
    R: torch.Tensor,
    tvec: torch.Tensor,
    camera_matrix: torch.Tensor,
    image_size: torch.Tensor,
    znear: float = 0.1,
) -> torch.Tensor:
    """
    Convert OpenCV style camera parameters to Pulsar style camera parameters.

    Note:
        * Pulsar does NOT support different focal lengths for x and y.
          For conversion, we use the average of fx and fy.
        * The Pulsar renderer MUST use a left-handed coordinate system for this
          mapping to work.
        * The resulting image will be vertically flipped - which has to be
          addressed AFTER rendering by the user.
        * The parameters `R, tvec, camera_matrix` correspond to the outputs
          of `cv2.decomposeProjectionMatrix`.

    Args:
        R: A batch of rotation matrices of shape `(N, 3, 3)`.
        tvec: A batch of translation vectors of shape `(N, 3)`.
        camera_matrix: A batch of camera calibration matrices of shape `(N, 3, 3)`.
        image_size: A tensor of shape `(N, 2)` containing the sizes of the images
            (height, width) attached to each camera.
        znear (float): The near clipping value to use for Pulsar.

    Returns:
        cameras_pulsar: A batch of `N` Pulsar camera vectors in the Pulsar
            convention `(N, 13)` (3 translation, 6 rotation, focal_length, sensor_width,
            c_x, c_y).
    """
    return _pulsar_from_opencv_projection(R, tvec, camera_matrix, image_size, znear)


def pulsar_from_cameras_projection(
    cameras: PerspectiveCameras,
    image_size: torch.Tensor,
) -> torch.Tensor:
    """
    Convert PyTorch3D `PerspectiveCameras` to Pulsar style camera parameters.

    Note:
        * Pulsar does NOT support different focal lengths for x and y.
          For conversion, we use the average of fx and fy.
        * The Pulsar renderer MUST use a left-handed coordinate system for this
          mapping to work.
        * The resulting image will be vertically flipped - which has to be
          addressed AFTER rendering by the user.

    Args:
        cameras: A batch of `N` cameras in the PyTorch3D convention.
        image_size: A tensor of shape `(N, 2)` containing the sizes of the images
            (height, width) attached to each camera.

    Returns:
        cameras_pulsar: A batch of `N` Pulsar camera vectors in the Pulsar
            convention `(N, 13)` (3 translation, 6 rotation, focal_length, sensor_width,
            c_x, c_y).
    """
    # Thin public wrapper; the conversion logic lives in the private renderer
    # helper imported at the top of this module.
    return _pulsar_from_cameras_projection(cameras, image_size)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/utils/checkerboard.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/utils/checkerboard.py
new file mode 100644
index 0000000000000000000000000000000000000000..a0d53fd9b3c56bc0f9bccb0424c23b76e16410e0
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/utils/checkerboard.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
+# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + + +from typing import Optional, Tuple + +import torch +from pytorch3d.common.compat import meshgrid_ij +from pytorch3d.renderer.mesh.textures import TexturesAtlas +from pytorch3d.structures.meshes import Meshes + + +def checkerboard( + radius: int = 4, + color1: Tuple[float, ...] = (0.0, 0.0, 0.0), + color2: Tuple[float, ...] = (1.0, 1.0, 1.0), + device: Optional[torch.types._device] = None, +) -> Meshes: + """ + Returns a mesh of squares in the xy-plane where each unit is one of the two given + colors and adjacent squares have opposite colors. + Args: + radius: how many squares in each direction from the origin + color1: background color + color2: foreground color (must have the same number of channels as color1) + Returns: + new Meshes object containing one mesh. + """ + + if device is None: + device = torch.device("cpu") + if radius < 1: + raise ValueError("radius must be > 0") + + num_verts_per_row = 2 * radius + 1 + + # construct 2D grid of 3D vertices + x = torch.arange(-radius, radius + 1, device=device) + grid_y, grid_x = meshgrid_ij(x, x) + verts = torch.stack( + [grid_x, grid_y, torch.zeros((2 * radius + 1, 2 * radius + 1))], dim=-1 + ) + verts = verts.view(1, -1, 3) + + top_triangle_idx = torch.arange(0, num_verts_per_row * (num_verts_per_row - 1)) + top_triangle_idx = torch.stack( + [ + top_triangle_idx, + top_triangle_idx + 1, + top_triangle_idx + num_verts_per_row + 1, + ], + dim=-1, + ) + + bottom_triangle_idx = top_triangle_idx[:, [0, 2, 1]] + torch.tensor( + [0, 0, num_verts_per_row - 1] + ) + + faces = torch.zeros( + (1, len(top_triangle_idx) + len(bottom_triangle_idx), 3), + dtype=torch.long, + device=device, + ) + faces[0, ::2] = top_triangle_idx + faces[0, 1::2] = bottom_triangle_idx + + # construct range of indices that excludes the boundary to avoid wrong triangles + indexing_range = 
torch.arange(0, 2 * num_verts_per_row * num_verts_per_row).view( + num_verts_per_row, num_verts_per_row, 2 + ) + indexing_range = indexing_range[:-1, :-1] # removes boundaries from list of indices + indexing_range = indexing_range.reshape( + 2 * (num_verts_per_row - 1) * (num_verts_per_row - 1) + ) + + faces = faces[:, indexing_range] + + # adding color + colors = torch.tensor(color1).repeat(2 * num_verts_per_row * num_verts_per_row, 1) + colors[2::4] = torch.tensor(color2) + colors[3::4] = torch.tensor(color2) + colors = colors[None, indexing_range, None, None] + + texture_atlas = TexturesAtlas(colors) + + return Meshes(verts=verts, faces=faces, textures=texture_atlas) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/utils/ico_sphere.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/utils/ico_sphere.py new file mode 100644 index 0000000000000000000000000000000000000000..bc7ca24fd4aac046ebc5aa949b7268ce418a1b28 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/utils/ico_sphere.py @@ -0,0 +1,86 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + + +import torch +from pytorch3d.ops.subdivide_meshes import SubdivideMeshes +from pytorch3d.structures.meshes import Meshes + + +# Vertex coordinates for a level 0 ico-sphere. 
_ico_verts0 = [
    [-0.5257, 0.8507, 0.0000],
    [0.5257, 0.8507, 0.0000],
    [-0.5257, -0.8507, 0.0000],
    [0.5257, -0.8507, 0.0000],
    [0.0000, -0.5257, 0.8507],
    [0.0000, 0.5257, 0.8507],
    [0.0000, -0.5257, -0.8507],
    [0.0000, 0.5257, -0.8507],
    [0.8507, 0.0000, -0.5257],
    [0.8507, 0.0000, 0.5257],
    [-0.8507, 0.0000, -0.5257],
    [-0.8507, 0.0000, 0.5257],
]


# Faces for level 0 ico-sphere
_ico_faces0 = [
    [0, 11, 5],
    [0, 5, 1],
    [0, 1, 7],
    [0, 7, 10],
    [0, 10, 11],
    [1, 5, 9],
    [5, 11, 4],
    [11, 10, 2],
    [10, 7, 6],
    [7, 1, 8],
    [3, 9, 4],
    [3, 4, 2],
    [3, 2, 6],
    [3, 6, 8],
    [3, 8, 9],
    [4, 9, 5],
    [2, 4, 11],
    [6, 2, 10],
    [8, 6, 7],
    [9, 8, 1],
]


def ico_sphere(level: int = 0, device=None):
    """
    Create verts and faces for a unit ico-sphere, with all faces oriented
    consistently.

    Args:
        level: integer specifying the number of iterations for subdivision
            of the mesh faces. Each additional level will result in four new
            faces per face.
        device: A torch.device object on which the outputs will be allocated.

    Returns:
        Meshes object with verts and faces.
    """
    if device is None:
        device = torch.device("cpu")
    if level < 0:
        raise ValueError("level must be >= 0.")
    if level == 0:
        verts = torch.tensor(_ico_verts0, dtype=torch.float32, device=device)
        faces = torch.tensor(_ico_faces0, dtype=torch.int64, device=device)

    else:
        # Recursively build the previous level, subdivide once, then
        # re-project the new vertices back onto the unit sphere.
        mesh = ico_sphere(level - 1, device)
        subdivide = SubdivideMeshes()
        mesh = subdivide(mesh)
        verts = mesh.verts_list()[0]
        verts /= verts.norm(p=2, dim=1, keepdim=True)
        faces = mesh.faces_list()[0]
    return Meshes(verts=[verts], faces=[faces])
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/utils/torus.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/utils/torus.py
new file mode 100644
index 0000000000000000000000000000000000000000..853cd895c998e6b762ce2ae25414ede27e54c7e1
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/extern/pytorch3d/pytorch3d/utils/torus.py
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

from itertools import tee
from math import cos, pi, sin
from typing import Iterator, Optional, Tuple

import torch
from pytorch3d.structures.meshes import Meshes


# Make an iterator over the adjacent pairs: (-1, 0), (0, 1), ..., (N - 2, N - 1)
def _make_pair_range(N: int) -> Iterator[Tuple[int, int]]:
    i, j = tee(range(-1, N))
    next(j, None)
    return zip(i, j)


def torus(
    r: float, R: float, sides: int, rings: int, device: Optional[torch.device] = None
) -> Meshes:
    """
    Create vertices and faces for a torus.

    Args:
        r: Inner radius of the torus.
        R: Outer radius of the torus.
        sides: Number of inner divisions.
        rings: Number of outer divisions.
        device: Device on which the outputs will be allocated.

    Returns:
        Meshes object with the generated vertices and faces.
    """
    if not (sides > 0):
        raise ValueError("sides must be > 0.")
    if not (rings > 0):
        raise ValueError("rings must be > 0.")
    device = device if device else torch.device("cpu")

    verts = []
    for i in range(rings):
        # phi ranges from 0 to 2 pi (rings - 1) / rings
        phi = 2 * pi * i / rings
        for j in range(sides):
            # theta ranges from 0 to 2 pi (sides - 1) / sides
            theta = 2 * pi * j / sides
            x = (R + r * cos(theta)) * cos(phi)
            y = (R + r * cos(theta)) * sin(phi)
            z = r * sin(theta)
            # This vertex has index i * sides + j
            verts.append([x, y, z])

    faces = []
    for i0, i1 in _make_pair_range(rings):
        index0 = (i0 % rings) * sides
        index1 = (i1 % rings) * sides
        for j0, j1 in _make_pair_range(sides):
            index00 = index0 + (j0 % sides)
            index01 = index0 + (j1 % sides)
            index10 = index1 + (j0 % sides)
            index11 = index1 + (j1 % sides)
            # Each quad of the (rings x sides) grid is split into two triangles.
            faces.append([index00, index10, index11])
            faces.append([index11, index01, index00])

    verts_list = [torch.tensor(verts, dtype=torch.float32, device=device)]
    faces_list = [torch.tensor(faces, dtype=torch.int64, device=device)]
    return Meshes(verts_list, faces_list)