diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/overfit_base.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/overfit_base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d5cc0cccbad843c750a4653c3b72fbe90a57547e --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/overfit_base.yaml @@ -0,0 +1,79 @@ +defaults: +- default_config +- _self_ +exp_dir: ./data/exps/overfit_base/ +training_loop_ImplicitronTrainingLoop_args: + visdom_port: 8097 + visualize_interval: 0 + max_epochs: 1000 +data_source_ImplicitronDataSource_args: + data_loader_map_provider_class_type: SequenceDataLoaderMapProvider + dataset_map_provider_class_type: JsonIndexDatasetMapProvider + data_loader_map_provider_SequenceDataLoaderMapProvider_args: + dataset_length_train: 1000 + dataset_length_val: 1 + num_workers: 8 + dataset_map_provider_JsonIndexDatasetMapProvider_args: + dataset_root: ${oc.env:CO3D_DATASET_ROOT} + n_frames_per_sequence: -1 + test_on_train: true + test_restrict_sequence_id: 0 + dataset_JsonIndexDataset_args: + load_point_clouds: false + mask_depths: false + mask_images: false +model_factory_ImplicitronModelFactory_args: + model_class_type: "OverfitModel" + model_OverfitModel_args: + loss_weights: + loss_mask_bce: 1.0 + loss_prev_stage_mask_bce: 1.0 + loss_autodecoder_norm: 0.01 + loss_rgb_mse: 1.0 + loss_prev_stage_rgb_mse: 1.0 + output_rasterized_mc: false + chunk_size_grid: 102400 + render_image_height: 400 + render_image_width: 400 + share_implicit_function_across_passes: false + implicit_function_class_type: "NeuralRadianceFieldImplicitFunction" + implicit_function_NeuralRadianceFieldImplicitFunction_args: + n_harmonic_functions_xyz: 10 + n_harmonic_functions_dir: 4 + n_hidden_neurons_xyz: 256 + n_hidden_neurons_dir: 128 + 
n_layers_xyz: 8 + append_xyz: + - 5 + coarse_implicit_function_class_type: "NeuralRadianceFieldImplicitFunction" + coarse_implicit_function_NeuralRadianceFieldImplicitFunction_args: + n_harmonic_functions_xyz: 10 + n_harmonic_functions_dir: 4 + n_hidden_neurons_xyz: 256 + n_hidden_neurons_dir: 128 + n_layers_xyz: 8 + append_xyz: + - 5 + raysampler_AdaptiveRaySampler_args: + n_rays_per_image_sampled_from_mask: 1024 + scene_extent: 8.0 + n_pts_per_ray_training: 64 + n_pts_per_ray_evaluation: 64 + stratified_point_sampling_training: true + stratified_point_sampling_evaluation: false + renderer_MultiPassEmissionAbsorptionRenderer_args: + n_pts_per_ray_fine_training: 64 + n_pts_per_ray_fine_evaluation: 64 + append_coarse_samples_to_fine: true + density_noise_std_train: 1.0 +optimizer_factory_ImplicitronOptimizerFactory_args: + breed: Adam + weight_decay: 0.0 + lr_policy: MultiStepLR + multistep_lr_milestones: [] + lr: 0.0005 + gamma: 0.1 + momentum: 0.9 + betas: + - 0.9 + - 0.999 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/overfit_singleseq_base.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/overfit_singleseq_base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0349fd27a1ab25d7155f1d05c6258545acd6a5f7 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/overfit_singleseq_base.yaml @@ -0,0 +1,42 @@ +defaults: +- overfit_base +- _self_ +data_source_ImplicitronDataSource_args: + data_loader_map_provider_SequenceDataLoaderMapProvider_args: + batch_size: 1 + dataset_length_train: 1000 + dataset_length_val: 1 + num_workers: 8 + dataset_map_provider_JsonIndexDatasetMapProvider_args: + assert_single_seq: true + n_frames_per_sequence: -1 + test_restrict_sequence_id: 0 + test_on_train: false 
+model_factory_ImplicitronModelFactory_args: + model_class_type: "OverfitModel" + model_OverfitModel_args: + render_image_height: 800 + render_image_width: 800 + log_vars: + - loss_rgb_psnr_fg + - loss_rgb_psnr + - loss_eikonal + - loss_prev_stage_rgb_psnr + - loss_mask_bce + - loss_prev_stage_mask_bce + - loss_rgb_mse + - loss_prev_stage_rgb_mse + - loss_depth_abs + - loss_depth_abs_fg + - loss_kl + - loss_mask_neg_iou + - objective + - epoch + - sec/it +optimizer_factory_ImplicitronOptimizerFactory_args: + lr: 0.0005 + multistep_lr_milestones: + - 200 + - 300 +training_loop_ImplicitronTrainingLoop_args: + max_epochs: 400 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/overfit_singleseq_nerf_blender.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/overfit_singleseq_nerf_blender.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c61d759f382beb27da12d8e9655599f367161fd9 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/overfit_singleseq_nerf_blender.yaml @@ -0,0 +1,56 @@ +defaults: +- overfit_singleseq_base +- _self_ +exp_dir: "./data/overfit_nerf_blender_repro/${oc.env:BLENDER_SINGLESEQ_CLASS}" +data_source_ImplicitronDataSource_args: + data_loader_map_provider_SequenceDataLoaderMapProvider_args: + dataset_length_train: 100 + dataset_map_provider_class_type: BlenderDatasetMapProvider + dataset_map_provider_BlenderDatasetMapProvider_args: + base_dir: ${oc.env:BLENDER_DATASET_ROOT}/${oc.env:BLENDER_SINGLESEQ_CLASS} + n_known_frames_for_test: null + object_name: ${oc.env:BLENDER_SINGLESEQ_CLASS} + path_manager_factory_class_type: PathManagerFactory + path_manager_factory_PathManagerFactory_args: + silence_logs: true + +model_factory_ImplicitronModelFactory_args: + model_class_type: "OverfitModel" + 
model_OverfitModel_args: + mask_images: false + raysampler_class_type: AdaptiveRaySampler + raysampler_AdaptiveRaySampler_args: + n_pts_per_ray_training: 64 + n_pts_per_ray_evaluation: 64 + n_rays_per_image_sampled_from_mask: 4096 + stratified_point_sampling_training: true + stratified_point_sampling_evaluation: false + scene_extent: 2.0 + scene_center: + - 0.0 + - 0.0 + - 0.0 + renderer_MultiPassEmissionAbsorptionRenderer_args: + density_noise_std_train: 0.0 + n_pts_per_ray_fine_training: 128 + n_pts_per_ray_fine_evaluation: 128 + raymarcher_EmissionAbsorptionRaymarcher_args: + blend_output: false + loss_weights: + loss_rgb_mse: 1.0 + loss_prev_stage_rgb_mse: 1.0 + loss_mask_bce: 0.0 + loss_prev_stage_mask_bce: 0.0 + loss_autodecoder_norm: 0.00 + +optimizer_factory_ImplicitronOptimizerFactory_args: + exponential_lr_step_size: 3001 + lr_policy: LinearExponential + linear_exponential_lr_milestone: 200 + +training_loop_ImplicitronTrainingLoop_args: + max_epochs: 6000 + metric_print_interval: 10 + store_checkpoints_purge: 3 + test_when_finished: true + validation_interval: 100 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_base.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9d6af2608fe23be8924a354e3cf5f20d690bdac9 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_base.yaml @@ -0,0 +1,80 @@ +defaults: +- default_config +- _self_ +exp_dir: ./data/exps/base/ +training_loop_ImplicitronTrainingLoop_args: + visdom_port: 8097 + visualize_interval: 0 + max_epochs: 1000 +data_source_ImplicitronDataSource_args: + data_loader_map_provider_class_type: SequenceDataLoaderMapProvider + dataset_map_provider_class_type: JsonIndexDatasetMapProvider + 
data_loader_map_provider_SequenceDataLoaderMapProvider_args: + dataset_length_train: 1000 + dataset_length_val: 1 + num_workers: 8 + dataset_map_provider_JsonIndexDatasetMapProvider_args: + dataset_root: ${oc.env:CO3D_DATASET_ROOT} + n_frames_per_sequence: -1 + test_on_train: true + test_restrict_sequence_id: 0 + dataset_JsonIndexDataset_args: + load_point_clouds: false + mask_depths: false + mask_images: false +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + loss_weights: + loss_mask_bce: 1.0 + loss_prev_stage_mask_bce: 1.0 + loss_autodecoder_norm: 0.01 + loss_rgb_mse: 1.0 + loss_prev_stage_rgb_mse: 1.0 + output_rasterized_mc: false + chunk_size_grid: 102400 + render_image_height: 400 + render_image_width: 400 + num_passes: 2 + implicit_function_NeuralRadianceFieldImplicitFunction_args: + n_harmonic_functions_xyz: 10 + n_harmonic_functions_dir: 4 + n_hidden_neurons_xyz: 256 + n_hidden_neurons_dir: 128 + n_layers_xyz: 8 + append_xyz: + - 5 + raysampler_AdaptiveRaySampler_args: + n_rays_per_image_sampled_from_mask: 1024 + scene_extent: 8.0 + n_pts_per_ray_training: 64 + n_pts_per_ray_evaluation: 64 + stratified_point_sampling_training: true + stratified_point_sampling_evaluation: false + renderer_MultiPassEmissionAbsorptionRenderer_args: + n_pts_per_ray_fine_training: 64 + n_pts_per_ray_fine_evaluation: 64 + append_coarse_samples_to_fine: true + density_noise_std_train: 1.0 + view_pooler_args: + view_sampler_args: + masked_sampling: false + image_feature_extractor_ResNetFeatureExtractor_args: + stages: + - 1 + - 2 + - 3 + - 4 + proj_dim: 16 + image_rescale: 0.32 + first_max_pool: false +optimizer_factory_ImplicitronOptimizerFactory_args: + breed: Adam + weight_decay: 0.0 + lr_policy: MultiStepLR + multistep_lr_milestones: [] + lr: 0.0005 + gamma: 0.1 + momentum: 0.9 + betas: + - 0.9 + - 0.999 diff --git 
a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_normed.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_normed.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b2154c8bfa130d90073f70b7d54ac540a9e557ef --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_normed.yaml @@ -0,0 +1,18 @@ +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + image_feature_extractor_class_type: ResNetFeatureExtractor + image_feature_extractor_ResNetFeatureExtractor_args: + add_images: true + add_masks: true + first_max_pool: true + image_rescale: 0.375 + l2_norm: true + name: resnet34 + normalize_image: true + pretrained: true + stages: + - 1 + - 2 + - 3 + - 4 + proj_dim: 32 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_transformer.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_transformer.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8d24495bbb15ad8d8770dadf5147ec49d2706b08 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_transformer.yaml @@ -0,0 +1,18 @@ +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + image_feature_extractor_class_type: ResNetFeatureExtractor + image_feature_extractor_ResNetFeatureExtractor_args: + add_images: true + add_masks: true + first_max_pool: false + image_rescale: 0.375 + l2_norm: true + name: resnet34 + normalize_image: true + pretrained: true + stages: + - 1 + - 2 + - 3 + - 4 + proj_dim: 16 diff 
--git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_unnormed.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_unnormed.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2d4eb3f861089e96bf63b9b0bced5bed7943f134 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_feat_extractor_unnormed.yaml @@ -0,0 +1,19 @@ +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + image_feature_extractor_class_type: ResNetFeatureExtractor + image_feature_extractor_ResNetFeatureExtractor_args: + stages: + - 1 + - 2 + - 3 + first_max_pool: false + proj_dim: -1 + l2_norm: false + image_rescale: 0.375 + name: resnet34 + normalize_image: true + pretrained: true + view_pooler_args: + feature_aggregator_AngleWeightedReductionFeatureAggregator_args: + reduction_functions: + - AVG diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_base.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..578fe1a2ccfef253ed268fc84eaf202a1c88c91c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_base.yaml @@ -0,0 +1,38 @@ +defaults: +- repro_base.yaml +- _self_ +data_source_ImplicitronDataSource_args: + data_loader_map_provider_SequenceDataLoaderMapProvider_args: + batch_size: 10 + dataset_length_train: 1000 + dataset_length_val: 1 + num_workers: 8 + train_conditioning_type: SAME + val_conditioning_type: SAME + test_conditioning_type: SAME + images_per_seq_options: + - 2 
+ - 3 + - 4 + - 5 + - 6 + - 7 + - 8 + - 9 + - 10 + dataset_map_provider_JsonIndexDatasetMapProvider_args: + assert_single_seq: false + task_str: multisequence + n_frames_per_sequence: -1 + test_on_train: true + test_restrict_sequence_id: 0 +optimizer_factory_ImplicitronOptimizerFactory_args: + multistep_lr_milestones: + - 1000 +training_loop_ImplicitronTrainingLoop_args: + max_epochs: 3000 + evaluator_ImplicitronEvaluator_args: + camera_difficulty_bin_breaks: + - 0.666667 + - 0.833334 + is_multisequence: true diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_co3dv2_base.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_co3dv2_base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9eb9bd9030a5fbc0b48006416137762d89ac2757 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_co3dv2_base.yaml @@ -0,0 +1,8 @@ +data_source_ImplicitronDataSource_args: + dataset_map_provider_class_type: JsonIndexDatasetMapProviderV2 + dataset_map_provider_JsonIndexDatasetMapProviderV2_args: + category: teddybear + subset_name: fewview_dev +training_loop_ImplicitronTrainingLoop_args: + evaluator_ImplicitronEvaluator_args: + is_multisequence: true diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_idr_ad.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_idr_ad.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f6bb1fe40ca47fb9456b74932e380b43a97e8d43 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_idr_ad.yaml @@ 
-0,0 +1,65 @@ +defaults: +- repro_multiseq_base.yaml +- _self_ +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + loss_weights: + loss_mask_bce: 100.0 + loss_kl: 0.0 + loss_rgb_mse: 1.0 + loss_eikonal: 0.1 + chunk_size_grid: 65536 + num_passes: 1 + output_rasterized_mc: true + sampling_mode_training: mask_sample + global_encoder_class_type: SequenceAutodecoder + global_encoder_SequenceAutodecoder_args: + autodecoder_args: + n_instances: 20000 + init_scale: 1.0 + encoding_dim: 256 + implicit_function_IdrFeatureField_args: + n_harmonic_functions_xyz: 6 + bias: 0.6 + d_in: 3 + d_out: 1 + dims: + - 512 + - 512 + - 512 + - 512 + - 512 + - 512 + - 512 + - 512 + geometric_init: true + pooled_feature_dim: 0 + skip_in: + - 6 + weight_norm: true + renderer_SignedDistanceFunctionRenderer_args: + ray_tracer_args: + line_search_step: 0.5 + line_step_iters: 3 + n_secant_steps: 8 + n_steps: 100 + sdf_threshold: 5.0e-05 + ray_normal_coloring_network_args: + d_in: 9 + d_out: 3 + dims: + - 512 + - 512 + - 512 + - 512 + mode: idr + n_harmonic_functions_dir: 4 + pooled_feature_dim: 0 + weight_norm: true + raysampler_AdaptiveRaySampler_args: + n_rays_per_image_sampled_from_mask: 1024 + n_pts_per_ray_training: 0 + n_pts_per_ray_evaluation: 0 + scene_extent: 8.0 + renderer_class_type: SignedDistanceFunctionRenderer + implicit_function_class_type: IdrFeatureField diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerf_ad.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerf_ad.yaml new file mode 100644 index 0000000000000000000000000000000000000000..aa4291d3503cd731255a364db19f82b6f707f729 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerf_ad.yaml @@ -0,0 +1,12 @@ +defaults: +- 
repro_multiseq_base.yaml +- _self_ +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + chunk_size_grid: 16000 + view_pooler_enabled: false + global_encoder_class_type: SequenceAutodecoder + global_encoder_SequenceAutodecoder_args: + autodecoder_args: + n_instances: 20000 + encoding_dim: 256 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerf_wce.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerf_wce.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fa366d46ac4a2f09a437cf2632e5735aee34d5fa --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerf_wce.yaml @@ -0,0 +1,12 @@ +defaults: +- repro_multiseq_base.yaml +- repro_feat_extractor_unnormed.yaml +- _self_ +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + chunk_size_grid: 16000 + view_pooler_enabled: true + raysampler_AdaptiveRaySampler_args: + n_rays_per_image_sampled_from_mask: 850 +training_loop_ImplicitronTrainingLoop_args: + clip_grad: 1.0 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerformer.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerformer.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9aa9f4c5fd0839bc4e3c6fc74f3db3190d559fb5 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerformer.yaml @@ -0,0 +1,18 @@ +defaults: +- repro_multiseq_base.yaml +- repro_feat_extractor_transformer.yaml +- _self_ +model_factory_ImplicitronModelFactory_args: + 
model_GenericModel_args: + chunk_size_grid: 16000 + raysampler_AdaptiveRaySampler_args: + n_rays_per_image_sampled_from_mask: 800 + n_pts_per_ray_training: 32 + n_pts_per_ray_evaluation: 32 + renderer_MultiPassEmissionAbsorptionRenderer_args: + n_pts_per_ray_fine_training: 16 + n_pts_per_ray_fine_evaluation: 16 + implicit_function_class_type: NeRFormerImplicitFunction + view_pooler_enabled: true + view_pooler_args: + feature_aggregator_class_type: IdentityFeatureAggregator diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerformer_angle_w.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerformer_angle_w.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9c9a30fe79dd25afded6cffb80c29610a45803c0 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_nerformer_angle_w.yaml @@ -0,0 +1,7 @@ +defaults: +- repro_multiseq_nerformer.yaml +- _self_ +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + view_pooler_args: + feature_aggregator_class_type: AngleWeightedIdentityFeatureAggregator diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_ad_hypernet.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_ad_hypernet.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1b4a2ef2d17d5a7a2d868b1603c996e2fb3ad7b2 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_ad_hypernet.yaml @@ -0,0 +1,35 @@ +defaults: +- repro_multiseq_base.yaml +- _self_ 
+model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + chunk_size_grid: 16000 + view_pooler_enabled: false + n_train_target_views: -1 + num_passes: 1 + loss_weights: + loss_rgb_mse: 200.0 + loss_prev_stage_rgb_mse: 0.0 + loss_mask_bce: 1.0 + loss_prev_stage_mask_bce: 0.0 + loss_autodecoder_norm: 0.001 + depth_neg_penalty: 10000.0 + global_encoder_class_type: SequenceAutodecoder + global_encoder_SequenceAutodecoder_args: + autodecoder_args: + encoding_dim: 256 + n_instances: 20000 + raysampler_class_type: NearFarRaySampler + raysampler_NearFarRaySampler_args: + n_rays_per_image_sampled_from_mask: 2048 + min_depth: 0.05 + max_depth: 0.05 + n_pts_per_ray_training: 1 + n_pts_per_ray_evaluation: 1 + stratified_point_sampling_training: false + stratified_point_sampling_evaluation: false + renderer_class_type: LSTMRenderer + implicit_function_class_type: SRNHyperNetImplicitFunction +optimizer_factory_ImplicitronOptimizerFactory_args: + breed: Adam + lr: 5.0e-05 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_ad_hypernet_noharm.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_ad_hypernet_noharm.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9f29cbbe82ede4f4610949849433a67f91aff07f --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_ad_hypernet_noharm.yaml @@ -0,0 +1,11 @@ +defaults: +- repro_multiseq_srn_ad_hypernet.yaml +- _self_ +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + num_passes: 1 + implicit_function_SRNHyperNetImplicitFunction_args: + pixel_generator_args: + n_harmonic_functions: 0 + hypernet_args: + n_harmonic_functions: 0 diff --git 
a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_wce.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_wce.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4a72c32621d063276a2b765d34e1edd707c87eac --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_wce.yaml @@ -0,0 +1,31 @@ +defaults: +- repro_multiseq_base.yaml +- repro_feat_extractor_normed.yaml +- _self_ +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + chunk_size_grid: 32000 + num_passes: 1 + n_train_target_views: -1 + loss_weights: + loss_rgb_mse: 200.0 + loss_prev_stage_rgb_mse: 0.0 + loss_mask_bce: 1.0 + loss_prev_stage_mask_bce: 0.0 + loss_autodecoder_norm: 0.0 + depth_neg_penalty: 10000.0 + raysampler_class_type: NearFarRaySampler + raysampler_NearFarRaySampler_args: + n_rays_per_image_sampled_from_mask: 2048 + min_depth: 0.05 + max_depth: 0.05 + n_pts_per_ray_training: 1 + n_pts_per_ray_evaluation: 1 + stratified_point_sampling_training: false + stratified_point_sampling_evaluation: false + renderer_class_type: LSTMRenderer + implicit_function_class_type: SRNImplicitFunction + view_pooler_enabled: true +optimizer_factory_ImplicitronOptimizerFactory_args: + breed: Adam + lr: 5.0e-05 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_wce_noharm.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_wce_noharm.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d2ea11e367e6b169895546286c80c939724a4754 --- /dev/null +++ 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_srn_wce_noharm.yaml @@ -0,0 +1,11 @@ +defaults: +- repro_multiseq_srn_wce.yaml +- _self_ +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + num_passes: 1 + implicit_function_SRNImplicitFunction_args: + pixel_generator_args: + n_harmonic_functions: 0 + raymarch_function_args: + n_harmonic_functions: 0 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_nerf_wce.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_nerf_wce.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0f3ac0553a9a05574626c1228873cd8ac370ec5a --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_nerf_wce.yaml @@ -0,0 +1,4 @@ +defaults: +- repro_multiseq_nerf_wce.yaml +- repro_multiseq_co3dv2_base.yaml +- _self_ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_nerformer.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_nerformer.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ee7ef332310d444b377798faaf7b67e8575d5b0f --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_nerformer.yaml @@ -0,0 +1,4 @@ +defaults: +- repro_multiseq_nerformer.yaml +- repro_multiseq_co3dv2_base.yaml +- _self_ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_srn_ad_hypernet.yaml 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_srn_ad_hypernet.yaml new file mode 100644 index 0000000000000000000000000000000000000000..bdb544f3217e329a8940b117ceb2f47cdc501692 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_srn_ad_hypernet.yaml @@ -0,0 +1,4 @@ +defaults: +- repro_multiseq_srn_ad_hypernet.yaml +- repro_multiseq_co3dv2_base.yaml +- _self_ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_srn_wce.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_srn_wce.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b8ae36746035bc35c93867fc01399c61476e14a6 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_multiseq_v2_srn_wce.yaml @@ -0,0 +1,4 @@ +defaults: +- repro_multiseq_srn_wce.yaml +- repro_multiseq_co3dv2_base.yaml +- _self_ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_base.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..572fc7d5e71323f61c9b099c56b7f7aeb900b614 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_base.yaml @@ -0,0 +1,41 @@ +defaults: +- repro_base +- _self_ +data_source_ImplicitronDataSource_args: + data_loader_map_provider_SequenceDataLoaderMapProvider_args: + batch_size: 1 + dataset_length_train: 1000 + 
dataset_length_val: 1 + num_workers: 8 + dataset_map_provider_JsonIndexDatasetMapProvider_args: + assert_single_seq: true + n_frames_per_sequence: -1 + test_restrict_sequence_id: 0 + test_on_train: false +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + render_image_height: 800 + render_image_width: 800 + log_vars: + - loss_rgb_psnr_fg + - loss_rgb_psnr + - loss_eikonal + - loss_prev_stage_rgb_psnr + - loss_mask_bce + - loss_prev_stage_mask_bce + - loss_rgb_mse + - loss_prev_stage_rgb_mse + - loss_depth_abs + - loss_depth_abs_fg + - loss_kl + - loss_mask_neg_iou + - objective + - epoch + - sec/it +optimizer_factory_ImplicitronOptimizerFactory_args: + lr: 0.0005 + multistep_lr_milestones: + - 200 + - 300 +training_loop_ImplicitronTrainingLoop_args: + max_epochs: 400 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_co3dv2_base.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_co3dv2_base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..54e1e2a42037013e0a55f8ad13ca11973d68d6b7 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_co3dv2_base.yaml @@ -0,0 +1,8 @@ +data_source_ImplicitronDataSource_args: + dataset_map_provider_class_type: JsonIndexDatasetMapProviderV2 + dataset_map_provider_JsonIndexDatasetMapProviderV2_args: + category: teddybear + subset_name: manyview_dev_0 +training_loop_ImplicitronTrainingLoop_args: + evaluator_ImplicitronEvaluator_args: + is_multisequence: false diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_idr.yaml 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_idr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7224b9d5d9cecd791262a50dde5432cac0d7ed88 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_idr.yaml @@ -0,0 +1,57 @@ +defaults: +- repro_singleseq_base +- _self_ +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + loss_weights: + loss_mask_bce: 100.0 + loss_kl: 0.0 + loss_rgb_mse: 1.0 + loss_eikonal: 0.1 + chunk_size_grid: 65536 + num_passes: 1 + view_pooler_enabled: false + implicit_function_IdrFeatureField_args: + n_harmonic_functions_xyz: 6 + bias: 0.6 + d_in: 3 + d_out: 1 + dims: + - 512 + - 512 + - 512 + - 512 + - 512 + - 512 + - 512 + - 512 + geometric_init: true + pooled_feature_dim: 0 + skip_in: + - 6 + weight_norm: true + renderer_SignedDistanceFunctionRenderer_args: + ray_tracer_args: + line_search_step: 0.5 + line_step_iters: 3 + n_secant_steps: 8 + n_steps: 100 + sdf_threshold: 5.0e-05 + ray_normal_coloring_network_args: + d_in: 9 + d_out: 3 + dims: + - 512 + - 512 + - 512 + - 512 + mode: idr + n_harmonic_functions_dir: 4 + pooled_feature_dim: 0 + weight_norm: true + raysampler_AdaptiveRaySampler_args: + n_rays_per_image_sampled_from_mask: 1024 + n_pts_per_ray_training: 0 + n_pts_per_ray_evaluation: 0 + renderer_class_type: SignedDistanceFunctionRenderer + implicit_function_class_type: IdrFeatureField diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fd85af5e7af23f5acd2abec6dae3255e7087cd7c --- /dev/null +++ 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf.yaml @@ -0,0 +1,3 @@ +defaults: +- repro_singleseq_base +- _self_ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf_blender.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf_blender.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2a92a92c1f20ea48a2b655211655dafa4e894c23 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf_blender.yaml @@ -0,0 +1,55 @@ +defaults: +- repro_singleseq_base +- _self_ +exp_dir: "./data/nerf_blender_repro/${oc.env:BLENDER_SINGLESEQ_CLASS}" +data_source_ImplicitronDataSource_args: + data_loader_map_provider_SequenceDataLoaderMapProvider_args: + dataset_length_train: 100 + dataset_map_provider_class_type: BlenderDatasetMapProvider + dataset_map_provider_BlenderDatasetMapProvider_args: + base_dir: ${oc.env:BLENDER_DATASET_ROOT}/${oc.env:BLENDER_SINGLESEQ_CLASS} + n_known_frames_for_test: null + object_name: ${oc.env:BLENDER_SINGLESEQ_CLASS} + path_manager_factory_class_type: PathManagerFactory + path_manager_factory_PathManagerFactory_args: + silence_logs: true + +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + mask_images: false + raysampler_class_type: AdaptiveRaySampler + raysampler_AdaptiveRaySampler_args: + n_pts_per_ray_training: 64 + n_pts_per_ray_evaluation: 64 + n_rays_per_image_sampled_from_mask: 4096 + stratified_point_sampling_training: true + stratified_point_sampling_evaluation: false + scene_extent: 2.0 + scene_center: + - 0.0 + - 0.0 + - 0.0 + renderer_MultiPassEmissionAbsorptionRenderer_args: + density_noise_std_train: 0.0 + n_pts_per_ray_fine_training: 128 + 
n_pts_per_ray_fine_evaluation: 128 + raymarcher_EmissionAbsorptionRaymarcher_args: + blend_output: false + loss_weights: + loss_rgb_mse: 1.0 + loss_prev_stage_rgb_mse: 1.0 + loss_mask_bce: 0.0 + loss_prev_stage_mask_bce: 0.0 + loss_autodecoder_norm: 0.00 + +optimizer_factory_ImplicitronOptimizerFactory_args: + exponential_lr_step_size: 3001 + lr_policy: LinearExponential + linear_exponential_lr_milestone: 200 + +training_loop_ImplicitronTrainingLoop_args: + max_epochs: 6000 + metric_print_interval: 10 + store_checkpoints_purge: 3 + test_when_finished: true + validation_interval: 100 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf_wce.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf_wce.yaml new file mode 100644 index 0000000000000000000000000000000000000000..38212e35707e2c26b93d3aa593e76579c483ca91 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerf_wce.yaml @@ -0,0 +1,10 @@ +defaults: +- repro_singleseq_wce_base.yaml +- repro_feat_extractor_unnormed.yaml +- _self_ +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + chunk_size_grid: 16000 + view_pooler_enabled: true + raysampler_AdaptiveRaySampler_args: + n_rays_per_image_sampled_from_mask: 850 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerformer.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerformer.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8983c26f34309fe35d41d43a87f53ddd564db3a5 --- /dev/null +++ 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_nerformer.yaml @@ -0,0 +1,18 @@ +defaults: +- repro_singleseq_wce_base.yaml +- repro_feat_extractor_transformer.yaml +- _self_ +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + chunk_size_grid: 16000 + view_pooler_enabled: true + implicit_function_class_type: NeRFormerImplicitFunction + raysampler_AdaptiveRaySampler_args: + n_rays_per_image_sampled_from_mask: 800 + n_pts_per_ray_training: 32 + n_pts_per_ray_evaluation: 32 + renderer_MultiPassEmissionAbsorptionRenderer_args: + n_pts_per_ray_fine_training: 16 + n_pts_per_ray_fine_evaluation: 16 + view_pooler_args: + feature_aggregator_class_type: IdentityFeatureAggregator diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1f60f0b9480348a6660b90244600e7d59622470a --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn.yaml @@ -0,0 +1,29 @@ +defaults: +- repro_singleseq_base.yaml +- _self_ +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + num_passes: 1 + chunk_size_grid: 32000 + view_pooler_enabled: false + loss_weights: + loss_rgb_mse: 200.0 + loss_prev_stage_rgb_mse: 0.0 + loss_mask_bce: 1.0 + loss_prev_stage_mask_bce: 0.0 + loss_autodecoder_norm: 0.0 + depth_neg_penalty: 10000.0 + raysampler_class_type: NearFarRaySampler + raysampler_NearFarRaySampler_args: + n_rays_per_image_sampled_from_mask: 2048 + min_depth: 0.05 + max_depth: 0.05 + n_pts_per_ray_training: 1 + n_pts_per_ray_evaluation: 1 + stratified_point_sampling_training: false + 
stratified_point_sampling_evaluation: false + renderer_class_type: LSTMRenderer + implicit_function_class_type: SRNImplicitFunction +optimizer_factory_ImplicitronOptimizerFactory_args: + breed: Adam + lr: 5.0e-05 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_noharm.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_noharm.yaml new file mode 100644 index 0000000000000000000000000000000000000000..28b7570c8c9f49f3ecc5a45056c1467b3b3b2130 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_noharm.yaml @@ -0,0 +1,11 @@ +defaults: +- repro_singleseq_srn.yaml +- _self_ +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + num_passes: 1 + implicit_function_SRNImplicitFunction_args: + pixel_generator_args: + n_harmonic_functions: 0 + raymarch_function_args: + n_harmonic_functions: 0 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_wce.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_wce.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d190c28084f905a08d106976b45de7eb8560b3a0 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_wce.yaml @@ -0,0 +1,30 @@ +defaults: +- repro_singleseq_wce_base +- repro_feat_extractor_normed.yaml +- _self_ +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + num_passes: 1 + chunk_size_grid: 32000 + view_pooler_enabled: true + loss_weights: + loss_rgb_mse: 200.0 + loss_prev_stage_rgb_mse: 0.0 + loss_mask_bce: 1.0 + 
loss_prev_stage_mask_bce: 0.0 + loss_autodecoder_norm: 0.0 + depth_neg_penalty: 10000.0 + raysampler_class_type: NearFarRaySampler + raysampler_NearFarRaySampler_args: + n_rays_per_image_sampled_from_mask: 2048 + min_depth: 0.05 + max_depth: 0.05 + n_pts_per_ray_training: 1 + n_pts_per_ray_evaluation: 1 + stratified_point_sampling_training: false + stratified_point_sampling_evaluation: false + renderer_class_type: LSTMRenderer + implicit_function_class_type: SRNImplicitFunction +optimizer_factory_ImplicitronOptimizerFactory_args: + breed: Adam + lr: 5.0e-05 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_wce_noharm.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_wce_noharm.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3fc1254bd14e42266a1b8894d19bf081edced575 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_srn_wce_noharm.yaml @@ -0,0 +1,11 @@ +defaults: +- repro_singleseq_srn_wce.yaml +- _self_ +model_factory_ImplicitronModelFactory_args: + model_GenericModel_args: + num_passes: 1 + implicit_function_SRNImplicitFunction_args: + pixel_generator_args: + n_harmonic_functions: 0 + raymarch_function_args: + n_harmonic_functions: 0 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_idr.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_idr.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4b73e40797d30f70420e213588fa46f110895cde --- /dev/null +++ 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_idr.yaml @@ -0,0 +1,4 @@ +defaults: +- repro_singleseq_idr.yaml +- repro_singleseq_co3dv2_base.yaml +- _self_ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_nerf.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_nerf.yaml new file mode 100644 index 0000000000000000000000000000000000000000..89999cde6b2869bb4ba773e6f09819bdc4554cd4 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_nerf.yaml @@ -0,0 +1,4 @@ +defaults: +- repro_singleseq_nerf.yaml +- repro_singleseq_co3dv2_base.yaml +- _self_ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_nerformer.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_nerformer.yaml new file mode 100644 index 0000000000000000000000000000000000000000..510589a0c048f1f915da6b0e4c57dfbc3f8f29b5 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_nerformer.yaml @@ -0,0 +1,4 @@ +defaults: +- repro_singleseq_nerformer.yaml +- repro_singleseq_co3dv2_base.yaml +- _self_ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_srn_noharm.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_srn_noharm.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..8964a4a21e41286e9587cc2209a786b54482ab44 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_v2_srn_noharm.yaml @@ -0,0 +1,4 @@ +defaults: +- repro_singleseq_srn_noharm.yaml +- repro_singleseq_co3dv2_base.yaml +- _self_ diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_wce_base.yaml b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_wce_base.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f5b174c04a9b48646151509bdd22db24bc495702 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/projects/implicitron_trainer/configs/repro_singleseq_wce_base.yaml @@ -0,0 +1,22 @@ +defaults: +- repro_singleseq_base +- _self_ +data_source_ImplicitronDataSource_args: + data_loader_map_provider_SequenceDataLoaderMapProvider_args: + batch_size: 10 + dataset_length_train: 1000 + dataset_length_val: 1 + num_workers: 8 + train_conditioning_type: SAME + val_conditioning_type: SAME + test_conditioning_type: SAME + images_per_seq_options: + - 2 + - 3 + - 4 + - 5 + - 6 + - 7 + - 8 + - 9 + - 10 diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/common/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/common/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d71a6bbae320f97bf5f4bd33ab60f37b9eba5594 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/common/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
"""
Version-dependent compatibility helpers plus small device / linear-layer
utilities (pytorch3d common: compat.py, datatypes.py, linear_with_repeat.py).
"""

import math
from typing import Optional, Sequence, Tuple, Union

import torch
import torch.nn.functional as F
from torch.nn import init, Parameter


# Accepted ways of naming a device: a string like "cpu"/"cuda:0" or an
# already-constructed torch.device.
Device = Union[str, torch.device]


def meshgrid_ij(
    *A: Union[torch.Tensor, Sequence[torch.Tensor]],
) -> Tuple[torch.Tensor, ...]:  # pragma: no cover
    """
    Like torch.meshgrid was before PyTorch 1.10.0, i.e. with indexing set to ij.

    Args:
        A: one or more 1-D tensors (or a sequence of tensors).

    Returns:
        Tuple of coordinate grids using matrix ("ij") indexing.
    """
    # PyTorch >= 1.10 exposes a keyword-only `indexing` argument; request the
    # legacy "ij" layout explicitly whenever that argument exists.
    kwdefaults = torch.meshgrid.__kwdefaults__
    if kwdefaults is not None and "indexing" in kwdefaults:
        return torch.meshgrid(*A, indexing="ij")
    return torch.meshgrid(*A)


def prod(iterable, *, start=1):
    """
    Like math.prod in Python 3.8 and later.

    Args:
        iterable: values to be multiplied together.
        start: initial value of the running product.

    Returns:
        ``start`` multiplied by every element of ``iterable``
        (``start`` itself for an empty iterable).
    """
    result = start
    for factor in iterable:
        result = result * factor
    return result


def make_device(device: Device) -> torch.device:
    """
    Makes an actual torch.device object from the device specified as
    either a string or torch.device object. If the device is `cuda` without
    a specific index, the index of the current device is assigned.

    Args:
        device: Device (as str or torch.device)

    Returns:
        A matching torch.device object
    """
    resolved = torch.device(device) if isinstance(device, str) else device
    # A bare "cuda" means "the currently selected CUDA device"; pin the
    # explicit index so the result is unambiguous.
    if resolved.type == "cuda" and resolved.index is None:
        resolved = torch.device(f"cuda:{torch.cuda.current_device()}")
    return resolved


def get_device(x, device: Optional[Device] = None) -> torch.device:
    """
    Gets the device of the specified variable x if it is a tensor, or
    falls back to a default CPU device otherwise. Allows overriding by
    providing an explicit device.

    Args:
        x: a torch.Tensor to get the device from, or another type
        device: Device (as str or torch.device) to fall back to

    Returns:
        A matching torch.device object
    """
    # An explicit override wins over everything else.
    if device is not None:
        return make_device(device)
    # Otherwise use the tensor's own device, or CPU for non-tensors.
    return x.device if torch.is_tensor(x) else torch.device("cpu")


class LinearWithRepeat(torch.nn.Module):
    """
    Applies one Linear layer to a pair of inputs (x, y) where

        x has shape (..., k, n1)  -- a feature for every position on a ray,
        y has shape (..., n2)     -- a feature shared by the whole ray,

    producing the same result as

        Linear(n1 + n2, out_features)(
            torch.cat([x, y.unsqueeze(-2).expand(..., k, n2)], dim=-1)
        )

    but without materializing the k repeated copies of y.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        bias: bool = True,
        device=None,
        dtype=None,
    ) -> None:
        """
        Copied from torch.nn.Linear.

        Args:
            in_features: total input width n1 + n2.
            out_features: output width.
            bias: whether to learn an additive bias.
            device, dtype: forwarded to the parameter tensors.
        """
        super().__init__()
        tensor_kwargs = {"device": device, "dtype": dtype}
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(
            torch.empty((out_features, in_features), **tensor_kwargs)
        )
        if bias:
            self.bias = Parameter(torch.empty(out_features, **tensor_kwargs))
        else:
            # Register a None parameter so the module still reports a "bias" slot.
            self.register_parameter("bias", None)
        self.reset_parameters()

    def reset_parameters(self) -> None:
        """
        Copied from torch.nn.Linear.
        """
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is None:
            return
        fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
        bound = 0.0 if fan_in <= 0 else 1 / math.sqrt(fan_in)
        init.uniform_(self.bias, -bound, bound)

    def forward(self, input: Tuple[torch.Tensor, torch.Tensor]) -> torch.Tensor:
        """
        Args:
            input: pair (x, y) with x of shape (..., k, n1) and y of shape (..., n2).

        Returns:
            Tensor of shape (..., k, out_features).
        """
        per_point, per_ray = input
        split = per_point.shape[-1]
        # The first n1 weight columns act on the per-point features (bias here)...
        out = F.linear(per_point, self.weight[:, :split], self.bias)
        # ...and the remaining columns act on the shared per-ray features,
        # broadcast over the k positions by the unsqueeze.
        return out + F.linear(per_ray, self.weight[:, split:], None).unsqueeze(-2)
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

import math
from typing import Optional, Tuple

import torch
import torch.nn.functional as F
from torch import nn


class _SymEig3x3(nn.Module):
    """
    Optimized implementation of eigenvalues and eigenvectors computation for symmetric 3x3
    matrices.

    Please see https://en.wikipedia.org/wiki/Eigenvalue_algorithm#3.C3.973_matrices
    and https://www.geometrictools.com/Documentation/RobustEigenSymmetric3x3.pdf
    """

    def __init__(self, eps: Optional[float] = None) -> None:
        """
        Args:
            eps: epsilon to specify, if None then use torch.float eps
        """
        super().__init__()

        # Constant helper tensors are registered as buffers so they follow the
        # module across .to(device)/.cuda() calls.
        self.register_buffer("_identity", torch.eye(3))
        self.register_buffer("_rotation_2d", torch.tensor([[0.0, -1.0], [1.0, 0.0]]))
        self.register_buffer(
            "_rotations_3d", self._create_rotation_matrices(self._rotation_2d)
        )

        self._eps = eps or torch.finfo(torch.float).eps

    @staticmethod
    def _create_rotation_matrices(rotation_2d) -> torch.Tensor:
        """
        Compute rotations for later use in U V computation

        Args:
            rotation_2d: a π/2 rotation matrix.

        Returns:
            a (3, 3, 3) tensor containing 3 rotation matrices around each of the
            coordinate axes by π/2
        """

        rotations_3d = torch.zeros((3, 3, 3))
        rotation_axes = set(range(3))
        for rotation_axis in rotation_axes:
            # Embed the 2D rotation into the two axes other than rotation_axis.
            rest = list(rotation_axes - {rotation_axis})
            rotations_3d[rotation_axis][rest[0], rest] = rotation_2d[0]
            rotations_3d[rotation_axis][rest[1], rest] = rotation_2d[1]

        return rotations_3d

    def forward(
        self, inputs: torch.Tensor, eigenvectors: bool = True
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """
        Compute eigenvalues and (optionally) eigenvectors

        Args:
            inputs: symmetric matrices with shape of (..., 3, 3)
            eigenvectors: whether should we compute only eigenvalues or eigenvectors as well

        Returns:
            Either a tuple of (eigenvalues, eigenvectors) or eigenvalues only, depending on
            given params. Eigenvalues are of shape (..., 3) and eigenvectors (..., 3, 3)
        """
        if inputs.shape[-2:] != (3, 3):
            raise ValueError("Only inputs of shape (..., 3, 3) are supported.")

        inputs_diag = inputs.diagonal(dim1=-2, dim2=-1)
        inputs_trace = inputs_diag.sum(-1)
        q = inputs_trace / 3.0

        # Calculate squared sum of elements outside the main diagonal / 2
        p1 = ((inputs**2).sum(dim=(-1, -2)) - (inputs_diag**2).sum(-1)) / 2
        p2 = ((inputs_diag - q[..., None]) ** 2).sum(dim=-1) + 2.0 * p1.clamp(self._eps)

        p = torch.sqrt(p2 / 6.0)
        B = (inputs - q[..., None, None] * self._identity) / p[..., None, None]

        r = torch.det(B) / 2.0
        # Keep r within (-1.0, 1.0) boundaries with a margin to prevent exploding gradients.
        r = r.clamp(-1.0 + self._eps, 1.0 - self._eps)

        # Closed-form eigenvalues of a symmetric 3x3 via the trigonometric method.
        phi = torch.acos(r) / 3.0
        eig1 = q + 2 * p * torch.cos(phi)
        eig2 = q + 2 * p * torch.cos(phi + 2 * math.pi / 3)
        # Third eigenvalue recovered from the trace to save one cosine.
        eig3 = 3 * q - eig1 - eig2
        # eigenvals[..., i] is the i-th eigenvalue of the input, α0 ≤ α1 ≤ α2.
        eigenvals = torch.stack((eig2, eig3, eig1), dim=-1)

        # Soft dispatch between the degenerate case (diagonal A) and general.
        # diag_soft_cond -> 1.0 when p1 < 6 * eps and diag_soft_cond -> 0.0 otherwise.
        # We use 6 * eps to take into account the error accumulated during the p1 summation
        diag_soft_cond = torch.exp(-((p1 / (6 * self._eps)) ** 2)).detach()[..., None]

        # Eigenvalues are the ordered elements of main diagonal in the degenerate case
        diag_eigenvals, _ = torch.sort(inputs_diag, dim=-1)
        eigenvals = diag_soft_cond * diag_eigenvals + (1.0 - diag_soft_cond) * eigenvals

        if eigenvectors:
            eigenvecs = self._construct_eigenvecs_set(inputs, eigenvals)
        else:
            eigenvecs = None

        return eigenvals, eigenvecs

    def _construct_eigenvecs_set(
        self, inputs: torch.Tensor, eigenvals: torch.Tensor
    ) -> torch.Tensor:
        """
        Construct orthonormal set of eigenvectors by given inputs and pre-computed eigenvalues

        Args:
            inputs: tensor of symmetric matrices of shape (..., 3, 3)
            eigenvals: tensor of pre-computed eigenvalues of shape (..., 3)

        Returns:
            Tensor of eigenvectors of shape (..., 3, 3), composing an orthonormal set
        """
        # Build the set starting from the eigenvalue pair (0, 1)...
        eigenvecs_tuple_for_01 = self._construct_eigenvecs(
            inputs, eigenvals[..., 0], eigenvals[..., 1]
        )
        eigenvecs_for_01 = torch.stack(eigenvecs_tuple_for_01, dim=-1)

        # ...and independently from the pair (2, 1), reversed so columns stay ordered.
        eigenvecs_tuple_for_21 = self._construct_eigenvecs(
            inputs, eigenvals[..., 2], eigenvals[..., 1]
        )
        eigenvecs_for_21 = torch.stack(eigenvecs_tuple_for_21[::-1], dim=-1)

        # The result will be smooth here even if both parts of comparison
        # are close, because eigenvecs_01 and eigenvecs_21 would be mostly equal as well
        eigenvecs_cond = (
            eigenvals[..., 1] - eigenvals[..., 0]
            > eigenvals[..., 2] - eigenvals[..., 1]
        ).detach()
        eigenvecs = torch.where(
            eigenvecs_cond[..., None, None], eigenvecs_for_01, eigenvecs_for_21
        )

        return eigenvecs

    def _construct_eigenvecs(
        self, inputs: torch.Tensor, alpha0: torch.Tensor, alpha1: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Construct an orthonormal set of eigenvectors by given pair of eigenvalues.

        Args:
            inputs: tensor of symmetric matrices of shape (..., 3, 3)
            alpha0: first eigenvalues, one per matrix, of shape (...)
            alpha1: second eigenvalues, one per matrix, of shape (...)

        Returns:
            Tuple of three eigenvector tensors of shape (..., 3), composing an orthonormal
            set
        """

        # Find the eigenvector corresponding to alpha0, its eigenvalue is distinct
        ev0 = self._get_ev0(inputs - alpha0[..., None, None] * self._identity)
        u, v = self._get_uv(ev0)
        ev1 = self._get_ev1(inputs - alpha1[..., None, None] * self._identity, u, v)
        # Third eigenvector is computed as the cross-product of the other two
        ev2 = torch.cross(ev0, ev1, dim=-1)

        return ev0, ev1, ev2

    def _get_ev0(self, char_poly: torch.Tensor) -> torch.Tensor:
        """
        Construct the first normalized eigenvector given a characteristic polynomial

        Args:
            char_poly: a characteristic polynomials of the input matrices of shape (..., 3, 3)

        Returns:
            Tensor of first eigenvectors of shape (..., 3)
        """

        # Any pairwise cross-product of rows of (A - alpha*I) lies in its null
        # space, i.e. is (proportional to) the sought eigenvector.
        r01 = torch.cross(char_poly[..., 0, :], char_poly[..., 1, :], dim=-1)
        r12 = torch.cross(char_poly[..., 1, :], char_poly[..., 2, :], dim=-1)
        r02 = torch.cross(char_poly[..., 0, :], char_poly[..., 2, :], dim=-1)

        cross_products = torch.stack((r01, r12, r02), dim=-2)
        # Regularize it with + or -eps depending on the sign of the first vector
        cross_products += self._eps * self._sign_without_zero(
            cross_products[..., :1, :]
        )

        norms_sq = (cross_products**2).sum(dim=-1)
        max_norms_index = norms_sq.argmax(dim=-1)

        # Pick only the cross-product with highest squared norm for each input
        max_cross_products = self._gather_by_index(
            cross_products, max_norms_index[..., None, None], -2
        )
        # Pick corresponding squared norms for each cross-product
        max_norms_sq = self._gather_by_index(norms_sq, max_norms_index[..., None], -1)

        # Normalize cross-product vectors by their norms
        return max_cross_products / torch.sqrt(max_norms_sq[..., None])

    def _gather_by_index(
        self, source: torch.Tensor, index: torch.Tensor, dim: int
    ) -> torch.Tensor:
        """
        Selects elements from the given source tensor by provided index tensor.
        Number of dimensions should be the same for source and index tensors.

        Args:
            source: input tensor to gather from
            index: index tensor with indices to gather from source
            dim: dimension to gather across

        Returns:
            Tensor of shape same as the source with exception of specified dimension.
        """

        index_shape = list(source.shape)
        index_shape[dim] = 1

        return source.gather(dim, index.expand(index_shape)).squeeze(dim)

    def _get_uv(self, w: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Computes unit-length vectors U and V such that {U, V, W} is a right-handed
        orthonormal set.

        Args:
            w: eigenvector tensor of shape (..., 3)

        Returns:
            Tuple of U and V unit-length vector tensors of shape (..., 3)
        """

        # Rotate w by π/2 around the axis where it has the smallest component;
        # the projection (zeroed axis) gives a vector orthogonal to w.
        min_idx = w.abs().argmin(dim=-1)
        rotation_2d = self._rotations_3d[min_idx].to(w)

        u = F.normalize((rotation_2d @ w[..., None])[..., 0], dim=-1)
        v = torch.cross(w, u, dim=-1)
        return u, v

    def _get_ev1(
        self, char_poly: torch.Tensor, u: torch.Tensor, v: torch.Tensor
    ) -> torch.Tensor:
        """
        Computes the second normalized eigenvector given a characteristic polynomial
        and U and V vectors

        Args:
            char_poly: a characteristic polynomials of the input matrices of shape (..., 3, 3)
            u: unit-length vectors from _get_uv method
            v: unit-length vectors from _get_uv method

        Returns:
            Tensor of second eigenvectors of shape (..., 3)
        """

        # Project the characteristic polynomial onto the {u, v} plane.
        j = torch.stack((u, v), dim=-1)
        m = j.transpose(-1, -2) @ char_poly @ j

        # If angle between those vectors is acute, take their sum = m[..., 0, :] + m[..., 1, :],
        # otherwise take the difference = m[..., 0, :] - m[..., 1, :]
        # m is in theory of rank 1 (or 0), so it snaps only when one of the rows is close to 0
        is_acute_sign = self._sign_without_zero(
            (m[..., 0, :] * m[..., 1, :]).sum(dim=-1)
        ).detach()

        rowspace = m[..., 0, :] + is_acute_sign[..., None] * m[..., 1, :]
        # rowspace will be near zero for second-order eigenvalues
        # this regularization guarantees abs(rowspace[0]) >= eps in a smooth'ish way
        rowspace += self._eps * self._sign_without_zero(rowspace[..., :1])

        # The eigenvector is j applied to the unit vector orthogonal to rowspace.
        return (
            j
            @ F.normalize(rowspace @ self._rotation_2d.to(rowspace), dim=-1)[..., None]
        )[..., 0]

    @staticmethod
    def _sign_without_zero(tensor):
        """
        Args:
            tensor: an arbitrary shaped tensor

        Returns:
            Tensor of the same shape as an input, but with 1.0 if tensor > 0.0 and -1.0
            otherwise
        """
        return 2.0 * (tensor > 0.0).to(tensor.dtype) - 1.0


def symeig3x3(
    inputs: torch.Tensor, eigenvectors: bool = True
) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
    """
    Compute eigenvalues and (optionally) eigenvectors

    Args:
        inputs: symmetric matrices with shape of (..., 3, 3)
        eigenvectors: whether should we compute only eigenvalues or eigenvectors as well

    Returns:
        Either a tuple of (eigenvalues, eigenvectors) or eigenvalues only, depending on
        given params. Eigenvalues are of shape (..., 3) and eigenvectors (..., 3, 3)
    """
    # NOTE(review): a fresh _SymEig3x3 module (and its buffers) is built on
    # every call; cache one per device if this shows up in profiles.
    return _SymEig3x3().to(inputs.device)(inputs, eigenvectors=eigenvectors)


# --- from pytorch3d/common/workaround/utils.py ---


def _safe_det_3x3(t: torch.Tensor):
    """
    Fast determinant calculation for a batch of 3x3 matrices.

    Note, result of this function might not be the same as `torch.det()`.
    The differences might be in the last significant digit.

    Args:
        t: Tensor of shape (N, 3, 3).

    Returns:
        Tensor of shape (N) with determinants.
    """

    # Cofactor expansion along the first row, written out explicitly.
    det = (
        t[..., 0, 0] * (t[..., 1, 1] * t[..., 2, 2] - t[..., 1, 2] * t[..., 2, 1])
        - t[..., 0, 1] * (t[..., 1, 0] * t[..., 2, 2] - t[..., 2, 0] * t[..., 1, 2])
        + t[..., 0, 2] * (t[..., 1, 0] * t[..., 2, 1] - t[..., 2, 0] * t[..., 1, 1])
    )

    return det
// NOTE(review): template arguments and accessor type parameters in this file
// were stripped by the diff mangling; reconstructed from the surrounding code
// (float radius2, int64 indices, 3-D point accessors) — verify against upstream.
template <typename scalar_t>
__global__ void BallQueryKernel(
    const at::PackedTensorAccessor64<scalar_t, 3, at::RestrictPtrTraits> p1,
    const at::PackedTensorAccessor64<scalar_t, 3, at::RestrictPtrTraits> p2,
    const at::PackedTensorAccessor64<int64_t, 1, at::RestrictPtrTraits>
        lengths1,
    const at::PackedTensorAccessor64<int64_t, 1, at::RestrictPtrTraits>
        lengths2,
    at::PackedTensorAccessor64<int64_t, 3, at::RestrictPtrTraits> idxs,
    at::PackedTensorAccessor64<scalar_t, 3, at::RestrictPtrTraits> dists,
    const int64_t K,
    const float radius2) {
  const int64_t N = p1.size(0);
  const int64_t chunks_per_cloud = (1 + (p1.size(1) - 1) / blockDim.x);
  const int64_t chunks_to_do = N * chunks_per_cloud;
  const int D = p1.size(2);

  for (int64_t chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) {
    const int64_t n = chunk / chunks_per_cloud; // batch_index
    const int64_t start_point = blockDim.x * (chunk % chunks_per_cloud);
    int64_t i = start_point + threadIdx.x;

    // Check if point is valid in heterogeneous tensor
    if (i >= lengths1[n]) {
      continue;
    }

    // Iterate over points in p2 until desired count is reached or
    // all points have been considered
    for (int64_t j = 0, count = 0; j < lengths2[n] && count < K; ++j) {
      // Calculate the squared distance between the points
      scalar_t dist2 = 0.0;
      for (int d = 0; d < D; ++d) {
        scalar_t diff = p1[n][i][d] - p2[n][j][d];
        dist2 += (diff * diff);
      }

      if (dist2 < radius2) {
        // The point is within the radius: record its index and distance,
        // then advance the per-point neighbor count.
        idxs[n][i][count] = j;
        dists[n][i][count] = dist2;
        ++count;
      }
    }
  }
}

// Host entry point: validates devices/types, allocates (N, P1, K) outputs
// (idxs filled with -1 sentinels, dists with zeros) and launches the kernel.
std::tuple<at::Tensor, at::Tensor> BallQueryCuda(
    const at::Tensor& p1, // (N, P1, 3)
    const at::Tensor& p2, // (N, P2, 3)
    const at::Tensor& lengths1, // (N,)
    const at::Tensor& lengths2, // (N,)
    int K,
    float radius) {
  // Check inputs are on the same device
  at::TensorArg p1_t{p1, "p1", 1}, p2_t{p2, "p2", 2},
      lengths1_t{lengths1, "lengths1", 3}, lengths2_t{lengths2, "lengths2", 4};
  at::CheckedFrom c = "BallQueryCuda";
  at::checkAllSameGPU(c, {p1_t, p2_t, lengths1_t, lengths2_t});
  at::checkAllSameType(c, {p1_t, p2_t});

  // Set the device for the kernel launch based on the device of p1
  at::cuda::CUDAGuard device_guard(p1.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  TORCH_CHECK(
      p2.size(2) == p1.size(2), "Point sets must have the same last dimension");

  const int N = p1.size(0);
  const int P1 = p1.size(1);
  const int64_t K_64 = K;
  // Compare squared distances against the squared radius: avoids a sqrt
  // per candidate pair in the kernel.
  const float radius2 = radius * radius;

  // Output tensor with indices of neighbors for each point in p1;
  // -1 marks unfilled neighbor slots.
  auto long_dtype = lengths1.options().dtype(at::kLong);
  auto idxs = at::full({N, P1, K}, -1, long_dtype);
  auto dists = at::zeros({N, P1, K}, p1.options());

  if (idxs.numel() == 0) {
    AT_CUDA_CHECK(cudaGetLastError());
    return std::make_tuple(idxs, dists);
  }

  const size_t blocks = 256;
  const size_t threads = 256;

  AT_DISPATCH_FLOATING_TYPES(
      p1.scalar_type(), "ball_query_kernel_cuda", ([&] {
        BallQueryKernel<scalar_t><<<blocks, threads, 0, stream>>>(
            p1.packed_accessor64<scalar_t, 3, at::RestrictPtrTraits>(),
            p2.packed_accessor64<scalar_t, 3, at::RestrictPtrTraits>(),
            lengths1.packed_accessor64<int64_t, 1, at::RestrictPtrTraits>(),
            lengths2.packed_accessor64<int64_t, 1, at::RestrictPtrTraits>(),
            idxs.packed_accessor64<int64_t, 3, at::RestrictPtrTraits>(),
            dists.packed_accessor64<scalar_t, 3, at::RestrictPtrTraits>(),
            K_64,
            radius2);
      }));

  AT_CUDA_CHECK(cudaGetLastError());

  return std::make_tuple(idxs, dists);
}
+ */ + +#pragma once +#include +#include +#include "utils/pytorch3d_cutils.h" + +// Compute indices of K neighbors in pointcloud p2 to points +// in pointcloud p1 which fall within a specified radius +// +// Args: +// p1: FloatTensor of shape (N, P1, D) giving a batch of pointclouds each +// containing P1 points of dimension D. +// p2: FloatTensor of shape (N, P2, D) giving a batch of pointclouds each +// containing P2 points of dimension D. +// lengths1: LongTensor, shape (N,), giving actual length of each P1 cloud. +// lengths2: LongTensor, shape (N,), giving actual length of each P2 cloud. +// K: Integer giving the upper bound on the number of samples to take +// within the radius +// radius: the radius around each point within which the neighbors need to be +// located +// +// Returns: +// p1_neighbor_idx: LongTensor of shape (N, P1, K), where +// p1_neighbor_idx[n, i, k] = j means that the kth +// neighbor to p1[n, i] in the cloud p2[n] is p2[n, j]. +// This is padded with -1s both where a cloud in p2 has fewer than +// S points and where a cloud in p1 has fewer than P1 points and +// also if there are fewer than K points which satisfy the radius +// threshold. +// +// p1_neighbor_dists: FloatTensor of shape (N, P1, K) containing the squared +// distance from each point p1[n, p, :] to its K neighbors +// p2[n, p1_neighbor_idx[n, p, k], :]. 
+ +// CPU implementation +std::tuple BallQueryCpu( + const at::Tensor& p1, + const at::Tensor& p2, + const at::Tensor& lengths1, + const at::Tensor& lengths2, + const int K, + const float radius); + +// CUDA implementation +std::tuple BallQueryCuda( + const at::Tensor& p1, + const at::Tensor& p2, + const at::Tensor& lengths1, + const at::Tensor& lengths2, + const int K, + const float radius); + +// Implementation which is exposed +// Note: the backward pass reuses the KNearestNeighborBackward kernel +inline std::tuple BallQuery( + const at::Tensor& p1, + const at::Tensor& p2, + const at::Tensor& lengths1, + const at::Tensor& lengths2, + int K, + float radius) { + if (p1.is_cuda() || p2.is_cuda()) { +#ifdef WITH_CUDA + CHECK_CUDA(p1); + CHECK_CUDA(p2); + return BallQueryCuda( + p1.contiguous(), + p2.contiguous(), + lengths1.contiguous(), + lengths2.contiguous(), + K, + radius); +#else + AT_ERROR("Not compiled with GPU support."); +#endif + } + CHECK_CPU(p1); + CHECK_CPU(p2); + return BallQueryCpu( + p1.contiguous(), + p2.contiguous(), + lengths1.contiguous(), + lengths2.contiguous(), + K, + radius); +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/ball_query/ball_query_cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/ball_query/ball_query_cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..24cdf388f0cef9dc6e9ed9ce7085862d11448165 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/ball_query/ball_query_cpu.cpp @@ -0,0 +1,54 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include +#include + +std::tuple BallQueryCpu( + const at::Tensor& p1, + const at::Tensor& p2, + const at::Tensor& lengths1, + const at::Tensor& lengths2, + int K, + float radius) { + const int N = p1.size(0); + const int P1 = p1.size(1); + const int D = p1.size(2); + + auto long_opts = lengths1.options().dtype(torch::kInt64); + torch::Tensor idxs = torch::full({N, P1, K}, -1, long_opts); + torch::Tensor dists = torch::full({N, P1, K}, 0, p1.options()); + const float radius2 = radius * radius; + + auto p1_a = p1.accessor(); + auto p2_a = p2.accessor(); + auto lengths1_a = lengths1.accessor(); + auto lengths2_a = lengths2.accessor(); + auto idxs_a = idxs.accessor(); + auto dists_a = dists.accessor(); + + for (int n = 0; n < N; ++n) { + const int64_t length1 = lengths1_a[n]; + const int64_t length2 = lengths2_a[n]; + for (int64_t i = 0; i < length1; ++i) { + for (int64_t j = 0, count = 0; j < length2 && count < K; ++j) { + float dist2 = 0; + for (int d = 0; d < D; ++d) { + float diff = p1_a[n][i][d] - p2_a[n][j][d]; + dist2 += diff * diff; + } + if (dist2 < radius2) { + dists_a[n][i][count] = dist2; + idxs_a[n][i][count] = j; + ++count; + } + } + } + } + return std::make_tuple(idxs, dists); +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/alpha_composite.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/alpha_composite.cu new file mode 100644 index 0000000000000000000000000000000000000000..2bfe79dc95b240b1565aa6cd0988965660b91eff --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/alpha_composite.cu @@ -0,0 +1,233 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include +#include +#include +#include + +#include +#include + +#include +#include + +__constant__ const float kEpsilon = 1e-9; + +// TODO(gkioxari) support all data types once AtomicAdd supports doubles. +// Currently, support is for floats only. +__global__ void alphaCompositeCudaForwardKernel( + // clang-format off + at::PackedTensorAccessor64 result, + const at::PackedTensorAccessor64 features, + const at::PackedTensorAccessor64 alphas, + const at::PackedTensorAccessor64 points_idx) { + // clang-format on + const int64_t C = features.size(0); + const int64_t H = points_idx.size(2); + const int64_t W = points_idx.size(3); + + // Get the batch and index + const auto batch = blockIdx.x; + + const int num_pixels = C * H * W; + const auto num_threads = gridDim.y * blockDim.x; + const auto tid = blockIdx.y * blockDim.x + threadIdx.x; + + // Iterate over each feature in each pixel + for (int pid = tid; pid < num_pixels; pid += num_threads) { + int ch = pid / (H * W); + int j = (pid % (H * W)) / W; + int i = (pid % (H * W)) % W; + + // alphacomposite the different values + float cum_alpha = 1.; + // Iterate through the closest K points for this pixel + for (int k = 0; k < points_idx.size(1); ++k) { + int n_idx = points_idx[batch][k][j][i]; + + // Sentinel value is -1 indicating no point overlaps the pixel + if (n_idx < 0) { + continue; + } + + float alpha = alphas[batch][k][j][i]; + // TODO(gkioxari) It might be more efficient to have threads write in a + // local variable, and move atomicAdd outside of the loop such that + // atomicAdd is executed once per thread. + atomicAdd( + &result[batch][ch][j][i], features[ch][n_idx] * cum_alpha * alpha); + cum_alpha = cum_alpha * (1 - alpha); + } + } +} + +// TODO(gkioxari) support all data types once AtomicAdd supports doubles. +// Currently, support is for floats only. 
+__global__ void alphaCompositeCudaBackwardKernel( + // clang-format off + at::PackedTensorAccessor64 grad_features, + at::PackedTensorAccessor64 grad_alphas, + const at::PackedTensorAccessor64 grad_outputs, + const at::PackedTensorAccessor64 features, + const at::PackedTensorAccessor64 alphas, + const at::PackedTensorAccessor64 points_idx) { + // clang-format on + const int64_t C = features.size(0); + const int64_t H = points_idx.size(2); + const int64_t W = points_idx.size(3); + + // Get the batch and index + const auto batch = blockIdx.x; + + const int num_pixels = C * H * W; + const auto num_threads = gridDim.y * blockDim.x; + const auto tid = blockIdx.y * blockDim.x + threadIdx.x; + + // Parallelize over each feature in each pixel in images of size H * W, + // for each image in the batch of size batch_size + for (int pid = tid; pid < num_pixels; pid += num_threads) { + int ch = pid / (H * W); + int j = (pid % (H * W)) / W; + int i = (pid % (H * W)) % W; + + // alphacomposite the different values + float cum_alpha = 1.; + // Iterate through the closest K points for this pixel + for (int k = 0; k < points_idx.size(1); ++k) { + int n_idx = points_idx[batch][k][j][i]; + + // Sentinel value is -1 indicating no point overlaps the pixel + if (n_idx < 0) { + continue; + } + float alpha = alphas[batch][k][j][i]; + + // TODO(gkioxari) It might be more efficient to have threads write in a + // local variable, and move atomicAdd outside of the loop such that + // atomicAdd is executed once per thread. 
+ atomicAdd( + &grad_alphas[batch][k][j][i], + cum_alpha * features[ch][n_idx] * grad_outputs[batch][ch][j][i]); + atomicAdd( + &grad_features[ch][n_idx], + cum_alpha * alpha * grad_outputs[batch][ch][j][i]); + + // Iterate over all (K-1) nearest points to update gradient + for (int t = 0; t < k; ++t) { + int t_idx = points_idx[batch][t][j][i]; + // Sentinel value is -1, indicating no point overlaps this pixel + if (t_idx < 0) { + continue; + } + float alpha_tvalue = alphas[batch][t][j][i]; + // TODO(gkioxari) It might be more efficient to have threads write in a + // local variable, and move atomicAdd outside of the loop such that + // atomicAdd is executed once per thread. + atomicAdd( + &grad_alphas[batch][t][j][i], + -grad_outputs[batch][ch][j][i] * features[ch][n_idx] * cum_alpha * + alpha / (1 - alpha_tvalue + kEpsilon)); + } + + cum_alpha = cum_alpha * (1 - alphas[batch][k][j][i]); + } + } +} + +at::Tensor alphaCompositeCudaForward( + const at::Tensor& features, + const at::Tensor& alphas, + const at::Tensor& points_idx) { + // Check inputs are on the same device + at::TensorArg features_t{features, "features", 1}, + alphas_t{alphas, "alphas", 2}, points_idx_t{points_idx, "points_idx", 3}; + at::CheckedFrom c = "alphaCompositeCudaForward"; + at::checkAllSameGPU(c, {features_t, alphas_t, points_idx_t}); + at::checkAllSameType(c, {features_t, alphas_t}); + + // Set the device for the kernel launch based on the device of the input + at::cuda::CUDAGuard device_guard(features.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + const int64_t batch_size = points_idx.size(0); + const int64_t C = features.size(0); + const int64_t H = points_idx.size(2); + const int64_t W = points_idx.size(3); + + auto result = at::zeros({batch_size, C, H, W}, features.options()); + + if (result.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return result; + } + + const dim3 threadsPerBlock(64); + const dim3 numBlocks(batch_size, 1024 / batch_size + 1); + 
+ // TODO(gkioxari) add AT_DISPATCH_FLOATING_TYPES once atomicAdd supports + // doubles. Currently, support is for floats only. + alphaCompositeCudaForwardKernel<<>>( + // clang-format off + // As we are using packed accessors here the tensors + // do not need to be made contiguous. + result.packed_accessor64(), + features.packed_accessor64(), + alphas.packed_accessor64(), + points_idx.packed_accessor64()); + // clang-format on + AT_CUDA_CHECK(cudaGetLastError()); + return result; +} + +std::tuple alphaCompositeCudaBackward( + const at::Tensor& grad_outputs, + const at::Tensor& features, + const at::Tensor& alphas, + const at::Tensor& points_idx) { + // Check inputs are on the same device + at::TensorArg grad_outputs_t{grad_outputs, "grad_outputs", 1}, + features_t{features, "features", 2}, alphas_t{alphas, "alphas", 3}, + points_idx_t{points_idx, "points_idx", 4}; + at::CheckedFrom c = "alphaCompositeCudaBackward"; + at::checkAllSameGPU(c, {grad_outputs_t, features_t, alphas_t, points_idx_t}); + at::checkAllSameType(c, {grad_outputs_t, features_t, alphas_t}); + + // Set the device for the kernel launch based on the device of the input + at::cuda::CUDAGuard device_guard(features.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + auto grad_features = at::zeros_like(features); + auto grad_alphas = at::zeros_like(alphas); + + if (grad_features.numel() == 0 || grad_alphas.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return std::make_tuple(grad_features, grad_alphas); + } + + const int64_t bs = alphas.size(0); + + const dim3 threadsPerBlock(64); + const dim3 numBlocks(bs, 1024 / bs + 1); + + // TODO(gkioxari) add AT_DISPATCH_FLOATING_TYPES once atomicAdd supports + // doubles. Currently, support is for floats only. + alphaCompositeCudaBackwardKernel<<>>( + // clang-format off + // As we are using packed accessors here the tensors + // do not need to be made contiguous. 
+ grad_features.packed_accessor64(), + grad_alphas.packed_accessor64(), + grad_outputs.packed_accessor64(), + features.packed_accessor64(), + alphas.packed_accessor64(), + points_idx.packed_accessor64()); + // clang-format on + AT_CUDA_CHECK(cudaGetLastError()); + return std::make_tuple(grad_features, grad_alphas); +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/alpha_composite.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/alpha_composite.h new file mode 100644 index 0000000000000000000000000000000000000000..a9ec7b43e9778dc7a71057b66e387fa83e5e36da --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/alpha_composite.h @@ -0,0 +1,115 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include "utils/pytorch3d_cutils.h" + +#include + +// Perform alpha compositing of points in a z-buffer. +// +// Inputs: +// features: FloatTensor of shape (C, P) which gives the features +// of each point where C is the size of the feature and +// P the number of points. +// alphas: FloatTensor of shape (N, points_per_pixel, H, W) where +// points_per_pixel is the number of points in the z-buffer +// sorted in z-order, and (H, W) is the image size. +// points_idx: IntTensor of shape (N, points_per_pixel, H, W) giving the +// indices of the nearest points at each pixel, sorted in z-order. +// Returns: +// weighted_fs: FloatTensor of shape (N, C, H, W) giving the accumulated +// feature for each point. 
Concretely, it gives: +// weighted_fs[b,c,i,j] = sum_k cum_alpha_k * +// features[c,points_idx[b,k,i,j]] +// where cum_alpha_k = +// alphas[b,k,i,j] * prod_l=0..k-1 (1 - alphas[b,l,i,j]) + +// CUDA declarations +#ifdef WITH_CUDA +torch::Tensor alphaCompositeCudaForward( + const torch::Tensor& features, + const torch::Tensor& alphas, + const torch::Tensor& points_idx); + +std::tuple alphaCompositeCudaBackward( + const torch::Tensor& grad_outputs, + const torch::Tensor& features, + const torch::Tensor& alphas, + const torch::Tensor& points_idx); +#endif + +// C++ declarations +torch::Tensor alphaCompositeCpuForward( + const torch::Tensor& features, + const torch::Tensor& alphas, + const torch::Tensor& points_idx); + +std::tuple alphaCompositeCpuBackward( + const torch::Tensor& grad_outputs, + const torch::Tensor& features, + const torch::Tensor& alphas, + const torch::Tensor& points_idx); + +torch::Tensor alphaCompositeForward( + torch::Tensor& features, + torch::Tensor& alphas, + torch::Tensor& points_idx) { + features = features.contiguous(); + alphas = alphas.contiguous(); + points_idx = points_idx.contiguous(); + + if (features.is_cuda()) { +#ifdef WITH_CUDA + CHECK_CUDA(features); + CHECK_CUDA(alphas); + CHECK_CUDA(points_idx); + return alphaCompositeCudaForward(features, alphas, points_idx); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } else { + CHECK_CPU(features); + CHECK_CPU(alphas); + CHECK_CPU(points_idx); + return alphaCompositeCpuForward(features, alphas, points_idx); + } +} + +std::tuple alphaCompositeBackward( + torch::Tensor& grad_outputs, + torch::Tensor& features, + torch::Tensor& alphas, + torch::Tensor& points_idx) { + grad_outputs = grad_outputs.contiguous(); + features = features.contiguous(); + alphas = alphas.contiguous(); + points_idx = points_idx.contiguous(); + + if (grad_outputs.is_cuda()) { +#ifdef WITH_CUDA + CHECK_CUDA(grad_outputs); + CHECK_CUDA(features); + CHECK_CUDA(alphas); + CHECK_CUDA(points_idx); + + return 
alphaCompositeCudaBackward( + grad_outputs, features, alphas, points_idx); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } else { + CHECK_CPU(grad_outputs); + CHECK_CPU(features); + CHECK_CPU(alphas); + CHECK_CPU(points_idx); + + return alphaCompositeCpuBackward( + grad_outputs, features, alphas, points_idx); + } +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/alpha_composite_cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/alpha_composite_cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..41bc0ec76794228e7f770f0436453306ffc8aec0 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/alpha_composite_cpu.cpp @@ -0,0 +1,124 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include + +#include +#include + +// Epsilon float +const float kEps = 1e-9; + +torch::Tensor alphaCompositeCpuForward( + const torch::Tensor& features, + const torch::Tensor& alphas, + const torch::Tensor& points_idx) { + const int64_t B = points_idx.size(0); + const int64_t K = points_idx.size(1); + const int64_t H = points_idx.size(2); + const int64_t W = points_idx.size(3); + const int64_t C = features.size(0); + + torch::Tensor result = torch::zeros({B, C, H, W}, features.options()); + + auto features_a = features.accessor(); + auto alphas_a = alphas.accessor(); + auto points_idx_a = points_idx.accessor(); + auto result_a = result.accessor(); + + // Iterate over the batch + for (int b = 0; b < B; ++b) { + // Iterate over the features + for (int c = 0; c < C; ++c) { + // Iterate through the horizontal lines of the image from top to bottom + for (int j = 0; j < H; ++j) { + // Iterate over pixels in a horizontal line, left to right + for (int i = 0; i < W; ++i) { + float cum_alpha = 1.; + // Iterate through the closest K points for this pixel + for (int k = 0; k < K; ++k) { + int64_t n_idx = points_idx_a[b][k][j][i]; + // Sentinel value is -1 indicating no point overlaps the pixel + if (n_idx < 0) { + continue; + } + float alpha = alphas_a[b][k][j][i]; + result_a[b][c][j][i] += cum_alpha * alpha * features_a[c][n_idx]; + cum_alpha = cum_alpha * (1 - alpha); + } + } + } + } + } + return result; +} + +std::tuple alphaCompositeCpuBackward( + const torch::Tensor& grad_outputs, + const torch::Tensor& features, + const torch::Tensor& alphas, + const torch::Tensor& points_idx) { + torch::Tensor grad_features = torch::zeros_like(features); + torch::Tensor grad_alphas = torch::zeros_like(alphas); + + const int64_t B = points_idx.size(0); + const int64_t K = points_idx.size(1); + const int64_t H = points_idx.size(2); + const int64_t W = points_idx.size(3); + const int64_t C = features.size(0); + + auto grad_outputs_a = grad_outputs.accessor(); + auto features_a = 
features.accessor(); + auto alphas_a = alphas.accessor(); + auto points_idx_a = points_idx.accessor(); + auto grad_features_a = grad_features.accessor(); + auto grad_alphas_a = grad_alphas.accessor(); + + // Iterate over the batch + for (int b = 0; b < B; ++b) { + // Iterate over the features + for (int c = 0; c < C; ++c) { + // Iterate through the horizontal lines of the image from top to bottom + for (int j = 0; j < H; ++j) { + // Iterate over pixels in a horizontal line, left to right + for (int i = 0; i < W; ++i) { + float cum_alpha = 1.; + // Iterate through the closest K points for this pixel + for (int k = 0; k < K; ++k) { + int64_t n_idx = points_idx_a[b][k][j][i]; + // Sentinal value is -1, indicating no point overlaps this pixel + if (n_idx < 0) { + continue; + } + float alpha = alphas_a[b][k][j][i]; + grad_alphas_a[b][k][j][i] += + grad_outputs_a[b][c][j][i] * features_a[c][n_idx] * cum_alpha; + grad_features_a[c][n_idx] += + grad_outputs_a[b][c][j][i] * cum_alpha * alpha; + + // Iterate over all (K-1) nearer points to update gradient + for (int t = 0; t < k; t++) { + int64_t t_idx = points_idx_a[b][t][j][i]; + // Sentinal value is -1, indicating no point overlaps this pixel + if (t_idx < 0) { + continue; + } + float alpha_tvalue = alphas_a[b][t][j][i]; + grad_alphas_a[b][t][j][i] -= grad_outputs_a[b][c][j][i] * + features_a[c][n_idx] * cum_alpha * alpha / + (1 - alpha_tvalue + kEps); + } + + cum_alpha = cum_alpha * (1 - alpha); + } + } + } + } + } + return std::make_tuple(grad_features, grad_alphas); +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/norm_weighted_sum.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/norm_weighted_sum.cu new file mode 100644 index 0000000000000000000000000000000000000000..e21617d2665f75fc1768d50a8294ae748470d26e --- /dev/null +++ 
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

// NOTE(review): include targets and all accessor template arguments in this
// file were stripped by the diff mangling; reconstructed (float-only kernels,
// features (C, P), alphas/points_idx (N, K, H, W)) — verify against upstream.
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <c10/cuda/CUDAGuard.h>
#include <torch/extension.h>

#include <cmath>
#include <cstdio>

#include <tuple>
#include <vector>

// Clamp floor for the alpha normalizer to avoid division by ~0.
__constant__ const float kEpsilon = 1e-4;

// TODO(gkioxari) support all data types once AtomicAdd supports doubles.
// Currently, support is for floats only.
__global__ void weightedSumNormCudaForwardKernel(
    // clang-format off
    at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> result,
    const at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> features,
    const at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> alphas,
    const at::PackedTensorAccessor64<int64_t, 4, at::RestrictPtrTraits> points_idx) {
  // clang-format on
  const int64_t C = features.size(0);
  const int64_t H = points_idx.size(2);
  const int64_t W = points_idx.size(3);

  // Get the batch and index
  const auto batch = blockIdx.x;

  const int num_pixels = C * H * W;
  const auto num_threads = gridDim.y * blockDim.x;
  const auto tid = blockIdx.y * blockDim.x + threadIdx.x;

  // Parallelize over each feature in each pixel in images of size H * W,
  // for each image in the batch of size batch_size
  for (int pid = tid; pid < num_pixels; pid += num_threads) {
    int ch = pid / (H * W);
    int j = (pid % (H * W)) / W;
    int i = (pid % (H * W)) % W;

    // Store the accumulated alpha value
    float cum_alpha = 0.;
    // Iterate through the closest K points for this pixel
    for (int k = 0; k < points_idx.size(1); ++k) {
      int n_idx = points_idx[batch][k][j][i];
      // Sentinel value is -1 indicating no point overlaps the pixel
      if (n_idx < 0) {
        continue;
      }

      cum_alpha += alphas[batch][k][j][i];
    }

    if (cum_alpha < kEpsilon) {
      cum_alpha = kEpsilon;
    }

    // Iterate through the closest K points for this pixel
    for (int k = 0; k < points_idx.size(1); ++k) {
      int n_idx = points_idx[batch][k][j][i];
      // Sentinel value is -1 indicating no point overlaps the pixel
      if (n_idx < 0) {
        continue;
      }
      float alpha = alphas[batch][k][j][i];
      // TODO(gkioxari) It might be more efficient to have threads write in a
      // local variable, and move atomicAdd outside of the loop such that
      // atomicAdd is executed once per thread.
      atomicAdd(
          &result[batch][ch][j][i], features[ch][n_idx] * alpha / cum_alpha);
    }
  }
}

// TODO(gkioxari) support all data types once AtomicAdd supports doubles.
// Currently, support is for floats only.
__global__ void weightedSumNormCudaBackwardKernel(
    // clang-format off
    at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> grad_features,
    at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> grad_alphas,
    const at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> grad_outputs,
    const at::PackedTensorAccessor64<float, 2, at::RestrictPtrTraits> features,
    const at::PackedTensorAccessor64<float, 4, at::RestrictPtrTraits> alphas,
    const at::PackedTensorAccessor64<int64_t, 4, at::RestrictPtrTraits> points_idx) {
  // clang-format on
  const int64_t C = features.size(0);
  const int64_t H = points_idx.size(2);
  const int64_t W = points_idx.size(3);

  // Get the batch and index
  const auto batch = blockIdx.x;

  const int num_pixels = C * W * H;
  const auto num_threads = gridDim.y * blockDim.x;
  const auto tid = blockIdx.y * blockDim.x + threadIdx.x;

  // Parallelize over each feature in each pixel in images of size H * W,
  // for each image in the batch of size batch_size
  for (int pid = tid; pid < num_pixels; pid += num_threads) {
    int ch = pid / (H * W);
    int j = (pid % (H * W)) / W;
    int i = (pid % (H * W)) % W;

    float sum_alpha = 0.;
    float sum_alphafs = 0.;
    // Iterate through the closest K points for this pixel to calculate the
    // cumulative sum of the alphas for this pixel
    for (int k = 0; k < points_idx.size(1); ++k) {
      int n_idx = points_idx[batch][k][j][i];
      // Sentinel value is -1 indicating no point overlaps the pixel
      if (n_idx < 0) {
        continue;
      }

      sum_alpha += alphas[batch][k][j][i];
      sum_alphafs += alphas[batch][k][j][i] * features[ch][n_idx];
    }

    if (sum_alpha < kEpsilon) {
      sum_alpha = kEpsilon;
    }

    // Iterate again through the closest K points for this pixel to calculate
    // the gradient.
    for (int k = 0; k < points_idx.size(1); ++k) {
      int n_idx = points_idx[batch][k][j][i];

      // Sentinel value is -1 indicating no point overlaps the pixel
      if (n_idx < 0) {
        continue;
      }
      float alpha = alphas[batch][k][j][i];

      // TODO(gkioxari) It might be more efficient to have threads write in a
      // local variable, and move atomicAdd outside of the loop such that
      // atomicAdd is executed once per thread.
      // Quotient-rule derivative of (sum_k alpha_k * f_k) / (sum_k alpha_k).
      atomicAdd(
          &grad_alphas[batch][k][j][i],
          (features[ch][n_idx] * sum_alpha - sum_alphafs) /
              (sum_alpha * sum_alpha) * grad_outputs[batch][ch][j][i]);
      atomicAdd(
          &grad_features[ch][n_idx],
          alpha * grad_outputs[batch][ch][j][i] / sum_alpha);
    }
  }
}

// Forward host entry point: validates devices/types, allocates the
// (N, C, H, W) result and launches the forward kernel.
at::Tensor weightedSumNormCudaForward(
    const at::Tensor& features,
    const at::Tensor& alphas,
    const at::Tensor& points_idx) {
  // Check inputs are on the same device
  at::TensorArg features_t{features, "features", 1},
      alphas_t{alphas, "alphas", 2}, points_idx_t{points_idx, "points_idx", 3};
  at::CheckedFrom c = "weightedSumNormCudaForward";
  at::checkAllSameGPU(c, {features_t, alphas_t, points_idx_t});
  at::checkAllSameType(c, {features_t, alphas_t});

  // Set the device for the kernel launch based on the device of the input
  at::cuda::CUDAGuard device_guard(features.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  const int64_t batch_size = points_idx.size(0);
  const int64_t C = features.size(0);
  const int64_t H = points_idx.size(2);
  const int64_t W = points_idx.size(3);

  auto result = at::zeros({batch_size, C, H, W}, features.options());

  if (result.numel() == 0) {
    AT_CUDA_CHECK(cudaGetLastError());
    return result;
  }

  const dim3 threadsPerBlock(64);
  const dim3 numBlocks(batch_size, 1024 / batch_size + 1);

  // TODO(gkioxari) add AT_DISPATCH_FLOATING_TYPES once atomicAdd supports
  // doubles. Currently, support is for floats only.
  // clang-format off
  weightedSumNormCudaForwardKernel<<<numBlocks, threadsPerBlock, 0, stream>>>(
      // As we are using packed accessors here the tensors
      // do not need to be made contiguous.
      result.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
      features.packed_accessor64<float, 2, at::RestrictPtrTraits>(),
      alphas.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
      points_idx.packed_accessor64<int64_t, 4, at::RestrictPtrTraits>());
  // clang-format on

  AT_CUDA_CHECK(cudaGetLastError());
  return result;
}

// Backward host entry point: allocates zero gradients matching features and
// alphas, then launches the backward kernel.
std::tuple<at::Tensor, at::Tensor> weightedSumNormCudaBackward(
    const at::Tensor& grad_outputs,
    const at::Tensor& features,
    const at::Tensor& alphas,
    const at::Tensor& points_idx) {
  // Check inputs are on the same device
  at::TensorArg grad_outputs_t{grad_outputs, "grad_outputs", 1},
      features_t{features, "features", 2}, alphas_t{alphas, "alphas", 3},
      points_idx_t{points_idx, "points_idx", 4};
  at::CheckedFrom c = "weightedSumNormCudaBackward";
  at::checkAllSameGPU(c, {grad_outputs_t, features_t, alphas_t, points_idx_t});
  at::checkAllSameType(c, {grad_outputs_t, features_t, alphas_t});

  // Set the device for the kernel launch based on the device of the input
  at::cuda::CUDAGuard device_guard(features.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  auto grad_features = at::zeros_like(features);
  auto grad_alphas = at::zeros_like(alphas);

  if (grad_features.numel() == 0 || grad_alphas.numel() == 0) {
    AT_CUDA_CHECK(cudaGetLastError());
    return std::make_tuple(grad_features, grad_alphas);
  }

  const int64_t bs = points_idx.size(0);

  const dim3 threadsPerBlock(64);
  const dim3 numBlocks(bs, 1024 / bs + 1);

  // TODO(gkioxari) add AT_DISPATCH_FLOATING_TYPES once atomicAdd supports
  // doubles. Currently, support is for floats only.
  weightedSumNormCudaBackwardKernel<<<numBlocks, threadsPerBlock, 0, stream>>>(
      // clang-format off
      // As we are using packed accessors here the tensors
      // do not need to be made contiguous.
      grad_features.packed_accessor64<float, 2, at::RestrictPtrTraits>(),
      grad_alphas.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
      grad_outputs.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
      features.packed_accessor64<float, 2, at::RestrictPtrTraits>(),
      alphas.packed_accessor64<float, 4, at::RestrictPtrTraits>(),
      points_idx.packed_accessor64<int64_t, 4, at::RestrictPtrTraits>());
  // clang-format on
  AT_CUDA_CHECK(cudaGetLastError());
  return std::make_tuple(grad_features, grad_alphas);
}
Concretely, it gives: +// weighted_fs[b,c,i,j] = sum_k alphas[b,k,i,j] * +// features[c,points_idx[b,k,i,j]] / sum_k alphas[b,k,i,j] + +// CUDA declarations +#ifdef WITH_CUDA +torch::Tensor weightedSumNormCudaForward( + const torch::Tensor& features, + const torch::Tensor& alphas, + const torch::Tensor& points_idx); + +std::tuple weightedSumNormCudaBackward( + const torch::Tensor& grad_outputs, + const torch::Tensor& features, + const torch::Tensor& alphas, + const torch::Tensor& points_idx); +#endif + +// C++ declarations +torch::Tensor weightedSumNormCpuForward( + const torch::Tensor& features, + const torch::Tensor& alphas, + const torch::Tensor& points_idx); + +std::tuple weightedSumNormCpuBackward( + const torch::Tensor& grad_outputs, + const torch::Tensor& features, + const torch::Tensor& alphas, + const torch::Tensor& points_idx); + +torch::Tensor weightedSumNormForward( + torch::Tensor& features, + torch::Tensor& alphas, + torch::Tensor& points_idx) { + features = features.contiguous(); + alphas = alphas.contiguous(); + points_idx = points_idx.contiguous(); + + if (features.is_cuda()) { +#ifdef WITH_CUDA + CHECK_CUDA(features); + CHECK_CUDA(alphas); + CHECK_CUDA(points_idx); + + return weightedSumNormCudaForward(features, alphas, points_idx); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } else { + CHECK_CPU(features); + CHECK_CPU(alphas); + CHECK_CPU(points_idx); + + return weightedSumNormCpuForward(features, alphas, points_idx); + } +} + +std::tuple weightedSumNormBackward( + torch::Tensor& grad_outputs, + torch::Tensor& features, + torch::Tensor& alphas, + torch::Tensor& points_idx) { + grad_outputs = grad_outputs.contiguous(); + features = features.contiguous(); + alphas = alphas.contiguous(); + points_idx = points_idx.contiguous(); + + if (grad_outputs.is_cuda()) { +#ifdef WITH_CUDA + CHECK_CUDA(grad_outputs); + CHECK_CUDA(features); + CHECK_CUDA(alphas); + CHECK_CUDA(points_idx); + + return weightedSumNormCudaBackward( + grad_outputs, 
features, alphas, points_idx); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } else { + CHECK_CPU(grad_outputs); + CHECK_CPU(features); + CHECK_CPU(alphas); + CHECK_CPU(points_idx); + + return weightedSumNormCpuBackward( + grad_outputs, features, alphas, points_idx); + } +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/norm_weighted_sum_cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/norm_weighted_sum_cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..840ef3d24ae652fb42384afc755c0a889543e649 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/norm_weighted_sum_cpu.cpp @@ -0,0 +1,140 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include + +#include +#include + +// Epsilon float +const float kEps = 1e-4; + +torch::Tensor weightedSumNormCpuForward( + const torch::Tensor& features, + const torch::Tensor& alphas, + const torch::Tensor& points_idx) { + const int64_t B = points_idx.size(0); + const int64_t K = points_idx.size(1); + const int64_t H = points_idx.size(2); + const int64_t W = points_idx.size(3); + const int64_t C = features.size(0); + + torch::Tensor result = torch::zeros({B, C, H, W}, features.options()); + + auto features_a = features.accessor(); + auto alphas_a = alphas.accessor(); + auto points_idx_a = points_idx.accessor(); + auto result_a = result.accessor(); + + // Iterate over the batch + for (int b = 0; b < B; ++b) { + // Iterate oer the features + for (int c = 0; c < C; ++c) { + // Iterate through the horizontal lines of the image from top to bottom + for (int j = 0; j < H; ++j) { + // Iterate over pixels in a horizontal line, left to right + for (int i = 0; i < W; ++i) { + float t_alpha = 0.; + for (int k = 0; k < K; ++k) { + int64_t n_idx = points_idx_a[b][k][j][i]; + if (n_idx < 0) { + continue; + } + + t_alpha += alphas_a[b][k][j][i]; + } + + if (t_alpha < kEps) { + t_alpha = kEps; + } + + // Iterate over the different zs to combine + for (int k = 0; k < K; ++k) { + int64_t n_idx = points_idx_a[b][k][j][i]; + // Sentinel value is -1 indicating no point overlaps the pixel + if (n_idx < 0) { + continue; + } + float alpha = alphas_a[b][k][j][i]; + result_a[b][c][j][i] += alpha * features_a[c][n_idx] / t_alpha; + } + } + } + } + } + return result; +} + +std::tuple weightedSumNormCpuBackward( + const torch::Tensor& grad_outputs, + const torch::Tensor& features, + const torch::Tensor& alphas, + const torch::Tensor& points_idx) { + torch::Tensor grad_features = torch::zeros_like(features); + torch::Tensor grad_alphas = torch::zeros_like(alphas); + + const int64_t B = points_idx.size(0); + const int64_t K = points_idx.size(1); + const int64_t H = points_idx.size(2); + 
const int64_t W = points_idx.size(3); + const int64_t C = features.size(0); + + auto grad_outputs_a = grad_outputs.accessor(); + auto features_a = features.accessor(); + auto alphas_a = alphas.accessor(); + auto points_idx_a = points_idx.accessor(); + auto grad_features_a = grad_features.accessor(); + auto grad_alphas_a = grad_alphas.accessor(); + + // Iterate over the batch + for (int b = 0; b < B; ++b) { + // Iterate oer the features + for (int c = 0; c < C; ++c) { + // Iterate through the horizontal lines of the image from top to bottom + for (int j = 0; j < H; ++j) { + // Iterate over pixels in a horizontal line, left to right + for (int i = 0; i < W; ++i) { + float t_alpha = 0.; + float t_alphafs = 0.; + // Iterate through the closest K points for this pixel + for (int k = 0; k < K; ++k) { + int64_t n_idx = points_idx_a[b][k][j][i]; + // Sentinel value is -1, indicating no point overlaps this pixel + if (n_idx < 0) { + continue; + } + + t_alpha += alphas_a[b][k][j][i]; + t_alphafs += alphas_a[b][k][j][i] * features_a[c][n_idx]; + } + + if (t_alpha < kEps) { + t_alpha = kEps; + } + + // Iterate through the closest K points for this pixel ordered by z + // distance. 
+ for (int k = 0; k < K; ++k) { + int64_t n_idx = points_idx_a[b][k][j][i]; + // Sentinel value is -1 indicating no point overlaps the pixel + if (n_idx < 0) { + continue; + } + float alpha = alphas_a[b][k][j][i]; + grad_alphas_a[b][k][j][i] += grad_outputs_a[b][c][j][i] * + (features_a[c][n_idx] * t_alpha - t_alphafs) / + (t_alpha * t_alpha); + grad_features_a[c][n_idx] += + grad_outputs_a[b][c][j][i] * alpha / t_alpha; + } + } + } + } + } + return std::make_tuple(grad_features, grad_alphas); +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/weighted_sum.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/weighted_sum.cu new file mode 100644 index 0000000000000000000000000000000000000000..2e0904e760d57d8df8ec67a889febde9ccfdc9eb --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/weighted_sum.cu @@ -0,0 +1,205 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include + +#include +#include + +#include +#include + +// TODO(gkioxari) support all data types once AtomicAdd supports doubles. +// Currently, support is for floats only. 
+__global__ void weightedSumCudaForwardKernel( + // clang-format off + at::PackedTensorAccessor64 result, + const at::PackedTensorAccessor64 features, + const at::PackedTensorAccessor64 alphas, + const at::PackedTensorAccessor64 points_idx) { + // clang-format on + const int64_t C = features.size(0); + const int64_t H = points_idx.size(2); + const int64_t W = points_idx.size(3); + + // Get the batch and index + const auto batch = blockIdx.x; + + const int num_pixels = C * H * W; + const auto num_threads = gridDim.y * blockDim.x; + const auto tid = blockIdx.y * blockDim.x + threadIdx.x; + + // Parallelize over each feature in each pixel in images of size H * W, + // for each image in the batch of size batch_size + for (int pid = tid; pid < num_pixels; pid += num_threads) { + int ch = pid / (H * W); + int j = (pid % (H * W)) / W; + int i = (pid % (H * W)) % W; + + // Iterate through the closest K points for this pixel + for (int k = 0; k < points_idx.size(1); ++k) { + int n_idx = points_idx[batch][k][j][i]; + // Sentinel value is -1 indicating no point overlaps the pixel + if (n_idx < 0) { + continue; + } + + // Accumulate the values + float alpha = alphas[batch][k][j][i]; + // TODO(gkioxari) It might be more efficient to have threads write in a + // local variable, and move atomicAdd outside of the loop such that + // atomicAdd is executed once per thread. + atomicAdd(&result[batch][ch][j][i], features[ch][n_idx] * alpha); + } + } +} + +// TODO(gkioxari) support all data types once AtomicAdd supports doubles. +// Currently, support is for floats only. 
+__global__ void weightedSumCudaBackwardKernel( + // clang-format off + at::PackedTensorAccessor64 grad_features, + at::PackedTensorAccessor64 grad_alphas, + const at::PackedTensorAccessor64 grad_outputs, + const at::PackedTensorAccessor64 features, + const at::PackedTensorAccessor64 alphas, + const at::PackedTensorAccessor64 points_idx) { + // clang-format on + const int64_t C = features.size(0); + const int64_t H = points_idx.size(2); + const int64_t W = points_idx.size(3); + + // Get the batch and index + const auto batch = blockIdx.x; + + const int num_pixels = C * H * W; + const auto num_threads = gridDim.y * blockDim.x; + const auto tid = blockIdx.y * blockDim.x + threadIdx.x; + + // Iterate over each pixel to compute the contribution to the + // gradient for the features and weights + for (int pid = tid; pid < num_pixels; pid += num_threads) { + int ch = pid / (H * W); + int j = (pid % (H * W)) / W; + int i = (pid % (H * W)) % W; + + // Iterate through the closest K points for this pixel + for (int k = 0; k < points_idx.size(1); ++k) { + int n_idx = points_idx[batch][k][j][i]; + // Sentinel value is -1 indicating no point overlaps the pixel + if (n_idx < 0) { + continue; + } + float alpha = alphas[batch][k][j][i]; + + // TODO(gkioxari) It might be more efficient to have threads write in a + // local variable, and move atomicAdd outside of the loop such that + // atomicAdd is executed once per thread. 
+ atomicAdd( + &grad_alphas[batch][k][j][i], + features[ch][n_idx] * grad_outputs[batch][ch][j][i]); + atomicAdd( + &grad_features[ch][n_idx], alpha * grad_outputs[batch][ch][j][i]); + } + } +} + +at::Tensor weightedSumCudaForward( + const at::Tensor& features, + const at::Tensor& alphas, + const at::Tensor& points_idx) { + // Check inputs are on the same device + at::TensorArg features_t{features, "features", 1}, + alphas_t{alphas, "alphas", 2}, points_idx_t{points_idx, "points_idx", 3}; + at::CheckedFrom c = "weightedSumCudaForward"; + at::checkAllSameGPU(c, {features_t, alphas_t, points_idx_t}); + at::checkAllSameType(c, {features_t, alphas_t}); + + // Set the device for the kernel launch based on the device of the input + at::cuda::CUDAGuard device_guard(features.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + const int64_t batch_size = points_idx.size(0); + const int64_t C = features.size(0); + const int64_t H = points_idx.size(2); + const int64_t W = points_idx.size(3); + + auto result = at::zeros({batch_size, C, H, W}, features.options()); + + if (result.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return result; + } + + const dim3 threadsPerBlock(64); + const dim3 numBlocks(batch_size, 1024 / batch_size + 1); + + // TODO(gkioxari) add AT_DISPATCH_FLOATING_TYPES once atomicAdd supports + // doubles. Currently, support is for floats only. + weightedSumCudaForwardKernel<<>>( + // clang-format off + // As we are using packed accessors here the tensors + // do not need to be made contiguous. 
+ result.packed_accessor64(), + features.packed_accessor64(), + alphas.packed_accessor64(), + points_idx.packed_accessor64()); + // clang-format on + AT_CUDA_CHECK(cudaGetLastError()); + return result; +} + +std::tuple weightedSumCudaBackward( + const at::Tensor& grad_outputs, + const at::Tensor& features, + const at::Tensor& alphas, + const at::Tensor& points_idx) { + // Check inputs are on the same device + at::TensorArg grad_outputs_t{grad_outputs, "grad_outputs", 1}, + features_t{features, "features", 2}, alphas_t{alphas, "alphas", 3}, + points_idx_t{points_idx, "points_idx", 4}; + at::CheckedFrom c = "weightedSumCudaBackward"; + at::checkAllSameGPU(c, {grad_outputs_t, features_t, alphas_t, points_idx_t}); + at::checkAllSameType(c, {grad_outputs_t, features_t, alphas_t}); + + // Set the device for the kernel launch based on the device of the input + at::cuda::CUDAGuard device_guard(features.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + auto grad_features = at::zeros_like(features); + auto grad_alphas = at::zeros_like(alphas); + + if (grad_features.numel() == 0 || grad_alphas.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return std::make_tuple(grad_features, grad_alphas); + } + + const int64_t bs = points_idx.size(0); + + const dim3 threadsPerBlock(64); + const dim3 numBlocks(bs, 1024 / bs + 1); + + // TODO(gkioxari) add AT_DISPATCH_FLOATING_TYPES once atomicAdd supports + // doubles. Currently, support is for floats only. + weightedSumCudaBackwardKernel<<>>( + // clang-format off + // As we are using packed accessors here the tensors + // do not need to be made contiguous. 
+ grad_features.packed_accessor64(), + grad_alphas.packed_accessor64(), + grad_outputs.packed_accessor64(), + features.packed_accessor64(), + alphas.packed_accessor64(), + points_idx.packed_accessor64()); + // clang-format on + AT_CUDA_CHECK(cudaGetLastError()); + return std::make_tuple(grad_features, grad_alphas); +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/weighted_sum.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/weighted_sum.h new file mode 100644 index 0000000000000000000000000000000000000000..cdc3fdf56531a3b84b0f71f7ce4aa9966b114a2c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/weighted_sum.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include "utils/pytorch3d_cutils.h" + +#include + +// Perform weighted sum compositing of points in a z-buffer. +// +// Inputs: +// features: FloatTensor of shape (C, P) which gives the features +// of each point where C is the size of the feature and +// P the number of points. +// alphas: FloatTensor of shape (N, points_per_pixel, H, W) where +// points_per_pixel is the number of points in the z-buffer +// sorted in z-order, and (H, W) is the image size. +// points_idx: IntTensor of shape (N, points_per_pixel, W, W) giving the +// indices of the nearest points at each pixel, sorted in z-order. +// Returns: +// weighted_fs: FloatTensor of shape (N, C, H, W) giving the accumulated +// feature in each point. 
Concretely, it gives: +// weighted_fs[b,c,i,j] = sum_k alphas[b,k,i,j] * +// features[c,points_idx[b,k,i,j]] + +// CUDA declarations +#ifdef WITH_CUDA +torch::Tensor weightedSumCudaForward( + const torch::Tensor& features, + const torch::Tensor& alphas, + const torch::Tensor& points_idx); + +std::tuple weightedSumCudaBackward( + const torch::Tensor& grad_outputs, + const torch::Tensor& features, + const torch::Tensor& alphas, + const torch::Tensor& points_idx); +#endif + +// C++ declarations +torch::Tensor weightedSumCpuForward( + const torch::Tensor& features, + const torch::Tensor& alphas, + const torch::Tensor& points_idx); + +std::tuple weightedSumCpuBackward( + const torch::Tensor& grad_outputs, + const torch::Tensor& features, + const torch::Tensor& alphas, + const torch::Tensor& points_idx); + +torch::Tensor weightedSumForward( + torch::Tensor& features, + torch::Tensor& alphas, + torch::Tensor& points_idx) { + features = features.contiguous(); + alphas = alphas.contiguous(); + points_idx = points_idx.contiguous(); + + if (features.is_cuda()) { +#ifdef WITH_CUDA + CHECK_CUDA(features); + CHECK_CUDA(alphas); + CHECK_CUDA(points_idx); + return weightedSumCudaForward(features, alphas, points_idx); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } else { + CHECK_CPU(features); + CHECK_CPU(alphas); + CHECK_CPU(points_idx); + return weightedSumCpuForward(features, alphas, points_idx); + } +} + +std::tuple weightedSumBackward( + torch::Tensor& grad_outputs, + torch::Tensor& features, + torch::Tensor& alphas, + torch::Tensor& points_idx) { + grad_outputs = grad_outputs.contiguous(); + features = features.contiguous(); + alphas = alphas.contiguous(); + points_idx = points_idx.contiguous(); + + if (grad_outputs.is_cuda()) { +#ifdef WITH_CUDA + CHECK_CUDA(grad_outputs); + CHECK_CUDA(features); + CHECK_CUDA(alphas); + CHECK_CUDA(points_idx); + + return weightedSumCudaBackward(grad_outputs, features, alphas, points_idx); +#else + AT_ERROR("Not compiled with 
GPU support"); +#endif + } else { + CHECK_CPU(grad_outputs); + CHECK_CPU(features); + CHECK_CPU(alphas); + CHECK_CPU(points_idx); + + return weightedSumCpuBackward(grad_outputs, features, alphas, points_idx); + } +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/weighted_sum_cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/weighted_sum_cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b7bddee3c9791647352b686d368dd2e6adccf27f --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/compositing/weighted_sum_cpu.cpp @@ -0,0 +1,104 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include + +#include +#include + +torch::Tensor weightedSumCpuForward( + const torch::Tensor& features, + const torch::Tensor& alphas, + const torch::Tensor& points_idx) { + const int64_t B = points_idx.size(0); + const int64_t K = points_idx.size(1); + const int64_t H = points_idx.size(2); + const int64_t W = points_idx.size(3); + const int64_t C = features.size(0); + + torch::Tensor result = torch::zeros({B, C, H, W}, features.options()); + + auto features_a = features.accessor(); + auto alphas_a = alphas.accessor(); + auto points_idx_a = points_idx.accessor(); + auto result_a = result.accessor(); + + // Iterate over the batch + for (int b = 0; b < B; ++b) { + // Iterate over the features + for (int c = 0; c < C; ++c) { + // Iterate through the horizontal lines of the image from top to bottom + for (int j = 0; j < H; ++j) { + // Iterate over pixels in a horizontal line, left to right + for (int i = 0; i < W; ++i) { + // Iterate through the closest K points for this pixel + for (int k = 0; k < K; ++k) { + 
int64_t n_idx = points_idx_a[b][k][j][i]; + // Sentinel value is -1 indicating no point overlaps the pixel + if (n_idx < 0) { + continue; + } + + float alpha = alphas_a[b][k][j][i]; + result_a[b][c][j][i] += alpha * features_a[c][n_idx]; + } + } + } + } + } + return result; +} + +std::tuple weightedSumCpuBackward( + const torch::Tensor& grad_outputs, + const torch::Tensor& features, + const torch::Tensor& alphas, + const torch::Tensor& points_idx) { + const int64_t B = points_idx.size(0); + const int64_t K = points_idx.size(1); + const int64_t H = points_idx.size(2); + const int64_t W = points_idx.size(3); + const int64_t C = features.size(0); + + torch::Tensor grad_features = torch::zeros_like(features); + torch::Tensor grad_alphas = torch::zeros_like(alphas); + + auto grad_outputs_a = grad_outputs.accessor(); + auto features_a = features.accessor(); + auto alphas_a = alphas.accessor(); + auto points_idx_a = points_idx.accessor(); + auto grad_features_a = grad_features.accessor(); + auto grad_alphas_a = grad_alphas.accessor(); + + // Iterate over the batch + for (int b = 0; b < B; ++b) { + // Iterate oer the features + for (int c = 0; c < C; ++c) { + // Iterate through the horizontal lines of the image from top to bottom + for (int j = 0; j < H; ++j) { + // Iterate over pixels in a horizontal line, left to right + for (int i = 0; i < W; ++i) { + // Iterate through the closest K points for this pixel + for (int k = 0; k < K; ++k) { + int64_t n_idx = points_idx_a[b][k][j][i]; + // Sentinal value is -1, indicating no point overlaps this pixel + if (n_idx < 0) { + continue; + } + + float alpha = alphas_a[b][k][j][i]; + grad_alphas_a[b][k][j][i] += + grad_outputs_a[b][c][j][i] * features_a[c][n_idx]; + grad_features_a[c][n_idx] += grad_outputs_a[b][c][j][i] * alpha; + } + } + } + } + } + return std::make_tuple(grad_features, grad_alphas); +} diff --git 
a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/gather_scatter/gather_scatter.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/gather_scatter/gather_scatter.cu new file mode 100644 index 0000000000000000000000000000000000000000..d4affd4ba3f006b99d9b0fa3e5ae1d3ac3ee45c1 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/gather_scatter/gather_scatter.cu @@ -0,0 +1,91 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include + +// TODO(T47953967) to make this cuda kernel support all datatypes. +__global__ void GatherScatterCudaKernel( + const float* __restrict__ input, + const int64_t* __restrict__ edges, + float* __restrict__ output, + bool directed, + bool backward, + const size_t V, + const size_t D, + const size_t E) { + const auto tid = threadIdx.x; + + // Reverse the vertex order if backward. + const int v0_idx = backward ? 1 : 0; + const int v1_idx = backward ? 0 : 1; + + // Edges are split evenly across the blocks. + for (auto e = blockIdx.x; e < E; e += gridDim.x) { + // Get indices of vertices which form the edge. + const int64_t v0 = edges[2 * e + v0_idx]; + const int64_t v1 = edges[2 * e + v1_idx]; + + // Split vertex features evenly across threads. + // This implementation will be quite wasteful when D<128 since there will be + // a lot of threads doing nothing. 
+ for (auto d = tid; d < D; d += blockDim.x) { + const float val = input[v1 * D + d]; + float* address = output + v0 * D + d; + atomicAdd(address, val); + if (!directed) { + const float val = input[v0 * D + d]; + float* address = output + v1 * D + d; + atomicAdd(address, val); + } + } + __syncthreads(); + } +} + +at::Tensor GatherScatterCuda( + const at::Tensor& input, + const at::Tensor& edges, + bool directed, + bool backward) { + // Check inputs are on the same device + at::TensorArg input_t{input, "input", 1}, edges_t{edges, "edges", 2}; + at::CheckedFrom c = "GatherScatterCuda"; + at::checkAllSameGPU(c, {input_t, edges_t}); + + // Set the device for the kernel launch based on the device of the input + at::cuda::CUDAGuard device_guard(input.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + const auto num_vertices = input.size(0); + const auto input_feature_dim = input.size(1); + const auto num_edges = edges.size(0); + + auto output = at::zeros({num_vertices, input_feature_dim}, input.options()); + const size_t threads = 128; + const size_t max_blocks = 1920; + const size_t blocks = num_edges < max_blocks ? 
num_edges : max_blocks; + + if (output.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return output; + } + + GatherScatterCudaKernel<<>>( + input.contiguous().data_ptr(), + edges.contiguous().data_ptr(), + output.data_ptr(), + directed, + backward, + num_vertices, + input_feature_dim, + num_edges); + AT_CUDA_CHECK(cudaGetLastError()); + return output; +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/gather_scatter/gather_scatter.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/gather_scatter/gather_scatter.h new file mode 100644 index 0000000000000000000000000000000000000000..21d5f7b15f2651d20050702c071da5c5d081f449 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/gather_scatter/gather_scatter.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once +#include +#include "utils/pytorch3d_cutils.h" + +// Fused gather scatter operation for aggregating features of neighbor nodes +// in a graph. This gather scatter operation is specific to graphs as edge +// indices are used as input. +// +// Args: +// input: float32 Tensor of shape (V, D) where V is the number of vertices +// and D is the feature dimension. +// edges: int64 Tensor of shape (E, 2) giving the indices of the vertices that +// make up the edge. E is the number of edges. +// directed: Bool indicating if edges in the graph are directed. For a +// directed graph v0 -> v1 the updated feature for v0 depends on v1. +// backward: Bool indicating if the operation is the backward pass. +// +// Returns: +// output: float32 Tensor of same shape as input. 
+ +at::Tensor GatherScatterCuda( + const at::Tensor& input, + const at::Tensor& edges, + bool directed, + bool backward); + +at::Tensor GatherScatterCpu( + const at::Tensor& input, + const at::Tensor& edges, + bool directed, + bool backward); + +// Exposed implementation. +at::Tensor GatherScatter( + const at::Tensor& input, + const at::Tensor& edges, + bool directed, + bool backward) { + if (input.is_cuda() && edges.is_cuda()) { +#ifdef WITH_CUDA + CHECK_CUDA(input); + CHECK_CUDA(edges); + return GatherScatterCuda(input, edges, directed, backward); +#else + AT_ERROR("Not compiled with GPU support."); +#endif + } + CHECK_CPU(input); + CHECK_CPU(edges); + return GatherScatterCpu(input, edges, directed, backward); +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/gather_scatter/gather_scatter_cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/gather_scatter/gather_scatter_cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8511e125519cf50f6b538da1adc33b39e4b16171 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/gather_scatter/gather_scatter_cpu.cpp @@ -0,0 +1,41 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include + +at::Tensor GatherScatterCpu( + const at::Tensor& input, + const at::Tensor& edges, + bool directed, + bool backward) { + const auto num_vertices = input.size(0); + const auto input_feature_dim = input.size(1); + const auto num_edges = edges.size(0); + + auto output = at::zeros({num_vertices, input_feature_dim}, input.options()); + + auto input_a = input.accessor(); + auto edges_a = edges.accessor(); + auto output_a = output.accessor(); + const int v0_idx = backward ? 
1 : 0; + const int v1_idx = backward ? 0 : 1; + + for (int e = 0; e < num_edges; ++e) { + // Get indices of vertices which form the edge. + const int64_t v0 = edges_a[e][v0_idx]; + const int64_t v1 = edges_a[e][v1_idx]; + + for (int d = 0; d < input_feature_dim; ++d) { + output_a[v0][d] += input_a[v1][d]; + if (!directed) { + output_a[v1][d] += input_a[v0][d]; + } + } + } + return output; +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/interp_face_attrs/interp_face_attrs.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/interp_face_attrs/interp_face_attrs.cu new file mode 100644 index 0000000000000000000000000000000000000000..8fe292ae4ec93a17243c7551fc0c4cf8452bbb92 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/interp_face_attrs/interp_face_attrs.cu @@ -0,0 +1,170 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include +#include +#include +#include + +template +__global__ void InterpFaceAttrsForwardKernel( + const int64_t* __restrict__ pix_to_face, // (P,) + const scalar_t* __restrict__ barycentric_coords, // (P, 3) + const scalar_t* __restrict__ face_attrs, // (F, 3, D) + scalar_t* pix_attrs, // (P, D) + const size_t P, + const size_t F, + const size_t D) { + const auto tid = threadIdx.x + blockIdx.x * blockDim.x; + const auto num_threads = blockDim.x * gridDim.x; + for (int pd = tid; pd < P * D; pd += num_threads) { + const int p = pd / D; + const int d = pd % D; + const int64_t f = pix_to_face[p]; + if (f < 0) { + continue; + } + scalar_t pix_attr = 0.0; + for (int i = 0; i < 3; ++i) { + scalar_t weight = barycentric_coords[p * 3 + i]; + scalar_t vert_attr = face_attrs[f * 3 * D + i * D + d]; + pix_attr += weight * vert_attr; + } + pix_attrs[p * D + d] = pix_attr; + } +} + +at::Tensor InterpFaceAttrsForwardCuda( + const at::Tensor& pix_to_face, + const at::Tensor& barycentric_coords, + const at::Tensor& face_attrs) { + // Make sure all inputs are on the same device + at::TensorArg pix_to_face_t{pix_to_face, "pix_to_face", 1}, + barycentric_coords_t{barycentric_coords, "barycentric_coords", 2}, + face_attrs_t{face_attrs, "face_attributes", 3}; + at::CheckedFrom c = "InterpFaceAttrsForwardCuda"; + at::checkAllSameGPU(c, {pix_to_face_t, barycentric_coords_t, face_attrs_t}); + at::checkAllSameType(c, {barycentric_coords_t, face_attrs_t}); + + // Set the device for the kernel launch based on the input + at::cuda::CUDAGuard device_guard(pix_to_face.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + const auto P = pix_to_face.size(0); + const auto F = face_attrs.size(0); + const auto D = face_attrs.size(2); + + TORCH_CHECK( + barycentric_coords.size(0) == P && barycentric_coords.size(1) == 3, + "barycentric_coords must have size (P, 3)"); + TORCH_CHECK(face_attrs.size(1) == 3, "face_attrs must have size (F, 3, D)"); + + auto pix_attrs = 
at::zeros({P, D}, face_attrs.options()); + const int threads = 1024; + const int blocks = 512; + AT_DISPATCH_FLOATING_TYPES( + face_attrs.scalar_type(), "interp_face_attrs_cuda", ([&] { + InterpFaceAttrsForwardKernel<<>>( + pix_to_face.contiguous().data_ptr(), + barycentric_coords.contiguous().data_ptr(), + face_attrs.contiguous().data_ptr(), + pix_attrs.contiguous().data_ptr(), + P, + F, + D); + })); + AT_CUDA_CHECK(cudaGetLastError()); + return pix_attrs; +} + +template +__global__ void InterpFaceAttrsBackwardKernel( + const int64_t* __restrict__ pix_to_face, // (P,) + const scalar_t* __restrict__ barycentric_coords, // (P, 3) + const scalar_t* __restrict__ face_attrs, // (F, 3, D) + const scalar_t* __restrict__ grad_pix_attrs, // (P, D) + scalar_t* __restrict__ grad_barycentric_coords, // (P, 3) + scalar_t* __restrict__ grad_face_attrs, // (F, 3, D) + const size_t P, + const size_t F, + const size_t D) { + const auto tid = threadIdx.x + blockIdx.x * blockDim.x; + const auto num_threads = blockDim.x * gridDim.x; + for (int pd = tid; pd < P * D; pd += num_threads) { + const int p = pd / D; + const int d = pd % D; + const int64_t f = pix_to_face[p]; + if (f < 0) { + continue; + } + scalar_t upstream_grad = grad_pix_attrs[p * D + d]; + for (int i = 0; i < 3; ++i) { + scalar_t weight = barycentric_coords[p * 3 + i]; + scalar_t vert_attr = face_attrs[f * 3 * D + i * D + d]; + scalar_t grad_bary_down = vert_attr * upstream_grad; + scalar_t grad_face_down = weight * upstream_grad; + atomicAdd(grad_barycentric_coords + p * 3 + i, grad_bary_down); + atomicAdd(grad_face_attrs + f * 3 * D + i * D + d, grad_face_down); + } + } +} + +std::tuple InterpFaceAttrsBackwardCuda( + const at::Tensor& pix_to_face, + const at::Tensor& barycentric_coords, + const at::Tensor& face_attrs, + const at::Tensor& grad_pix_attrs) { + // Make sure all inputs are on the same device + at::TensorArg pix_to_face_t{pix_to_face, "pix_to_face", 1}, + barycentric_coords_t{barycentric_coords, 
"barycentric_coords", 2}, + face_attrs_t{face_attrs, "face_attributes", 3}, + grad_pix_attrs_t{grad_pix_attrs, "pix_attrs", 4}; + at::CheckedFrom c = "InterpFaceAttrsBackwarduda"; + at::checkAllSameGPU( + c, {pix_to_face_t, barycentric_coords_t, face_attrs_t, grad_pix_attrs_t}); + at::checkAllSameType( + c, {barycentric_coords_t, face_attrs_t, grad_pix_attrs_t}); + + // This is nondeterministic because atomicAdd + at::globalContext().alertNotDeterministic("InterpFaceAttrsBackwardCuda"); + + // Set the device for the kernel launch based on the input + at::cuda::CUDAGuard device_guard(pix_to_face.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + const auto P = pix_to_face.size(0); + const auto F = face_attrs.size(0); + const auto D = face_attrs.size(2); + + TORCH_CHECK( + barycentric_coords.size(0) == P && barycentric_coords.size(1) == 3, + "barycentric_coords must have size (P, 3)"); + TORCH_CHECK(face_attrs.size(1) == 3, "face_attrs must have size (F, 3, D)"); + TORCH_CHECK( + grad_pix_attrs.size(0) == P && grad_pix_attrs.size(1) == D, + "grad_pix_attrs must have size (P, D)"); + + auto grad_barycentric_coords = at::zeros_like(barycentric_coords); + auto grad_face_attrs = at::zeros_like(face_attrs); + const int threads = 1024; + const int blocks = 512; + // Only allow float for now. 
+ // TODO: Add support for double once we fix atomicAdd + // clang-format off + InterpFaceAttrsBackwardKernel<<>>( + pix_to_face.contiguous().data_ptr(), + barycentric_coords.contiguous().data_ptr(), + face_attrs.contiguous().data_ptr(), + grad_pix_attrs.contiguous().data_ptr(), + grad_barycentric_coords.contiguous().data_ptr(), + grad_face_attrs.contiguous().data_ptr(), + P, F, D); + AT_CUDA_CHECK(cudaGetLastError()); + // clang-format on + return std::make_tuple(grad_barycentric_coords, grad_face_attrs); +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/interp_face_attrs/interp_face_attrs.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/interp_face_attrs/interp_face_attrs.h new file mode 100644 index 0000000000000000000000000000000000000000..42f932301ce3da34c0e00db624b0ce06d11bc969 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/interp_face_attrs/interp_face_attrs.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once +#include +#include +#include "utils/pytorch3d_cutils.h" + +// Interpolates per-face attributes (forward pass) +// +// Inputs: +// pix_to_face: LongTensor of shape (P,) giving a face index for each pixel. +// Each element should be < F, the total number of faces. +// Face indices < 0 indicate that the pixel is not covered by a face. +// barycentric_coords: FloatTensor of shape (P, 3) giving barycentric coords. +// face_attrs: FloatTensor of shape (F, 3, D) giving a D-dimensional +// value for each vertex of each face. +// +// Returns: +// pix_attributes: FloatTensor of shape (P, D) giving an interpolated value +// for each pixel. 
+ +// CPU implementation +at::Tensor InterpFaceAttrsForwardCpu( + const at::Tensor& pix_to_face, + const at::Tensor& barycentric_coords, + const at::Tensor& face_attrs) { + AT_ERROR("Not Implemented"); + return pix_to_face; +} + +#ifdef WITH_CUDA +// Cuda implementation. +at::Tensor InterpFaceAttrsForwardCuda( + const at::Tensor& pix_to_face, + const at::Tensor& barycentric_coords, + const at::Tensor& face_attrs); +#endif + +// General implementation +at::Tensor InterpFaceAttrsForward( + const at::Tensor& pix_to_face, + const at::Tensor& barycentric_coords, + const at::Tensor& face_attrs) { + if (pix_to_face.is_cuda()) { +#ifdef WITH_CUDA + CHECK_CUDA(face_attrs); + CHECK_CUDA(barycentric_coords); + return InterpFaceAttrsForwardCuda( + pix_to_face, barycentric_coords, face_attrs); +#else + AT_ERROR("Not compiled with GPU support."); +#endif + } + CHECK_CPU(face_attrs); + CHECK_CPU(barycentric_coords); + return InterpFaceAttrsForwardCpu(pix_to_face, barycentric_coords, face_attrs); +} + +// Interpolates per-face attributes (backward pass) +// +// Inputs: +// pix_to_face: LongTensor of shape (P,) giving a face index for each pixel. +// Each element should be < F, the total number of faces. +// Face indices < 0 indicate that the pixel is not covered by a face. +// barycentric_coords: FloatTensor of shape (P, 3) giving barycentric coords. +// face_attrs: FloatTensor of shape (F, 3, D) giving a D-dimensional +// value for each vertex of each face. 
+// grad_pix_attrs: Upstream gradients of shape (P, D) +// +// Returns a tuple of: +// grad_barycentric_coords: FloatTensor of shape (P, 3) +// grad_face_attrs: FloatTensor of shape (F, 3, D) + +std::tuple InterpFaceAttrsBackwardCpu( + const at::Tensor& pix_to_face, + const at::Tensor& barycentric_coords, + const at::Tensor& face_attrs, + const at::Tensor& grad_pix_attrs) { + AT_ERROR("Not Implemented"); + return std::make_tuple(pix_to_face, pix_to_face); +} + +std::tuple InterpFaceAttrsBackwardCuda( + const at::Tensor& pix_to_face, + const at::Tensor& barycentric_coords, + const at::Tensor& face_attrs, + const at::Tensor& grad_pix_attrs); + +std::tuple InterpFaceAttrsBackward( + const at::Tensor& pix_to_face, + const at::Tensor& barycentric_coords, + const at::Tensor& face_attrs, + const at::Tensor& grad_pix_attrs) { + if (pix_to_face.is_cuda()) { +#ifdef WITH_CUDA + CHECK_CUDA(face_attrs); + CHECK_CUDA(barycentric_coords); + CHECK_CUDA(grad_pix_attrs); + return InterpFaceAttrsBackwardCuda( + pix_to_face, barycentric_coords, face_attrs, grad_pix_attrs); +#else + AT_ERROR("Not compiled with GPU support."); +#endif + } + CHECK_CPU(face_attrs); + CHECK_CPU(barycentric_coords); + CHECK_CPU(grad_pix_attrs); + return InterpFaceAttrsBackwardCpu( + pix_to_face, barycentric_coords, face_attrs, grad_pix_attrs); +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/iou_box3d/iou_box3d.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/iou_box3d/iou_box3d.cu new file mode 100644 index 0000000000000000000000000000000000000000..a315550f639ba9353016d8012db453f6d952a5b0 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/iou_box3d/iou_box3d.cu @@ -0,0 +1,175 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. 
+ * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include +#include +#include +#include "iou_box3d/iou_utils.cuh" + +// Parallelize over N*M computations which can each be done +// independently +__global__ void IoUBox3DKernel( + const at::PackedTensorAccessor64 boxes1, + const at::PackedTensorAccessor64 boxes2, + at::PackedTensorAccessor64 vols, + at::PackedTensorAccessor64 ious) { + const size_t N = boxes1.size(0); + const size_t M = boxes2.size(0); + + const size_t tid = blockIdx.x * blockDim.x + threadIdx.x; + const size_t stride = gridDim.x * blockDim.x; + + FaceVerts box1_tris[NUM_TRIS]; + FaceVerts box2_tris[NUM_TRIS]; + FaceVerts box1_planes[NUM_PLANES]; + FaceVerts box2_planes[NUM_PLANES]; + + for (size_t i = tid; i < N * M; i += stride) { + const size_t n = i / M; // box1 index + const size_t m = i % M; // box2 index + + // Convert to array of structs of face vertices i.e. effectively (F, 3, 3) + // FaceVerts is a data type defined in iou_utils.cuh + GetBoxTris(boxes1[n], box1_tris); + GetBoxTris(boxes2[m], box2_tris); + + // Calculate the position of the center of the box which is used in + // several calculations. This requires a tensor as input. + const float3 box1_center = BoxCenter(boxes1[n]); + const float3 box2_center = BoxCenter(boxes2[m]); + + // Convert to an array of face vertices + GetBoxPlanes(boxes1[n], box1_planes); + GetBoxPlanes(boxes2[m], box2_planes); + + // Get Box Volumes + const float box1_vol = BoxVolume(box1_tris, box1_center, NUM_TRIS); + const float box2_vol = BoxVolume(box2_tris, box2_center, NUM_TRIS); + + // Tris in Box1 intersection with Planes in Box2 + // Initialize box1 intersecting faces. MAX_TRIS is the + // max faces possible in the intersecting shape. 
+ // TODO: determine if the value of MAX_TRIS is sufficient or + // if we should store the max tris for each NxM computation + // and throw an error if any exceeds the max. + FaceVerts box1_intersect[MAX_TRIS]; + for (int j = 0; j < NUM_TRIS; ++j) { + // Initialize the faces from the box + box1_intersect[j] = box1_tris[j]; + } + // Get the count of the actual number of faces in the intersecting shape + int box1_count = BoxIntersections(box2_planes, box2_center, box1_intersect); + + // Tris in Box2 intersection with Planes in Box1 + FaceVerts box2_intersect[MAX_TRIS]; + for (int j = 0; j < NUM_TRIS; ++j) { + box2_intersect[j] = box2_tris[j]; + } + const int box2_count = + BoxIntersections(box1_planes, box1_center, box2_intersect); + + // If there are overlapping regions in Box2, remove any coplanar faces + if (box2_count > 0) { + // Identify if any triangles in Box2 are coplanar with Box1 + Keep tri2_keep[MAX_TRIS]; + for (int j = 0; j < MAX_TRIS; ++j) { + // Initialize the valid faces to be true + tri2_keep[j].keep = j < box2_count ? true : false; + } + for (int b1 = 0; b1 < box1_count; ++b1) { + for (int b2 = 0; b2 < box2_count; ++b2) { + const bool is_coplanar = + IsCoplanarTriTri(box1_intersect[b1], box2_intersect[b2]); + const float area = FaceArea(box1_intersect[b1]); + if ((is_coplanar) && (area > aEpsilon)) { + tri2_keep[b2].keep = false; + } + } + } + + // Keep only the non coplanar triangles in Box2 - add them to the + // Box1 triangles. + for (int b2 = 0; b2 < box2_count; ++b2) { + if (tri2_keep[b2].keep) { + box1_intersect[box1_count] = box2_intersect[b2]; + // box1_count will determine the total faces in the + // intersecting shape + box1_count++; + } + } + } + + // Initialize the vol and iou to 0.0 in case there are no triangles + // in the intersecting shape. 
+ float vol = 0.0; + float iou = 0.0; + + // If there are triangles in the intersecting shape + if (box1_count > 0) { + // The intersecting shape is a polyhedron made up of the + // triangular faces that are all now in box1_intersect. + // Calculate the polyhedron center + const float3 poly_center = PolyhedronCenter(box1_intersect, box1_count); + // Compute intersecting polyhedron volume + vol = BoxVolume(box1_intersect, poly_center, box1_count); + // Compute IoU + iou = vol / (box1_vol + box2_vol - vol); + } + + // Write the volume and IoU to global memory + vols[n][m] = vol; + ious[n][m] = iou; + } +} + +std::tuple IoUBox3DCuda( + const at::Tensor& boxes1, // (N, 8, 3) + const at::Tensor& boxes2) { // (M, 8, 3) + // Check inputs are on the same device + at::TensorArg boxes1_t{boxes1, "boxes1", 1}, boxes2_t{boxes2, "boxes2", 2}; + at::CheckedFrom c = "IoUBox3DCuda"; + at::checkAllSameGPU(c, {boxes1_t, boxes2_t}); + at::checkAllSameType(c, {boxes1_t, boxes2_t}); + + // Set the device for the kernel launch based on the device of boxes1 + at::cuda::CUDAGuard device_guard(boxes1.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + TORCH_CHECK(boxes2.size(2) == boxes1.size(2), "Boxes must have shape (8, 3)"); + + TORCH_CHECK( + (boxes2.size(1) == 8) && (boxes1.size(1) == 8), + "Boxes must have shape (8, 3)"); + + const int64_t N = boxes1.size(0); + const int64_t M = boxes2.size(0); + + auto vols = at::zeros({N, M}, boxes1.options()); + auto ious = at::zeros({N, M}, boxes1.options()); + + if (vols.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return std::make_tuple(vols, ious); + } + + const size_t blocks = 512; + const size_t threads = 256; + + IoUBox3DKernel<<>>( + boxes1.packed_accessor64(), + boxes2.packed_accessor64(), + vols.packed_accessor64(), + ious.packed_accessor64()); + + AT_CUDA_CHECK(cudaGetLastError()); + + return std::make_tuple(vols, ious); +} diff --git 
a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/iou_box3d/iou_box3d.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/iou_box3d/iou_box3d.h new file mode 100644 index 0000000000000000000000000000000000000000..d5034b3d5e58817edbb55975ca5d3751a50f3d78 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/iou_box3d/iou_box3d.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once +#include +#include +#include "utils/pytorch3d_cutils.h" + +// Calculate the intersection volume and IoU metric for two batches of boxes +// +// Args: +// boxes1: tensor of shape (N, 8, 3) of the coordinates of the 1st boxes +// boxes2: tensor of shape (M, 8, 3) of the coordinates of the 2nd boxes +// Returns: +// vol: (N, M) tensor of the volume of the intersecting convex shapes +// iou: (N, M) tensor of the intersection over union which is +// defined as: `iou = vol / (vol1 + vol2 - vol)` + +// CPU implementation +std::tuple IoUBox3DCpu( + const at::Tensor& boxes1, + const at::Tensor& boxes2); + +// CUDA implementation +std::tuple IoUBox3DCuda( + const at::Tensor& boxes1, + const at::Tensor& boxes2); + +// Implementation which is exposed +inline std::tuple IoUBox3D( + const at::Tensor& boxes1, + const at::Tensor& boxes2) { + if (boxes1.is_cuda() || boxes2.is_cuda()) { +#ifdef WITH_CUDA + CHECK_CUDA(boxes1); + CHECK_CUDA(boxes2); + return IoUBox3DCuda(boxes1.contiguous(), boxes2.contiguous()); +#else + AT_ERROR("Not compiled with GPU support."); +#endif + } + CHECK_CPU(boxes1); + CHECK_CPU(boxes2); + return IoUBox3DCpu(boxes1.contiguous(), boxes2.contiguous()); +} diff --git 
a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/iou_box3d/iou_box3d_cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/iou_box3d/iou_box3d_cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..957d3acc422c1cda57c9425e327a7f2c8cb32681 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/iou_box3d/iou_box3d_cpu.cpp @@ -0,0 +1,119 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include "iou_box3d/iou_utils.h" + +std::tuple IoUBox3DCpu( + const at::Tensor& boxes1, + const at::Tensor& boxes2) { + const int N = boxes1.size(0); + const int M = boxes2.size(0); + auto float_opts = boxes1.options().dtype(torch::kFloat32); + torch::Tensor vols = torch::zeros({N, M}, float_opts); + torch::Tensor ious = torch::zeros({N, M}, float_opts); + + // Create tensor accessors + auto boxes1_a = boxes1.accessor(); + auto boxes2_a = boxes2.accessor(); + auto vols_a = vols.accessor(); + auto ious_a = ious.accessor(); + + // Iterate through the N boxes in boxes1 + for (int n = 0; n < N; ++n) { + const auto& box1 = boxes1_a[n]; + // Convert to vector of face vertices i.e. effectively (F, 3, 3) + // face_verts is a data type defined in iou_utils.h + const face_verts box1_tris = GetBoxTris(box1); + + // Calculate the position of the center of the box which is used in + // several calculations. This requires a tensor as input. + const vec3 box1_center = BoxCenter(boxes1[n]); + + // Convert to vector of face vertices i.e. 
effectively (P, 4, 3) + const face_verts box1_planes = GetBoxPlanes(box1); + + // Get Box Volumes + const float box1_vol = BoxVolume(box1_tris, box1_center); + + // Iterate through the M boxes in boxes2 + for (int m = 0; m < M; ++m) { + // Repeat above steps for box2 + // TODO: check if caching these value helps performance. + const auto& box2 = boxes2_a[m]; + const face_verts box2_tris = GetBoxTris(box2); + const vec3 box2_center = BoxCenter(boxes2[m]); + const face_verts box2_planes = GetBoxPlanes(box2); + const float box2_vol = BoxVolume(box2_tris, box2_center); + + // Every triangle in one box will be compared to each plane in the other + // box. There are 3 possible outcomes: + // 1. If the triangle is fully inside, then it will + // remain as is. + // 2. If the triagnle it is fully outside, it will be removed. + // 3. If the triangle intersects with the (infinite) plane, it + // will be broken into subtriangles such that each subtriangle is full + // inside the plane and part of the intersecting tetrahedron. + + // Tris in Box1 -> Planes in Box2 + face_verts box1_intersect = + BoxIntersections(box1_tris, box2_planes, box2_center); + // Tris in Box2 -> Planes in Box1 + face_verts box2_intersect = + BoxIntersections(box2_tris, box1_planes, box1_center); + + // If there are overlapping regions in Box2, remove any coplanar faces + if (box2_intersect.size() > 0) { + // Identify if any triangles in Box2 are coplanar with Box1 + std::vector tri2_keep(box2_intersect.size()); + std::fill(tri2_keep.begin(), tri2_keep.end(), 1); + for (int b1 = 0; b1 < box1_intersect.size(); ++b1) { + for (int b2 = 0; b2 < box2_intersect.size(); ++b2) { + const bool is_coplanar = + IsCoplanarTriTri(box1_intersect[b1], box2_intersect[b2]); + const float area = FaceArea(box1_intersect[b1]); + if ((is_coplanar) && (area > aEpsilon)) { + tri2_keep[b2] = 0; + } + } + } + + // Keep only the non coplanar triangles in Box2 - add them to the + // Box1 triangles. 
+ for (int b2 = 0; b2 < box2_intersect.size(); ++b2) { + if (tri2_keep[b2] == 1) { + box1_intersect.push_back((box2_intersect[b2])); + } + } + } + + // Initialize the vol and iou to 0.0 in case there are no triangles + // in the intersecting shape. + float vol = 0.0; + float iou = 0.0; + + // If there are triangles in the intersecting shape + if (box1_intersect.size() > 0) { + // The intersecting shape is a polyhedron made up of the + // triangular faces that are all now in box1_intersect. + // Calculate the polyhedron center + const vec3 polyhedron_center = PolyhedronCenter(box1_intersect); + // Compute intersecting polyhedron volume + vol = BoxVolume(box1_intersect, polyhedron_center); + // Compute IoU + iou = vol / (box1_vol + box2_vol - vol); + } + // Save out volume and IoU + vols_a[n][m] = vol; + ious_a[n][m] = iou; + } + } + return std::make_tuple(vols, ious); +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/iou_box3d/iou_utils.cuh b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/iou_box3d/iou_utils.cuh new file mode 100644 index 0000000000000000000000000000000000000000..1d702069a7757a6de79951d3024d5518880a5eb4 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/iou_box3d/iou_utils.cuh @@ -0,0 +1,734 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include "utils/float_math.cuh" + +// dEpsilon: Used in dot products and is used to assess whether two unit vectors +// are orthogonal (or coplanar). It's an epsilon on cos(θ). +// With dEpsilon = 0.001, two unit vectors are considered co-planar +// if their θ = 2.5 deg. 
+__constant__ const float dEpsilon = 1e-3; +// aEpsilon: Used once in main function to check for small face areas +__constant__ const float aEpsilon = 1e-4; +// kEpsilon: Used only for norm(u) = u/max(||u||, kEpsilon) +__constant__ const float kEpsilon = 1e-8; + +/* +_PLANES and _TRIS define the 4- and 3-connectivity +of the 8 box corners. +_PLANES gives the quad faces of the 3D box +_TRIS gives the triangle faces of the 3D box +*/ +const int NUM_PLANES = 6; +const int NUM_TRIS = 12; +// This is required for iniitalizing the faces +// in the intersecting shape +const int MAX_TRIS = 100; + +// Create data types for representing the +// verts for each face and the indices. +// We will use struct arrays for representing +// the data for each box and intersecting +// triangles +struct FaceVerts { + float3 v0; + float3 v1; + float3 v2; + float3 v3; // Can be empty for triangles +}; + +struct FaceVertsIdx { + int v0; + int v1; + int v2; + int v3; // Can be empty for triangles +}; + +// This is used when deciding which faces to +// keep that are not coplanar +struct Keep { + bool keep; +}; + +__device__ FaceVertsIdx _PLANES[] = { + {0, 1, 2, 3}, + {3, 2, 6, 7}, + {0, 1, 5, 4}, + {0, 3, 7, 4}, + {1, 5, 6, 2}, + {4, 5, 6, 7}, +}; +__device__ FaceVertsIdx _TRIS[] = { + {0, 1, 2}, + {0, 3, 2}, + {4, 5, 6}, + {4, 6, 7}, + {1, 5, 6}, + {1, 6, 2}, + {0, 4, 7}, + {0, 7, 3}, + {3, 2, 6}, + {3, 6, 7}, + {0, 1, 5}, + {0, 4, 5}, +}; + +// Args +// box: (8, 3) tensor accessor for the box vertices +// box_tris: Array of structs of type FaceVerts, +// effectively (F, 3, 3) where the coordinates of the +// verts for each face will be saved to. 
+// +// Returns: None (output saved to box_tris) +// +template +__device__ inline void GetBoxTris(const Box& box, BoxTris& box_tris) { + for (int t = 0; t < NUM_TRIS; ++t) { + const float3 v0 = make_float3( + box[_TRIS[t].v0][0], box[_TRIS[t].v0][1], box[_TRIS[t].v0][2]); + const float3 v1 = make_float3( + box[_TRIS[t].v1][0], box[_TRIS[t].v1][1], box[_TRIS[t].v1][2]); + const float3 v2 = make_float3( + box[_TRIS[t].v2][0], box[_TRIS[t].v2][1], box[_TRIS[t].v2][2]); + box_tris[t] = {v0, v1, v2}; + } +} + +// Args +// box: (8, 3) tensor accessor for the box vertices +// box_planes: Array of structs of type FaceVerts, effectively (P, 4, 3) +// where the coordinates of the verts for the four corners of each plane +// will be saved to +// +// Returns: None (output saved to box_planes) +// +template +__device__ inline void GetBoxPlanes( + const Box& box, + FaceVertsBoxPlanes& box_planes) { + for (int t = 0; t < NUM_PLANES; ++t) { + const float3 v0 = make_float3( + box[_PLANES[t].v0][0], box[_PLANES[t].v0][1], box[_PLANES[t].v0][2]); + const float3 v1 = make_float3( + box[_PLANES[t].v1][0], box[_PLANES[t].v1][1], box[_PLANES[t].v1][2]); + const float3 v2 = make_float3( + box[_PLANES[t].v2][0], box[_PLANES[t].v2][1], box[_PLANES[t].v2][2]); + const float3 v3 = make_float3( + box[_PLANES[t].v3][0], box[_PLANES[t].v3][1], box[_PLANES[t].v3][2]); + box_planes[t] = {v0, v1, v2, v3}; + } +} + +// The geometric center of a list of vertices. +// +// Args +// vertices: A list of float3 vertices {v0, ..., vN}. +// +// Returns +// float3: Geometric center of the vertices. 
+// +__device__ inline float3 FaceCenter( + std::initializer_list vertices) { + auto sumVertices = float3{}; + for (const auto& vertex : vertices) { + sumVertices = sumVertices + vertex; + } + return sumVertices / vertices.size(); +} + +// The normal of a plane spanned by vectors e0 and e1 +// +// Args +// e0, e1: float3 vectors defining a plane +// +// Returns +// float3: normal of the plane +// +__device__ inline float3 GetNormal(const float3 e0, const float3 e1) { + float3 n = cross(e0, e1); + n = n / std::fmaxf(norm(n), kEpsilon); + return n; +} + +// The normal of a face with vertices (v0, v1, v2) or (v0, ..., v3). +// We find the "best" edges connecting the face center to the vertices, +// such that the cross product between the edges is maximized. +// +// Args +// vertices: a list of float3 coordinates of the vertices. +// +// Returns +// float3: center of the plane +// +__device__ inline float3 FaceNormal( + std::initializer_list vertices) { + const auto faceCenter = FaceCenter(vertices); + auto normal = float3(); + auto maxDist = -1; + for (auto v1 = vertices.begin(); v1 != vertices.end() - 1; ++v1) { + for (auto v2 = v1 + 1; v2 != vertices.end(); ++v2) { + const auto v1ToCenter = *v1 - faceCenter; + const auto v2ToCenter = *v2 - faceCenter; + const auto dist = norm(cross(v1ToCenter, v2ToCenter)); + if (dist > maxDist) { + normal = GetNormal(v1ToCenter, v2ToCenter); + maxDist = dist; + } + } + } + return normal; +} + +// The area of the face defined by vertices (v0, v1, v2) +// Define e0 to be the edge connecting (v1, v0) +// Define e1 to be the edge connecting (v2, v0) +// Area is the norm of the cross product of e0, e1 divided by 2.0 +// +// Args +// tri: FaceVerts of float3 coordinates of the vertices of the face +// +// Returns +// float: area for the face +// +__device__ inline float FaceArea(const FaceVerts& tri) { + // Get verts for face 1 + const float3 n = cross(tri.v1 - tri.v0, tri.v2 - tri.v0); + return norm(n) / 2.0; +} + +// The normal of a 
box plane defined by the verts in `plane` such that it +// points toward the centroid of the box given by `center`. +// +// Args +// plane: float3 coordinates of the vertices of the plane +// center: float3 coordinates of the center of the box from +// which the plane originated +// +// Returns +// float3: normal for the plane such that it points towards +// the center of the box +// +template +__device__ inline float3 PlaneNormalDirection( + const FaceVertsPlane& plane, + const float3& center) { + // The plane's center + const float3 plane_center = + FaceCenter({plane.v0, plane.v1, plane.v2, plane.v3}); + + // The plane's normal + float3 n = FaceNormal({plane.v0, plane.v1, plane.v2, plane.v3}); + + // We project the center on the plane defined by (v0, v1, v2, v3) + // We can write center = plane_center + a * e0 + b * e1 + c * n + // We know that = 0 and = 0 and + // is the dot product between a and b. + // This means we can solve for c as: + // c =
+ // =
+ const float c = dot((center - plane_center), n); + + // If c is negative, then we revert the direction of n such that n + // points "inside" + if (c < 0.0f) { + n = -1.0f * n; + } + + return n; +} + +// Calculate the volume of the box by summing the volume of +// each of the tetrahedrons formed with a triangle face and +// the box centroid. +// +// Args +// box_tris: vector of float3 coordinates of the vertices of each +// of the triangles in the box +// box_center: float3 coordinates of the center of the box +// +// Returns +// float: volume of the box +// +template +__device__ inline float BoxVolume( + const BoxTris& box_tris, + const float3& box_center, + const int num_tris) { + float box_vol = 0.0; + // Iterate through each triange, calculate the area of the + // tetrahedron formed with the box_center and sum them + for (int t = 0; t < num_tris; ++t) { + // Subtract the center: + float3 v0 = box_tris[t].v0; + float3 v1 = box_tris[t].v1; + float3 v2 = box_tris[t].v2; + + v0 = v0 - box_center; + v1 = v1 - box_center; + v2 = v2 - box_center; + + // Compute the area + const float area = dot(v0, cross(v1, v2)); + const float vol = abs(area) / 6.0; + box_vol = box_vol + vol; + } + return box_vol; +} + +// Compute the box center as the mean of the verts +// +// Args +// box_verts: (8, 3) tensor of the corner vertices of the box +// +// Returns +// float3: coordinates of the center of the box +// +template +__device__ inline float3 BoxCenter(const Box box_verts) { + float x = 0.0; + float y = 0.0; + float z = 0.0; + const int num_verts = box_verts.size(0); // Should be 8 + // Sum all x, y, z, and take the mean + for (int t = 0; t < num_verts; ++t) { + x = x + box_verts[t][0]; + y = y + box_verts[t][1]; + z = z + box_verts[t][2]; + } + // Take the mean of all the vertex positions + x = x / num_verts; + y = y / num_verts; + z = z / num_verts; + const float3 center = make_float3(x, y, z); + return center; +} + +// Compute the polyhedron center as the mean of the face 
centers +// of the triangle faces +// +// Args +// tris: vector of float3 coordinates of the +// vertices of each of the triangles in the polyhedron +// +// Returns +// float3: coordinates of the center of the polyhedron +// +template +__device__ inline float3 PolyhedronCenter( + const Tris& tris, + const int num_tris) { + float x = 0.0; + float y = 0.0; + float z = 0.0; + + // Find the center point of each face + for (int t = 0; t < num_tris; ++t) { + const float3 v0 = tris[t].v0; + const float3 v1 = tris[t].v1; + const float3 v2 = tris[t].v2; + const float x_face = (v0.x + v1.x + v2.x) / 3.0; + const float y_face = (v0.y + v1.y + v2.y) / 3.0; + const float z_face = (v0.z + v1.z + v2.z) / 3.0; + x = x + x_face; + y = y + y_face; + z = z + z_face; + } + + // Take the mean of the centers of all faces + x = x / num_tris; + y = y / num_tris; + z = z / num_tris; + + const float3 center = make_float3(x, y, z); + return center; +} + +// Compute a boolean indicator for whether a point +// is inside a plane, where inside refers to whether +// or not the point has a component in the +// normal direction of the plane. 
+// +// Args +// plane: vector of float3 coordinates of the +// vertices of each of the triangles in the box +// normal: float3 of the direction of the plane normal +// point: float3 of the position of the point of interest +// +// Returns +// bool: whether or not the point is inside the plane +// +__device__ inline bool +IsInside(const FaceVerts& plane, const float3& normal, const float3& point) { + // The center of the plane + const float3 plane_ctr = FaceCenter({plane.v0, plane.v1, plane.v2, plane.v3}); + + // Every point p can be written as p = plane_ctr + a e0 + b e1 + c n + // Solving for c: + // c = (point - plane_ctr - a * e0 - b * e1).dot(n) + // We know that = 0 and = 0 + // So the calculation can be simplified as: + const float c = dot((point - plane_ctr), normal); + const bool inside = c >= 0.0f; + return inside; +} + +// Find the point of intersection between a plane +// and a line given by the end points (p0, p1) +// +// Args +// plane: vector of float3 coordinates of the +// vertices of each of the triangles in the box +// normal: float3 of the direction of the plane normal +// p0, p1: float3 of the start and end point of the line +// +// Returns +// float3: position of the intersection point +// +__device__ inline float3 PlaneEdgeIntersection( + const FaceVerts& plane, + const float3& normal, + const float3& p0, + const float3& p1) { + // The center of the plane + const float3 plane_ctr = FaceCenter({plane.v0, plane.v1, plane.v2, plane.v3}); + + // The point of intersection can be parametrized + // p = p0 + a (p1 - p0) where a in [0, 1] + // We want to find a such that p is on plane + //

= 0 + + float3 direc = p1 - p0; + direc = direc / fmaxf(norm(direc), kEpsilon); + + float3 p = (p1 + p0) / 2.0f; + + if (abs(dot(direc, normal)) >= dEpsilon) { + const float top = -1.0f * dot(p0 - plane_ctr, normal); + const float bot = dot(p1 - p0, normal); + const float a = top / bot; + p = p0 + a * (p1 - p0); + } + + return p; +} + +// Compute the most distant points between two sets of vertices +// +// Args +// verts1, verts2: list of float3 defining the list of vertices +// +// Returns +// v1m, v2m: float3 vectors of the most distant points +// in verts1 and verts2 respectively +// +__device__ inline std::tuple ArgMaxVerts( + std::initializer_list verts1, + std::initializer_list verts2) { + auto v1m = float3(); + auto v2m = float3(); + float maxdist = -1.0f; + + for (const auto& v1 : verts1) { + for (const auto& v2 : verts2) { + if (norm(v1 - v2) > maxdist) { + v1m = v1; + v2m = v2; + maxdist = norm(v1 - v2); + } + } + } + return std::make_tuple(v1m, v2m); +} + +// Compute a boolean indicator for whether or not two faces +// are coplanar +// +// Args +// tri1, tri2: FaceVerts struct of the vertex coordinates of +// the triangle face +// +// Returns +// bool: whether or not the two faces are coplanar +// +__device__ inline bool IsCoplanarTriTri( + const FaceVerts& tri1, + const FaceVerts& tri2) { + const float3 tri1_n = FaceNormal({tri1.v0, tri1.v1, tri1.v2}); + + const float3 tri2_n = FaceNormal({tri2.v0, tri2.v1, tri2.v2}); + + // Check if parallel + const bool check1 = abs(dot(tri1_n, tri2_n)) > 1 - dEpsilon; + + // Compute most distant points + const auto v1mAndv2m = + ArgMaxVerts({tri1.v0, tri1.v1, tri1.v2}, {tri2.v0, tri2.v1, tri2.v2}); + const auto v1m = std::get<0>(v1mAndv2m); + const auto v2m = std::get<1>(v1mAndv2m); + + float3 n12m = v1m - v2m; + n12m = n12m / fmaxf(norm(n12m), kEpsilon); + + const bool check2 = (abs(dot(n12m, tri1_n)) < dEpsilon) || + (abs(dot(n12m, tri2_n)) < dEpsilon); + + return (check1 && check2); +} + +// Compute a boolean 
indicator for whether or not a triangular and a planar +// face are coplanar +// +// Args +// tri, plane: FaceVerts struct of the vertex coordinates of +// the triangle and planar face +// normal: the normal direction of the plane pointing "inside" +// +// Returns +// bool: whether or not the two faces are coplanar +// +__device__ inline bool IsCoplanarTriPlane( + const FaceVerts& tri, + const FaceVerts& plane, + const float3& normal) { + const float3 nt = FaceNormal({tri.v0, tri.v1, tri.v2}); + + // check if parallel + const bool check1 = abs(dot(nt, normal)) > 1 - dEpsilon; + + // Compute most distant points + const auto v1mAndv2m = ArgMaxVerts( + {tri.v0, tri.v1, tri.v2}, {plane.v0, plane.v1, plane.v2, plane.v3}); + const auto v1m = std::get<0>(v1mAndv2m); + const auto v2m = std::get<1>(v1mAndv2m); + + float3 n12m = v1m - v2m; + n12m = n12m / fmaxf(norm(n12m), kEpsilon); + + const bool check2 = abs(dot(n12m, normal)) < dEpsilon; + + return (check1 && check2); +} + +// Triangle is clipped into a quadrilateral +// based on the intersection points with the plane. +// Then the quadrilateral is divided into two triangles. +// +// Args +// plane: vector of float3 coordinates of the +// vertices of each of the triangles in the box +// normal: float3 of the direction of the plane normal +// vout: float3 of the point in the triangle which is outside +// the plane +// vin1, vin2: float3 of the points in the triangle which are +// inside the plane +// face_verts_out: Array of structs of type FaceVerts, +// with the coordinates of the new triangle faces +// formed after clipping. +// All triangles are now "inside" the plane. +// +// Returns: +// count: (int) number of new faces after clipping the triangle +// i.e. 
the valid faces which have been saved +// to face_verts_out +// +template +__device__ inline int ClipTriByPlaneOneOut( + const FaceVerts& plane, + const float3& normal, + const float3& vout, + const float3& vin1, + const float3& vin2, + FaceVertsBox& face_verts_out) { + // point of intersection between plane and (vin1, vout) + const float3 pint1 = PlaneEdgeIntersection(plane, normal, vin1, vout); + // point of intersection between plane and (vin2, vout) + const float3 pint2 = PlaneEdgeIntersection(plane, normal, vin2, vout); + + face_verts_out[0] = {vin1, pint1, pint2}; + face_verts_out[1] = {vin1, pint2, vin2}; + + return 2; +} + +// Triangle is clipped into a smaller triangle based +// on the intersection points with the plane. +// +// Args +// plane: vector of float3 coordinates of the +// vertices of each of the triangles in the box +// normal: float3 of the direction of the plane normal +// vout1, vout2: float3 of the points in the triangle which are +// outside the plane +// vin: float3 of the point in the triangle which is inside +// the plane +// face_verts_out: Array of structs of type FaceVerts, +// with the coordinates of the new triangle faces +// formed after clipping. +// All triangles are now "inside" the plane. +// +// Returns: +// count: (int) number of new faces after clipping the triangle +// i.e. 
the valid faces which have been saved +// to face_verts_out +// +template +__device__ inline int ClipTriByPlaneTwoOut( + const FaceVerts& plane, + const float3& normal, + const float3& vout1, + const float3& vout2, + const float3& vin, + FaceVertsBox& face_verts_out) { + // point of intersection between plane and (vin, vout1) + const float3 pint1 = PlaneEdgeIntersection(plane, normal, vin, vout1); + // point of intersection between plane and (vin, vout2) + const float3 pint2 = PlaneEdgeIntersection(plane, normal, vin, vout2); + + face_verts_out[0] = {vin, pint1, pint2}; + + return 1; +} + +// Clip the triangle faces so that they lie within the +// plane, creating new triangle faces where necessary. +// +// Args +// plane: Array of structs of type FaceVerts with the coordinates +// of the vertices of each of the triangles in the box +// tri: Array of structs of type FaceVerts with the vertex +// coordinates of the triangle faces +// normal: float3 of the direction of the plane normal +// face_verts_out: Array of structs of type FaceVerts, +// with the coordinates of the new triangle faces +// formed after clipping. +// All triangles are now "inside" the plane. +// +// Returns: +// count: (int) number of new faces after clipping the triangle +// i.e. 
the valid faces which have been saved +// to face_verts_out +// +template +__device__ inline int ClipTriByPlane( + const FaceVerts& plane, + const FaceVerts& tri, + const float3& normal, + FaceVertsBox& face_verts_out) { + // Get Triangle vertices + const float3 v0 = tri.v0; + const float3 v1 = tri.v1; + const float3 v2 = tri.v2; + + // Check each of the triangle vertices to see if it is inside the plane + const bool isin0 = IsInside(plane, normal, v0); + const bool isin1 = IsInside(plane, normal, v1); + const bool isin2 = IsInside(plane, normal, v2); + + // Check coplanar + const bool iscoplanar = IsCoplanarTriPlane(tri, plane, normal); + if (iscoplanar) { + // Return input vertices + face_verts_out[0] = {v0, v1, v2}; + return 1; + } + + // All in + if (isin0 && isin1 && isin2) { + // Return input vertices + face_verts_out[0] = {v0, v1, v2}; + return 1; + } + + // All out + if (!isin0 && !isin1 && !isin2) { + return 0; + } + + // One vert out + if (isin0 && isin1 && !isin2) { + return ClipTriByPlaneOneOut(plane, normal, v2, v0, v1, face_verts_out); + } + if (isin0 && !isin1 && isin2) { + return ClipTriByPlaneOneOut(plane, normal, v1, v0, v2, face_verts_out); + } + if (!isin0 && isin1 && isin2) { + return ClipTriByPlaneOneOut(plane, normal, v0, v1, v2, face_verts_out); + } + + // Two verts out + if (isin0 && !isin1 && !isin2) { + return ClipTriByPlaneTwoOut(plane, normal, v1, v2, v0, face_verts_out); + } + if (!isin0 && !isin1 && isin2) { + return ClipTriByPlaneTwoOut(plane, normal, v0, v1, v2, face_verts_out); + } + if (!isin0 && isin1 && !isin2) { + return ClipTriByPlaneTwoOut(plane, normal, v0, v2, v1, face_verts_out); + } + + // Else return empty (should not be reached) + return 0; +} + +// Get the triangles from each box which are part of the +// intersecting polyhedron by computing the intersection +// points with each of the planes. 
+// +// Args +// planes: Array of structs of type FaceVerts with the coordinates +// of the vertices of each of the triangles in the box +// center: float3 coordinates of the center of the box from which +// the planes originate +// face_verts_out: Array of structs of type FaceVerts, +// where the coordinates of the new triangle faces +// formed after clipping will be saved to. +// All triangles are now "inside" the plane. +// +// Returns: +// count: (int) number of faces in the intersecting shape +// i.e. the valid faces which have been saved +// to face_verts_out +// +template +__device__ inline int BoxIntersections( + const FaceVertsPlane& planes, + const float3& center, + FaceVertsBox& face_verts_out) { + // Initialize num tris to 12 + int num_tris = NUM_TRIS; + for (int p = 0; p < NUM_PLANES; ++p) { + // Get plane normal direction + const float3 n2 = PlaneNormalDirection(planes[p], center); + // Create intermediate vector to store the updated tris + FaceVerts tri_verts_updated[MAX_TRIS]; + int offset = 0; + + // Iterate through triangles in face_verts_out + // for the valid tris given by num_tris + for (int t = 0; t < num_tris; ++t) { + // Clip tri by plane, can max be split into 2 triangles + FaceVerts tri_updated[2]; + const int count = + ClipTriByPlane(planes[p], face_verts_out[t], n2, tri_updated); + // Add to the tri_verts_updated output if not empty + for (int v = 0; v < count; ++v) { + tri_verts_updated[offset] = tri_updated[v]; + offset++; + } + } + // Update the face_verts_out tris + num_tris = min(MAX_TRIS, offset); + for (int j = 0; j < num_tris; ++j) { + face_verts_out[j] = tri_verts_updated[j]; + } + } + return num_tris; +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/iou_box3d/iou_utils.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/iou_box3d/iou_utils.h new file mode 100644 index 
0000000000000000000000000000000000000000..283822a112daa9bec5e2e2fe083fa983210273ca --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/iou_box3d/iou_utils.h @@ -0,0 +1,733 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "utils/vec3.h" + +// dEpsilon: Used in dot products and is used to assess whether two unit vectors +// are orthogonal (or coplanar). It's an epsilon on cos(θ). +// With dEpsilon = 0.001, two unit vectors are considered co-planar +// if their θ = 2.5 deg. +const auto dEpsilon = 1e-3; +// aEpsilon: Used once in main function to check for small face areas +const auto aEpsilon = 1e-4; +// kEpsilon: Used only for norm(u) = u/max(||u||, kEpsilon) +const auto kEpsilon = 1e-8; + +/* +_PLANES and _TRIS define the 4- and 3-connectivity +of the 8 box corners. +_PLANES gives the quad faces of the 3D box +_TRIS gives the triangle faces of the 3D box +*/ +const int NUM_PLANES = 6; +const int NUM_TRIS = 12; +const int _PLANES[6][4] = { + {0, 1, 2, 3}, + {3, 2, 6, 7}, + {0, 1, 5, 4}, + {0, 3, 7, 4}, + {1, 5, 6, 2}, + {4, 5, 6, 7}, +}; +const int _TRIS[12][3] = { + {0, 1, 2}, + {0, 3, 2}, + {4, 5, 6}, + {4, 6, 7}, + {1, 5, 6}, + {1, 6, 2}, + {0, 4, 7}, + {0, 7, 3}, + {3, 2, 6}, + {3, 6, 7}, + {0, 1, 5}, + {0, 4, 5}, +}; + +// Create a new data type for representing the +// verts for each face which can be triangle or plane. +// This helps make the code more readable. 
+using face_verts = std::vector>>; + +// Args +// box: (8, 3) tensor accessor for the box vertices +// plane_idx: index of the plane in the box +// vert_idx: index of the vertex in the plane +// +// Returns +// vec3 (x, y, x) vertex coordinates +// +template +inline vec3 +ExtractVertsPlane(const Box& box, const int plane_idx, const int vert_idx) { + return vec3( + box[_PLANES[plane_idx][vert_idx]][0], + box[_PLANES[plane_idx][vert_idx]][1], + box[_PLANES[plane_idx][vert_idx]][2]); +} + +// Args +// box: (8, 3) tensor accessor for the box vertices +// tri_idx: index of the triangle face in the box +// vert_idx: index of the vertex in the triangle +// +// Returns +// vec3 (x, y, x) vertex coordinates +// +template +inline vec3 +ExtractVertsTri(const Box& box, const int tri_idx, const int vert_idx) { + return vec3( + box[_TRIS[tri_idx][vert_idx]][0], + box[_TRIS[tri_idx][vert_idx]][1], + box[_TRIS[tri_idx][vert_idx]][2]); +} + +// Args +// box: (8, 3) tensor accessor for the box vertices +// +// Returns +// std::vector>> effectively (F, 3, 3) +// coordinates of the verts for each face +// +template +inline face_verts GetBoxTris(const Box& box) { + face_verts box_tris; + for (int t = 0; t < NUM_TRIS; ++t) { + vec3 v0 = ExtractVertsTri(box, t, 0); + vec3 v1 = ExtractVertsTri(box, t, 1); + vec3 v2 = ExtractVertsTri(box, t, 2); + box_tris.push_back({v0, v1, v2}); + } + return box_tris; +} + +// Args +// box: (8, 3) tensor accessor for the box vertices +// +// Returns +// std::vector>> effectively (P, 3, 3) +// coordinates of the 4 verts for each plane +// +template +inline face_verts GetBoxPlanes(const Box& box) { + face_verts box_planes; + for (int t = 0; t < NUM_PLANES; ++t) { + vec3 v0 = ExtractVertsPlane(box, t, 0); + vec3 v1 = ExtractVertsPlane(box, t, 1); + vec3 v2 = ExtractVertsPlane(box, t, 2); + vec3 v3 = ExtractVertsPlane(box, t, 3); + box_planes.push_back({v0, v1, v2, v3}); + } + return box_planes; +} + +// The normal of a plane spanned by vectors e0 and e1 +// 
+// Args +// e0, e1: vec3 vectors defining a plane +// +// Returns +// vec3: normal of the plane +// +inline vec3 GetNormal(const vec3 e0, const vec3 e1) { + vec3 n = cross(e0, e1); + n = n / std::fmaxf(norm(n), kEpsilon); + return n; +} + +// The center of a triangle tri +// +// Args +// tri: vec3 coordinates of the vertices of the triangle +// +// Returns +// vec3: center of the triangle +// +inline vec3 TriCenter(const std::vector>& tri) { + // Vertices of the triangle + const vec3 v0 = tri[0]; + const vec3 v1 = tri[1]; + const vec3 v2 = tri[2]; + + return (v0 + v1 + v2) / 3.0f; +} + +// The normal of the triangle defined by vertices (v0, v1, v2) +// We find the "best" edges connecting the face center to the vertices, +// such that the cross product between the edges is maximized. +// +// Args +// tri: vec3 coordinates of the vertices of the face +// +// Returns +// vec3: normal for the face +// +inline vec3 TriNormal(const std::vector>& tri) { + // Get center of triangle + const vec3 ctr = TriCenter(tri); + + // find the "best" normal as cross product of edges from center + float max_dist = -1.0f; + vec3 n = {0.0f, 0.0f, 0.0f}; + for (int i = 0; i < 2; ++i) { + for (int j = i + 1; j < 3; ++j) { + const float dist = norm(cross(tri[i] - ctr, tri[j] - ctr)); + if (dist > max_dist) { + n = GetNormal(tri[i] - ctr, tri[j] - ctr); + } + } + } + return n; +} + +// The center of a plane +// +// Args +// plane: vec3 coordinates of the vertices of the plane +// +// Returns +// vec3: center of the plane +// +inline vec3 PlaneCenter(const std::vector>& plane) { + // Vertices of the plane + const vec3 v0 = plane[0]; + const vec3 v1 = plane[1]; + const vec3 v2 = plane[2]; + const vec3 v3 = plane[3]; + + return (v0 + v1 + v2 + v3) / 4.0f; +} + +// The normal of a planar face with vertices (v0, v1, v2, v3) +// We find the "best" edges connecting the face center to the vertices, +// such that the cross product between the edges is maximized. 
+// +// Args +// plane: vec3 coordinates of the vertices of the planar face +// +// Returns +// vec3: normal of the planar face +// +inline vec3 PlaneNormal(const std::vector>& plane) { + // Get center of planar face + vec3 ctr = PlaneCenter(plane); + + // find the "best" normal as cross product of edges from center + float max_dist = -1.0f; + vec3 n = {0.0f, 0.0f, 0.0f}; + for (int i = 0; i < 3; ++i) { + for (int j = i + 1; j < 4; ++j) { + const float dist = norm(cross(plane[i] - ctr, plane[j] - ctr)); + if (dist > max_dist) { + n = GetNormal(plane[i] - ctr, plane[j] - ctr); + } + } + } + return n; +} + +// The area of the face defined by vertices (v0, v1, v2) +// Define e0 to be the edge connecting (v1, v0) +// Define e1 to be the edge connecting (v2, v0) +// Area is the norm of the cross product of e0, e1 divided by 2.0 +// +// Args +// tri: vec3 coordinates of the vertices of the face +// +// Returns +// float: area for the face +// +inline float FaceArea(const std::vector>& tri) { + // Get verts for face + const vec3 v0 = tri[0]; + const vec3 v1 = tri[1]; + const vec3 v2 = tri[2]; + const vec3 n = cross(v1 - v0, v2 - v0); + return norm(n) / 2.0; +} + +// The normal of a box plane defined by the verts in `plane` such that it +// points toward the centroid of the box given by `center`. +// +// Args +// plane: vec3 coordinates of the vertices of the plane +// center: vec3 coordinates of the center of the box from +// which the plane originated +// +// Returns +// vec3: normal for the plane such that it points towards +// the center of the box +// +inline vec3 PlaneNormalDirection( + const std::vector>& plane, + const vec3& center) { + // The plane's center & normal + const vec3 plane_center = PlaneCenter(plane); + vec3 n = PlaneNormal(plane); + + // We project the center on the plane defined by (v0, v1, v2, v3) + // We can write center = plane_center + a * e0 + b * e1 + c * n + // We know that = 0 and = 0 and + // is the dot product between a and b. 
+ // This means we can solve for c as: + // c =

+ // =
+ const float c = dot((center - plane_center), n); + + // If c is negative, then we revert the direction of n such that n + // points "inside" + if (c < 0.0f) { + n = -1.0f * n; + } + + return n; +} + +// Calculate the volume of the box by summing the volume of +// each of the tetrahedrons formed with a triangle face and +// the box centroid. +// +// Args +// box_tris: vector of vec3 coordinates of the vertices of each +// of the triangles in the box +// box_center: vec3 coordinates of the center of the box +// +// Returns +// float: volume of the box +// +inline float BoxVolume( + const face_verts& box_tris, + const vec3& box_center) { + float box_vol = 0.0; + // Iterate through each triange, calculate the area of the + // tetrahedron formed with the box_center and sum them + for (int t = 0; t < box_tris.size(); ++t) { + // Subtract the center: + const vec3 v0 = box_tris[t][0] - box_center; + const vec3 v1 = box_tris[t][1] - box_center; + const vec3 v2 = box_tris[t][2] - box_center; + + // Compute the area + const float area = dot(v0, cross(v1, v2)); + const float vol = std::abs(area) / 6.0; + box_vol = box_vol + vol; + } + return box_vol; +} + +// Compute the box center as the mean of the verts +// +// Args +// box_verts: (8, 3) tensor of the corner vertices of the box +// +// Returns +// vec3: coordinates of the center of the box +// +inline vec3 BoxCenter(const at::Tensor& box_verts) { + const auto& box_center_t = at::mean(box_verts, 0); + const vec3 box_center( + box_center_t[0].item(), + box_center_t[1].item(), + box_center_t[2].item()); + return box_center; +} + +// Compute the polyhedron center as the mean of the face centers +// of the triangle faces +// +// Args +// tris: vector of vec3 coordinates of the +// vertices of each of the triangles in the polyhedron +// +// Returns +// vec3: coordinates of the center of the polyhedron +// +inline vec3 PolyhedronCenter(const face_verts& tris) { + float x = 0.0; + float y = 0.0; + float z = 0.0; + const int 
num_tris = tris.size(); + + // Find the center point of each face + for (int t = 0; t < num_tris; ++t) { + const vec3 v0 = tris[t][0]; + const vec3 v1 = tris[t][1]; + const vec3 v2 = tris[t][2]; + const float x_face = (v0.x + v1.x + v2.x) / 3.0; + const float y_face = (v0.y + v1.y + v2.y) / 3.0; + const float z_face = (v0.z + v1.z + v2.z) / 3.0; + x = x + x_face; + y = y + y_face; + z = z + z_face; + } + + // Take the mean of the centers of all faces + x = x / num_tris; + y = y / num_tris; + z = z / num_tris; + + const vec3 center(x, y, z); + return center; +} + +// Compute a boolean indicator for whether a point +// is inside a plane, where inside refers to whether +// or not the point has a component in the +// normal direction of the plane. +// +// Args +// plane: vector of vec3 coordinates of the +// vertices of each of the triangles in the box +// normal: vec3 of the direction of the plane normal +// point: vec3 of the position of the point of interest +// +// Returns +// bool: whether or not the point is inside the plane +// +inline bool IsInside( + const std::vector>& plane, + const vec3& normal, + const vec3& point) { + // The center of the plane + const vec3 plane_ctr = PlaneCenter(plane); + + // Every point p can be written as p = plane_ctr + a e0 + b e1 + c n + // Solving for c: + // c = (point - plane_ctr - a * e0 - b * e1).dot(n) + // We know that = 0 and = 0 + // So the calculation can be simplified as: + const float c = dot((point - plane_ctr), normal); + const bool inside = c >= 0.0f; + return inside; +} + +// Find the point of intersection between a plane +// and a line given by the end points (p0, p1) +// +// Args +// plane: vector of vec3 coordinates of the +// vertices of each of the triangles in the box +// normal: vec3 of the direction of the plane normal +// p0, p1: vec3 of the start and end point of the line +// +// Returns +// vec3: position of the intersection point +// +inline vec3 PlaneEdgeIntersection( + const std::vector>& plane, + 
const vec3& normal, + const vec3& p0, + const vec3& p1) { + // The center of the plane + const vec3 plane_ctr = PlaneCenter(plane); + + // The point of intersection can be parametrized + // p = p0 + a (p1 - p0) where a in [0, 1] + // We want to find a such that p is on plane + //

= 0 + + vec3 direc = p1 - p0; + direc = direc / std::fmaxf(norm(direc), kEpsilon); + + vec3 p = (p1 + p0) / 2.0f; + + if (std::abs(dot(direc, normal)) >= dEpsilon) { + const float top = -1.0f * dot(p0 - plane_ctr, normal); + const float bot = dot(p1 - p0, normal); + const float a = top / bot; + p = p0 + a * (p1 - p0); + } + return p; +} + +// Compute the most distant points between two sets of vertices +// +// Args +// verts1, verts2: vec3 defining the list of vertices +// +// Returns +// v1m, v2m: vec3 vectors of the most distant points +// in verts1 and verts2 respectively +// +inline std::tuple, vec3> ArgMaxVerts( + const std::vector>& verts1, + const std::vector>& verts2) { + vec3 v1m = {0.0f, 0.0f, 0.0f}; + vec3 v2m = {0.0f, 0.0f, 0.0f}; + float maxdist = -1.0f; + + for (const auto& v1 : verts1) { + for (const auto& v2 : verts2) { + if (norm(v1 - v2) > maxdist) { + v1m = v1; + v2m = v2; + maxdist = norm(v1 - v2); + } + } + } + return std::make_tuple(v1m, v2m); +} + +// Compute a boolean indicator for whether or not two faces +// are coplanar +// +// Args +// tri1, tri2: std:vector of the vertex coordinates of +// triangle faces +// +// Returns +// bool: whether or not the two faces are coplanar +// +inline bool IsCoplanarTriTri( + const std::vector>& tri1, + const std::vector>& tri2) { + // Get normal for tri 1 + const vec3 n1 = TriNormal(tri1); + + // Get normal for tri 2 + const vec3 n2 = TriNormal(tri2); + + // Check if parallel + const bool check1 = std::abs(dot(n1, n2)) > 1 - dEpsilon; + + // Compute most distant points + auto argvs = ArgMaxVerts(tri1, tri2); + const auto v1m = std::get<0>(argvs); + const auto v2m = std::get<1>(argvs); + + vec3 n12m = v1m - v2m; + n12m = n12m / std::fmaxf(norm(n12m), kEpsilon); + + const bool check2 = (std::abs(dot(n12m, n1)) < dEpsilon) || + (std::abs(dot(n12m, n2)) < dEpsilon); + + return (check1 && check2); +} + +// Compute a boolean indicator for whether or not a triangular and a planar +// face are coplanar +// +// 
Args +// tri, plane: std:vector of the vertex coordinates of +// triangular face and planar face +// normal: the normal direction of the plane pointing "inside" +// +// Returns +// bool: whether or not the two faces are coplanar +// +inline bool IsCoplanarTriPlane( + const std::vector>& tri, + const std::vector>& plane, + const vec3& normal) { + // Get normal for tri + const vec3 nt = TriNormal(tri); + + // check if parallel + const bool check1 = std::abs(dot(nt, normal)) > 1 - dEpsilon; + + // Compute most distant points + auto argvs = ArgMaxVerts(tri, plane); + const auto v1m = std::get<0>(argvs); + const auto v2m = std::get<1>(argvs); + + vec3 n12m = v1m - v2m; + n12m = n12m / std::fmaxf(norm(n12m), kEpsilon); + + const bool check2 = std::abs(dot(n12m, normal)) < dEpsilon; + + return (check1 && check2); +} + +// Triangle is clipped into a quadrilateral +// based on the intersection points with the plane. +// Then the quadrilateral is divided into two triangles. +// +// Args +// plane: vector of vec3 coordinates of the +// vertices of each of the triangles in the box +// normal: vec3 of the direction of the plane normal +// vout: vec3 of the point in the triangle which is outside +// the plane +// vin1, vin2: vec3 of the points in the triangle which are +// inside the plane +// +// Returns +// std::vector>: vector of vertex coordinates +// of the new triangle faces +// +inline face_verts ClipTriByPlaneOneOut( + const std::vector>& plane, + const vec3& normal, + const vec3& vout, + const vec3& vin1, + const vec3& vin2) { + // point of intersection between plane and (vin1, vout) + const vec3 pint1 = PlaneEdgeIntersection(plane, normal, vin1, vout); + // point of intersection between plane and (vin2, vout) + const vec3 pint2 = PlaneEdgeIntersection(plane, normal, vin2, vout); + const face_verts face_verts = {{vin1, pint1, pint2}, {vin1, pint2, vin2}}; + return face_verts; +} + +// Triangle is clipped into a smaller triangle based +// on the intersection points with 
the plane. +// +// Args +// plane: vector of vec3 coordinates of the +// vertices of each of the triangles in the box +// normal: vec3 of the direction of the plane normal +// vout1, vout2: vec3 of the points in the triangle which are +// outside the plane +// vin: vec3 of the point in the triangle which is inside +// the plane +// Returns +// std::vector>: vector of vertex coordinates +// of the new triangle face +// +inline face_verts ClipTriByPlaneTwoOut( + const std::vector>& plane, + const vec3& normal, + const vec3& vout1, + const vec3& vout2, + const vec3& vin) { + // point of intersection between plane and (vin, vout1) + const vec3 pint1 = PlaneEdgeIntersection(plane, normal, vin, vout1); + // point of intersection between plane and (vin, vout2) + const vec3 pint2 = PlaneEdgeIntersection(plane, normal, vin, vout2); + const face_verts face_verts = {{vin, pint1, pint2}}; + return face_verts; +} + +// Clip the triangle faces so that they lie within the +// plane, creating new triangle faces where necessary. +// +// Args +// plane: vector of vec3 coordinates of the +// vertices of each of the triangles in the box +// tri: std:vector of the vertex coordinates of the +// triangle faces +// normal: vec3 of the direction of the plane normal +// +// Returns +// std::vector>: vector of vertex coordinates +// of the new triangle faces formed after clipping. +// All triangles are now "inside" the plane. 
+// +inline face_verts ClipTriByPlane( + const std::vector>& plane, + const std::vector>& tri, + const vec3& normal) { + // Get Triangle vertices + const vec3 v0 = tri[0]; + const vec3 v1 = tri[1]; + const vec3 v2 = tri[2]; + + // Check coplanar + const bool iscoplanar = IsCoplanarTriPlane(tri, plane, normal); + if (iscoplanar) { + // Return input vertices + face_verts tris = {{v0, v1, v2}}; + return tris; + } + + // Check each of the triangle vertices to see if it is inside the plane + const bool isin0 = IsInside(plane, normal, v0); + const bool isin1 = IsInside(plane, normal, v1); + const bool isin2 = IsInside(plane, normal, v2); + + // All in + if (isin0 && isin1 && isin2) { + // Return input vertices + face_verts tris = {{v0, v1, v2}}; + return tris; + } + + face_verts empty_tris = {}; + // All out + if (!isin0 && !isin1 && !isin2) { + return empty_tris; + } + + // One vert out + if (isin0 && isin1 && !isin2) { + return ClipTriByPlaneOneOut(plane, normal, v2, v0, v1); + } + if (isin0 && !isin1 && isin2) { + return ClipTriByPlaneOneOut(plane, normal, v1, v0, v2); + } + if (!isin0 && isin1 && isin2) { + return ClipTriByPlaneOneOut(plane, normal, v0, v1, v2); + } + + // Two verts out + if (isin0 && !isin1 && !isin2) { + return ClipTriByPlaneTwoOut(plane, normal, v1, v2, v0); + } + if (!isin0 && !isin1 && isin2) { + return ClipTriByPlaneTwoOut(plane, normal, v0, v1, v2); + } + if (!isin0 && isin1 && !isin2) { + return ClipTriByPlaneTwoOut(plane, normal, v0, v2, v1); + } + + // Else return empty (should not be reached) + return empty_tris; +} + +// Get the triangles from each box which are part of the +// intersecting polyhedron by computing the intersection +// points with each of the planes. 
+// +// Args +// tris: vertex coordinates of all the triangle faces +// in the box +// planes: vertex coordinates of all the planes in the box +// center: vec3 coordinates of the center of the box from which +// the planes originate +// +// Returns +// std::vector>> vector of vertex coordinates +// of the new triangle faces formed after clipping. +// All triangles are now "inside" the planes. +// +inline face_verts BoxIntersections( + const face_verts& tris, + const face_verts& planes, + const vec3& center) { + // Create a new vector to avoid modifying in place + face_verts out_tris = tris; + for (int p = 0; p < NUM_PLANES; ++p) { + // Get plane normal direction + const vec3 n2 = PlaneNormalDirection(planes[p], center); + // Iterate through triangles in tris + // Create intermediate vector to store the updated tris + face_verts tri_verts_updated; + for (int t = 0; t < out_tris.size(); ++t) { + // Clip tri by plane + const face_verts tri_updated = ClipTriByPlane(planes[p], out_tris[t], n2); + // Add to the tri_verts_updated output if not empty + for (int v = 0; v < tri_updated.size(); ++v) { + tri_verts_updated.push_back(tri_updated[v]); + } + } + // Update the tris + out_tris = tri_verts_updated; + } + return out_tris; +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/knn/knn.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/knn/knn.cu new file mode 100644 index 0000000000000000000000000000000000000000..ad9dce247912c29f49fab3270671c0ea559c8451 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/knn/knn.cu @@ -0,0 +1,587 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include +#include +#include +#include +#include +#include + +#include "utils/dispatch.cuh" +#include "utils/mink.cuh" + +// A chunk of work is blocksize-many points of P1. +// The number of potential chunks to do is N*(1+(P1-1)/blocksize) +// call (1+(P1-1)/blocksize) chunks_per_cloud +// These chunks are divided among the gridSize-many blocks. +// In block b, we work on chunks b, b+gridSize, b+2*gridSize etc . +// In chunk i, we work on cloud i/chunks_per_cloud on points starting from +// blocksize*(i%chunks_per_cloud). + +template +__global__ void KNearestNeighborKernelV0( + const scalar_t* __restrict__ points1, + const scalar_t* __restrict__ points2, + const int64_t* __restrict__ lengths1, + const int64_t* __restrict__ lengths2, + scalar_t* __restrict__ dists, + int64_t* __restrict__ idxs, + const size_t N, + const size_t P1, + const size_t P2, + const size_t D, + const size_t K, + const size_t norm) { + // Store both dists and indices for knn in global memory. + const int64_t chunks_per_cloud = (1 + (P1 - 1) / blockDim.x); + const int64_t chunks_to_do = N * chunks_per_cloud; + for (int64_t chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) { + const int64_t n = chunk / chunks_per_cloud; + const int64_t start_point = blockDim.x * (chunk % chunks_per_cloud); + int64_t p1 = start_point + threadIdx.x; + if (p1 >= lengths1[n]) + continue; + int offset = n * P1 * K + p1 * K; + int64_t length2 = lengths2[n]; + MinK mink(dists + offset, idxs + offset, K); + for (int p2 = 0; p2 < length2; ++p2) { + // Find the distance between points1[n, p1] and points[n, p2] + scalar_t dist = 0; + for (int d = 0; d < D; ++d) { + scalar_t coord1 = points1[n * P1 * D + p1 * D + d]; + scalar_t coord2 = points2[n * P2 * D + p2 * D + d]; + scalar_t diff = coord1 - coord2; + scalar_t norm_diff = (norm == 2) ? 
(diff * diff) : abs(diff); + dist += norm_diff; + } + mink.add(dist, p2); + } + } +} + +template +__global__ void KNearestNeighborKernelV1( + const scalar_t* __restrict__ points1, + const scalar_t* __restrict__ points2, + const int64_t* __restrict__ lengths1, + const int64_t* __restrict__ lengths2, + scalar_t* __restrict__ dists, + int64_t* __restrict__ idxs, + const size_t N, + const size_t P1, + const size_t P2, + const size_t K, + const size_t norm) { + // Same idea as the previous version, but hoist D into a template argument + // so we can cache the current point in a thread-local array. We still store + // the current best K dists and indices in global memory, so this should work + // for very large K and fairly large D. + scalar_t cur_point[D]; + const int64_t chunks_per_cloud = (1 + (P1 - 1) / blockDim.x); + const int64_t chunks_to_do = N * chunks_per_cloud; + for (int64_t chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) { + const int64_t n = chunk / chunks_per_cloud; + const int64_t start_point = blockDim.x * (chunk % chunks_per_cloud); + int64_t p1 = start_point + threadIdx.x; + if (p1 >= lengths1[n]) + continue; + for (int d = 0; d < D; ++d) { + cur_point[d] = points1[n * P1 * D + p1 * D + d]; + } + int offset = n * P1 * K + p1 * K; + int64_t length2 = lengths2[n]; + MinK mink(dists + offset, idxs + offset, K); + for (int p2 = 0; p2 < length2; ++p2) { + // Find the distance between cur_point and points[n, p2] + scalar_t dist = 0; + for (int d = 0; d < D; ++d) { + scalar_t diff = cur_point[d] - points2[n * P2 * D + p2 * D + d]; + scalar_t norm_diff = (norm == 2) ? 
(diff * diff) : abs(diff); + dist += norm_diff; + } + mink.add(dist, p2); + } + } +} + +// This is a shim functor to allow us to dispatch using DispatchKernel1D +template +struct KNearestNeighborV1Functor { + static void run( + size_t blocks, + size_t threads, + const scalar_t* __restrict__ points1, + const scalar_t* __restrict__ points2, + const int64_t* __restrict__ lengths1, + const int64_t* __restrict__ lengths2, + scalar_t* __restrict__ dists, + int64_t* __restrict__ idxs, + const size_t N, + const size_t P1, + const size_t P2, + const size_t K, + const size_t norm) { + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + KNearestNeighborKernelV1<<>>( + points1, points2, lengths1, lengths2, dists, idxs, N, P1, P2, K, norm); + } +}; + +template +__global__ void KNearestNeighborKernelV2( + const scalar_t* __restrict__ points1, + const scalar_t* __restrict__ points2, + const int64_t* __restrict__ lengths1, + const int64_t* __restrict__ lengths2, + scalar_t* __restrict__ dists, + int64_t* __restrict__ idxs, + const int64_t N, + const int64_t P1, + const int64_t P2, + const size_t norm) { + // Same general implementation as V2, but also hoist K into a template arg. 
+ scalar_t cur_point[D]; + scalar_t min_dists[K]; + int min_idxs[K]; + const int64_t chunks_per_cloud = (1 + (P1 - 1) / blockDim.x); + const int64_t chunks_to_do = N * chunks_per_cloud; + for (int64_t chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) { + const int64_t n = chunk / chunks_per_cloud; + const int64_t start_point = blockDim.x * (chunk % chunks_per_cloud); + int64_t p1 = start_point + threadIdx.x; + if (p1 >= lengths1[n]) + continue; + for (int d = 0; d < D; ++d) { + cur_point[d] = points1[n * P1 * D + p1 * D + d]; + } + int64_t length2 = lengths2[n]; + MinK mink(min_dists, min_idxs, K); + for (int p2 = 0; p2 < length2; ++p2) { + scalar_t dist = 0; + for (int d = 0; d < D; ++d) { + int offset = n * P2 * D + p2 * D + d; + scalar_t diff = cur_point[d] - points2[offset]; + scalar_t norm_diff = (norm == 2) ? (diff * diff) : abs(diff); + dist += norm_diff; + } + mink.add(dist, p2); + } + for (int k = 0; k < mink.size(); ++k) { + idxs[n * P1 * K + p1 * K + k] = min_idxs[k]; + dists[n * P1 * K + p1 * K + k] = min_dists[k]; + } + } +} + +// This is a shim so we can dispatch using DispatchKernel2D +template +struct KNearestNeighborKernelV2Functor { + static void run( + size_t blocks, + size_t threads, + const scalar_t* __restrict__ points1, + const scalar_t* __restrict__ points2, + const int64_t* __restrict__ lengths1, + const int64_t* __restrict__ lengths2, + scalar_t* __restrict__ dists, + int64_t* __restrict__ idxs, + const int64_t N, + const int64_t P1, + const int64_t P2, + const size_t norm) { + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + KNearestNeighborKernelV2<<>>( + points1, points2, lengths1, lengths2, dists, idxs, N, P1, P2, norm); + } +}; + +template +__global__ void KNearestNeighborKernelV3( + const scalar_t* __restrict__ points1, + const scalar_t* __restrict__ points2, + const int64_t* __restrict__ lengths1, + const int64_t* __restrict__ lengths2, + scalar_t* __restrict__ dists, + int64_t* __restrict__ idxs, + const size_t 
N, + const size_t P1, + const size_t P2, + const size_t norm) { + // Same idea as V2, but use register indexing for thread-local arrays. + // Enabling sorting for this version leads to huge slowdowns; I suspect + // that it forces min_dists into local memory rather than registers. + // As a result this version is always unsorted. + scalar_t cur_point[D]; + scalar_t min_dists[K]; + int min_idxs[K]; + const int64_t chunks_per_cloud = (1 + (P1 - 1) / blockDim.x); + const int64_t chunks_to_do = N * chunks_per_cloud; + for (int64_t chunk = blockIdx.x; chunk < chunks_to_do; chunk += gridDim.x) { + const int64_t n = chunk / chunks_per_cloud; + const int64_t start_point = blockDim.x * (chunk % chunks_per_cloud); + int64_t p1 = start_point + threadIdx.x; + if (p1 >= lengths1[n]) + continue; + for (int d = 0; d < D; ++d) { + cur_point[d] = points1[n * P1 * D + p1 * D + d]; + } + int64_t length2 = lengths2[n]; + RegisterMinK mink(min_dists, min_idxs); + for (int p2 = 0; p2 < length2; ++p2) { + scalar_t dist = 0; + for (int d = 0; d < D; ++d) { + int offset = n * P2 * D + p2 * D + d; + scalar_t diff = cur_point[d] - points2[offset]; + scalar_t norm_diff = (norm == 2) ? 
(diff * diff) : abs(diff); + dist += norm_diff; + } + mink.add(dist, p2); + } + for (int k = 0; k < mink.size(); ++k) { + idxs[n * P1 * K + p1 * K + k] = min_idxs[k]; + dists[n * P1 * K + p1 * K + k] = min_dists[k]; + } + } +} + +// This is a shim so we can dispatch using DispatchKernel2D +template +struct KNearestNeighborKernelV3Functor { + static void run( + size_t blocks, + size_t threads, + const scalar_t* __restrict__ points1, + const scalar_t* __restrict__ points2, + const int64_t* __restrict__ lengths1, + const int64_t* __restrict__ lengths2, + scalar_t* __restrict__ dists, + int64_t* __restrict__ idxs, + const size_t N, + const size_t P1, + const size_t P2, + const size_t norm) { + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + KNearestNeighborKernelV3<<>>( + points1, points2, lengths1, lengths2, dists, idxs, N, P1, P2, norm); + } +}; + +constexpr int V1_MIN_D = 1; +constexpr int V1_MAX_D = 32; + +constexpr int V2_MIN_D = 1; +constexpr int V2_MAX_D = 8; +constexpr int V2_MIN_K = 1; +constexpr int V2_MAX_K = 32; + +constexpr int V3_MIN_D = 1; +constexpr int V3_MAX_D = 8; +constexpr int V3_MIN_K = 1; +constexpr int V3_MAX_K = 4; + +bool InBounds(const int64_t min, const int64_t x, const int64_t max) { + return min <= x && x <= max; +} + +bool KnnCheckVersion(int version, const int64_t D, const int64_t K) { + if (version == 0) { + return true; + } else if (version == 1) { + return InBounds(V1_MIN_D, D, V1_MAX_D); + } else if (version == 2) { + return InBounds(V2_MIN_D, D, V2_MAX_D) && InBounds(V2_MIN_K, K, V2_MAX_K); + } else if (version == 3) { + return InBounds(V3_MIN_D, D, V3_MAX_D) && InBounds(V3_MIN_K, K, V3_MAX_K); + } + return false; +} + +int ChooseVersion(const int64_t D, const int64_t K) { + for (int version = 3; version >= 1; version--) { + if (KnnCheckVersion(version, D, K)) { + return version; + } + } + return 0; +} + +std::tuple KNearestNeighborIdxCuda( + const at::Tensor& p1, + const at::Tensor& p2, + const at::Tensor& lengths1, + 
const at::Tensor& lengths2, + const int norm, + const int K, + int version) { + // Check inputs are on the same device + at::TensorArg p1_t{p1, "p1", 1}, p2_t{p2, "p2", 2}, + lengths1_t{lengths1, "lengths1", 3}, lengths2_t{lengths2, "lengths2", 4}; + at::CheckedFrom c = "KNearestNeighborIdxCuda"; + at::checkAllSameGPU(c, {p1_t, p2_t, lengths1_t, lengths2_t}); + at::checkAllSameType(c, {p1_t, p2_t}); + + // Set the device for the kernel launch based on the device of the input + at::cuda::CUDAGuard device_guard(p1.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + const auto N = p1.size(0); + const auto P1 = p1.size(1); + const auto P2 = p2.size(1); + const auto D = p2.size(2); + const int64_t K_64 = K; + + TORCH_CHECK((norm == 1) || (norm == 2), "Norm must be 1 or 2."); + + TORCH_CHECK(p1.size(2) == D, "Point sets must have the same last dimension"); + auto long_dtype = lengths1.options().dtype(at::kLong); + auto idxs = at::zeros({N, P1, K}, long_dtype); + auto dists = at::zeros({N, P1, K}, p1.options()); + + if (idxs.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return std::make_tuple(idxs, dists); + } + + if (version < 0) { + version = ChooseVersion(D, K); + } else if (!KnnCheckVersion(version, D, K)) { + int new_version = ChooseVersion(D, K); + std::cout << "WARNING: Requested KNN version " << version + << " is not compatible with D = " << D << "; K = " << K + << ". Falling back to version = " << new_version << std::endl; + version = new_version; + } + + // At this point we should have a valid version no matter what data the user + // gave us. But we can check once more to be sure; however this time + // assert fail since failing at this point means we have a bug in our version + // selection or checking code. 
+ AT_ASSERTM(KnnCheckVersion(version, D, K), "Invalid version"); + + const size_t threads = 256; + const size_t blocks = 256; + if (version == 0) { + AT_DISPATCH_FLOATING_TYPES( + p1.scalar_type(), "knn_kernel_cuda", ([&] { + KNearestNeighborKernelV0<<>>( + p1.contiguous().data_ptr(), + p2.contiguous().data_ptr(), + lengths1.contiguous().data_ptr(), + lengths2.contiguous().data_ptr(), + dists.data_ptr(), + idxs.data_ptr(), + N, + P1, + P2, + D, + K, + norm); + })); + } else if (version == 1) { + AT_DISPATCH_FLOATING_TYPES(p1.scalar_type(), "knn_kernel_cuda", ([&] { + DispatchKernel1D< + KNearestNeighborV1Functor, + scalar_t, + V1_MIN_D, + V1_MAX_D>( + D, + blocks, + threads, + p1.contiguous().data_ptr(), + p2.contiguous().data_ptr(), + lengths1.contiguous().data_ptr(), + lengths2.contiguous().data_ptr(), + dists.data_ptr(), + idxs.data_ptr(), + N, + P1, + P2, + K, + norm); + })); + } else if (version == 2) { + AT_DISPATCH_FLOATING_TYPES(p1.scalar_type(), "knn_kernel_cuda", ([&] { + DispatchKernel2D< + KNearestNeighborKernelV2Functor, + scalar_t, + V2_MIN_D, + V2_MAX_D, + V2_MIN_K, + V2_MAX_K>( + D, + K_64, + blocks, + threads, + p1.contiguous().data_ptr(), + p2.contiguous().data_ptr(), + lengths1.contiguous().data_ptr(), + lengths2.contiguous().data_ptr(), + dists.data_ptr(), + idxs.data_ptr(), + N, + P1, + P2, + norm); + })); + } else if (version == 3) { + AT_DISPATCH_FLOATING_TYPES(p1.scalar_type(), "knn_kernel_cuda", ([&] { + DispatchKernel2D< + KNearestNeighborKernelV3Functor, + scalar_t, + V3_MIN_D, + V3_MAX_D, + V3_MIN_K, + V3_MAX_K>( + D, + K_64, + blocks, + threads, + p1.contiguous().data_ptr(), + p2.contiguous().data_ptr(), + lengths1.contiguous().data_ptr(), + lengths2.contiguous().data_ptr(), + dists.data_ptr(), + idxs.data_ptr(), + N, + P1, + P2, + norm); + })); + } + AT_CUDA_CHECK(cudaGetLastError()); + return std::make_tuple(idxs, dists); +} + +// ------------------------------------------------------------- // +// Backward Operators // +// 
------------------------------------------------------------- // + +// TODO(gkioxari) support all data types once AtomicAdd supports doubles. +// Currently, support is for floats only. +__global__ void KNearestNeighborBackwardKernel( + const float* __restrict__ p1, // (N, P1, D) + const float* __restrict__ p2, // (N, P2, D) + const int64_t* __restrict__ lengths1, // (N,) + const int64_t* __restrict__ lengths2, // (N,) + const int64_t* __restrict__ idxs, // (N, P1, K) + const float* __restrict__ grad_dists, // (N, P1, K) + float* __restrict__ grad_p1, // (N, P1, D) + float* __restrict__ grad_p2, // (N, P2, D) + const size_t N, + const size_t P1, + const size_t P2, + const size_t K, + const size_t D, + const size_t norm) { + const size_t tid = blockIdx.x * blockDim.x + threadIdx.x; + const size_t stride = gridDim.x * blockDim.x; + + for (size_t i = tid; i < N * P1 * K * D; i += stride) { + const size_t n = i / (P1 * K * D); // batch index + size_t rem = i % (P1 * K * D); + const size_t p1_idx = rem / (K * D); // index of point in p1 + rem = rem % (K * D); + const size_t k = rem / D; // k-th nearest neighbor + const size_t d = rem % D; // d-th dimension in the feature vector + + const size_t num1 = lengths1[n]; // number of valid points in p1 in batch + const size_t num2 = lengths2[n]; // number of valid points in p2 in batch + if ((p1_idx < num1) && (k < num2)) { + const float grad_dist = grad_dists[n * P1 * K + p1_idx * K + k]; + // index of point in p2 corresponding to the k-th nearest neighbor + const int64_t p2_idx = idxs[n * P1 * K + p1_idx * K + k]; + // If the index is the pad value of -1 then ignore it + if (p2_idx == -1) { + continue; + } + float diff = 0.0; + if (norm == 1) { + float sign = + (p1[n * P1 * D + p1_idx * D + d] > p2[n * P2 * D + p2_idx * D + d]) + ? 
1.0 + : -1.0; + diff = grad_dist * sign; + } else { // norm is 2 + diff = 2.0 * grad_dist * + (p1[n * P1 * D + p1_idx * D + d] - p2[n * P2 * D + p2_idx * D + d]); + } + atomicAdd(grad_p1 + n * P1 * D + p1_idx * D + d, diff); + atomicAdd(grad_p2 + n * P2 * D + p2_idx * D + d, -1.0f * diff); + } + } +} + +std::tuple KNearestNeighborBackwardCuda( + const at::Tensor& p1, + const at::Tensor& p2, + const at::Tensor& lengths1, + const at::Tensor& lengths2, + const at::Tensor& idxs, + int norm, + const at::Tensor& grad_dists) { + // Check inputs are on the same device + at::TensorArg p1_t{p1, "p1", 1}, p2_t{p2, "p2", 2}, + lengths1_t{lengths1, "lengths1", 3}, lengths2_t{lengths2, "lengths2", 4}, + idxs_t{idxs, "idxs", 5}, grad_dists_t{grad_dists, "grad_dists", 6}; + at::CheckedFrom c = "KNearestNeighborBackwardCuda"; + at::checkAllSameGPU( + c, {p1_t, p2_t, lengths1_t, lengths2_t, idxs_t, grad_dists_t}); + at::checkAllSameType(c, {p1_t, p2_t, grad_dists_t}); + + // This is nondeterministic because atomicAdd + at::globalContext().alertNotDeterministic("KNearestNeighborBackwardCuda"); + + // Set the device for the kernel launch based on the device of the input + at::cuda::CUDAGuard device_guard(p1.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + const auto N = p1.size(0); + const auto P1 = p1.size(1); + const auto P2 = p2.size(1); + const auto D = p2.size(2); + const auto K = idxs.size(2); + + TORCH_CHECK(p1.size(2) == D, "Point sets must have the same last dimension"); + TORCH_CHECK(idxs.size(0) == N, "KNN idxs must have the same batch dimension"); + TORCH_CHECK( + idxs.size(1) == P1, "KNN idxs must have the same point dimension as p1"); + TORCH_CHECK(grad_dists.size(0) == N); + TORCH_CHECK(grad_dists.size(1) == P1); + TORCH_CHECK(grad_dists.size(2) == K); + + auto grad_p1 = at::zeros({N, P1, D}, p1.options()); + auto grad_p2 = at::zeros({N, P2, D}, p2.options()); + + if (grad_p1.numel() == 0 || grad_p2.numel() == 0) { + 
AT_CUDA_CHECK(cudaGetLastError()); + return std::make_tuple(grad_p1, grad_p2); + } + + const int blocks = 64; + const int threads = 512; + + KNearestNeighborBackwardKernel<<>>( + p1.contiguous().data_ptr(), + p2.contiguous().data_ptr(), + lengths1.contiguous().data_ptr(), + lengths2.contiguous().data_ptr(), + idxs.contiguous().data_ptr(), + grad_dists.contiguous().data_ptr(), + grad_p1.data_ptr(), + grad_p2.data_ptr(), + N, + P1, + P2, + K, + D, + norm); + + AT_CUDA_CHECK(cudaGetLastError()); + return std::make_tuple(grad_p1, grad_p2); +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/knn/knn.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/knn/knn.h new file mode 100644 index 0000000000000000000000000000000000000000..63f204cd4759229011ca840bd208713ff79ce301 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/knn/knn.h @@ -0,0 +1,161 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once +#include +#include +#include "utils/pytorch3d_cutils.h" + +// Compute indices of K nearest neighbors in pointcloud p2 to points +// in pointcloud p1. +// +// Args: +// p1: FloatTensor of shape (N, P1, D) giving a batch of pointclouds each +// containing P1 points of dimension D. +// p2: FloatTensor of shape (N, P2, D) giving a batch of pointclouds each +// containing P2 points of dimension D. +// lengths1: LongTensor, shape (N,), giving actual length of each P1 cloud. +// lengths2: LongTensor, shape (N,), giving actual length of each P2 cloud. +// norm: int specifying the norm for the distance (1 for L1, 2 for L2) +// K: int giving the number of nearest points to return. +// version: Integer telling which implementation to use. 
+// +// Returns: +// p1_neighbor_idx: LongTensor of shape (N, P1, K), where +// p1_neighbor_idx[n, i, k] = j means that the kth nearest +// neighbor to p1[n, i] in the cloud p2[n] is p2[n, j]. +// It is padded with zeros so that it can be used easily in a later +// gather() operation. +// +// p1_neighbor_dists: FloatTensor of shape (N, P1, K) containing the squared +// distance from each point p1[n, p, :] to its K neighbors +// p2[n, p1_neighbor_idx[n, p, k], :]. + +// CPU implementation. +std::tuple KNearestNeighborIdxCpu( + const at::Tensor& p1, + const at::Tensor& p2, + const at::Tensor& lengths1, + const at::Tensor& lengths2, + const int norm, + const int K); + +// CUDA implementation +std::tuple KNearestNeighborIdxCuda( + const at::Tensor& p1, + const at::Tensor& p2, + const at::Tensor& lengths1, + const at::Tensor& lengths2, + const int norm, + const int K, + const int version); + +// Implementation which is exposed. +std::tuple KNearestNeighborIdx( + const at::Tensor& p1, + const at::Tensor& p2, + const at::Tensor& lengths1, + const at::Tensor& lengths2, + const int norm, + const int K, + const int version) { + if (p1.is_cuda() || p2.is_cuda()) { +#ifdef WITH_CUDA + CHECK_CUDA(p1); + CHECK_CUDA(p2); + return KNearestNeighborIdxCuda( + p1, p2, lengths1, lengths2, norm, K, version); +#else + AT_ERROR("Not compiled with GPU support."); +#endif + } + CHECK_CPU(p1); + CHECK_CPU(p2); + return KNearestNeighborIdxCpu(p1, p2, lengths1, lengths2, norm, K); +} + +// Compute gradients with respect to p1 and p2 +// +// Args: +// p1: FloatTensor of shape (N, P1, D) giving a batch of pointclouds each +// containing P1 points of dimension D. +// p2: FloatTensor of shape (N, P2, D) giving a batch of pointclouds each +// containing P2 points of dimension D. +// lengths1: LongTensor, shape (N,), giving actual length of each P1 cloud. +// lengths2: LongTensor, shape (N,), giving actual length of each P2 cloud. 
+// p1_neighbor_idx: LongTensor of shape (N, P1, K), where +// p1_neighbor_idx[n, i, k] = j means that the kth nearest +// neighbor to p1[n, i] in the cloud p2[n] is p2[n, j]. +// It is padded with zeros so that it can be used easily in a later +// gather() operation. This is computed from the forward pass. +// norm: int specifying the norm for the distance (1 for L1, 2 for L2) +// grad_dists: FLoatTensor of shape (N, P1, K) which contains the input +// gradients. +// +// Returns: +// grad_p1: FloatTensor of shape (N, P1, D) containing the output gradients +// wrt p1. +// grad_p2: FloatTensor of shape (N, P2, D) containing the output gradients +// wrt p2. + +// CPU implementation. +std::tuple KNearestNeighborBackwardCpu( + const at::Tensor& p1, + const at::Tensor& p2, + const at::Tensor& lengths1, + const at::Tensor& lengths2, + const at::Tensor& idxs, + const int norm, + const at::Tensor& grad_dists); + +// CUDA implementation +std::tuple KNearestNeighborBackwardCuda( + const at::Tensor& p1, + const at::Tensor& p2, + const at::Tensor& lengths1, + const at::Tensor& lengths2, + const at::Tensor& idxs, + const int norm, + const at::Tensor& grad_dists); + +// Implementation which is exposed. +std::tuple KNearestNeighborBackward( + const at::Tensor& p1, + const at::Tensor& p2, + const at::Tensor& lengths1, + const at::Tensor& lengths2, + const at::Tensor& idxs, + const int norm, + const at::Tensor& grad_dists) { + if (p1.is_cuda() || p2.is_cuda()) { +#ifdef WITH_CUDA + CHECK_CUDA(p1); + CHECK_CUDA(p2); + return KNearestNeighborBackwardCuda( + p1, p2, lengths1, lengths2, idxs, norm, grad_dists); +#else + AT_ERROR("Not compiled with GPU support."); +#endif + } + CHECK_CPU(p1); + CHECK_CPU(p2); + return KNearestNeighborBackwardCpu( + p1, p2, lengths1, lengths2, idxs, norm, grad_dists); +} + +// Utility to check whether a KNN version can be used. +// +// Args: +// version: Integer in the range 0 <= version <= 3 indicating one of our +// KNN implementations. 
+// D: Number of dimensions for the input and query point clouds +// K: Number of neighbors to be found +// +// Returns: +// Whether the indicated KNN version can be used. +bool KnnCheckVersion(int version, const int64_t D, const int64_t K); diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/knn/knn_cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/knn/knn_cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9e3153a6669721240c36084a3a7a563dee250a42 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/knn/knn_cpu.cpp @@ -0,0 +1,128 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include + +std::tuple KNearestNeighborIdxCpu( + const at::Tensor& p1, + const at::Tensor& p2, + const at::Tensor& lengths1, + const at::Tensor& lengths2, + const int norm, + const int K) { + const int N = p1.size(0); + const int P1 = p1.size(1); + const int D = p1.size(2); + + auto long_opts = lengths1.options().dtype(torch::kInt64); + torch::Tensor idxs = torch::full({N, P1, K}, 0, long_opts); + torch::Tensor dists = torch::full({N, P1, K}, 0, p1.options()); + + auto p1_a = p1.accessor(); + auto p2_a = p2.accessor(); + auto lengths1_a = lengths1.accessor(); + auto lengths2_a = lengths2.accessor(); + auto idxs_a = idxs.accessor(); + auto dists_a = dists.accessor(); + + for (int n = 0; n < N; ++n) { + const int64_t length1 = lengths1_a[n]; + const int64_t length2 = lengths2_a[n]; + for (int64_t i1 = 0; i1 < length1; ++i1) { + // Use a priority queue to store (distance, index) tuples. 
+ std::priority_queue> q; + for (int64_t i2 = 0; i2 < length2; ++i2) { + float dist = 0; + for (int d = 0; d < D; ++d) { + float diff = p1_a[n][i1][d] - p2_a[n][i2][d]; + if (norm == 1) { + dist += abs(diff); + } else { // norm is 2 (default) + dist += diff * diff; + } + } + int size = static_cast(q.size()); + if (size < K || dist < std::get<0>(q.top())) { + q.emplace(dist, i2); + if (size >= K) { + q.pop(); + } + } + } + while (!q.empty()) { + auto t = q.top(); + q.pop(); + const int k = q.size(); + dists_a[n][i1][k] = std::get<0>(t); + idxs_a[n][i1][k] = std::get<1>(t); + } + } + } + return std::make_tuple(idxs, dists); +} + +// ------------------------------------------------------------- // +// Backward Operators // +// ------------------------------------------------------------- // + +std::tuple KNearestNeighborBackwardCpu( + const at::Tensor& p1, + const at::Tensor& p2, + const at::Tensor& lengths1, + const at::Tensor& lengths2, + const at::Tensor& idxs, + const int norm, + const at::Tensor& grad_dists) { + const int N = p1.size(0); + const int P1 = p1.size(1); + const int D = p1.size(2); + const int P2 = p2.size(1); + const int K = idxs.size(2); + + torch::Tensor grad_p1 = torch::full({N, P1, D}, 0, p1.options()); + torch::Tensor grad_p2 = torch::full({N, P2, D}, 0, p2.options()); + + auto p1_a = p1.accessor(); + auto p2_a = p2.accessor(); + auto lengths1_a = lengths1.accessor(); + auto lengths2_a = lengths2.accessor(); + auto idxs_a = idxs.accessor(); + auto grad_dists_a = grad_dists.accessor(); + auto grad_p1_a = grad_p1.accessor(); + auto grad_p2_a = grad_p2.accessor(); + + for (int n = 0; n < N; ++n) { + const int64_t length1 = lengths1_a[n]; + int64_t length2 = lengths2_a[n]; + length2 = (length2 < K) ? 
length2 : K; + for (int64_t i1 = 0; i1 < length1; ++i1) { + for (int64_t k = 0; k < length2; ++k) { + const int64_t i2 = idxs_a[n][i1][k]; + // If the index is the pad value of -1 then ignore it + if (i2 == -1) { + continue; + } + for (int64_t d = 0; d < D; ++d) { + float diff = 0.0; + if (norm == 1) { + float sign = (p1_a[n][i1][d] > p2_a[n][i2][d]) ? 1.0 : -1.0; + diff = grad_dists_a[n][i1][k] * sign; + } else { // norm is 2 (default) + diff = 2.0f * grad_dists_a[n][i1][k] * + (p1_a[n][i1][d] - p2_a[n][i2][d]); + } + grad_p1_a[n][i1][d] += diff; + grad_p2_a[n][i2][d] += -1.0f * diff; + } + } + } + } + return std::make_tuple(grad_p1, grad_p2); +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes.cu new file mode 100644 index 0000000000000000000000000000000000000000..8d05ec80b27fd74122b94660bb4dedff585562cb --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes.cu @@ -0,0 +1,565 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include +#include "marching_cubes/tables.h" + +/* +Parallelized marching cubes for pytorch extension +referenced and adapted from CUDA-Samples: +(https://github.com/NVIDIA/cuda-samples/tree/master/Samples/5_Domain_Specific/marchingCubes) +We divide the algorithm into two forward-passes: +(1) The first forward-pass executes "ClassifyVoxelKernel" to +evaluate volume scalar field for each cube and pre-compute +two arrays -- number of vertices per cube (d_voxelVerts) and +occupied or not per cube (d_voxelOccupied). 
+ +Some prepration steps: +With d_voxelOccupied, an exclusive scan is performed to compute +the number of activeVoxels, which can be used to accelerate +computation. With d_voxelVerts, another exclusive scan +is performed to compute the accumulated sum of vertices in the 3d +grid and totalVerts. + +(2) The second forward-pass calls "GenerateFacesKernel" to +generate interpolated vertex positions and face indices by "marching +through" each cube in the grid. + +*/ + +// EPS: Used to indicate if two float values are close +__constant__ const float EPSILON = 1e-5; + +// Linearly interpolate the position where an isosurface cuts an edge +// between two vertices, based on their scalar values +// +// Args: +// isolevel: float value used as threshold +// p1: position of point1 +// p2: position of point2 +// valp1: field value for p1 +// valp2: field value for p2 +// +// Returns: +// point: interpolated verte +// +__device__ float3 +vertexInterp(float isolevel, float3 p1, float3 p2, float valp1, float valp2) { + float ratio; + float3 p; + + if (abs(isolevel - valp1) < EPSILON) { + return p1; + } else if (abs(isolevel - valp2) < EPSILON) { + return p2; + } else if (abs(valp1 - valp2) < EPSILON) { + return p1; + } + + ratio = (isolevel - valp1) / (valp2 - valp1); + + p.x = p1.x * (1 - ratio) + p2.x * ratio; + p.y = p1.y * (1 - ratio) + p2.y * ratio; + p.z = p1.z * (1 - ratio) + p2.z * ratio; + + return p; +} + +// Determine if the triangle is degenerate +// A triangle is degenerate when at least two of the vertices +// share the same position. 
//
// Args:
//   p1: position of vertex p1
//   p2: position of vertex p2
//   p3: position of vertex p3
//
// Returns:
//   boolean indicator if the triangle is degenerate
__device__ bool isDegenerate(float3 p1, float3 p2, float3 p3) {
  // Two vertices coincide when all three coordinate deltas are below EPSILON.
  if ((abs(p1.x - p2.x) < EPSILON && abs(p1.y - p2.y) < EPSILON &&
       abs(p1.z - p2.z) < EPSILON) ||
      (abs(p2.x - p3.x) < EPSILON && abs(p2.y - p3.y) < EPSILON &&
       abs(p2.z - p3.z) < EPSILON) ||
      (abs(p3.x - p1.x) < EPSILON && abs(p3.y - p1.y) < EPSILON &&
       abs(p3.z - p1.z) < EPSILON)) {
    return true;
  } else {
    return false;
  }
}

// Convert from local vertex id to global vertex id, given position
// of the cube where the vertex resides. The function ensures vertices
// shared by adjacent cubes are mapped to the same global id.
//
// Args:
//   v: local vertex id
//   x: x position of the cube where the vertex belongs
//   y: y position of the cube where the vertex belongs
//   z: z position of the cube where the vertex belongs
//   W: width of x dimension
//   H: height of y dimension
//
// Returns:
//   global vertex id represented by its x/y/z offsets
__device__ uint localToGlobal(int v, int x, int y, int z, int W, int H) {
  // The local id's bits 0/1/2 encode the +0/+1 offset along x/y/z.
  const int dx = v & 1;
  const int dy = v >> 1 & 1;
  const int dz = v >> 2 & 1;
  return (x + dx) + (y + dy) * W + (z + dz) * W * H;
}

// Hash-combine a pair of global vertex ids into a single integer.
//
// Args:
//   v1_id: global id of vertex 1
//   v2_id: global id of vertex 2
//   W: width of the 3d grid
//   H: height of the 3d grid
//   D: depth of the 3d grid
//
// Returns:
//   hashing for a pair of vertex ids
//
__device__ int64_t hashVpair(uint v1_id, uint v2_id, int W, int H, int D) {
  // Must stay in sync with Cube::HashVpair in marching_cubes_utils.h,
  // which uses the same formula for the CPU path.
  return (int64_t)v1_id * (W + W * H + W * H * D) + (int64_t)v2_id;
}

// precompute number of vertices and occupancy
// for each voxel in the grid.
//
// Args:
//   voxelVerts: device array to store number of verts per voxel
//   voxelOccupied: device array to store occupancy state per voxel
//   vol: torch tensor stored with 3D scalar field
//   isolevel: threshold to determine isosurface intersection
//
// NOTE(review): the accessor template arguments below were stripped by the
// diff extraction; reconstructed from the launch site in MarchingCubesCuda
// (int 1-d outputs, float 3-d volume) -- confirm against upstream pytorch3d.
__global__ void ClassifyVoxelKernel(
    at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits> voxelVerts,
    at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits> voxelOccupied,
    const at::PackedTensorAccessor32<float, 3, at::RestrictPtrTraits> vol,
    float isolevel) {
  // Maps the (dx, dy, dz)-bit enumeration below to the cube-corner bit
  // order expected by numVertsTable.
  const int indexTable[8]{0, 1, 4, 5, 3, 2, 7, 6};
  const uint D = vol.size(0) - 1;
  const uint H = vol.size(1) - 1;
  const uint W = vol.size(2) - 1;

  // 1-d grid; each thread strides over the flat cube index space.
  uint id = blockIdx.x * blockDim.x + threadIdx.x;
  uint num_threads = gridDim.x * blockDim.x;

  // Table mapping from cubeindex to number of vertices in the configuration
  const unsigned char numVertsTable[256] = {
      0,  3,  3,  6,  3,  6,  6,  9,  3,  6,  6,  9,  6,  9,  9,  6,  3,  6,
      6,  9,  6,  9,  9,  12, 6,  9,  9,  12, 9,  12, 12, 9,  3,  6,  6,  9,
      6,  9,  9,  12, 6,  9,  9,  12, 9,  12, 12, 9,  6,  9,  9,  6,  9,  12,
      12, 9,  9,  12, 12, 9,  12, 15, 15, 6,  3,  6,  6,  9,  6,  9,  9,  12,
      6,  9,  9,  12, 9,  12, 12, 9,  6,  9,  9,  12, 9,  12, 12, 15, 9,  12,
      12, 15, 12, 15, 15, 12, 6,  9,  9,  12, 9,  12, 6,  9,  9,  12, 12, 15,
      12, 15, 9,  6,  9,  12, 12, 9,  12, 15, 9,  6,  12, 15, 15, 12, 15, 6,
      12, 3,  3,  6,  6,  9,  6,  9,  9,  12, 6,  9,  9,  12, 9,  12, 12, 9,
      6,  9,  9,  12, 9,  12, 12, 15, 9,  6,  12, 9,  12, 9,  15, 6,  6,  9,
      9,  12, 9,  12, 12, 15, 9,  12, 12, 15, 12, 15, 15, 12, 9,  12, 12, 9,
      12, 15, 15, 12, 12, 9,  15, 6,  15, 12, 6,  3,  6,  9,  9,  12, 9,  12,
      12, 15, 9,  12, 12, 15, 6,  9,  9,  6,  9,  12, 12, 15, 12, 15, 15, 6,
      12, 9,  15, 12, 9,  6,  12, 3,  9,  12, 12, 15, 12, 15, 9,  12, 12, 15,
      15, 6,  9,  12, 6,  3,  6,  9,  9,  6,  9,  12, 6,  3,  9,  6,  12, 3,
      6,  3,  3,  0,
  };

  for (uint tid = id; tid < D * H * W; tid += num_threads) {
    // Recover the 3d location of this cube from the flat index.
    const int gx = tid % W;
    const int gy = tid / W % H;
    const int gz = tid / (W * H);

    // Build the 8-bit cube configuration: one bit per corner that lies
    // below the isolevel.
    int cubeindex = 0;
    for (int i = 0; i < 8; i++) {
      const int dx = i & 1;
      const int dy = i >> 1 & 1;
      const int dz = i >> 2 & 1;

      const int x = gx + dx;
      const int y = gy + dy;
      const int z = gz + dz;

      if (vol[z][y][x] < isolevel) {
        cubeindex |= 1 << indexTable[i];
      }
    }
    // collect number of vertices for each voxel
    unsigned char numVerts = numVertsTable[cubeindex];
    voxelVerts[tid] = numVerts;
    voxelOccupied[tid] = (numVerts > 0);
  }
}

// extract compact voxel array for acceleration
//
// Args:
//   compactedVoxelArray: tensor of shape (activeVoxels,) which maps
//     from accumulated non-empty voxel index to original 3d grid index
//   voxelOccupied: tensor of shape (numVoxels,) which stores
//     the occupancy state per voxel
//   voxelOccupiedScan: tensor of shape (numVoxels,) which
//     stores the accumulated occupied voxel counts
//   numVoxels: number of total voxels in the grid
//
// NOTE(review): voxelOccupiedScan is int64 because ExclusiveScanAndTotal
// always produces a kLong tensor; other accessor types reconstructed from
// the launch site -- confirm against upstream pytorch3d.
__global__ void CompactVoxelsKernel(
    at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits>
        compactedVoxelArray,
    const at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits>
        voxelOccupied,
    const at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
        voxelOccupiedScan,
    uint numVoxels) {
  uint id = blockIdx.x * blockDim.x + threadIdx.x;
  uint num_threads = gridDim.x * blockDim.x;
  for (uint tid = id; tid < numVoxels; tid += num_threads) {
    if (voxelOccupied[tid]) {
      // The exclusive-scan value is exactly this voxel's rank among the
      // occupied voxels, so writes are collision-free.
      compactedVoxelArray[voxelOccupiedScan[tid]] = tid;
    }
  }
}

// generate triangles for each voxel using marching cubes
//
// Args:
//   verts: torch tensor of shape (V, 3) to store interpolated mesh vertices
//   faces: torch tensor of shape (F, 3) to store indices for mesh faces
//   ids: torch tensor of shape (V) to store id of each vertex
//   compactedVoxelArray: tensor of shape (activeVoxels,) which stores
//     non-empty voxel index.
//   numVertsScanned: tensor of shape (numVoxels,) which stores accumulated
//     vertices count in the voxel
//   activeVoxels: number of active voxels used for acceleration
//   vol: torch tensor stored with 3D scalar field
//   isolevel: threshold to determine isosurface intersection
//
// NOTE(review): accessor template arguments were stripped by the diff
// extraction and are reconstructed here from the launch site in
// MarchingCubesCuda; faces/ids use the 64-bit-index packed_accessor since
// the launch site calls plain packed_accessor() on them -- confirm
// against upstream pytorch3d.
__global__ void GenerateFacesKernel(
    at::PackedTensorAccessor32<float, 2, at::RestrictPtrTraits> verts,
    at::PackedTensorAccessor<int64_t, 2> faces,
    at::PackedTensorAccessor<int64_t, 1> ids,
    at::PackedTensorAccessor32<int, 1, at::RestrictPtrTraits>
        compactedVoxelArray,
    at::PackedTensorAccessor32<int64_t, 1, at::RestrictPtrTraits>
        numVertsScanned,
    const uint activeVoxels,
    const at::PackedTensorAccessor32<float, 3, at::RestrictPtrTraits> vol,
    const at::PackedTensorAccessor32<int, 2, at::RestrictPtrTraits> faceTable,
    const float isolevel) {
  uint id = blockIdx.x * blockDim.x + threadIdx.x;
  uint num_threads = gridDim.x * blockDim.x;
  const int faces_size = faces.size(0);
  // Table mapping each edge to the corresponding cube vertices offsets
  const int edgeToVertsTable[12][2] = {
      {0, 1},
      {1, 5},
      {4, 5},
      {0, 4},
      {2, 3},
      {3, 7},
      {6, 7},
      {2, 6},
      {0, 2},
      {1, 3},
      {5, 7},
      {4, 6},
  };

  // Table mapping from cubeindex to number of vertices in the configuration
  const unsigned char numVertsTable[256] = {
      0,  3,  3,  6,  3,  6,  6,  9,  3,  6,  6,  9,  6,  9,  9,  6,  3,  6,
      6,  9,  6,  9,  9,  12, 6,  9,  9,  12, 9,  12, 12, 9,  3,  6,  6,  9,
      6,  9,  9,  12, 6,  9,  9,  12, 9,  12, 12, 9,  6,  9,  9,  6,  9,  12,
      12, 9,  9,  12, 12, 9,  12, 15, 15, 6,  3,  6,  6,  9,  6,  9,  9,  12,
      6,  9,  9,  12, 9,  12, 12, 9,  6,  9,  9,  12, 9,  12, 12, 15, 9,  12,
      12, 15, 12, 15, 15, 12, 6,  9,  9,  12, 9,  12, 6,  9,  9,  12, 12, 15,
      12, 15, 9,  6,  9,  12, 12, 9,  12, 15, 9,  6,  12, 15, 15, 12, 15, 6,
      12, 3,  3,  6,  6,  9,  6,  9,  9,  12, 6,  9,  9,  12, 9,  12, 12, 9,
      6,  9,  9,  12, 9,  12, 12, 15, 9,  6,  12, 9,  12, 9,  15, 6,  6,  9,
      9,  12, 9,  12, 12, 15, 9,  12, 12, 15, 12, 15, 15, 12, 9,  12, 12, 9,
      12, 15, 15, 12, 12, 9,  15, 6,  15, 12, 6,  3,  6,  9,  9,  12, 9,  12,
      12, 15, 9,  12, 12, 15, 6,  9,  9,  6,  9,  12, 12, 15, 12, 15, 15, 6,
      12, 9,  15, 12, 9,  6,  12, 3,  9,  12, 12, 15, 12, 15, 9,  12, 12, 15,
      15, 6,  9,  12, 6,  3,  6,  9,  9,  6,  9,  12, 6,  3,  9,  6,  12, 3,
      6,  3,  3,  0,
  };

  for (uint tid = id; tid < activeVoxels; tid += num_threads) {
    uint voxel = compactedVoxelArray[tid]; // maps from accumulated id to
                                           // original 3d voxel id
    // mapping from offsets to vi index
    int indexTable[8]{0, 1, 4, 5, 3, 2, 7, 6};
    // field value for each vertex
    float val[8];
    // position for each vertex
    float3 p[8];
    // 3d address
    const uint D = vol.size(0) - 1;
    const uint H = vol.size(1) - 1;
    const uint W = vol.size(2) - 1;

    const int gx = voxel % W;
    const int gy = voxel / W % H;
    const int gz = voxel / (W * H);

    // recalculate cubeindex (cheaper than storing it during the
    // classification pass)
    uint cubeindex = 0;
    for (int i = 0; i < 8; i++) {
      const int dx = i & 1;
      const int dy = i >> 1 & 1;
      const int dz = i >> 2 & 1;

      const int x = gx + dx;
      const int y = gy + dy;
      const int z = gz + dz;

      if (vol[z][y][x] < isolevel) {
        cubeindex |= 1 << indexTable[i];
      }
      val[indexTable[i]] = vol[z][y][x]; // maps from vi to volume
      p[indexTable[i]] = make_float3(x, y, z); // maps from vi to position
    }

    // Interpolate vertices where the surface intersects the cube
    float3 vertlist[12];
    vertlist[0] = vertexInterp(isolevel, p[0], p[1], val[0], val[1]);
    vertlist[1] = vertexInterp(isolevel, p[1], p[2], val[1], val[2]);
    vertlist[2] = vertexInterp(isolevel, p[3], p[2], val[3], val[2]);
    vertlist[3] = vertexInterp(isolevel, p[0], p[3], val[0], val[3]);

    vertlist[4] = vertexInterp(isolevel, p[4], p[5], val[4], val[5]);
    vertlist[5] = vertexInterp(isolevel, p[5], p[6], val[5], val[6]);
    vertlist[6] = vertexInterp(isolevel, p[7], p[6], val[7], val[6]);
    vertlist[7] = vertexInterp(isolevel, p[4], p[7], val[4], val[7]);

    vertlist[8] = vertexInterp(isolevel, p[0], p[4], val[0], val[4]);
    vertlist[9] = vertexInterp(isolevel, p[1], p[5], val[1], val[5]);
    vertlist[10] = vertexInterp(isolevel, p[2], p[6], val[2], val[6]);
    vertlist[11] = vertexInterp(isolevel, p[3], p[7], val[3], val[7]);

    // output triangle faces
    uint numVerts = numVertsTable[cubeindex];

    for (int i = 0; i < numVerts; i++) {
      // Destination slot: exclusive-scan offset for this voxel plus the
      // local vertex index.
      int index = numVertsScanned[voxel] + i;
      unsigned char edge = faceTable[cubeindex][i];

      uint v1 = edgeToVertsTable[edge][0];
      uint v2 = edgeToVertsTable[edge][1];
      // Global ids are computed on the (W+1, H+1, D+1) vertex grid.
      uint v1_id = localToGlobal(v1, gx, gy, gz, W + 1, H + 1);
      uint v2_id = localToGlobal(v2, gx, gy, gz, W + 1, H + 1);
      int64_t edge_id = hashVpair(v1_id, v2_id, W + 1, H + 1, D + 1);

      verts[index][0] = vertlist[edge].x;
      verts[index][1] = vertlist[edge].y;
      verts[index][2] = vertlist[edge].z;

      // faces has totalVerts/3 rows; every third vertex starts a new face.
      if (index < faces_size) {
        faces[index][0] = index * 3 + 0;
        faces[index][1] = index * 3 + 1;
        faces[index][2] = index * 3 + 2;
      }

      ids[index] = edge_id;
    }
  } // end for grid-strided kernel
}

// ATen/Torch does not have an exclusive-scan operator. Additionally, in the
// code below we need to get the "total number of items to work on" after
// a scan, which with an inclusive-scan would simply be the value of the last
// element in the tensor.
//
// This utility function hits two birds with one stone, by running
// an inclusive-scan into a right-shifted view of a tensor that's
// allocated to be one element bigger than the input tensor.
//
// Note; return tensor is `int64_t` per element, even if the input
// tensor is only 32-bit. Also, the return tensor is one element bigger
// than the input one.
//
// Secondary optional argument is an output argument that gets the
// value of the last element of the return tensor (because you almost
// always need this CPU-side right after this function anyway).
static at::Tensor ExclusiveScanAndTotal(
    const at::Tensor& inTensor,
    int64_t* optTotal = nullptr) {
  const auto inSize = inTensor.sizes()[0];
  // One extra element so the inclusive scan can land one slot to the right,
  // turning it into an exclusive scan when read from index 0.
  auto retTensor = at::zeros({inSize + 1}, at::kLong).to(inTensor.device());

  using at::indexing::None;
  using at::indexing::Slice;
  auto rightShiftedView = retTensor.index({Slice(1, None)});

  // Do an (inclusive-scan) cumulative sum in to the view that's
  // shifted one element to the right...
  at::cumsum_out(rightShiftedView, inTensor, 0, at::kLong);

  if (optTotal) {
    // The last element of the padded tensor is the inclusive total.
    // (item<int64_t> matches the kLong dtype promised in the comment above.)
    *optTotal = retTensor[inSize].cpu().item<int64_t>();
  }

  // ...so that the not-shifted tensor holds the exclusive-scan
  return retTensor;
}

// Entrance for marching cubes cuda extension. Marching Cubes is an algorithm
// to create triangle meshes from an implicit function (one of the form
// f(x, y, z) = 0). It works by iteratively checking a grid of cubes
// superimposed over a region of the function. The number of faces and
// positions of the vertices in each cube are determined by the isolevel as
// well as the volume values from the eight vertices of the cube.
//
// We implement this algorithm with two forward passes where the first pass
// checks the occupancy and collects number of vertices for each cube. The
// second pass will skip empty voxels and generate vertices as well as faces
// for each cube through table lookup. The vertex positions, faces and
// identifiers for each vertex will be returned.
//
//
// Args:
//   vol: torch tensor of shape (D, H, W) for volume scalar field
//   isolevel: threshold to determine isosurface intersection
//
// Returns:
//   tuple of <verts, faces, ids> which stores vertex positions, face
//   indices and integer identifiers for each vertex.
//   verts: (N_verts, 3) FloatTensor for vertex positions
//   faces: (N_faces, 3) LongTensor of face indices
//   ids: (N_verts,) LongTensor used to identify each vertex. Vertices from
//   adjacent edges can share the same 3d position.
//   To reduce memory
//   redundancy, we tag each vertex with a unique id for deduplication. In
//   contrast to deduping on vertices, this has the benefit to avoid
//   floating point precision issues.
//
// NOTE(review): template arguments (accessors, std::tuple) were stripped by
// the diff extraction; reconstructed to match the kernel signatures above
// -- confirm against upstream pytorch3d.
std::tuple<at::Tensor, at::Tensor, at::Tensor> MarchingCubesCuda(
    const at::Tensor& vol,
    const float isolevel) {
  // Set the device for the kernel launch based on the device of vol
  at::cuda::CUDAGuard device_guard(vol.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  // transfer _FACE_TABLE data to device
  at::Tensor face_table_tensor = at::zeros(
      {256, 16}, at::TensorOptions().dtype(at::kInt).device(at::kCPU));
  auto face_table_a = face_table_tensor.accessor<int, 2>();
  for (int i = 0; i < 256; i++) {
    for (int j = 0; j < 16; j++) {
      face_table_a[i][j] = _FACE_TABLE[i][j];
    }
  }
  at::Tensor faceTable = face_table_tensor.to(vol.device());

  // get numVoxels: the cube grid is one smaller than the vertex grid
  // in each dimension.
  int threads = 128;
  const uint D = vol.size(0);
  const uint H = vol.size(1);
  const uint W = vol.size(2);
  const int numVoxels = (D - 1) * (H - 1) * (W - 1);
  dim3 grid((numVoxels + threads - 1) / threads, 1, 1);
  // Kernels use grid-stride loops, so clamping the grid is safe.
  if (grid.x > 65535) {
    grid.x = 65535;
  }

  using at::indexing::None;
  using at::indexing::Slice;

  auto d_voxelVerts =
      at::zeros({numVoxels}, at::TensorOptions().dtype(at::kInt))
          .to(vol.device());
  auto d_voxelOccupied =
      at::zeros({numVoxels}, at::TensorOptions().dtype(at::kInt))
          .to(vol.device());

  // Execute "ClassifyVoxelKernel" kernel to precompute
  // two arrays - d_voxelOccupied and d_voxelVertices to global memory,
  // which stores the occupancy state and number of voxel vertices per voxel.
  ClassifyVoxelKernel<<<grid, threads, 0, stream>>>(
      d_voxelVerts.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
      d_voxelOccupied.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
      vol.packed_accessor32<float, 3, at::RestrictPtrTraits>(),
      isolevel);
  AT_CUDA_CHECK(cudaGetLastError());
  cudaDeviceSynchronize();

  // Scan "d_voxelOccupied" array to generate accumulated voxel occupancy
  // count for voxels in the grid and compute the number of active voxels.
  // If the number of active voxels is 0, return zero tensor for verts and
  // faces.
  int64_t activeVoxels = 0;
  auto d_voxelOccupiedScan =
      ExclusiveScanAndTotal(d_voxelOccupied, &activeVoxels);

  const int device_id = vol.device().index();
  auto opt = at::TensorOptions().dtype(at::kInt).device(at::kCUDA, device_id);
  auto opt_long =
      at::TensorOptions().dtype(at::kLong).device(at::kCUDA, device_id);

  if (activeVoxels == 0) {
    // Empty mesh: return correctly-typed empty tensors.
    int ntris = 0;
    at::Tensor verts = at::zeros({ntris * 3, 3}, vol.options());
    at::Tensor faces = at::zeros({ntris, 3}, opt_long);
    at::Tensor ids = at::zeros({ntris}, opt_long);
    return std::make_tuple(verts, faces, ids);
  }

  // Execute "CompactVoxelsKernel" kernel to compress voxels for acceleration.
  // This allows us to run triangle generation on only the occupied voxels.
  auto d_compVoxelArray = at::zeros({activeVoxels}, opt);
  CompactVoxelsKernel<<<grid, threads, 0, stream>>>(
      d_compVoxelArray.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
      d_voxelOccupied.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
      d_voxelOccupiedScan
          .packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
      numVoxels);
  AT_CUDA_CHECK(cudaGetLastError());
  cudaDeviceSynchronize();

  // Scan d_voxelVerts array to generate offsets of vertices for each voxel
  int64_t totalVerts = 0;
  auto d_voxelVertsScan = ExclusiveScanAndTotal(d_voxelVerts, &totalVerts);

  // Execute "GenerateFacesKernel" kernel
  // This runs only on the occupied voxels.
  // It looks up the field values and generates the triangle data.
  at::Tensor verts = at::zeros({totalVerts, 3}, vol.options());
  at::Tensor faces = at::zeros({totalVerts / 3, 3}, opt_long);

  at::Tensor ids = at::zeros({totalVerts}, opt_long);

  dim3 grid2((activeVoxels + threads - 1) / threads, 1, 1);
  if (grid2.x > 65535) {
    grid2.x = 65535;
  }

  GenerateFacesKernel<<<grid2, threads, 0, stream>>>(
      verts.packed_accessor32<float, 2, at::RestrictPtrTraits>(),
      faces.packed_accessor<int64_t, 2>(),
      ids.packed_accessor<int64_t, 1>(),
      d_compVoxelArray.packed_accessor32<int, 1, at::RestrictPtrTraits>(),
      d_voxelVertsScan.packed_accessor32<int64_t, 1, at::RestrictPtrTraits>(),
      activeVoxels,
      vol.packed_accessor32<float, 3, at::RestrictPtrTraits>(),
      faceTable.packed_accessor32<int, 2, at::RestrictPtrTraits>(),
      isolevel);
  AT_CUDA_CHECK(cudaGetLastError());

  return std::make_tuple(verts, faces, ids);
}

// ---------------------------------------------------------------------------
// file: pytorch3d/csrc/marching_cubes/marching_cubes.h
// ---------------------------------------------------------------------------

/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#pragma once
// NOTE(review): angle-bracket include targets were stripped by the diff
// extraction; reconstructed from the names used below -- confirm.
#include <torch/extension.h>
#include <cstdio>
#include <tuple>
#include "utils/pytorch3d_cutils.h"

// Run Marching Cubes algorithm over a batch of volume scalar fields
// with a pre-defined threshold and return a mesh composed of vertices
// and faces for the mesh.
//
// Args:
//   vol: FloatTensor of shape (D, H, W) giving a volume
//   scalar grids.
//   isolevel: isosurface value to use as the threshold to determine whether
//   the points are within a volume.
+// +// Returns: +// vertices: (N_verts, 3) FloatTensor of vertices +// faces: (N_faces, 3) LongTensor of faces +// ids: (N_verts,) LongTensor used to identify each vertex and deduplication +// to avoid floating point precision issues. +// For Cuda, will be used to dedupe redundant vertices. +// For cpp implementation, this tensor is just a placeholder. + +// CPU implementation +std::tuple MarchingCubesCpu( + const at::Tensor& vol, + const float isolevel); + +// CUDA implementation +std::tuple MarchingCubesCuda( + const at::Tensor& vol, + const float isolevel); + +// Implementation which is exposed +inline std::tuple MarchingCubes( + const at::Tensor& vol, + const float isolevel) { + if (vol.is_cuda()) { +#ifdef WITH_CUDA + CHECK_CUDA(vol); + const int D = vol.size(0); + const int H = vol.size(1); + const int W = vol.size(2); + if (D > 1024 || H > 1024 || W > 1024) { + AT_ERROR("Maximum volume size allowed 1K x 1K x 1K"); + } + return MarchingCubesCuda(vol.contiguous(), isolevel); +#else + AT_ERROR("Not compiled with GPU support."); +#endif + } + CHECK_CPU(vol); + return MarchingCubesCpu(vol.contiguous(), isolevel); +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes_cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes_cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fa128e714228fd4c4b699ee13071b752dd8fdf4c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes_cpu.cpp @@ -0,0 +1,111 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include +#include +#include +#include +#include +#include +#include "marching_cubes/marching_cubes_utils.h" +#include "marching_cubes/tables.h" + +// Cpu implementation for Marching Cubes +// Args: +// vol: a Tensor of size (D, H, W) corresponding to a 3D scalar field +// isolevel: the isosurface value to use as the threshold to determine +// whether points are within a volume. +// +// Returns: +// vertices: a float tensor of shape (N_verts, 3) for positions of the mesh +// faces: a long tensor of shape (N_faces, 3) for indices of the face +// ids: a long tensor of shape (N_verts) as placeholder +// +std::tuple MarchingCubesCpu( + const at::Tensor& vol, + const float isolevel) { + // volume shapes + const int D = vol.size(0); + const int H = vol.size(1); + const int W = vol.size(2); + + // Create tensor accessors + auto vol_a = vol.accessor(); + // edge_id_to_v maps from an edge id to a vertex position + std::unordered_map edge_id_to_v; + // uniq_edge_id: used to remove redundant edge ids + std::unordered_map uniq_edge_id; + std::vector faces; // store face indices + std::vector verts; // store vertex positions + // enumerate each cell in the 3d grid + for (int z = 0; z < D - 1; z++) { + for (int y = 0; y < H - 1; y++) { + for (int x = 0; x < W - 1; x++) { + Cube cube(x, y, z, vol_a, isolevel); + // Cube is entirely in/out of the surface + if (_FACE_TABLE[cube.cubeindex][0] == 255) { + continue; + } + // store all boundary vertices that intersect with the edges + std::array interp_points; + // triangle vertex IDs and positions + std::vector tri; + std::vector ps; + + // Interpolate the vertices where the surface intersects with the cube + for (int j = 0; _FACE_TABLE[cube.cubeindex][j] != 255; j++) { + const int e = _FACE_TABLE[cube.cubeindex][j]; + interp_points[e] = cube.VertexInterp(isolevel, e, vol_a); + + int64_t edge = cube.HashVpair(e, W, H, D); + tri.push_back(edge); + ps.push_back(interp_points[e]); + + // Check if the triangle face is degenerate. 
A triangle face + // is degenerate if any of the two verices share the same 3D position + if ((j + 1) % 3 == 0 && ps[0] != ps[1] && ps[1] != ps[2] && + ps[2] != ps[0]) { + for (int k = 0; k < 3; k++) { + int64_t v = tri.at(k); + edge_id_to_v[v] = ps.at(k); + if (!uniq_edge_id.count(v)) { + uniq_edge_id[v] = verts.size(); + verts.push_back(edge_id_to_v[v]); + } + faces.push_back(uniq_edge_id[v]); + } + tri.clear(); + ps.clear(); + } // endif + } // endfor edge enumeration + } // endfor x + } // endfor y + } // endfor z + // Collect returning tensor + const int n_vertices = verts.size(); + const int64_t n_faces = (int64_t)faces.size() / 3; + auto vert_tensor = torch::zeros({n_vertices, 3}, torch::kFloat); + auto id_tensor = torch::zeros({n_vertices}, torch::kInt64); // placeholder + auto face_tensor = torch::zeros({n_faces, 3}, torch::kInt64); + + auto vert_a = vert_tensor.accessor(); + for (int i = 0; i < n_vertices; i++) { + vert_a[i][0] = verts.at(i).x; + vert_a[i][1] = verts.at(i).y; + vert_a[i][2] = verts.at(i).z; + } + + auto face_a = face_tensor.accessor(); + for (int64_t i = 0; i < n_faces; i++) { + face_a[i][0] = faces.at(i * 3 + 0); + face_a[i][1] = faces.at(i * 3 + 1); + face_a[i][2] = faces.at(i * 3 + 2); + } + + return std::make_tuple(vert_tensor, face_tensor, id_tensor); +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes_utils.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..486e0339eda613f2886bb3165a0fde1d0a5d6bf7 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/marching_cubes/marching_cubes_utils.h @@ -0,0 +1,145 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. 
+ * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once +#include +#include +#include +#include +#include "ATen/core/TensorAccessor.h" +#include "marching_cubes/tables.h" + +// EPS: Used to assess whether two float values are close +const float EPS = 1e-5; + +// Data structures for the marching cubes +struct Vertex { + // Constructor used when performing marching cube in each cell + explicit Vertex(float x = 0.0f, float y = 0.0f, float z = 0.0f) + : x(x), y(y), z(z) {} + + // The */+ operator overrides are used for vertex interpolation + Vertex operator*(float s) const { + return Vertex(x * s, y * s, z * s); + } + Vertex operator+(const Vertex& xyz) const { + return Vertex(x + xyz.x, y + xyz.y, z + xyz.z); + } + // The =/!= operator overrides is used for checking degenerate triangles + bool operator==(const Vertex& xyz) const { + return ( + std::abs(x - xyz.x) < EPS && std::abs(y - xyz.y) < EPS && + std::abs(z - xyz.z) < EPS); + } + bool operator!=(const Vertex& xyz) const { + return ( + std::abs(x - xyz.x) >= EPS || std::abs(y - xyz.y) >= EPS || + std::abs(z - xyz.z) >= EPS); + } + // vertex position + float x, y, z; +}; + +struct Cube { + // Edge and vertex convention: + // v4_______e4____________v5 + // /| /| + // / | / | + // e7/ | e5/ | + // /___|______e6_________/ | + // v7| | |v6 |e9 + // | | | | + // | |e8 |e10| + // e11| | | | + // | |_________________|___| + // | / v0 e0 | /v1 + // | / | / + // | /e3 | /e1 + // |/_____________________|/ + // v3 e2 v2 + + Vertex p[8]; + int x, y, z; + int cubeindex = 0; + Cube( + int x, + int y, + int z, + const at::TensorAccessor& vol_a, + const float isolevel) + : x(x), y(y), z(z) { + // vertex position (x, y, z) for v0-v1-v4-v5-v3-v2-v7-v6 + for (int v = 0; v < 8; v++) { + p[v] = Vertex(x + (v & 1), y + (v >> 1 & 1), z + (v >> 2 & 1)); + } + // Calculates cube configuration index given values of the cube vertices + for 
(int i = 0; i < 8; i++) { + const int idx = _INDEX_TABLE[i]; + Vertex v = p[idx]; + if (vol_a[v.z][v.y][v.x] < isolevel) { + cubeindex |= (1 << i); + } + } + } + + // Linearly interpolate the position where an isosurface cuts an edge + // between two vertices, based on their scalar values + // + // Args: + // isolevel: float value used as threshold + // edge: edge (ID) to interpolate + // cube: current cube vertices + // vol_a: 3D scalar field + // + // Returns: + // point: interpolated vertex + Vertex VertexInterp( + float isolevel, + const int edge, + const at::TensorAccessor& vol_a) { + const int v1 = _EDGE_TO_VERTICES[edge][0]; + const int v2 = _EDGE_TO_VERTICES[edge][1]; + Vertex p1 = p[v1]; + Vertex p2 = p[v2]; + float val1 = vol_a[p1.z][p1.y][p1.x]; + float val2 = vol_a[p2.z][p2.y][p2.x]; + + float ratio = 1.0f; + if (std::abs(isolevel - val1) < EPS) { + return p1; + } else if (std::abs(isolevel - val2) < EPS) { + return p2; + } else if (std::abs(val1 - val2) < EPS) { + return p1; + } + // interpolate vertex p based on two vertices on the edge + ratio = (isolevel - val1) / (val2 - val1); + return p1 * (1 - ratio) + p2 * ratio; + } + + // Hash an edge into a global edge_id. The function binds an + // edge with an integer to address floating point precision issue. 
+ // + // Args: + // v1_id: global id of vertex 1 + // v2_id: global id of vertex 2 + // W: width of the 3d grid + // H: height of the 3d grid + // D: depth of the 3d grid + // + // Returns: + // hashing for a pair of vertex ids + // + int64_t HashVpair(const int edge, int W, int H, int D) { + const int v1 = _EDGE_TO_VERTICES[edge][0]; + const int v2 = _EDGE_TO_VERTICES[edge][1]; + const int v1_id = p[v1].x + p[v1].y * W + p[v1].z * W * H; + const int v2_id = p[v2].x + p[v2].y * W + p[v2].z * W * H; + return (int64_t)v1_id * (W + W * H + W * H * D) + (int64_t)v2_id; + } +}; diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/marching_cubes/tables.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/marching_cubes/tables.h new file mode 100644 index 0000000000000000000000000000000000000000..3aff617c53e5ba963c014cabd7beb1c2cd61a053 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/marching_cubes/tables.h @@ -0,0 +1,294 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once +using uint = unsigned int; + +// A table mapping from cubeindex to a list of face configurations. 
+// Each list contains at most 5 faces, where each face is represented with +// 3 consecutive numbers +// Table adapted from http://paulbourke.net/geometry/polygonise/ +// +#define X 255 +const unsigned char _FACE_TABLE[256][16] = { + {X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {0, 8, 3, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {0, 1, 9, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {1, 8, 3, 9, 8, 1, X, X, X, X, X, X, X, X, X, X}, + {1, 2, 10, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {0, 8, 3, 1, 2, 10, X, X, X, X, X, X, X, X, X, X}, + {9, 2, 10, 0, 2, 9, X, X, X, X, X, X, X, X, X, X}, + {2, 8, 3, 2, 10, 8, 10, 9, 8, X, X, X, X, X, X, X}, + {3, 11, 2, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {0, 11, 2, 8, 11, 0, X, X, X, X, X, X, X, X, X, X}, + {1, 9, 0, 2, 3, 11, X, X, X, X, X, X, X, X, X, X}, + {1, 11, 2, 1, 9, 11, 9, 8, 11, X, X, X, X, X, X, X}, + {3, 10, 1, 11, 10, 3, X, X, X, X, X, X, X, X, X, X}, + {0, 10, 1, 0, 8, 10, 8, 11, 10, X, X, X, X, X, X, X}, + {3, 9, 0, 3, 11, 9, 11, 10, 9, X, X, X, X, X, X, X}, + {9, 8, 10, 10, 8, 11, X, X, X, X, X, X, X, X, X, X}, + {4, 7, 8, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {4, 3, 0, 7, 3, 4, X, X, X, X, X, X, X, X, X, X}, + {0, 1, 9, 8, 4, 7, X, X, X, X, X, X, X, X, X, X}, + {4, 1, 9, 4, 7, 1, 7, 3, 1, X, X, X, X, X, X, X}, + {1, 2, 10, 8, 4, 7, X, X, X, X, X, X, X, X, X, X}, + {3, 4, 7, 3, 0, 4, 1, 2, 10, X, X, X, X, X, X, X}, + {9, 2, 10, 9, 0, 2, 8, 4, 7, X, X, X, X, X, X, X}, + {2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, X, X, X, X}, + {8, 4, 7, 3, 11, 2, X, X, X, X, X, X, X, X, X, X}, + {11, 4, 7, 11, 2, 4, 2, 0, 4, X, X, X, X, X, X, X}, + {9, 0, 1, 8, 4, 7, 2, 3, 11, X, X, X, X, X, X, X}, + {4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, X, X, X, X}, + {3, 10, 1, 3, 11, 10, 7, 8, 4, X, X, X, X, X, X, X}, + {1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, X, X, X, X}, + {4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, X, X, X, X}, + {4, 7, 11, 4, 11, 9, 9, 11, 10, X, X, X, X, X, X, X}, + {9, 5, 4, X, X, X, X, X, X, X, X, X, X, X, X, X}, 
+ {9, 5, 4, 0, 8, 3, X, X, X, X, X, X, X, X, X, X}, + {0, 5, 4, 1, 5, 0, X, X, X, X, X, X, X, X, X, X}, + {8, 5, 4, 8, 3, 5, 3, 1, 5, X, X, X, X, X, X, X}, + {1, 2, 10, 9, 5, 4, X, X, X, X, X, X, X, X, X, X}, + {3, 0, 8, 1, 2, 10, 4, 9, 5, X, X, X, X, X, X, X}, + {5, 2, 10, 5, 4, 2, 4, 0, 2, X, X, X, X, X, X, X}, + {2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, X, X, X, X}, + {9, 5, 4, 2, 3, 11, X, X, X, X, X, X, X, X, X, X}, + {0, 11, 2, 0, 8, 11, 4, 9, 5, X, X, X, X, X, X, X}, + {0, 5, 4, 0, 1, 5, 2, 3, 11, X, X, X, X, X, X, X}, + {2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, X, X, X, X}, + {10, 3, 11, 10, 1, 3, 9, 5, 4, X, X, X, X, X, X, X}, + {4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, X, X, X, X}, + {5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, X, X, X, X}, + {5, 4, 8, 5, 8, 10, 10, 8, 11, X, X, X, X, X, X, X}, + {9, 7, 8, 5, 7, 9, X, X, X, X, X, X, X, X, X, X}, + {9, 3, 0, 9, 5, 3, 5, 7, 3, X, X, X, X, X, X, X}, + {0, 7, 8, 0, 1, 7, 1, 5, 7, X, X, X, X, X, X, X}, + {1, 5, 3, 3, 5, 7, X, X, X, X, X, X, X, X, X, X}, + {9, 7, 8, 9, 5, 7, 10, 1, 2, X, X, X, X, X, X, X}, + {10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, X, X, X, X}, + {8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, X, X, X, X}, + {2, 10, 5, 2, 5, 3, 3, 5, 7, X, X, X, X, X, X, X}, + {7, 9, 5, 7, 8, 9, 3, 11, 2, X, X, X, X, X, X, X}, + {9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, X, X, X, X}, + {2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, X, X, X, X}, + {11, 2, 1, 11, 1, 7, 7, 1, 5, X, X, X, X, X, X, X}, + {9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, X, X, X, X}, + {5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, X}, + {11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, X}, + {11, 10, 5, 7, 11, 5, X, X, X, X, X, X, X, X, X, X}, + {10, 6, 5, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {0, 8, 3, 5, 10, 6, X, X, X, X, X, X, X, X, X, X}, + {9, 0, 1, 5, 10, 6, X, X, X, X, X, X, X, X, X, X}, + {1, 8, 3, 1, 9, 8, 5, 10, 6, X, X, X, X, X, X, X}, + {1, 6, 5, 2, 6, 1, X, X, X, X, X, X, X, X, X, X}, + {1, 6, 5, 1, 2, 6, 3, 0, 8, X, X, X, X, X, X, X}, + {9, 6, 5, 9, 0, 6, 0, 2, 
6, X, X, X, X, X, X, X}, + {5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, X, X, X, X}, + {2, 3, 11, 10, 6, 5, X, X, X, X, X, X, X, X, X, X}, + {11, 0, 8, 11, 2, 0, 10, 6, 5, X, X, X, X, X, X, X}, + {0, 1, 9, 2, 3, 11, 5, 10, 6, X, X, X, X, X, X, X}, + {5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, X, X, X, X}, + {6, 3, 11, 6, 5, 3, 5, 1, 3, X, X, X, X, X, X, X}, + {0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, X, X, X, X}, + {3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, X, X, X, X}, + {6, 5, 9, 6, 9, 11, 11, 9, 8, X, X, X, X, X, X, X}, + {5, 10, 6, 4, 7, 8, X, X, X, X, X, X, X, X, X, X}, + {4, 3, 0, 4, 7, 3, 6, 5, 10, X, X, X, X, X, X, X}, + {1, 9, 0, 5, 10, 6, 8, 4, 7, X, X, X, X, X, X, X}, + {10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, X, X, X, X}, + {6, 1, 2, 6, 5, 1, 4, 7, 8, X, X, X, X, X, X, X}, + {1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, X, X, X, X}, + {8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, X, X, X, X}, + {7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, X}, + {3, 11, 2, 7, 8, 4, 10, 6, 5, X, X, X, X, X, X, X}, + {5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, X, X, X, X}, + {0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, X, X, X, X}, + {9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, X}, + {8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, X, X, X, X}, + {5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, X}, + {0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, X}, + {6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, X, X, X, X}, + {10, 4, 9, 6, 4, 10, X, X, X, X, X, X, X, X, X, X}, + {4, 10, 6, 4, 9, 10, 0, 8, 3, X, X, X, X, X, X, X}, + {10, 0, 1, 10, 6, 0, 6, 4, 0, X, X, X, X, X, X, X}, + {8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, X, X, X, X}, + {1, 4, 9, 1, 2, 4, 2, 6, 4, X, X, X, X, X, X, X}, + {3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, X, X, X, X}, + {0, 2, 4, 4, 2, 6, X, X, X, X, X, X, X, X, X, X}, + {8, 3, 2, 8, 2, 4, 4, 2, 6, X, X, X, X, X, X, X}, + {10, 4, 9, 10, 6, 4, 11, 2, 3, X, X, X, X, X, X, X}, + {0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, X, X, X, X}, + {3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, X, X, X, X}, + {6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 
11, 1, X}, + {9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, X, X, X, X}, + {8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, X}, + {3, 11, 6, 3, 6, 0, 0, 6, 4, X, X, X, X, X, X, X}, + {6, 4, 8, 11, 6, 8, X, X, X, X, X, X, X, X, X, X}, + {7, 10, 6, 7, 8, 10, 8, 9, 10, X, X, X, X, X, X, X}, + {0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, X, X, X, X}, + {10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, X, X, X, X}, + {10, 6, 7, 10, 7, 1, 1, 7, 3, X, X, X, X, X, X, X}, + {1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, X, X, X, X}, + {2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, X}, + {7, 8, 0, 7, 0, 6, 6, 0, 2, X, X, X, X, X, X, X}, + {7, 3, 2, 6, 7, 2, X, X, X, X, X, X, X, X, X, X}, + {2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, X, X, X, X}, + {2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, X}, + {1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, X}, + {11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, X, X, X, X}, + {8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, X}, + {0, 9, 1, 11, 6, 7, X, X, X, X, X, X, X, X, X, X}, + {7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, X, X, X, X}, + {7, 11, 6, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {7, 6, 11, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {3, 0, 8, 11, 7, 6, X, X, X, X, X, X, X, X, X, X}, + {0, 1, 9, 11, 7, 6, X, X, X, X, X, X, X, X, X, X}, + {8, 1, 9, 8, 3, 1, 11, 7, 6, X, X, X, X, X, X, X}, + {10, 1, 2, 6, 11, 7, X, X, X, X, X, X, X, X, X, X}, + {1, 2, 10, 3, 0, 8, 6, 11, 7, X, X, X, X, X, X, X}, + {2, 9, 0, 2, 10, 9, 6, 11, 7, X, X, X, X, X, X, X}, + {6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, X, X, X, X}, + {7, 2, 3, 6, 2, 7, X, X, X, X, X, X, X, X, X, X}, + {7, 0, 8, 7, 6, 0, 6, 2, 0, X, X, X, X, X, X, X}, + {2, 7, 6, 2, 3, 7, 0, 1, 9, X, X, X, X, X, X, X}, + {1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, X, X, X, X}, + {10, 7, 6, 10, 1, 7, 1, 3, 7, X, X, X, X, X, X, X}, + {10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, X, X, X, X}, + {0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, X, X, X, X}, + {7, 6, 10, 7, 10, 8, 8, 10, 9, X, X, X, X, X, X, X}, + {6, 8, 4, 11, 8, 6, X, X, X, X, X, X, X, X, X, X}, + {3, 
6, 11, 3, 0, 6, 0, 4, 6, X, X, X, X, X, X, X}, + {8, 6, 11, 8, 4, 6, 9, 0, 1, X, X, X, X, X, X, X}, + {9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, X, X, X, X}, + {6, 8, 4, 6, 11, 8, 2, 10, 1, X, X, X, X, X, X, X}, + {1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, X, X, X, X}, + {4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, X, X, X, X}, + {10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, X}, + {8, 2, 3, 8, 4, 2, 4, 6, 2, X, X, X, X, X, X, X}, + {0, 4, 2, 4, 6, 2, X, X, X, X, X, X, X, X, X, X}, + {1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, X, X, X, X}, + {1, 9, 4, 1, 4, 2, 2, 4, 6, X, X, X, X, X, X, X}, + {8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, X, X, X, X}, + {10, 1, 0, 10, 0, 6, 6, 0, 4, X, X, X, X, X, X, X}, + {4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, X}, + {10, 9, 4, 6, 10, 4, X, X, X, X, X, X, X, X, X, X}, + {4, 9, 5, 7, 6, 11, X, X, X, X, X, X, X, X, X, X}, + {0, 8, 3, 4, 9, 5, 11, 7, 6, X, X, X, X, X, X, X}, + {5, 0, 1, 5, 4, 0, 7, 6, 11, X, X, X, X, X, X, X}, + {11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, X, X, X, X}, + {9, 5, 4, 10, 1, 2, 7, 6, 11, X, X, X, X, X, X, X}, + {6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, X, X, X, X}, + {7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, X, X, X, X}, + {3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, X}, + {7, 2, 3, 7, 6, 2, 5, 4, 9, X, X, X, X, X, X, X}, + {9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, X, X, X, X}, + {3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, X, X, X, X}, + {6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, X}, + {9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, X, X, X, X}, + {1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, X}, + {4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, X}, + {7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, X, X, X, X}, + {6, 9, 5, 6, 11, 9, 11, 8, 9, X, X, X, X, X, X, X}, + {3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, X, X, X, X}, + {0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, X, X, X, X}, + {6, 11, 3, 6, 3, 5, 5, 3, 1, X, X, X, X, X, X, X}, + {1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, X, X, X, X}, + {0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, X}, + {11, 8, 5, 11, 5, 6, 
8, 0, 5, 10, 5, 2, 0, 2, 5, X}, + {6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, X, X, X, X}, + {5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, X, X, X, X}, + {9, 5, 6, 9, 6, 0, 0, 6, 2, X, X, X, X, X, X, X}, + {1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, X}, + {1, 5, 6, 2, 1, 6, X, X, X, X, X, X, X, X, X, X}, + {1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, X}, + {10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, X, X, X, X}, + {0, 3, 8, 5, 6, 10, X, X, X, X, X, X, X, X, X, X}, + {10, 5, 6, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {11, 5, 10, 7, 5, 11, X, X, X, X, X, X, X, X, X, X}, + {11, 5, 10, 11, 7, 5, 8, 3, 0, X, X, X, X, X, X, X}, + {5, 11, 7, 5, 10, 11, 1, 9, 0, X, X, X, X, X, X, X}, + {10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, X, X, X, X}, + {11, 1, 2, 11, 7, 1, 7, 5, 1, X, X, X, X, X, X, X}, + {0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, X, X, X, X}, + {9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, X, X, X, X}, + {7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, X}, + {2, 5, 10, 2, 3, 5, 3, 7, 5, X, X, X, X, X, X, X}, + {8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, X, X, X, X}, + {9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, X, X, X, X}, + {9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, X}, + {1, 3, 5, 3, 7, 5, X, X, X, X, X, X, X, X, X, X}, + {0, 8, 7, 0, 7, 1, 1, 7, 5, X, X, X, X, X, X, X}, + {9, 0, 3, 9, 3, 5, 5, 3, 7, X, X, X, X, X, X, X}, + {9, 8, 7, 5, 9, 7, X, X, X, X, X, X, X, X, X, X}, + {5, 8, 4, 5, 10, 8, 10, 11, 8, X, X, X, X, X, X, X}, + {5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, X, X, X, X}, + {0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, X, X, X, X}, + {10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, X}, + {2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, X, X, X, X}, + {0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, X}, + {0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, X}, + {9, 4, 5, 2, 11, 3, X, X, X, X, X, X, X, X, X, X}, + {2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, X, X, X, X}, + {5, 10, 2, 5, 2, 4, 4, 2, 0, X, X, X, X, X, X, X}, + {3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, X}, + {5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 
2, X, X, X, X}, + {8, 4, 5, 8, 5, 3, 3, 5, 1, X, X, X, X, X, X, X}, + {0, 4, 5, 1, 0, 5, X, X, X, X, X, X, X, X, X, X}, + {8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, X, X, X, X}, + {9, 4, 5, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {4, 11, 7, 4, 9, 11, 9, 10, 11, X, X, X, X, X, X, X}, + {0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, X, X, X, X}, + {1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, X, X, X, X}, + {3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, X}, + {4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, X, X, X, X}, + {9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, X}, + {11, 7, 4, 11, 4, 2, 2, 4, 0, X, X, X, X, X, X, X}, + {11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, X, X, X, X}, + {2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, X, X, X, X}, + {9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, X}, + {3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, X}, + {1, 10, 2, 8, 7, 4, X, X, X, X, X, X, X, X, X, X}, + {4, 9, 1, 4, 1, 7, 7, 1, 3, X, X, X, X, X, X, X}, + {4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, X, X, X, X}, + {4, 0, 3, 7, 4, 3, X, X, X, X, X, X, X, X, X, X}, + {4, 8, 7, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {9, 10, 8, 10, 11, 8, X, X, X, X, X, X, X, X, X, X}, + {3, 0, 9, 3, 9, 11, 11, 9, 10, X, X, X, X, X, X, X}, + {0, 1, 10, 0, 10, 8, 8, 10, 11, X, X, X, X, X, X, X}, + {3, 1, 10, 11, 3, 10, X, X, X, X, X, X, X, X, X, X}, + {1, 2, 11, 1, 11, 9, 9, 11, 8, X, X, X, X, X, X, X}, + {3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, X, X, X, X}, + {0, 2, 11, 8, 0, 11, X, X, X, X, X, X, X, X, X, X}, + {3, 2, 11, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {2, 3, 8, 2, 8, 10, 10, 8, 9, X, X, X, X, X, X, X}, + {9, 10, 2, 0, 9, 2, X, X, X, X, X, X, X, X, X, X}, + {2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, X, X, X, X}, + {1, 10, 2, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {1, 3, 8, 9, 1, 8, X, X, X, X, X, X, X, X, X, X}, + {0, 9, 1, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {0, 3, 8, X, X, X, X, X, X, X, X, X, X, X, X, X}, + {X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X}}; +#undef X + +// Table mapping each edge to the 
+// corresponding cube vertices offsets
+const uint _EDGE_TO_VERTICES[12][2] = {
+    {0, 1},
+    {1, 5},
+    {4, 5},
+    {0, 4},
+    {2, 3},
+    {3, 7},
+    {6, 7},
+    {2, 6},
+    {0, 2},
+    {1, 3},
+    {5, 7},
+    {4, 6},
+};
+
+// Table mapping from 0-7 to v0-v7 in cube.vertices
+const int _INDEX_TABLE[8] = {0, 1, 5, 4, 2, 3, 7, 6};
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor.cu
new file mode 100644
index 0000000000000000000000000000000000000000..94f22c18431bb8bc4557584acdd5894155a17e37
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor.cu
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+// NOTE(review): the original include targets were stripped during extraction
+// (all angle-bracket content lost); restored from upstream PyTorch3D — confirm.
+#include <ATen/ATen.h>
+#include <ATen/cuda/CUDAContext.h>
+#include <c10/cuda/CUDAGuard.h>
+
+// Kernel for inputs_packed of shape (F, D), where D > 1
+template <typename scalar_t>
+__global__ void PackedToPaddedKernel(
+    const scalar_t* __restrict__ inputs_packed,
+    const int64_t* __restrict__ first_idxs,
+    scalar_t* __restrict__ inputs_padded,
+    const size_t batch_size,
+    const size_t max_size,
+    const size_t num_inputs,
+    const size_t D) {
+  // Batch elements split evenly across blocks (num blocks = batch_size) and
+  // values for each element split across threads in the block. Each thread adds
+  // the values of its respective input elements to the global inputs_padded
+  // tensor.
+  const size_t tid = threadIdx.x;
+  const size_t batch_idx = blockIdx.x;
+
+  const int64_t start = first_idxs[batch_idx];
+  const int64_t end =
+      batch_idx + 1 < batch_size ? first_idxs[batch_idx + 1] : num_inputs;
+  const int num = end - start;
+  for (size_t f = tid; f < num; f += blockDim.x) {
+    for (size_t j = 0; j < D; ++j) {
+      inputs_padded[batch_idx * max_size * D + f * D + j] =
+          inputs_packed[(start + f) * D + j];
+    }
+  }
+}
+
+// Kernel for inputs of shape (F, 1)
+template <typename scalar_t>
+__global__ void PackedToPaddedKernelD1(
+    const scalar_t* __restrict__ inputs_packed,
+    const int64_t* __restrict__ first_idxs,
+    scalar_t* __restrict__ inputs_padded,
+    const size_t batch_size,
+    const size_t max_size,
+    const size_t num_inputs) {
+  // Batch elements split evenly across blocks (num blocks = batch_size) and
+  // values for each element split across threads in the block. Each thread adds
+  // the values of its respective input elements to the global inputs_padded
+  // tensor.
+  const size_t tid = threadIdx.x;
+  const size_t batch_idx = blockIdx.x;
+
+  const int64_t start = first_idxs[batch_idx];
+  const int64_t end =
+      batch_idx + 1 < batch_size ? first_idxs[batch_idx + 1] : num_inputs;
+  const int num = end - start;
+  for (size_t f = tid; f < num; f += blockDim.x) {
+    inputs_padded[batch_idx * max_size + f] = inputs_packed[start + f];
+  }
+}
+
+// Kernel for inputs_padded of shape (B, F, D), where D > 1
+template <typename scalar_t>
+__global__ void PaddedToPackedKernel(
+    const scalar_t* __restrict__ inputs_padded,
+    const int64_t* __restrict__ first_idxs,
+    scalar_t* __restrict__ inputs_packed,
+    const size_t batch_size,
+    const size_t max_size,
+    const size_t num_inputs,
+    const size_t D) {
+  // Batch elements split evenly across blocks (num blocks = batch_size) and
+  // values for each element split across threads in the block. Each thread adds
+  // the values of its respective input elements to the global inputs_packed
+  // tensor.
+  const size_t tid = threadIdx.x;
+  const size_t batch_idx = blockIdx.x;
+
+  const int64_t start = first_idxs[batch_idx];
+  const int64_t end =
+      batch_idx + 1 < batch_size ? first_idxs[batch_idx + 1] : num_inputs;
+  const int num = end - start;
+  for (size_t f = tid; f < num; f += blockDim.x) {
+    for (size_t j = 0; j < D; ++j) {
+      inputs_packed[(start + f) * D + j] =
+          inputs_padded[batch_idx * max_size * D + f * D + j];
+    }
+  }
+}
+
+// Kernel for inputs_padded of shape (B, F, 1)
+template <typename scalar_t>
+__global__ void PaddedToPackedKernelD1(
+    const scalar_t* __restrict__ inputs_padded,
+    const int64_t* __restrict__ first_idxs,
+    scalar_t* __restrict__ inputs_packed,
+    const size_t batch_size,
+    const size_t max_size,
+    const size_t num_inputs) {
+  // Batch elements split evenly across blocks (num blocks = batch_size) and
+  // values for each element split across threads in the block. Each thread adds
+  // the values of its respective input elements to the global inputs_packed
+  // tensor.
+  const size_t tid = threadIdx.x;
+  const size_t batch_idx = blockIdx.x;
+
+  const int64_t start = first_idxs[batch_idx];
+  const int64_t end =
+      batch_idx + 1 < batch_size ? first_idxs[batch_idx + 1] : num_inputs;
+  const int num = end - start;
+  for (size_t f = tid; f < num; f += blockDim.x) {
+    inputs_packed[start + f] = inputs_padded[batch_idx * max_size + f];
+  }
+}
+
+at::Tensor PackedToPaddedCuda(
+    const at::Tensor inputs_packed,
+    const at::Tensor first_idxs,
+    const int64_t max_size) {
+  // Check inputs are on the same device
+  at::TensorArg inputs_packed_t{inputs_packed, "inputs_packed", 1},
+      first_idxs_t{first_idxs, "first_idxs", 2};
+  at::CheckedFrom c = "PackedToPaddedCuda";
+  at::checkAllSameGPU(c, {inputs_packed_t, first_idxs_t});
+
+  // Set the device for the kernel launch based on the device of the input
+  at::cuda::CUDAGuard device_guard(inputs_packed.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  const int64_t num_inputs = inputs_packed.size(0);
+  const int64_t batch_size = first_idxs.size(0);
+
+  TORCH_CHECK(
+      inputs_packed.dim() == 2, "inputs_packed must be a 2-dimensional tensor");
+  const int64_t D = inputs_packed.size(1);
+  at::Tensor inputs_padded =
+      at::zeros({batch_size, max_size, D}, inputs_packed.options());
+
+  if (inputs_padded.numel() == 0) {
+    AT_CUDA_CHECK(cudaGetLastError());
+    return inputs_padded;
+  }
+
+  const int threads = 512;
+  const int blocks = batch_size;
+  if (D == 1) {
+    AT_DISPATCH_FLOATING_TYPES(
+        inputs_packed.scalar_type(), "packed_to_padded_d1_kernel", ([&] {
+          PackedToPaddedKernelD1<scalar_t><<<blocks, threads, 0, stream>>>(
+              inputs_packed.contiguous().data_ptr<scalar_t>(),
+              first_idxs.contiguous().data_ptr<int64_t>(),
+              inputs_padded.data_ptr<scalar_t>(),
+              batch_size,
+              max_size,
+              num_inputs);
+        }));
+  } else {
+    AT_DISPATCH_FLOATING_TYPES(
+        inputs_packed.scalar_type(), "packed_to_padded_kernel", ([&] {
+          PackedToPaddedKernel<scalar_t><<<blocks, threads, 0, stream>>>(
+              inputs_packed.contiguous().data_ptr<scalar_t>(),
+              first_idxs.contiguous().data_ptr<int64_t>(),
+              inputs_padded.data_ptr<scalar_t>(),
+              batch_size,
+              max_size,
+              num_inputs,
+              D);
+        }));
+  }
+
+  AT_CUDA_CHECK(cudaGetLastError());
+  return inputs_padded;
+}
+
+at::Tensor PaddedToPackedCuda(
+    const at::Tensor inputs_padded,
+    const at::Tensor first_idxs,
+    const int64_t num_inputs) {
+  // Check inputs are on the same device
+  at::TensorArg inputs_padded_t{inputs_padded, "inputs_padded", 1},
+      first_idxs_t{first_idxs, "first_idxs", 2};
+  at::CheckedFrom c = "PaddedToPackedCuda";
+  at::checkAllSameGPU(c, {inputs_padded_t, first_idxs_t});
+
+  // Set the device for the kernel launch based on the device of the input
+  at::cuda::CUDAGuard device_guard(inputs_padded.device());
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  const int64_t batch_size = inputs_padded.size(0);
+  const int64_t max_size = inputs_padded.size(1);
+
+  TORCH_CHECK(batch_size == first_idxs.size(0), "sizes mismatch");
+  TORCH_CHECK(
+      inputs_padded.dim() == 3,
+      "inputs_padded must be a 3-dimensional tensor");
+  const int64_t D = inputs_padded.size(2);
+
+  at::Tensor inputs_packed =
+      at::zeros({num_inputs, D}, inputs_padded.options());
+
+  if (inputs_packed.numel() == 0) {
+    AT_CUDA_CHECK(cudaGetLastError());
+    return inputs_packed;
+  }
+
+  const int threads = 512;
+  const int blocks = batch_size;
+
+  if (D == 1) {
+    AT_DISPATCH_FLOATING_TYPES(
+        inputs_padded.scalar_type(), "padded_to_packed_d1_kernel", ([&] {
+          PaddedToPackedKernelD1<scalar_t><<<blocks, threads, 0, stream>>>(
+              inputs_padded.contiguous().data_ptr<scalar_t>(),
+              first_idxs.contiguous().data_ptr<int64_t>(),
+              inputs_packed.data_ptr<scalar_t>(),
+              batch_size,
+              max_size,
+              num_inputs);
+        }));
+  } else {
+    AT_DISPATCH_FLOATING_TYPES(
+        inputs_padded.scalar_type(), "padded_to_packed_kernel", ([&] {
+          PaddedToPackedKernel<scalar_t><<<blocks, threads, 0, stream>>>(
+              inputs_padded.contiguous().data_ptr<scalar_t>(),
+              first_idxs.contiguous().data_ptr<int64_t>(),
+              inputs_packed.data_ptr<scalar_t>(),
+              batch_size,
+              max_size,
+              num_inputs,
+              D);
+        }));
+  }
+
+  AT_CUDA_CHECK(cudaGetLastError());
+  return inputs_packed;
+}
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor.h
new file mode 100644
index 0000000000000000000000000000000000000000..e6c688d75d306012dc2899e460e0977c72ae297a
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+#pragma once
+// NOTE(review): include target stripped by extraction; restored — confirm.
+#include <torch/extension.h>
+#include "utils/pytorch3d_cutils.h"
+
+// PackedToPadded
+// Converts a packed tensor into a padded tensor, restoring the batch dimension.
+// Refer to pytorch3d/structures/meshes.py for details on packed/padded tensors.
+//
+// Inputs:
+//   inputs_packed: FloatTensor of shape (F, D), representing the packed batch
+//                  tensor, e.g. areas for faces in a batch of meshes.
+//   first_idxs: LongTensor of shape (N,) where N is the number of
+//               elements in the batch and `first_idxs[i] = f`
+//               means that the inputs for batch element i begin at
+//               `inputs[f]`.
+//   max_size: Max length of an element in the batch.
+// Returns:
+//   inputs_padded: FloatTensor of shape (N, max_size, D) where max_size is max
+//                  of `sizes`. The values for batch element i which start at
+//                  `inputs_packed[first_idxs[i]]` will be copied to
+//                  `inputs_padded[i, :]`, with zeros padding out the extra
+//                  inputs.
+//
+
+// PaddedToPacked
+// Converts a padded tensor into a packed tensor.
+// Refer to pytorch3d/structures/meshes.py for details on packed/padded tensors.
+//
+// Inputs:
+//   inputs_padded: FloatTensor of shape (N, max_size, D), representing the
+//                  padded tensor, e.g. areas for faces in a batch of meshes.
+//   first_idxs: LongTensor of shape (N,) where N is the number of
+//               elements in the batch and `first_idxs[i] = f`
+//               means that the inputs for batch element i begin at
+//               `inputs_packed[f]`.
+//   num_inputs: Number of packed entries (= F)
+// Returns:
+//   inputs_packed: FloatTensor of shape (F, D), where
+//                  `inputs_packed[first_idx[i]:] = inputs_padded[i, :]`.
+//
+//
+
+// Cpu implementation.
+at::Tensor PackedToPaddedCpu(
+    const at::Tensor inputs_packed,
+    const at::Tensor first_idxs,
+    const int64_t max_size);
+
+// Cpu implementation.
+at::Tensor PaddedToPackedCpu(
+    const at::Tensor inputs_padded,
+    const at::Tensor first_idxs,
+    const int64_t num_inputs);
+
+#ifdef WITH_CUDA
+// Cuda implementation.
+at::Tensor PackedToPaddedCuda(
+    const at::Tensor inputs_packed,
+    const at::Tensor first_idxs,
+    const int64_t max_size);
+
+// Cuda implementation.
+at::Tensor PaddedToPackedCuda(
+    const at::Tensor inputs_padded,
+    const at::Tensor first_idxs,
+    const int64_t num_inputs);
+#endif
+
+// Implementation which is exposed.
+// Dispatches to the CPU or CUDA implementation based on the input device.
+at::Tensor PackedToPadded(
+    const at::Tensor inputs_packed,
+    const at::Tensor first_idxs,
+    const int64_t max_size) {
+  if (inputs_packed.is_cuda()) {
+#ifdef WITH_CUDA
+    CHECK_CUDA(inputs_packed);
+    CHECK_CUDA(first_idxs);
+    return PackedToPaddedCuda(inputs_packed, first_idxs, max_size);
+#else
+    AT_ERROR("Not compiled with GPU support.");
+#endif
+  }
+  CHECK_CPU(inputs_packed);
+  CHECK_CPU(first_idxs);
+  return PackedToPaddedCpu(inputs_packed, first_idxs, max_size);
+}
+
+// Implementation which is exposed.
+// Dispatches to the CPU or CUDA implementation based on the input device.
+at::Tensor PaddedToPacked(
+    const at::Tensor inputs_padded,
+    const at::Tensor first_idxs,
+    const int64_t num_inputs) {
+  if (inputs_padded.is_cuda()) {
+#ifdef WITH_CUDA
+    CHECK_CUDA(inputs_padded);
+    CHECK_CUDA(first_idxs);
+    return PaddedToPackedCuda(inputs_padded, first_idxs, num_inputs);
+#else
+    AT_ERROR("Not compiled with GPU support.");
+#endif
+  }
+  CHECK_CPU(inputs_padded);
+  CHECK_CPU(first_idxs);
+  return PaddedToPackedCpu(inputs_padded, first_idxs, num_inputs);
+}
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor_cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor_cpu.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c9176a1afd5e6736f938dc938dfc1d62c1052ddc
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/packed_to_padded_tensor/packed_to_padded_tensor_cpu.cpp
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+// NOTE(review): include target stripped by extraction; restored — confirm.
+#include <torch/extension.h>
+
+at::Tensor PackedToPaddedCpu(
+    const at::Tensor inputs_packed,
+    const at::Tensor first_idxs,
+    const int64_t max_size) {
+  const int64_t num_inputs = inputs_packed.size(0);
+  const int64_t batch_size = first_idxs.size(0);
+
+  AT_ASSERTM(
+      inputs_packed.dim() == 2, "inputs_packed must be a 2-dimensional tensor");
+  const int64_t D = inputs_packed.size(1);
+
+  torch::Tensor inputs_padded =
+      torch::zeros({batch_size, max_size, D}, inputs_packed.options());
+
+  // NOTE(review): accessor template args were stripped by extraction;
+  // restored as float/int64_t per upstream — confirm.
+  auto inputs_packed_a = inputs_packed.accessor<float, 2>();
+  auto first_idxs_a = first_idxs.accessor<int64_t, 1>();
+  auto inputs_padded_a = inputs_padded.accessor<float, 3>();
+
+  for (int b = 0; b < batch_size; ++b) {
+    const int64_t start = first_idxs_a[b];
+    // The last batch element extends to the end of the packed tensor.
+    const int64_t end = b + 1 < batch_size ? first_idxs_a[b + 1] : num_inputs;
+    const int64_t num = end - start;
+    for (int i = 0; i < num; ++i) {
+      for (int j = 0; j < D; ++j) {
+        inputs_padded_a[b][i][j] = inputs_packed_a[start + i][j];
+      }
+    }
+  }
+  return inputs_padded;
+}
+
+at::Tensor PaddedToPackedCpu(
+    const at::Tensor inputs_padded,
+    const at::Tensor first_idxs,
+    const int64_t num_inputs) {
+  const int64_t batch_size = inputs_padded.size(0);
+
+  AT_ASSERTM(
+      inputs_padded.dim() == 3, "inputs_padded must be a 3-dimensional tensor");
+  const int64_t D = inputs_padded.size(2);
+
+  torch::Tensor inputs_packed =
+      torch::zeros({num_inputs, D}, inputs_padded.options());
+
+  auto inputs_padded_a = inputs_padded.accessor<float, 3>();
+  auto first_idxs_a = first_idxs.accessor<int64_t, 1>();
+  auto inputs_packed_a = inputs_packed.accessor<float, 2>();
+
+  for (int b = 0; b < batch_size; ++b) {
+    const int64_t start = first_idxs_a[b];
+    const int64_t end = b + 1 < batch_size ? first_idxs_a[b + 1] : num_inputs;
+    const int64_t num = end - start;
+    for (int i = 0; i < num; ++i) {
+      for (int j = 0; j < D; ++j) {
+        inputs_packed_a[start + i][j] = inputs_padded_a[b][i][j];
+      }
+    }
+  }
+  return inputs_packed;
+}
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/constants.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/constants.h
new file mode 100644
index 0000000000000000000000000000000000000000..a2eee6217158d3a2e7a3e92a52e5afa4107494ab
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/constants.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+#ifndef PULSAR_NATIVE_CONSTANTS_H_
+#define PULSAR_NATIVE_CONSTANTS_H_
+
+#define EPS 1E-6
+#define FEPS 1E-6f
+#define MAX_FLOAT 3.4E38f
+#define MAX_INT 2147483647
+#define MAX_UINT 4294967295u
+#define MAX_USHORT 65535u
+
+#endif
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/global.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/global.h
new file mode 100644
index 0000000000000000000000000000000000000000..88656a005a3e5fb700d4b58be84a2818166cb4cd
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/global.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+#ifndef PULSAR_GLOBAL_H
+#define PULSAR_GLOBAL_H
+
+#include "./constants.h"
+// NOTE(review): include targets below were stripped by extraction; restored
+// from upstream PyTorch3D pulsar/global.h — confirm.
+#ifndef WIN32
+#include <csignal>
+#endif
+
+#if defined(_WIN64) || defined(_WIN32)
+using uint = unsigned int;
+using ushort = unsigned short;
+#endif
+
+#include "./logging.h" // <- include before torch/extension.h
+
+#define MAX_GRAD_SPHERES 128
+
+#ifdef __CUDACC__
+#define INLINE __forceinline__
+#define HOST __host__
+#define DEVICE __device__
+#define GLOBAL __global__
+#define RESTRICT __restrict__
+#define DEBUGBREAK()
+#ifdef __NVCC_DIAG_PRAGMA_SUPPORT__
+#pragma nv_diag_suppress 1866
+#pragma nv_diag_suppress 2941
+#pragma nv_diag_suppress 2951
+#pragma nv_diag_suppress 2967
+#else
+#if !defined(USE_ROCM)
+#pragma diag_suppress = attribute_not_allowed
+#pragma diag_suppress = 1866
+#pragma diag_suppress = 2941
+#pragma diag_suppress = 2951
+#pragma diag_suppress = 2967
+#endif //! USE_ROCM
+#endif
+#else // __CUDACC__
+#define INLINE inline
+#define HOST
+#define DEVICE
+#define GLOBAL
+#define RESTRICT
+#define DEBUGBREAK() std::raise(SIGINT)
+// Don't care about pytorch warnings; they shouldn't clutter our warnings.
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Weverything"
+#include <torch/extension.h>
+#pragma clang diagnostic pop
+#ifdef WITH_CUDA
+#include <cuda_runtime_api.h>
+#if !defined(USE_ROCM)
+#include <vector_types.h>
+#endif //! USE_ROCM
+#else
+// CPU-only build: provide minimal stand-ins for the CUDA vector types.
+#ifndef cudaStream_t
+typedef void* cudaStream_t;
+#endif
+struct int2 {
+  int x, y;
+};
+struct ushort2 {
+  unsigned short x, y;
+};
+struct float2 {
+  float x, y;
+};
+struct float3 {
+  float x, y, z;
+};
+inline float3 make_float3(const float& x, const float& y, const float& z) {
+  float3 res;
+  res.x = x;
+  res.y = y;
+  res.z = z;
+  return res;
+}
+#endif
+namespace py = pybind11;
+
+inline bool operator==(const float3& a, const float3& b) {
+  return a.x == b.x && a.y == b.y && a.z == b.z;
+}
+#endif // __CUDACC__
+#define IHD INLINE HOST DEVICE
+
+// An assertion command that can be used on host and device.
+#ifdef PULSAR_ASSERTIONS
+#ifdef __CUDACC__
+#define PASSERT(VAL)                                      \
+  if (!(VAL)) {                                           \
+    printf(                                               \
+        "Pulsar assertion failed in %s, line %d: %s.\n",  \
+        __FILE__,                                         \
+        __LINE__,                                         \
+        #VAL);                                            \
+  }
+#else
+#define PASSERT(VAL)                                      \
+  if (!(VAL)) {                                           \
+    printf(                                               \
+        "Pulsar assertion failed in %s, line %d: %s.\n",  \
+        __FILE__,                                         \
+        __LINE__,                                         \
+        #VAL);                                            \
+    std::raise(SIGINT);                                   \
+  }
+#endif
+#else
+#define PASSERT(VAL)
+#endif
+
+#endif
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/README.md b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..60c5d07cba3b8d403693e9aa3db2a0b74f66c472
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/README.md
@@ -0,0 +1,5 @@
+# CUDA device compilation units
+
+This folder contains `.cu` files to create compilation units
+for device-specific functions. See `../include/README.md` for
+more information.
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/commands.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/commands.h
new file mode 100644
index 0000000000000000000000000000000000000000..73dc8263a3f94a67232f9742c630bd4f2b3077af
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/commands.h
@@ -0,0 +1,513 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+#ifndef PULSAR_NATIVE_CUDA_COMMANDS_H_
+#define PULSAR_NATIVE_CUDA_COMMANDS_H_
+
+// Definitions for GPU commands.
+// NOTE(review): include targets stripped by extraction; restored as the
+// cooperative-groups and CUDA runtime headers the code below uses — confirm.
+#include <cooperative_groups.h>
+#include <cuda_runtime.h>
+namespace cg = cooperative_groups;
+
+#ifdef __DRIVER_TYPES_H__
+#ifndef DEVICE_RESET
+#define DEVICE_RESET cudaDeviceReset();
+#endif
+#else
+#ifndef DEVICE_RESET
+#define DEVICE_RESET
+#endif
+#endif
+
+#define HANDLECUDA(CMD) CMD
+// handleCudaError((CMD), __FILE__, __LINE__)
+inline void
+handleCudaError(const cudaError_t err, const char* file, const int line) {
+  if (err != cudaSuccess) {
+#ifndef __NVCC__
+    fprintf(
+        stderr,
+        "%s(%i) : getLastCudaError() CUDA error :"
+        " (%d) %s.\n",
+        file,
+        line,
+        static_cast<int>(err),
+        cudaGetErrorString(err));
+    DEVICE_RESET
+    exit(1);
+#endif
+  }
+}
+inline void
+getLastCudaError(const char* errorMessage, const char* file, const int line) {
+  cudaError_t err = cudaGetLastError();
+  if (cudaSuccess != err) {
+    fprintf(stderr, "Error: %s.", errorMessage);
+    handleCudaError(err, file, line);
+  }
+}
+
+#define ALIGN(VAL) __align__(VAL)
+// NOTE(review): original expanded to undefined HANDLECUDE; fixed to HANDLECUDA.
+#define SYNC() HANDLECUDA(cudaDeviceSynchronize())
+#define THREADFENCE_B() __threadfence_block()
+#define SHFL_SYNC(a, b, c) __shfl_sync((a), (b), (c))
+#define SHARED __shared__
+#define ACTIVEMASK() __activemask()
+#define BALLOT(mask, val) __ballot_sync((mask), val)
+
+/* TODO (ROCM-6.2): None of the WARP_* are used anywhere and ROCM-6.2 natively
+ * supports __shfl_*. Disabling until the move to ROCM-6.2.
+ */
+#if !defined(USE_ROCM)
+/**
+ * Find the cumulative sum within a warp up to the current
+ * thread lane, with each mask thread contributing base.
+ */
+template <typename T>
+DEVICE T
+WARP_CUMSUM(const cg::coalesced_group& group, const uint& mask, const T& base) {
+  T ret = base;
+  T shfl_val;
+  shfl_val = __shfl_down_sync(mask, ret, 1u); // Deactivate the rightmost lane.
+  ret += (group.thread_rank() < 31) * shfl_val;
+  shfl_val = __shfl_down_sync(mask, ret, 2u);
+  ret += (group.thread_rank() < 30) * shfl_val;
+  shfl_val = __shfl_down_sync(mask, ret, 4u); // ...4
+  ret += (group.thread_rank() < 28) * shfl_val;
+  shfl_val = __shfl_down_sync(mask, ret, 8u); // ...8
+  ret += (group.thread_rank() < 24) * shfl_val;
+  shfl_val = __shfl_down_sync(mask, ret, 16u); // ...16
+  ret += (group.thread_rank() < 16) * shfl_val;
+  return ret;
+}
+
+template <typename T>
+DEVICE T
+WARP_MAX(const cg::coalesced_group& group, const uint& mask, const T& base) {
+  T ret = base;
+  ret = max(ret, __shfl_down_sync(mask, ret, 16u));
+  ret = max(ret, __shfl_down_sync(mask, ret, 8u));
+  ret = max(ret, __shfl_down_sync(mask, ret, 4u));
+  ret = max(ret, __shfl_down_sync(mask, ret, 2u));
+  ret = max(ret, __shfl_down_sync(mask, ret, 1u));
+  return ret;
+}
+
+template <typename T>
+DEVICE T
+WARP_SUM(const cg::coalesced_group& group, const uint& mask, const T& base) {
+  T ret = base;
+  ret = ret + __shfl_down_sync(mask, ret, 16u);
+  ret = ret + __shfl_down_sync(mask, ret, 8u);
+  ret = ret + __shfl_down_sync(mask, ret, 4u);
+  ret = ret + __shfl_down_sync(mask, ret, 2u);
+  ret = ret + __shfl_down_sync(mask, ret, 1u);
+  return ret;
+}
+
+INLINE DEVICE float3 WARP_SUM_FLOAT3(
+    const cg::coalesced_group& group,
+    const uint& mask,
+    const float3& base) {
+  float3 ret = base;
+  ret.x = WARP_SUM(group, mask, base.x);
+  ret.y = WARP_SUM(group, mask, base.y);
+  ret.z = WARP_SUM(group, mask, base.z);
+  return ret;
+}
+#endif //! USE_ROCM
+
+// Floating point.
+// #define FMUL(a, b) __fmul_rn((a), (b))
+#define FMUL(a, b) ((a) * (b))
+#define FDIV(a, b) __fdiv_rn((a), (b))
+// #define FSUB(a, b) __fsub_rn((a), (b))
+#define FSUB(a, b) ((a) - (b))
+#define FADD(a, b) __fadd_rn((a), (b))
+#define FSQRT(a) __fsqrt_rn(a)
+#define FEXP(a) fasterexp(a)
+#define FLN(a) fasterlog(a)
+#define FPOW(a, b) __powf((a), (b))
+#define FMAX(a, b) fmax((a), (b))
+#define FMIN(a, b) fmin((a), (b))
+#define FCEIL(a) ceilf(a)
+#define FFLOOR(a) floorf(a)
+#define FROUND(x) nearbyintf(x)
+#define FSATURATE(x) __saturatef(x)
+#define FABS(a) abs(a)
+#define IASF(a, loc) (loc) = __int_as_float(a)
+#define FASI(a, loc) (loc) = __float_as_int(a)
+#define FABSLEQAS(a, b, c) \
+  ((a) <= (b) ? FSUB((b), (a)) <= (c) : FSUB((a), (b)) < (c))
+/** Calculates x*y+z. */
+#define FMA(x, y, z) __fmaf_rn((x), (y), (z))
+#define I2F(a) __int2float_rn(a)
+#define FRCP(x) __frcp_rn(x)
+#if !defined(USE_ROCM)
+// CAS loop implementing a float atomic max (CUDA has no native one).
+__device__ static float atomicMax(float* address, float val) {
+  int* address_as_i = (int*)address;
+  int old = *address_as_i, assumed;
+  do {
+    assumed = old;
+    old = ::atomicCAS(
+        address_as_i,
+        assumed,
+        __float_as_int(::fmaxf(val, __int_as_float(assumed))));
+  } while (assumed != old);
+  return __int_as_float(old);
+}
+// CAS loop implementing a float atomic min (CUDA has no native one).
+__device__ static float atomicMin(float* address, float val) {
+  int* address_as_i = (int*)address;
+  int old = *address_as_i, assumed;
+  do {
+    assumed = old;
+    old = ::atomicCAS(
+        address_as_i,
+        assumed,
+        __float_as_int(::fminf(val, __int_as_float(assumed))));
+  } while (assumed != old);
+  return __int_as_float(old);
+}
+#endif //! USE_ROCM
+#define DMAX(a, b) FMAX(a, b)
+#define DMIN(a, b) FMIN(a, b)
+#define DSQRT(a) sqrt(a)
+#define DSATURATE(a) DMIN(1., DMAX(0., (a)))
+// half
+#define HADD(a, b) __hadd((a), (b))
+#define HSUB2(a, b) __hsub2((a), (b))
+#define HMUL2(a, b) __hmul2((a), (b))
+#define HSQRT(a) hsqrt(a)
+
+// uint.
+#define CLZ(VAL) __clz(VAL) +#define POPC(a) __popc(a) +// +// +// +// +// +// +// +// +// +#define ATOMICADD(PTR, VAL) atomicAdd((PTR), (VAL)) +#define ATOMICADD_F3(PTR, VAL) \ + ATOMICADD(&((PTR)->x), VAL.x); \ + ATOMICADD(&((PTR)->y), VAL.y); \ + ATOMICADD(&((PTR)->z), VAL.z); +#if (CUDART_VERSION >= 10000) && (__CUDA_ARCH__ >= 600) +#define ATOMICADD_B(PTR, VAL) atomicAdd_block((PTR), (VAL)) +#else +#define ATOMICADD_B(PTR, VAL) ATOMICADD(PTR, VAL) +#endif +// +// +// +// +// int. +#define IMIN(a, b) min((a), (b)) +#define IMAX(a, b) max((a), (b)) +#define IABS(a) abs(a) + +// Checks. +// like TORCH_CHECK_ARG in PyTorch > 1.10 +#define ARGCHECK(cond, argN, ...) \ + TORCH_CHECK(cond, "invalid argument ", argN, ": ", __VA_ARGS__) + +// Math. +#define NORM3DF(x, y, z) norm3df(x, y, z) +#define RNORM3DF(x, y, z) rnorm3df(x, y, z) + +// High level. +#define GET_SORT_WS_SIZE(RES_PTR, KEY_TYPE, VAL_TYPE, NUM_OBJECTS) \ + cub::DeviceRadixSort::SortPairsDescending( \ + (void*)NULL, \ + *(RES_PTR), \ + reinterpret_cast(NULL), \ + reinterpret_cast(NULL), \ + reinterpret_cast(NULL), \ + reinterpret_cast(NULL), \ + (NUM_OBJECTS)); +#define GET_REDUCE_WS_SIZE(RES_PTR, TYPE, REDUCE_OP, NUM_OBJECTS) \ + { \ + TYPE init = TYPE(); \ + cub::DeviceReduce::Reduce( \ + (void*)NULL, \ + *(RES_PTR), \ + (TYPE*)NULL, \ + (TYPE*)NULL, \ + (NUM_OBJECTS), \ + (REDUCE_OP), \ + init); \ + } +#define GET_SELECT_WS_SIZE( \ + RES_PTR, TYPE_SELECTOR, TYPE_SELECTION, NUM_OBJECTS) \ + { \ + cub::DeviceSelect::Flagged( \ + (void*)NULL, \ + *(RES_PTR), \ + (TYPE_SELECTION*)NULL, \ + (TYPE_SELECTOR*)NULL, \ + (TYPE_SELECTION*)NULL, \ + (int*)NULL, \ + (NUM_OBJECTS)); \ + } +#define GET_SUM_WS_SIZE(RES_PTR, TYPE_SUM, NUM_OBJECTS) \ + { \ + cub::DeviceReduce::Sum( \ + (void*)NULL, \ + *(RES_PTR), \ + (TYPE_SUM*)NULL, \ + (TYPE_SUM*)NULL, \ + NUM_OBJECTS); \ + } +#define GET_MM_WS_SIZE(RES_PTR, TYPE, NUM_OBJECTS) \ + { \ + TYPE init = TYPE(); \ + cub::DeviceReduce::Max( \ + (void*)NULL, *(RES_PTR), 
(TYPE*)NULL, (TYPE*)NULL, (NUM_OBJECTS)); \ + } +#define SORT_DESCENDING( \ + TMPN1, SORT_PTR, SORTED_PTR, VAL_PTR, VAL_SORTED_PTR, NUM_OBJECTS) \ + void* TMPN1 = NULL; \ + size_t TMPN1##_bytes = 0; \ + cub::DeviceRadixSort::SortPairsDescending( \ + TMPN1, \ + TMPN1##_bytes, \ + (SORT_PTR), \ + (SORTED_PTR), \ + (VAL_PTR), \ + (VAL_SORTED_PTR), \ + (NUM_OBJECTS)); \ + HANDLECUDA(cudaMalloc(&TMPN1, TMPN1##_bytes)); \ + cub::DeviceRadixSort::SortPairsDescending( \ + TMPN1, \ + TMPN1##_bytes, \ + (SORT_PTR), \ + (SORTED_PTR), \ + (VAL_PTR), \ + (VAL_SORTED_PTR), \ + (NUM_OBJECTS)); \ + HANDLECUDA(cudaFree(TMPN1)); +#define SORT_DESCENDING_WS( \ + TMPN1, \ + SORT_PTR, \ + SORTED_PTR, \ + VAL_PTR, \ + VAL_SORTED_PTR, \ + NUM_OBJECTS, \ + WORKSPACE_PTR, \ + WORKSPACE_BYTES) \ + cub::DeviceRadixSort::SortPairsDescending( \ + (WORKSPACE_PTR), \ + (WORKSPACE_BYTES), \ + (SORT_PTR), \ + (SORTED_PTR), \ + (VAL_PTR), \ + (VAL_SORTED_PTR), \ + (NUM_OBJECTS)); +#define SORT_ASCENDING_WS( \ + SORT_PTR, \ + SORTED_PTR, \ + VAL_PTR, \ + VAL_SORTED_PTR, \ + NUM_OBJECTS, \ + WORKSPACE_PTR, \ + WORKSPACE_BYTES, \ + STREAM) \ + cub::DeviceRadixSort::SortPairs( \ + (WORKSPACE_PTR), \ + (WORKSPACE_BYTES), \ + (SORT_PTR), \ + (SORTED_PTR), \ + (VAL_PTR), \ + (VAL_SORTED_PTR), \ + (NUM_OBJECTS), \ + 0, \ + sizeof(*(SORT_PTR)) * 8, \ + (STREAM)); +#define SUM_WS( \ + SUM_PTR, OUT_PTR, NUM_OBJECTS, WORKSPACE_PTR, WORKSPACE_BYTES, STREAM) \ + cub::DeviceReduce::Sum( \ + (WORKSPACE_PTR), \ + (WORKSPACE_BYTES), \ + (SUM_PTR), \ + (OUT_PTR), \ + (NUM_OBJECTS), \ + (STREAM)); +#define MIN_WS( \ + MIN_PTR, OUT_PTR, NUM_OBJECTS, WORKSPACE_PTR, WORKSPACE_BYTES, STREAM) \ + cub::DeviceReduce::Min( \ + (WORKSPACE_PTR), \ + (WORKSPACE_BYTES), \ + (MIN_PTR), \ + (OUT_PTR), \ + (NUM_OBJECTS), \ + (STREAM)); +#define MAX_WS( \ + MAX_PTR, OUT_PTR, NUM_OBJECTS, WORKSPACE_PTR, WORKSPACE_BYTES, STREAM) \ + cub::DeviceReduce::Min( \ + (WORKSPACE_PTR), \ + (WORKSPACE_BYTES), \ + (MAX_PTR), \ + (OUT_PTR), \ + 
(NUM_OBJECTS), \ + (STREAM)); +// +// +// +// TODO: rewrite using nested contexts instead of temporary names. +#define REDUCE(REDUCE_PTR, RESULT_PTR, NUM_ITEMS, REDUCE_OP, REDUCE_INIT) \ + cub::DeviceReduce::Reduce( \ + TMPN1, \ + TMPN1##_bytes, \ + (REDUCE_PTR), \ + (RESULT_PTR), \ + (NUM_ITEMS), \ + (REDUCE_OP), \ + (REDUCE_INIT)); \ + HANDLECUDA(cudaMalloc(&TMPN1, TMPN1##_bytes)); \ + cub::DeviceReduce::Reduce( \ + TMPN1, \ + TMPN1##_bytes, \ + (REDUCE_PTR), \ + (RESULT_PTR), \ + (NUM_ITEMS), \ + (REDUCE_OP), \ + (REDUCE_INIT)); \ + HANDLECUDA(cudaFree(TMPN1)); +#define REDUCE_WS( \ + REDUCE_PTR, \ + RESULT_PTR, \ + NUM_ITEMS, \ + REDUCE_OP, \ + REDUCE_INIT, \ + WORKSPACE_PTR, \ + WORSPACE_BYTES, \ + STREAM) \ + cub::DeviceReduce::Reduce( \ + (WORKSPACE_PTR), \ + (WORSPACE_BYTES), \ + (REDUCE_PTR), \ + (RESULT_PTR), \ + (NUM_ITEMS), \ + (REDUCE_OP), \ + (REDUCE_INIT), \ + (STREAM)); +#define SELECT_FLAGS_WS( \ + FLAGS_PTR, \ + ITEM_PTR, \ + OUT_PTR, \ + NUM_SELECTED_PTR, \ + NUM_ITEMS, \ + WORKSPACE_PTR, \ + WORSPACE_BYTES, \ + STREAM) \ + cub::DeviceSelect::Flagged( \ + (WORKSPACE_PTR), \ + (WORSPACE_BYTES), \ + (ITEM_PTR), \ + (FLAGS_PTR), \ + (OUT_PTR), \ + (NUM_SELECTED_PTR), \ + (NUM_ITEMS), \ + (STREAM)); + +#define COPY_HOST_DEV(PTR_D, PTR_H, TYPE, SIZE) \ + HANDLECUDA(cudaMemcpy( \ + (PTR_D), (PTR_H), sizeof(TYPE) * (SIZE), cudaMemcpyHostToDevice)) +#define COPY_DEV_HOST(PTR_H, PTR_D, TYPE, SIZE) \ + HANDLECUDA(cudaMemcpy( \ + (PTR_H), (PTR_D), sizeof(TYPE) * (SIZE), cudaMemcpyDeviceToHost)) +#define COPY_DEV_DEV(PTR_T, PTR_S, TYPE, SIZE) \ + HANDLECUDA(cudaMemcpy( \ + (PTR_T), (PTR_S), sizeof(TYPE) * (SIZE), cudaMemcpyDeviceToDevice)) +// +// We *must* use cudaMallocManaged for pointers on device that should +// interact with pytorch. However, this comes at a significant speed penalty. +// We're using plain CUDA pointers for the rendering operations and +// explicitly copy results to managed pointers wrapped for pytorch (see +// pytorch/util.h). 
+#define MALLOC(VAR, TYPE, SIZE) cudaMalloc(&(VAR), sizeof(TYPE) * (SIZE)) +#define FREE(PTR) HANDLECUDA(cudaFree(PTR)) +#define MEMSET(VAR, VAL, TYPE, SIZE, STREAM) \ + HANDLECUDA(cudaMemsetAsync((VAR), (VAL), sizeof(TYPE) * (SIZE), (STREAM))) + +#define LAUNCH_MAX_PARALLEL_1D(FUNC, N, STREAM, ...) \ + { \ + int64_t max_threads = \ + at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; \ + uint num_threads = min((N), max_threads); \ + uint num_blocks = iDivCeil((N), num_threads); \ + FUNC<<>>(__VA_ARGS__); \ + } +#define LAUNCH_PARALLEL_1D(FUNC, N, TN, STREAM, ...) \ + { \ + uint num_threads = min(static_cast(N), static_cast(TN)); \ + uint num_blocks = iDivCeil((N), num_threads); \ + FUNC<<>>(__VA_ARGS__); \ + } +#define LAUNCH_MAX_PARALLEL_2D(FUNC, NX, NY, STREAM, ...) \ + { \ + int64_t max_threads = \ + at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock; \ + int64_t max_threads_sqrt = static_cast(sqrt(max_threads)); \ + dim3 num_threads, num_blocks; \ + num_threads.x = min((NX), max_threads_sqrt); \ + num_blocks.x = iDivCeil((NX), num_threads.x); \ + num_threads.y = min((NY), max_threads_sqrt); \ + num_blocks.y = iDivCeil((NY), num_threads.y); \ + num_threads.z = 1; \ + num_blocks.z = 1; \ + FUNC<<>>(__VA_ARGS__); \ + } +#define LAUNCH_PARALLEL_2D(FUNC, NX, NY, TX, TY, STREAM, ...) 
\ + { \ + dim3 num_threads, num_blocks; \ + num_threads.x = min((NX), (TX)); \ + num_blocks.x = iDivCeil((NX), num_threads.x); \ + num_threads.y = min((NY), (TY)); \ + num_blocks.y = iDivCeil((NY), num_threads.y); \ + num_threads.z = 1; \ + num_blocks.z = 1; \ + FUNC<<>>(__VA_ARGS__); \ + } + +#define GET_PARALLEL_IDX_1D(VARNAME, N) \ + const uint VARNAME = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; \ + if (VARNAME >= (N)) { \ + return; \ + } +#define GET_PARALLEL_IDS_2D(VAR_X, VAR_Y, WIDTH, HEIGHT) \ + const uint VAR_X = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; \ + const uint VAR_Y = __mul24(blockIdx.y, blockDim.y) + threadIdx.y; \ + if (VAR_X >= (WIDTH) || VAR_Y >= (HEIGHT)) \ + return; +#define END_PARALLEL() +#define END_PARALLEL_NORET() +#define END_PARALLEL_2D_NORET() +#define END_PARALLEL_2D() +#define RETURN_PARALLEL() return +#define CHECKLAUNCH() C10_CUDA_CHECK(cudaGetLastError()); +#define ISONDEVICE true +#define SYNCDEVICE() HANDLECUDA(cudaDeviceSynchronize()) +#define START_TIME(TN) \ + cudaEvent_t __time_start_##TN, __time_stop_##TN; \ + cudaEventCreate(&__time_start_##TN); \ + cudaEventCreate(&__time_stop_##TN); \ + cudaEventRecord(__time_start_##TN); +#define STOP_TIME(TN) cudaEventRecord(__time_stop_##TN); +#define GET_TIME(TN, TOPTR) \ + cudaEventSynchronize(__time_stop_##TN); \ + cudaEventElapsedTime((TOPTR), __time_start_##TN, __time_stop_##TN); +#define START_TIME_CU(TN) START_TIME(CN) +#define STOP_TIME_CU(TN) STOP_TIME(TN) +#define GET_TIME_CU(TN, TOPTR) GET_TIME(TN, TOPTR) + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.backward.gpu.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.backward.gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..e0da7b7020c0a3f5ae0647030282adf0e0103d39 --- /dev/null +++ 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.backward.gpu.cu @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.backward.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.backward_dbg.gpu.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.backward_dbg.gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..a95bb421d2d9b6bfec1a9286e035b042b0d9842c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.backward_dbg.gpu.cu @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.backward_dbg.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.calc_gradients.gpu.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.calc_gradients.gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..ff38b08e0dfe46e65a94039c8dec7da721d0421a --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.calc_gradients.gpu.cu @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. 
+ * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.calc_gradients.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.calc_signature.gpu.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.calc_signature.gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..81c72192eaa877038d9383cfdd0adf9a91e06f97 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.calc_signature.gpu.cu @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.calc_signature.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.construct.gpu.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.construct.gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..67583511aec2a6bd4dd8670aeb809939a3d2e19c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.construct.gpu.cu @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "../include/renderer.construct.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.create_selector.gpu.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.create_selector.gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..52e265bcb2ab8ca9e4d08d90d1dc4fef75294520 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.create_selector.gpu.cu @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.create_selector.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.destruct.gpu.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.destruct.gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..e61be93fa4c4893e6c4800f71cf49ef81c717ff0 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.destruct.gpu.cu @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "../include/renderer.destruct.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.fill_bg.gpu.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.fill_bg.gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..6c7b1a48b675b1dbe69992c81a8cbb8c8861911e --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.fill_bg.gpu.cu @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.fill_bg.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.forward.gpu.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.forward.gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..bfb42debeeaa7444daec94a88830c39825239170 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.forward.gpu.cu @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "../include/renderer.forward.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.norm_cam_gradients.gpu.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.norm_cam_gradients.gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..93d666324a4973f44ad4becbeecaf34e0c7b96e5 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.norm_cam_gradients.gpu.cu @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.norm_cam_gradients.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.norm_sphere_gradients.gpu.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.norm_sphere_gradients.gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..65339caea11645e4b7ba99a0af77c21b4ae2f738 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.norm_sphere_gradients.gpu.cu @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "../include/renderer.norm_sphere_gradients.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.render.gpu.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.render.gpu.cu new file mode 100644 index 0000000000000000000000000000000000000000..eb46adbafbc1c2a60dfb21fa9ce222828e53e31b --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/gpu/renderer.render.gpu.cu @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.render.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/README.md b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/README.md new file mode 100644 index 0000000000000000000000000000000000000000..34f1bade9134da24f4038425c4b50fe1fffc45dc --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/README.md @@ -0,0 +1,5 @@ +# Device-specific host compilation units + +This folder contains `.cpp` files to create compilation units +for device specific functions. See `../include/README.md` for +more information. 
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/commands.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/commands.h new file mode 100644 index 0000000000000000000000000000000000000000..a48eaaa901d557874ed84e92751da76a5bcbf6c3 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/commands.h @@ -0,0 +1,391 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_COMMANDS_H_ +#define PULSAR_NATIVE_COMMANDS_H_ + +#ifdef _MSC_VER +#include +#define __builtin_popcount (int)__popcnt +#endif + +// Definitions for CPU commands. +// #include +// #include + +namespace cg { +struct coalesced_group { + INLINE uint thread_rank() const { + return 0u; + } + INLINE uint size() const { + return 1u; + } + INLINE uint ballot(uint val) const { + return static_cast(val > 0); + } +}; + +struct thread_block { + INLINE uint thread_rank() const { + return 0u; + } + INLINE uint size() const { + return 1u; + } + INLINE void sync() const {} +}; + +INLINE coalesced_group coalesced_threads() { + coalesced_group ret; + return ret; +} + +INLINE thread_block this_thread_block() { + thread_block ret; + return ret; +} +} // namespace cg +#define SHFL_SYNC(a, b, c) (b) +template +T WARP_CUMSUM( + const cg::coalesced_group& group, + const uint& mask, + const T& base) { + return base; +} + +template +DEVICE T +WARP_MAX(const cg::coalesced_group& group, const uint& mask, const T& base) { + return base; +} + +template +DEVICE T +WARP_SUM(const cg::coalesced_group& group, const uint& mask, const T& base) { + return base; +} + +INLINE DEVICE float3 WARP_SUM_FLOAT3( + const cg::coalesced_group& group, + const uint& mask, + 
const float3& base) { + return base; +} + +#define ACTIVEMASK() (1u << 31) +#define ALIGN(VAL) +#define SYNC() +#define THREADFENCE_B() +#define BALLOT(mask, val) (val != 0) +#define SHARED +// Floating point. +#define FMAX(a, b) std::fmax((a), (b)) +#define FMIN(a, b) std::fmin((a), (b)) +INLINE float atomicMax(float* address, float val) { + *address = std::max(*address, val); + return *address; +} +INLINE float atomicMin(float* address, float val) { + *address = std::min(*address, val); + return *address; +} +#define FMUL(a, b) ((a) * (b)) +#define FDIV(a, b) ((a) / (b)) +#define FSUB(a, b) ((a) - (b)) +#define FABSLEQAS(a, b, c) \ + ((a) <= (b) ? FSUB((b), (a)) <= (c) : FSUB((a), (b)) < (c)) +#define FADD(a, b) ((a) + (b)) +#define FSQRT(a) sqrtf(a) +#define FEXP(a) fasterexp(a) +#define FLN(a) fasterlog(a) +#define FPOW(a, b) powf((a), (b)) +#define FROUND(x) roundf(x) +#define FCEIL(a) ceilf(a) +#define FFLOOR(a) floorf(a) +#define FSATURATE(x) std::max(0.f, std::min(1.f, x)) +#define FABS(a) abs(a) +#define FMA(x, y, z) ((x) * (y) + (z)) +#define I2F(a) static_cast(a) +#define FRCP(x) (1.f / (x)) +#define IASF(x, loc) memcpy(&(loc), &(x), sizeof(x)) +#define FASI(x, loc) memcpy(&(loc), &(x), sizeof(x)) +#define DMAX(a, b) std::max((a), (b)) +#define DMIN(a, b) std::min((a), (b)) +#define DSATURATE(a) DMIN(1., DMAX(0., (a))) +#define DSQRT(a) sqrt(a) +// +// +// +// +// +// +// +// +// +// +// +// +// uint. +#define CLZ(VAL) _clz(VAL) +template +INLINE T ATOMICADD(T* address, T val) { + T old = *address; + *address += val; + return old; +} +template +INLINE void ATOMICADD_F3(T* address, T val) { + ATOMICADD(&(address->x), val.x); + ATOMICADD(&(address->y), val.y); + ATOMICADD(&(address->z), val.z); +} +#define ATOMICADD_B(a, b) ATOMICADD((a), (b)) +#define POPC(a) __builtin_popcount(a) + +// int. +#define IMIN(a, b) std::min((a), (b)) +#define IMAX(a, b) std::max((a), (b)) +#define IABS(a) abs(a) + +// Checks. 
+// like TORCH_CHECK_ARG in PyTorch > 1.10 +#define ARGCHECK(cond, argN, ...) \ + TORCH_CHECK(cond, "invalid argument ", argN, ": ", __VA_ARGS__) + +// Math. +#define NORM3DF(x, y, z) sqrtf(x* x + y * y + z * z) +#define RNORM3DF(x, y, z) (1.f / sqrtf(x * x + y * y + z * z)) + +// High level. +#define PREFETCH(PTR) +#define GET_SORT_WS_SIZE(RES_PTR, KEY_TYPE, VAL_TYPE, NUM_OBJECTS) \ + *(RES_PTR) = 0; +#define GET_REDUCE_WS_SIZE(RES_PTR, TYPE, REDUCE_OP, NUM_OBJECTS) \ + *(RES_PTR) = 0; +#define GET_SELECT_WS_SIZE( \ + RES_PTR, TYPE_SELECTOR, TYPE_SELECTION, NUM_OBJECTS) \ + *(RES_PTR) = 0; +#define GET_SUM_WS_SIZE(RES_PTR, TYPE_SUM, NUM_OBJECTS) *(RES_PTR) = 0; +#define GET_MM_WS_SIZE(RES_PTR, TYPE, NUM_OBJECTS) *(RES_PTR) = 0; + +#define SORT_DESCENDING( \ + TMPN1, SORT_PTR, SORTED_PTR, VAL_PTR, VAL_SORTED_PTR, NUM_OBJECTS) \ + std::vector TMPN1(NUM_OBJECTS); \ + std::iota(TMPN1.begin(), TMPN1.end(), 0); \ + const auto TMPN1##_val_ptr = (SORT_PTR); \ + std::sort( \ + TMPN1.begin(), TMPN1.end(), [&TMPN1##_val_ptr](size_t i1, size_t i2) { \ + return TMPN1##_val_ptr[i1] > TMPN1##_val_ptr[i2]; \ + }); \ + for (int i = 0; i < (NUM_OBJECTS); ++i) { \ + (SORTED_PTR)[i] = (SORT_PTR)[TMPN1[i]]; \ + } \ + for (int i = 0; i < (NUM_OBJECTS); ++i) { \ + (VAL_SORTED_PTR)[i] = (VAL_PTR)[TMPN1[i]]; \ + } + +#define SORT_ASCENDING( \ + SORT_PTR, SORTED_PTR, VAL_PTR, VAL_SORTED_PTR, NUM_OBJECTS, STREAM) \ + { \ + std::vector TMPN1(NUM_OBJECTS); \ + std::iota(TMPN1.begin(), TMPN1.end(), 0); \ + const auto TMPN1_val_ptr = (SORT_PTR); \ + std::sort( \ + TMPN1.begin(), \ + TMPN1.end(), \ + [&TMPN1_val_ptr](size_t i1, size_t i2) -> bool { \ + return TMPN1_val_ptr[i1] < TMPN1_val_ptr[i2]; \ + }); \ + for (int i = 0; i < (NUM_OBJECTS); ++i) { \ + (SORTED_PTR)[i] = (SORT_PTR)[TMPN1[i]]; \ + } \ + for (int i = 0; i < (NUM_OBJECTS); ++i) { \ + (VAL_SORTED_PTR)[i] = (VAL_PTR)[TMPN1[i]]; \ + } \ + } + +#define SORT_DESCENDING_WS( \ + TMPN1, \ + SORT_PTR, \ + SORTED_PTR, \ + VAL_PTR, \ + 
VAL_SORTED_PTR, \ + NUM_OBJECTS, \ + WORSPACE_PTR, \ + WORKSPACE_SIZE) \ + SORT_DESCENDING( \ + TMPN1, SORT_PTR, SORTED_PTR, VAL_PTR, VAL_SORTED_PTR, NUM_OBJECTS) + +#define SORT_ASCENDING_WS( \ + SORT_PTR, \ + SORTED_PTR, \ + VAL_PTR, \ + VAL_SORTED_PTR, \ + NUM_OBJECTS, \ + WORSPACE_PTR, \ + WORKSPACE_SIZE, \ + STREAM) \ + SORT_ASCENDING( \ + SORT_PTR, SORTED_PTR, VAL_PTR, VAL_SORTED_PTR, NUM_OBJECTS, STREAM) + +#define REDUCE(REDUCE_PTR, RESULT_PTR, NUM_ITEMS, REDUCE_OP, REDUCE_INIT) \ + { \ + *(RESULT_PTR) = (REDUCE_INIT); \ + for (int i = 0; i < (NUM_ITEMS); ++i) { \ + *(RESULT_PTR) = REDUCE_OP(*(RESULT_PTR), (REDUCE_PTR)[i]); \ + } \ + } +#define REDUCE_WS( \ + REDUCE_PTR, \ + RESULT_PTR, \ + NUM_ITEMS, \ + REDUCE_OP, \ + REDUCE_INIT, \ + WORKSPACE_PTR, \ + WORKSPACE_SIZE, \ + STREAM) \ + REDUCE(REDUCE_PTR, RESULT_PTR, NUM_ITEMS, REDUCE_OP, REDUCE_INIT) + +#define SELECT_FLAGS_WS( \ + FLAGS_PTR, \ + ITEM_PTR, \ + OUT_PTR, \ + NUM_SELECTED_PTR, \ + NUM_ITEMS, \ + WORKSPACE_PTR, \ + WORSPACE_BYTES, \ + STREAM) \ + { \ + *NUM_SELECTED_PTR = 0; \ + ptrdiff_t write_pos = 0; \ + for (int i = 0; i < NUM_ITEMS; ++i) { \ + if (FLAGS_PTR[i]) { \ + OUT_PTR[write_pos++] = ITEM_PTR[i]; \ + *NUM_SELECTED_PTR += 1; \ + } \ + } \ + } + +template +void SUM_WS( + T* SUM_PTR, + T* OUT_PTR, + size_t NUM_OBJECTS, + char* WORKSPACE_PTR, + size_t WORKSPACE_BYTES, + cudaStream_t STREAM) { + *(OUT_PTR) = T(); + for (int i = 0; i < (NUM_OBJECTS); ++i) { + *(OUT_PTR) = *(OUT_PTR) + (SUM_PTR)[i]; + } +} + +template +void MIN_WS( + T* MIN_PTR, + T* OUT_PTR, + size_t NUM_OBJECTS, + char* WORKSPACE_PTR, + size_t WORKSPACE_BYTES, + cudaStream_t STREAM) { + *(OUT_PTR) = T(); + for (int i = 0; i < (NUM_OBJECTS); ++i) { + *(OUT_PTR) = std::min(*(OUT_PTR), (MIN_PTR)[i]); + } +} + +template +void MAX_WS( + T* MAX_PTR, + T* OUT_PTR, + size_t NUM_OBJECTS, + char* WORKSPACE_PTR, + size_t WORKSPACE_BYTES, + cudaStream_t STREAM) { + *(OUT_PTR) = T(); + for (int i = 0; i < (NUM_OBJECTS); ++i) { + 
*(OUT_PTR) = std::max(*(OUT_PTR), (MAX_PTR)[i]); + } +} +// +// +// +// +#define COPY_HOST_DEV(PTR_D, PTR_H, TYPE, SIZE) \ + std::memcpy((PTR_D), (PTR_H), sizeof(TYPE) * (SIZE)) +// +#define COPY_DEV_HOST(PTR_H, PTR_D, TYPE, SIZE) \ + std::memcpy((PTR_H), (PTR_D), sizeof(TYPE) * (SIZE)) +// +#define COPY_DEV_DEV(PTR_T, PTR_S, TYPE, SIZE) \ + std::memcpy((PTR_T), (PTR_S), sizeof(TYPE) * SIZE) +// + +#define MALLOC(VAR, TYPE, SIZE) MALLOC_HOST(VAR, TYPE, SIZE) +#define FREE(PTR) FREE_HOST(PTR) +#define MEMSET(VAR, VAL, TYPE, SIZE, STREAM) \ + memset((VAR), (VAL), sizeof(TYPE) * (SIZE)) +// + +#define LAUNCH_MAX_PARALLEL_1D(FUNC, N, STREAM, ...) FUNC(__VA_ARGS__); +#define LAUNCH_PARALLEL_1D(FUNC, N, TN, STREAM, ...) FUNC(__VA_ARGS__); +#define LAUNCH_MAX_PARALLEL_2D(FUNC, NX, NY, STREAM, ...) FUNC(__VA_ARGS__); +#define LAUNCH_PARALLEL_2D(FUNC, NX, NY, TX, TY, STREAM, ...) FUNC(__VA_ARGS__); +// +// +// +// +// +#define GET_PARALLEL_IDX_1D(VARNAME, N) \ + for (uint VARNAME = 0; VARNAME < (N); ++VARNAME) { +#define GET_PARALLEL_IDS_2D(VAR_X, VAR_Y, WIDTH, HEIGHT) \ + int2 blockDim; \ + blockDim.x = 1; \ + blockDim.y = 1; \ + uint __parallel_2d_width = WIDTH; \ + uint __parallel_2d_height = HEIGHT; \ + for (uint VAR_Y = 0; VAR_Y < __parallel_2d_height; ++(VAR_Y)) { \ + for (uint VAR_X = 0; VAR_X < __parallel_2d_width; ++(VAR_X)) { +// +// +// +#define END_PARALLEL() \ + end_parallel :; \ + } +#define END_PARALLEL_NORET() } +#define END_PARALLEL_2D() \ + end_parallel :; \ + } \ + } +#define END_PARALLEL_2D_NORET() \ + } \ + } +#define RETURN_PARALLEL() goto end_parallel; +#define CHECKLAUNCH() +#define ISONDEVICE false +#define SYNCDEVICE() +#define START_TIME(TN) \ + auto __time_start_##TN = std::chrono::steady_clock::now(); +#define STOP_TIME(TN) auto __time_stop_##TN = std::chrono::steady_clock::now(); +#define GET_TIME(TN, TOPTR) \ + *TOPTR = std::chrono::duration_cast( \ + __time_stop_##TN - __time_start_##TN) \ + .count() +#define START_TIME_CU(TN) \ + cudaEvent_t 
__time_start_##TN, __time_stop_##TN; \ + cudaEventCreate(&__time_start_##TN); \ + cudaEventCreate(&__time_stop_##TN); \ + cudaEventRecord(__time_start_##TN); +#define STOP_TIME_CU(TN) cudaEventRecord(__time_stop_##TN); +#define GET_TIME_CU(TN, TOPTR) \ + cudaEventSynchronize(__time_stop_##TN); \ + cudaEventElapsedTime((TOPTR), __time_start_##TN, __time_stop_##TN); + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.backward.cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.backward.cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e0da7b7020c0a3f5ae0647030282adf0e0103d39 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.backward.cpu.cpp @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.backward.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.backward_dbg.cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.backward_dbg.cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a95bb421d2d9b6bfec1a9286e035b042b0d9842c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.backward_dbg.cpu.cpp @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "../include/renderer.backward_dbg.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.calc_gradients.cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.calc_gradients.cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ff38b08e0dfe46e65a94039c8dec7da721d0421a --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.calc_gradients.cpu.cpp @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.calc_gradients.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.calc_signature.cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.calc_signature.cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..81c72192eaa877038d9383cfdd0adf9a91e06f97 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.calc_signature.cpu.cpp @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "../include/renderer.calc_signature.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.construct.cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.construct.cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..67583511aec2a6bd4dd8670aeb809939a3d2e19c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.construct.cpu.cpp @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.construct.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.create_selector.cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.create_selector.cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..52e265bcb2ab8ca9e4d08d90d1dc4fef75294520 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.create_selector.cpu.cpp @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "../include/renderer.create_selector.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.destruct.cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.destruct.cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e61be93fa4c4893e6c4800f71cf49ef81c717ff0 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.destruct.cpu.cpp @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.destruct.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.fill_bg.cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.fill_bg.cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6c7b1a48b675b1dbe69992c81a8cbb8c8861911e --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.fill_bg.cpu.cpp @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "../include/renderer.fill_bg.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.forward.cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.forward.cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..bfb42debeeaa7444daec94a88830c39825239170 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.forward.cpu.cpp @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.forward.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.norm_cam_gradients.cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.norm_cam_gradients.cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..93d666324a4973f44ad4becbeecaf34e0c7b96e5 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.norm_cam_gradients.cpu.cpp @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "../include/renderer.norm_cam_gradients.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.norm_sphere_gradients.cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.norm_sphere_gradients.cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..65339caea11645e4b7ba99a0af77c21b4ae2f738 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.norm_sphere_gradients.cpu.cpp @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "../include/renderer.norm_sphere_gradients.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.render.cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.render.cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..eb46adbafbc1c2a60dfb21fa9ce222828e53e31b --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/host/renderer.render.cpu.cpp @@ -0,0 +1,9 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "../include/renderer.render.instantiate.h" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/README.md b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e963ff043abdbbf88af350512f60fb70a02a4774 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/README.md @@ -0,0 +1,16 @@ +# The `include` folder + +This folder contains header files with implementations of several useful +algorithms. These implementations are usually done in files called `x.device.h` +and use macros that route every device specific command to the right +implementation (see `commands.h`). + +If you're using a device specific implementation, include `x.device.h`. +This gives you the high-speed, device specific implementation that lets +you work with all the details of the datastructure. All function calls are +inlined. If you need to work with the high-level interface and be able to +dynamically pick a device, only include `x.h`. The functions there are +templated with a boolean `DEV` flag and are instantiated in device specific +compilation units. You will not be able to use any other functions, but can +use `func(params)` to work on a CUDA device, or `func(params)` +to work on the host. 
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/camera.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/camera.device.h new file mode 100644 index 0000000000000000000000000000000000000000..73b9a80c8298fdf308228a516ea341632d2eee58 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/camera.device.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_INCLUDE_CAMERA_DEVICE_H_ +#define PULSAR_NATIVE_INCLUDE_CAMERA_DEVICE_H_ + +#include "../global.h" +#include "./camera.h" +#include "./commands.h" + +namespace pulsar { +IHD CamGradInfo::CamGradInfo(int x) { + cam_pos = make_float3(0.f, 0.f, 0.f); + pixel_0_0_center = make_float3(0.f, 0.f, 0.f); + pixel_dir_x = make_float3(0.f, 0.f, 0.f); + pixel_dir_y = make_float3(0.f, 0.f, 0.f); +} +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/camera.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/camera.h new file mode 100644 index 0000000000000000000000000000000000000000..e67d5fd0739d00d0bf702a38bba654fc0cc62021 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/camera.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#ifndef PULSAR_NATIVE_INCLUDE_CAMERA_H_ +#define PULSAR_NATIVE_INCLUDE_CAMERA_H_ + +#include +#include "../global.h" + +namespace pulsar { +/** + * Everything that's needed to raycast with our camera model. + */ +struct CamInfo { + float3 eye; /** Position in world coordinates. */ + float3 pixel_0_0_center; /** LUC center of pixel position in world + coordinates. */ + float3 pixel_dir_x; /** Direction for increasing x for one pixel to the next, + * in world coordinates. */ + float3 pixel_dir_y; /** Direction for increasing y for one pixel to the next, + * in world coordinates. */ + float3 sensor_dir_z; /** Normalized direction vector from eye through the + * sensor in z direction (optical axis). */ + float half_pixel_size; /** Half size of a pixel, in world coordinates. This + * must be consistent with pixel_dir_x and pixel_dir_y! + */ + float focal_length; /** The focal length, if applicable. */ + uint aperture_width; /** Full image width in px, possibly not fully used + * in case of a shifted principal point. */ + uint aperture_height; /** Full image height in px, possibly not fully used + * in case of a shifted principal point. */ + uint film_width; /** Resulting image width. */ + uint film_height; /** Resulting image height. */ + /** The top left coordinates (inclusive) of the film in the full aperture. */ + uint film_border_left, film_border_top; + int32_t principal_point_offset_x; /** Horizontal principal point offset. */ + int32_t principal_point_offset_y; /** Vertical principal point offset. */ + float min_dist; /** Minimum distance for a ball to be rendered. */ + float max_dist; /** Maximum distance for a ball to be rendered. */ + float norm_fac; /** 1 / (max_dist - min_dist), pre-computed. */ + /** The depth where to place the background, in normalized coordinates where + * 0. is the backmost depth and 1. the frontmost. */ + float background_normalization_depth; + /** The number of image content channels to use. Usually three. 
*/ + uint n_channels; + /** Whether to use an orthogonal instead of a perspective projection. */ + bool orthogonal_projection; + /** Whether to use a right-handed system (inverts the z axis). */ + bool right_handed; +}; + +inline bool operator==(const CamInfo& a, const CamInfo& b) { + return a.film_width == b.film_width && a.film_height == b.film_height && + a.background_normalization_depth == b.background_normalization_depth && + a.n_channels == b.n_channels && + a.orthogonal_projection == b.orthogonal_projection && + a.right_handed == b.right_handed; +}; + +struct CamGradInfo { + HOST DEVICE CamGradInfo(int = 0); + float3 cam_pos; + float3 pixel_0_0_center; + float3 pixel_dir_x; + float3 pixel_dir_y; +}; + +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/closest_sphere_tracker.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/closest_sphere_tracker.device.h new file mode 100644 index 0000000000000000000000000000000000000000..a533dd0048e7f624af7c14a4017b19fde3accff5 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/closest_sphere_tracker.device.h @@ -0,0 +1,138 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_INCLUDE_CLOSEST_SPHERE_TRACKER_DEVICE_H_ +#define PULSAR_NATIVE_INCLUDE_CLOSEST_SPHERE_TRACKER_DEVICE_H_ + +#include "../global.h" + +namespace pulsar { +namespace Renderer { + +/** + * A facility to track the closest spheres to the camera. + * + * Their max number is defined by MAX_GRAD_SPHERES (this is defined in + * `pulsar/native/global.h`). 
This is done to keep the performance as high as + * possible because this struct needs to do updates continuously on the GPU. + */ +struct ClosestSphereTracker { + public: + IHD ClosestSphereTracker(const int& n_track) : n_hits(0), n_track(n_track) { + PASSERT(n_track < MAX_GRAD_SPHERES); + // Initialize the sphere IDs to -1 and the weights to 0. + for (int i = 0; i < n_track; ++i) { + this->most_important_sphere_ids[i] = -1; + this->closest_sphere_intersection_depths[i] = MAX_FLOAT; + } + }; + + IHD void track( + const uint& sphere_idx, + const float& intersection_depth, + const uint& coord_x, + const uint& coord_y) { + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_TRACKER_PIX, + "tracker|tracking sphere %u (depth: %f).\n", + sphere_idx, + intersection_depth); + for (int i = IMIN(this->n_hits, n_track) - 1; i >= -1; --i) { + if (i < 0 || + this->closest_sphere_intersection_depths[i] < intersection_depth) { + // Write position is i+1. + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_TRACKER_PIX, + "tracker|determined writing position: %d.\n", + i + 1); + if (i + 1 < n_track) { + // Shift every other sphere back. + for (int j = n_track - 1; j > i + 1; --j) { + this->closest_sphere_intersection_depths[j] = + this->closest_sphere_intersection_depths[j - 1]; + this->most_important_sphere_ids[j] = + this->most_important_sphere_ids[j - 1]; + } + this->closest_sphere_intersection_depths[i + 1] = intersection_depth; + this->most_important_sphere_ids[i + 1] = sphere_idx; + } + break; + } + } +#if PULSAR_LOG_TRACKER_PIX + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_TRACKER_PIX, + "tracker|sphere list after adding sphere %u:\n", + sphere_idx); + for (int i = 0; i < n_track; ++i) { + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_TRACKER_PIX, + "tracker|sphere %d: %d (depth: %f).\n", + i, + this->most_important_sphere_ids[i], + this->closest_sphere_intersection_depths[i]); + } +#endif // PULSAR_LOG_TRACKER_PIX + this->n_hits += 1; + } + + /** + * Get the number of hits registered. 
+ */ + IHD int get_n_hits() const { + return this->n_hits; + } + + /** + * Get the idx closest sphere ID. + * + * For example, get_closest_sphere_id(0) gives the overall closest + * sphere id. + * + * This method is implemented for highly optimized scenarios and will *not* + * perform an index check at runtime if assertions are disabled. idx must be + * >=0 and < IMIN(n_hits, n_track) for a valid result, if it is >= + * n_hits it will return -1. + */ + IHD int get_closest_sphere_id(const int& idx) { + PASSERT(idx >= 0 && idx < n_track); + return this->most_important_sphere_ids[idx]; + } + + /** + * Get the idx closest sphere normalized_depth. + * + * For example, get_closest_sphere_depth(0) gives the overall closest + * sphere depth (normalized). + * + * This method is implemented for highly optimized scenarios and will *not* + * perform an index check at runtime if assertions are disabled. idx must be + * >=0 and < IMIN(n_hits, n_track) for a valid result, if it is >= + * n_hits it will return 1. + FEPS. + */ + IHD float get_closest_sphere_depth(const int& idx) { + PASSERT(idx >= 0 && idx < n_track); + return this->closest_sphere_intersection_depths[idx]; + } + + private: + /** The number of registered hits so far. */ + int n_hits; + /** The number of intersections to track. Must be (malloc(sizeof(TYPE) * (SIZE))) +#define FREE_HOST(PTR) free(PTR) + +/* Include command definitions depending on CPU or GPU use. */ + +#ifdef __CUDACC__ +// TODO: find out which compiler we're using here and use the suppression. 
+// #pragma push +// #pragma diag_suppress = 68 +#include +// #pragma pop +#include "../gpu/commands.h" +#else +#pragma clang diagnostic push +#pragma clang diagnostic ignored "-Weverything" +#pragma clang diagnostic pop +#include "../host/commands.h" +#endif + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/fastermath.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/fastermath.h new file mode 100644 index 0000000000000000000000000000000000000000..cae598f9c0a7f903b502702dcb62173c8841a3b8 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/fastermath.h @@ -0,0 +1,88 @@ +#ifndef PULSAR_NATIVE_INCLUDE_FASTERMATH_H_ +#define PULSAR_NATIVE_INCLUDE_FASTERMATH_H_ + +// @lint-ignore-every LICENSELINT +/*=====================================================================* + * Copyright (C) 2011 Paul Mineiro * + * All rights reserved. * + * * + * Redistribution and use in source and binary forms, with * + * or without modification, are permitted provided that the * + * following conditions are met: * + * * + * * Redistributions of source code must retain the * + * above copyright notice, this list of conditions and * + * the following disclaimer. * + * * + * * Redistributions in binary form must reproduce the * + * above copyright notice, this list of conditions and * + * the following disclaimer in the documentation and/or * + * other materials provided with the distribution. * + * * + * * Neither the name of Paul Mineiro nor the names * + * of other contributors may be used to endorse or promote * + * products derived from this software without specific * + * prior written permission. 
* + * * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND * + * CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, * + * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER * + * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, * + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE * + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * + * POSSIBILITY OF SUCH DAMAGE. * + * * + * Contact: Paul Mineiro * + *=====================================================================*/ + +#include +#include "./commands.h" + +#ifdef __cplusplus +#define cast_uint32_t static_cast +#else +#define cast_uint32_t (uint32_t) +#endif + +IHD float fasterlog2(float x) { + union { + float f; + uint32_t i; + } vx = {x}; + float y = vx.i; + y *= 1.1920928955078125e-7f; + return y - 126.94269504f; +} + +IHD float fasterlog(float x) { + // return 0.69314718f * fasterlog2 (x); + union { + float f; + uint32_t i; + } vx = {x}; + float y = vx.i; + y *= 8.2629582881927490e-8f; + return y - 87.989971088f; +} + +IHD float fasterpow2(float p) { + float clipp = (p < -126) ? 
-126.0f : p; + union { + uint32_t i; + float f; + } v = {cast_uint32_t((1 << 23) * (clipp + 126.94269504f))}; + return v.f; +} + +IHD float fasterexp(float p) { + return fasterpow2(1.442695040f * p); +} + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/math.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/math.h new file mode 100644 index 0000000000000000000000000000000000000000..b01f595e8ec1962a9e482cb6219639ef4620fb82 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/math.h @@ -0,0 +1,154 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_IMPL_MATH_H_ +#define PULSAR_NATIVE_IMPL_MATH_H_ + +#include "./camera.h" +#include "./commands.h" +#include "./fastermath.h" + +/** + * Get the direction of val. + * + * Returns +1 if val is positive, -1 if val is zero or negative. + */ +IHD int sign_dir(const int& val) { + return -(static_cast((val <= 0)) << 1) + 1; +}; + +/** + * Get the direction of val. + * + * Returns +1 if val is positive, -1 if val is zero or negative. + */ +IHD float sign_dir(const float& val) { + return static_cast(1 - (static_cast((val <= 0)) << 1)); +}; + +/** + * Integer ceil division. + */ +IHD uint iDivCeil(uint a, uint b) { + return (a % b != 0) ? (a / b + 1) : (a / b); +} + +IHD float3 outer_product_sum(const float3& a) { + return make_float3( + a.x * a.x + a.x * a.y + a.x * a.z, + a.x * a.y + a.y * a.y + a.y * a.z, + a.x * a.z + a.y * a.z + a.z * a.z); +} + +// TODO: put intrinsics here. 
+#if !defined(USE_ROCM) +IHD float3 operator+(const float3& a, const float3& b) { + return make_float3(a.x + b.x, a.y + b.y, a.z + b.z); +} + +IHD void operator+=(float3& a, const float3& b) { + a.x += b.x; + a.y += b.y; + a.z += b.z; +} + +IHD void operator-=(float3& a, const float3& b) { + a.x -= b.x; + a.y -= b.y; + a.z -= b.z; +} + +IHD void operator/=(float3& a, const float& b) { + a.x /= b; + a.y /= b; + a.z /= b; +} + +IHD void operator*=(float3& a, const float& b) { + a.x *= b; + a.y *= b; + a.z *= b; +} + +IHD float3 operator/(const float3& a, const float& b) { + return make_float3(a.x / b, a.y / b, a.z / b); +} + +IHD float3 operator-(const float3& a, const float3& b) { + return make_float3(a.x - b.x, a.y - b.y, a.z - b.z); +} + +IHD float3 operator*(const float3& a, const float& b) { + return make_float3(a.x * b, a.y * b, a.z * b); +} + +IHD float3 operator*(const float3& a, const float3& b) { + return make_float3(a.x * b.x, a.y * b.y, a.z * b.z); +} + +IHD float3 operator*(const float& a, const float3& b) { + return b * a; +} +#endif //! USE_ROCM + +INLINE DEVICE float length(const float3& v) { + // TODO: benchmark what's faster. + return NORM3DF(v.x, v.y, v.z); + // return __fsqrt_rn(v.x * v.x + v.y * v.y + v.z * v.z); +} + +/** + * Left-hand multiplication of the constructed rotation matrix with the vector. 
+ */ +IHD float3 rotate( + const float3& v, + const float3& dir_x, + const float3& dir_y, + const float3& dir_z) { + return make_float3( + dir_x.x * v.x + dir_x.y * v.y + dir_x.z * v.z, + dir_y.x * v.x + dir_y.y * v.y + dir_y.z * v.z, + dir_z.x * v.x + dir_z.y * v.y + dir_z.z * v.z); +} + +INLINE DEVICE float3 normalize(const float3& v) { + return v * RNORM3DF(v.x, v.y, v.z); +} + +INLINE DEVICE float dot(const float3& a, const float3& b) { + return FADD(FADD(FMUL(a.x, b.x), FMUL(a.y, b.y)), FMUL(a.z, b.z)); +} + +INLINE DEVICE float3 cross(const float3& a, const float3& b) { + // TODO: faster + return make_float3( + a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x); +} + +namespace pulsar { +IHD CamGradInfo operator+(const CamGradInfo& a, const CamGradInfo& b) { + CamGradInfo res; + res.cam_pos = a.cam_pos + b.cam_pos; + res.pixel_0_0_center = a.pixel_0_0_center + b.pixel_0_0_center; + res.pixel_dir_x = a.pixel_dir_x + b.pixel_dir_x; + res.pixel_dir_y = a.pixel_dir_y + b.pixel_dir_y; + return res; +} + +IHD CamGradInfo operator*(const CamGradInfo& a, const float& b) { + CamGradInfo res; + res.cam_pos = a.cam_pos * b; + res.pixel_0_0_center = a.pixel_0_0_center * b; + res.pixel_dir_x = a.pixel_dir_x * b; + res.pixel_dir_y = a.pixel_dir_y * b; + return res; +} + +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.backward.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.backward.device.h new file mode 100644 index 0000000000000000000000000000000000000000..2b510d0fa25cb3d71974c62cfb51dedf820ba5c1 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.backward.device.h @@ -0,0 +1,189 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. 
+ * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_RENDERER_BACKWARD_DEVICE_H_ +#define PULSAR_NATIVE_RENDERER_BACKWARD_DEVICE_H_ + +#include "./camera.device.h" +#include "./math.h" +#include "./renderer.h" + +namespace pulsar { +namespace Renderer { + +template +void backward( + Renderer* self, + const float* grad_im, + const float* image, + const float* forw_info, + const float* vert_pos, + const float* vert_col, + const float* vert_rad, + const CamInfo& cam, + const float& gamma, + float percent_allowed_difference, + const uint& max_n_hits, + const float* vert_opy_d, + const size_t& num_balls, + const uint& mode, + const bool& dif_pos, + const bool& dif_col, + const bool& dif_rad, + const bool& dif_cam, + const bool& dif_opy, + cudaStream_t stream) { + ARGCHECK(gamma > 0.f && gamma <= 1.f, 6, "gamma must be in [0., 1.]"); + ARGCHECK( + percent_allowed_difference >= 0.f && percent_allowed_difference <= 1.f, + 7, + "percent_allowed_difference must be in [0., 1.]"); + ARGCHECK(max_n_hits >= 1u, 8, "max_n_hits must be >= 1"); + ARGCHECK( + num_balls > 0 && num_balls <= self->max_num_balls, + 9, + "num_balls must be >0 and less than max num balls!"); + ARGCHECK( + cam.film_width == self->cam.film_width && + cam.film_height == self->cam.film_height, + 5, + "cam film size must agree"); + ARGCHECK(mode <= 1, 10, "mode must be <= 1!"); + if (percent_allowed_difference < EPS) { + LOG(WARNING) << "percent_allowed_difference < " << FEPS << "! Clamping to " + << FEPS << "."; + percent_allowed_difference = FEPS; + } + if (percent_allowed_difference > 1.f - FEPS) { + LOG(WARNING) << "percent_allowed_difference > " << (1.f - FEPS) + << "! Clamping to " << (1.f - FEPS) << "."; + percent_allowed_difference = 1.f - FEPS; + } + LOG_IF(INFO, PULSAR_LOG_RENDER) << "Rendering backward pass..."; + // Update camera. 
+ self->cam.eye = cam.eye; + self->cam.pixel_0_0_center = cam.pixel_0_0_center - cam.eye; + self->cam.pixel_dir_x = cam.pixel_dir_x; + self->cam.pixel_dir_y = cam.pixel_dir_y; + self->cam.sensor_dir_z = cam.sensor_dir_z; + self->cam.half_pixel_size = cam.half_pixel_size; + self->cam.focal_length = cam.focal_length; + self->cam.aperture_width = cam.aperture_width; + self->cam.aperture_height = cam.aperture_height; + self->cam.min_dist = cam.min_dist; + self->cam.max_dist = cam.max_dist; + self->cam.norm_fac = cam.norm_fac; + self->cam.principal_point_offset_x = cam.principal_point_offset_x; + self->cam.principal_point_offset_y = cam.principal_point_offset_y; + self->cam.film_border_left = cam.film_border_left; + self->cam.film_border_top = cam.film_border_top; +#ifdef PULSAR_TIMINGS_ENABLED + START_TIME(calc_signature); +#endif + LAUNCH_MAX_PARALLEL_1D( + calc_signature, + num_balls, + stream, + *self, + reinterpret_cast(vert_pos), + vert_col, + vert_rad, + num_balls); + CHECKLAUNCH(); +#ifdef PULSAR_TIMINGS_ENABLED + STOP_TIME(calc_signature); + START_TIME(calc_gradients); +#endif + MEMSET(self->grad_pos_d, 0, float3, num_balls, stream); + MEMSET(self->grad_col_d, 0, float, num_balls * self->cam.n_channels, stream); + MEMSET(self->grad_rad_d, 0, float, num_balls, stream); + MEMSET(self->grad_cam_d, 0, float, 12, stream); + MEMSET(self->grad_cam_buf_d, 0, CamGradInfo, num_balls, stream); + MEMSET(self->grad_opy_d, 0, float, num_balls, stream); + MEMSET(self->ids_sorted_d, 0, int, num_balls, stream); + LAUNCH_PARALLEL_2D( + calc_gradients, + self->cam.film_width, + self->cam.film_height, + GRAD_BLOCK_SIZE, + GRAD_BLOCK_SIZE, + stream, + self->cam, + grad_im, + gamma, + reinterpret_cast(vert_pos), + vert_col, + vert_rad, + vert_opy_d, + num_balls, + image, + forw_info, + self->di_d, + self->ii_d, + dif_pos, + dif_col, + dif_rad, + dif_cam, + dif_opy, + self->grad_rad_d, + self->grad_col_d, + self->grad_pos_d, + self->grad_cam_buf_d, + self->grad_opy_d, + 
self->ids_sorted_d, + self->n_track); + CHECKLAUNCH(); +#ifdef PULSAR_TIMINGS_ENABLED + STOP_TIME(calc_gradients); + START_TIME(normalize); +#endif + LAUNCH_MAX_PARALLEL_1D( + norm_sphere_gradients, num_balls, stream, *self, num_balls); + CHECKLAUNCH(); + if (dif_cam) { + SUM_WS( + self->grad_cam_buf_d, + reinterpret_cast(self->grad_cam_d), + static_cast(num_balls), + self->workspace_d, + self->workspace_size, + stream); + CHECKLAUNCH(); + SUM_WS( + self->ids_sorted_d, + self->n_grad_contributions_d, + static_cast(num_balls), + self->workspace_d, + self->workspace_size, + stream); + CHECKLAUNCH(); + LAUNCH_MAX_PARALLEL_1D( + norm_cam_gradients, static_cast(1), stream, *self); + CHECKLAUNCH(); + } +#ifdef PULSAR_TIMINGS_ENABLED + STOP_TIME(normalize); + float time_ms; + // This blocks the result and prevents batch-processing from parallelizing. + GET_TIME(calc_signature, &time_ms); + std::cout << "Time for signature calculation: " << time_ms << " ms" + << std::endl; + GET_TIME(calc_gradients, &time_ms); + std::cout << "Time for gradient calculation: " << time_ms << " ms" + << std::endl; + GET_TIME(normalize, &time_ms); + std::cout << "Time for aggregation and normalization: " << time_ms << " ms" + << std::endl; +#endif + LOG_IF(INFO, PULSAR_LOG_RENDER) << "Backward pass complete."; +} + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.backward.instantiate.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.backward.instantiate.h new file mode 100644 index 0000000000000000000000000000000000000000..75e85fd4dc88befc63e84ed4891f8ecb4b659bc4 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.backward.instantiate.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "./renderer.backward.device.h" + +namespace pulsar { +namespace Renderer { + +template void backward( + Renderer* self, + const float* grad_im, + const float* image, + const float* forw_info, + const float* vert_pos, + const float* vert_col, + const float* vert_rad, + const CamInfo& cam, + const float& gamma, + float percent_allowed_difference, + const uint& max_n_hits, + const float* vert_opy, + const size_t& num_balls, + const uint& mode, + const bool& dif_pos, + const bool& dif_col, + const bool& dif_rad, + const bool& dif_cam, + const bool& dif_opy, + cudaStream_t stream); + +} // namespace Renderer +} // namespace pulsar diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.backward_dbg.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.backward_dbg.device.h new file mode 100644 index 0000000000000000000000000000000000000000..f2bdc7e69027d29a8442a14b08d677cc22dc51c9 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.backward_dbg.device.h @@ -0,0 +1,157 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#ifndef PULSAR_NATIVE_RENDERER_BACKWARD_DBG_DEVICE_H_ +#define PULSAR_NATIVE_RENDERER_BACKWARD_DBG_DEVICE_H_ + +#include "./camera.device.h" +#include "./math.h" +#include "./renderer.h" + +namespace pulsar { +namespace Renderer { + +template +void backward_dbg( + Renderer* self, + const float* grad_im, + const float* image, + const float* forw_info, + const float* vert_pos, + const float* vert_col, + const float* vert_rad, + const CamInfo& cam, + const float& gamma, + float percent_allowed_difference, + const uint& max_n_hits, + const float* vert_opy_d, + const size_t& num_balls, + const uint& mode, + const bool& dif_pos, + const bool& dif_col, + const bool& dif_rad, + const bool& dif_cam, + const bool& dif_opy, + const uint& pos_x, + const uint& pos_y, + cudaStream_t stream) { + ARGCHECK(gamma > 0.f && gamma <= 1.f, 6, "gamma must be in [0., 1.]"); + ARGCHECK( + percent_allowed_difference >= 0.f && percent_allowed_difference <= 1.f, + 7, + "percent_allowed_difference must be in [0., 1.]"); + ARGCHECK(max_n_hits >= 1u, 8, "max_n_hits must be >= 1"); + ARGCHECK( + num_balls > 0 && num_balls <= self->max_num_balls, + 9, + "num_balls must be >0 and less than max num balls!"); + ARGCHECK( + cam.film_width == self->cam.film_width && + cam.film_height == self->cam.film_height, + 5, + "cam film size must agree"); + ARGCHECK(mode <= 1, 10, "mode must be <= 1!"); + if (percent_allowed_difference < EPS) { + LOG(WARNING) << "percent_allowed_difference < " << FEPS << "! Clamping to " + << FEPS << "."; + percent_allowed_difference = FEPS; + } + ARGCHECK( + pos_x < cam.film_width && pos_y < cam.film_height, + 15, + "pos_x must be < width and pos_y < height."); + if (percent_allowed_difference > 1.f - FEPS) { + LOG(WARNING) << "percent_allowed_difference > " << (1.f - FEPS) + << "! 
Clamping to " << (1.f - FEPS) << "."; + percent_allowed_difference = 1.f - FEPS; + } + LOG_IF(INFO, PULSAR_LOG_RENDER) + << "Rendering debug backward pass for x: " << pos_x << ", y: " << pos_y; + // Update camera. + self->cam.eye = cam.eye; + self->cam.pixel_0_0_center = cam.pixel_0_0_center - cam.eye; + self->cam.pixel_dir_x = cam.pixel_dir_x; + self->cam.pixel_dir_y = cam.pixel_dir_y; + self->cam.sensor_dir_z = cam.sensor_dir_z; + self->cam.half_pixel_size = cam.half_pixel_size; + self->cam.focal_length = cam.focal_length; + self->cam.aperture_width = cam.aperture_width; + self->cam.aperture_height = cam.aperture_height; + self->cam.min_dist = cam.min_dist; + self->cam.max_dist = cam.max_dist; + self->cam.norm_fac = cam.norm_fac; + self->cam.principal_point_offset_x = cam.principal_point_offset_x; + self->cam.principal_point_offset_y = cam.principal_point_offset_y; + self->cam.film_border_left = cam.film_border_left; + self->cam.film_border_top = cam.film_border_top; + LAUNCH_MAX_PARALLEL_1D( + calc_signature, + num_balls, + stream, + *self, + reinterpret_cast(vert_pos), + vert_col, + vert_rad, + num_balls); + CHECKLAUNCH(); + MEMSET(self->grad_pos_d, 0, float3, num_balls, stream); + MEMSET(self->grad_col_d, 0, float, num_balls * self->cam.n_channels, stream); + MEMSET(self->grad_rad_d, 0, float, num_balls, stream); + MEMSET(self->grad_cam_d, 0, float, 12, stream); + MEMSET(self->grad_cam_buf_d, 0, CamGradInfo, num_balls, stream); + MEMSET(self->grad_opy_d, 0, float, num_balls, stream); + MEMSET(self->ids_sorted_d, 0, int, num_balls, stream); + LAUNCH_MAX_PARALLEL_2D( + calc_gradients, + (int64_t)1, + (int64_t)1, + stream, + self->cam, + grad_im, + gamma, + reinterpret_cast(vert_pos), + vert_col, + vert_rad, + vert_opy_d, + num_balls, + image, + forw_info, + self->di_d, + self->ii_d, + dif_pos, + dif_col, + dif_rad, + dif_cam, + dif_opy, + self->grad_rad_d, + self->grad_col_d, + self->grad_pos_d, + self->grad_cam_buf_d, + self->grad_opy_d, + self->ids_sorted_d, + 
self->n_track, + pos_x, + pos_y); + CHECKLAUNCH(); + // We're not doing sphere gradient normalization here. + SUM_WS( + self->grad_cam_buf_d, + reinterpret_cast(self->grad_cam_d), + static_cast(1), + self->workspace_d, + self->workspace_size, + stream); + CHECKLAUNCH(); + // We're not doing camera gradient normalization here. + LOG_IF(INFO, PULSAR_LOG_RENDER) << "Debug backward pass complete."; +} + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.backward_dbg.instantiate.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.backward_dbg.instantiate.h new file mode 100644 index 0000000000000000000000000000000000000000..5a7a1ba1f8e56df0a5ff212e7eb769a0564e7f60 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.backward_dbg.instantiate.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "./renderer.backward_dbg.device.h" + +namespace pulsar { +namespace Renderer { + +template void backward_dbg( + Renderer* self, + const float* grad_im, + const float* image, + const float* forw_info, + const float* vert_pos, + const float* vert_col, + const float* vert_rad, + const CamInfo& cam, + const float& gamma, + float percent_allowed_difference, + const uint& max_n_hits, + const float* vert_opy, + const size_t& num_balls, + const uint& mode, + const bool& dif_pos, + const bool& dif_col, + const bool& dif_rad, + const bool& dif_cam, + const bool& dif_opy, + const uint& pos_x, + const uint& pos_y, + cudaStream_t stream); + +} // namespace Renderer +} // namespace pulsar diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.calc_gradients.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.calc_gradients.device.h new file mode 100644 index 0000000000000000000000000000000000000000..90b3872e9606c8830b039f18c4d465c3f8c23c1f --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.calc_gradients.device.h @@ -0,0 +1,198 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_CALC_GRADIENTS_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_CALC_GRADIENTS_H_ + +#include "../global.h" +#include "./commands.h" +#include "./renderer.h" + +#include "./renderer.draw.device.h" + +namespace pulsar { +namespace Renderer { + +template +GLOBAL void calc_gradients( + const CamInfo cam, /** Camera in world coordinates. */ + float const* const RESTRICT grad_im, /** The gradient image. 
*/ + const float + gamma, /** The transparency parameter used in the forward pass. */ + float3 const* const RESTRICT vert_poss, /** Vertex position vector. */ + float const* const RESTRICT vert_cols, /** Vertex color vector. */ + float const* const RESTRICT vert_rads, /** Vertex radius vector. */ + float const* const RESTRICT opacity, /** Vertex opacity. */ + const uint num_balls, /** Number of balls. */ + float const* const RESTRICT result_d, /** Result image. */ + float const* const RESTRICT forw_info_d, /** Forward pass info. */ + DrawInfo const* const RESTRICT di_d, /** Draw information. */ + IntersectInfo const* const RESTRICT ii_d, /** Intersect information. */ + // Mode switches. + const bool calc_grad_pos, + const bool calc_grad_col, + const bool calc_grad_rad, + const bool calc_grad_cam, + const bool calc_grad_opy, + // Out variables. + float* const RESTRICT grad_rad_d, /** Radius gradients. */ + float* const RESTRICT grad_col_d, /** Color gradients. */ + float3* const RESTRICT grad_pos_d, /** Position gradients. */ + CamGradInfo* const RESTRICT grad_cam_buf_d, /** Camera gradient buffer. */ + float* const RESTRICT grad_opy_d, /** Opacity gradient buffer. */ + int* const RESTRICT + grad_contributed_d, /** Gradient contribution counter. */ + // Infrastructure. + const int n_track, + const uint offs_x, + const uint offs_y /** Debug offsets. */ +) { + uint limit_x = cam.film_width, limit_y = cam.film_height; + if (offs_x != 0) { + // We're in debug mode. + limit_x = 1; + limit_y = 1; + } + GET_PARALLEL_IDS_2D(coord_x_base, coord_y_base, limit_x, limit_y); + // coord_x_base and coord_y_base are in the film coordinate system. + // We now need to translate to the aperture coordinate system. If + // the principal point was shifted left/up nothing has to be + // subtracted - only shift needs to be added in case it has been + // shifted down/right. 
+ const uint film_coord_x = coord_x_base + offs_x; + const uint ap_coord_x = film_coord_x + + 2 * static_cast(std::max(0, cam.principal_point_offset_x)); + const uint film_coord_y = coord_y_base + offs_y; + const uint ap_coord_y = film_coord_y + + 2 * static_cast(std::max(0, cam.principal_point_offset_y)); + const float3 ray_dir = /** Ray cast through the pixel, normalized. */ + cam.pixel_0_0_center + ap_coord_x * cam.pixel_dir_x + + ap_coord_y * cam.pixel_dir_y; + const float norm_ray_dir = length(ray_dir); + // ray_dir_norm *must* be calculated here in the same way as in the draw + // function to have the same values withno other numerical instabilities + // (for example, ray_dir * FRCP(norm_ray_dir) does not work)! + float3 ray_dir_norm; /** Ray cast through the pixel, normalized. */ + float2 projected_ray; /** Ray intersection with the sensor. */ + if (cam.orthogonal_projection) { + ray_dir_norm = cam.sensor_dir_z; + projected_ray.x = static_cast(ap_coord_x); + projected_ray.y = static_cast(ap_coord_y); + } else { + ray_dir_norm = normalize( + cam.pixel_0_0_center + ap_coord_x * cam.pixel_dir_x + + ap_coord_y * cam.pixel_dir_y); + // This is a reasonable assumption for normal focal lengths and image sizes. + PASSERT(FABS(ray_dir_norm.z) > FEPS); + projected_ray.x = ray_dir_norm.x / ray_dir_norm.z * cam.focal_length; + projected_ray.y = ray_dir_norm.y / ray_dir_norm.z * cam.focal_length; + } + float* result = const_cast( + result_d + film_coord_y * cam.film_width * cam.n_channels + + film_coord_x * cam.n_channels); + const float* grad_im_l = grad_im + + film_coord_y * cam.film_width * cam.n_channels + + film_coord_x * cam.n_channels; + // For writing... + float3 grad_pos; + float grad_rad, grad_opy; + CamGradInfo grad_cam_local = CamGradInfo(); + // Set up shared infrastructure. 
+ const int fwi_loc = film_coord_y * cam.film_width * (3 + 2 * n_track) + + film_coord_x * (3 + 2 * n_track); + float sm_m = forw_info_d[fwi_loc]; + float sm_d = forw_info_d[fwi_loc + 1]; + PULSAR_LOG_DEV_APIX( + PULSAR_LOG_GRAD, + "grad|sm_m: %f, sm_d: %f, result: " + "%f, %f, %f; grad_im: %f, %f, %f.\n", + sm_m, + sm_d, + result[0], + result[1], + result[2], + grad_im_l[0], + grad_im_l[1], + grad_im_l[2]); + // Start processing. + for (int grad_idx = 0; grad_idx < n_track; ++grad_idx) { + int sphere_idx; + FASI(forw_info_d[fwi_loc + 3 + 2 * grad_idx], sphere_idx); + PASSERT( + sphere_idx == -1 || + sphere_idx >= 0 && static_cast(sphere_idx) < num_balls); + if (sphere_idx >= 0) { + // TODO: make more efficient. + grad_pos = make_float3(0.f, 0.f, 0.f); + grad_rad = 0.f; + grad_cam_local = CamGradInfo(); + const DrawInfo di = di_d[sphere_idx]; + grad_opy = 0.f; + draw( + di, + opacity == NULL ? 1.f : opacity[sphere_idx], + cam, + gamma, + ray_dir_norm, + projected_ray, + // Mode switches. + false, // draw only + calc_grad_pos, + calc_grad_col, + calc_grad_rad, + calc_grad_cam, + calc_grad_opy, + // Position info. + ap_coord_x, + ap_coord_y, + sphere_idx, + // Optional in. + &ii_d[sphere_idx], + &ray_dir, + &norm_ray_dir, + grad_im_l, + NULL, + // In/out + &sm_d, + &sm_m, + result, + // Optional out. + NULL, + NULL, + &grad_pos, + grad_col_d + sphere_idx * cam.n_channels, + &grad_rad, + &grad_cam_local, + &grad_opy); + ATOMICADD(&(grad_rad_d[sphere_idx]), grad_rad); + // Color has been added directly. 
+ ATOMICADD_F3(&(grad_pos_d[sphere_idx]), grad_pos); + ATOMICADD_F3( + &(grad_cam_buf_d[sphere_idx].cam_pos), grad_cam_local.cam_pos); + if (!cam.orthogonal_projection) { + ATOMICADD_F3( + &(grad_cam_buf_d[sphere_idx].pixel_0_0_center), + grad_cam_local.pixel_0_0_center); + } + ATOMICADD_F3( + &(grad_cam_buf_d[sphere_idx].pixel_dir_x), + grad_cam_local.pixel_dir_x); + ATOMICADD_F3( + &(grad_cam_buf_d[sphere_idx].pixel_dir_y), + grad_cam_local.pixel_dir_y); + ATOMICADD(&(grad_opy_d[sphere_idx]), grad_opy); + ATOMICADD(&(grad_contributed_d[sphere_idx]), 1); + } + } + END_PARALLEL_2D_NORET(); +}; + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.calc_gradients.instantiate.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.calc_gradients.instantiate.h new file mode 100644 index 0000000000000000000000000000000000000000..596c322b28eef850d5466037770cef53caf51cff --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.calc_gradients.instantiate.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "./renderer.calc_gradients.device.h" + +namespace pulsar { +namespace Renderer { + +template GLOBAL void calc_gradients( + const CamInfo cam, /** Camera in world coordinates. */ + float const* const RESTRICT grad_im, /** The gradient image. */ + const float + gamma, /** The transparency parameter used in the forward pass. */ + float3 const* const RESTRICT vert_poss, /** Vertex position vector. */ + float const* const RESTRICT vert_cols, /** Vertex color vector. 
*/ + float const* const RESTRICT vert_rads, /** Vertex radius vector. */ + float const* const RESTRICT opacity, /** Vertex opacity. */ + const uint num_balls, /** Number of balls. */ + float const* const RESTRICT result_d, /** Result image. */ + float const* const RESTRICT forw_info_d, /** Forward pass info. */ + DrawInfo const* const RESTRICT di_d, /** Draw information. */ + IntersectInfo const* const RESTRICT ii_d, /** Intersect information. */ + // Mode switches. + const bool calc_grad_pos, + const bool calc_grad_col, + const bool calc_grad_rad, + const bool calc_grad_cam, + const bool calc_grad_opy, + // Out variables. + float* const RESTRICT grad_rad_d, /** Radius gradients. */ + float* const RESTRICT grad_col_d, /** Color gradients. */ + float3* const RESTRICT grad_pos_d, /** Position gradients. */ + CamGradInfo* const RESTRICT grad_cam_buf_d, /** Camera gradient buffer. */ + float* const RESTRICT grad_opy_d, /** Opacity gradient buffer. */ + int* const RESTRICT + grad_contributed_d, /** Gradient contribution counter. */ + // Infrastructure. + const int n_track, + const uint offs_x, + const uint offs_y); + +} // namespace Renderer +} // namespace pulsar diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.calc_signature.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.calc_signature.device.h new file mode 100644 index 0000000000000000000000000000000000000000..bd687fee63d1ee9869ab5beb454a910ff387914c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.calc_signature.device.h @@ -0,0 +1,201 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_CALC_SIGNATURE_DEVICE_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_CALC_SIGNATURE_DEVICE_H_ + +#include "../global.h" +#include "./camera.device.h" +#include "./commands.h" +#include "./math.h" +#include "./renderer.get_screen_area.device.h" +#include "./renderer.h" + +namespace pulsar { +namespace Renderer { + +template +GLOBAL void calc_signature( + Renderer renderer, + float3 const* const RESTRICT vert_poss, + float const* const RESTRICT vert_cols, + float const* const RESTRICT vert_rads, + const uint num_balls) { + /* We're not using RESTRICT here for the pointers within `renderer`. Just one + value is being read from each of the pointers, so the effect would be + negligible or non-existent. */ + GET_PARALLEL_IDX_1D(idx, num_balls); + // Create aliases. + // For reading... + const float3& vert_pos = vert_poss[idx]; /** Vertex position. */ + const float* vert_col = + vert_cols + idx * renderer.cam.n_channels; /** Vertex color. */ + const float& vert_rad = vert_rads[idx]; /** Vertex radius. */ + const CamInfo& cam = renderer.cam; /** Camera in world coordinates. */ + // For writing... + /** Ball ID (either original index of the ball or -1 if not visible). */ + int& id_out = renderer.ids_d[idx]; + /** Intersection helper structure for the ball. */ + IntersectInfo& intersect_helper_out = renderer.ii_d[idx]; + /** Draw helper structure for this ball. */ + DrawInfo& draw_helper_out = renderer.di_d[idx]; + /** Minimum possible intersection depth for this ball. */ + float& closest_possible_intersect_out = renderer.min_depth_d[idx]; + PULSAR_LOG_DEV( + PULSAR_LOG_CALC_SIGNATURE, + "signature %d|vert_pos: %.9f, %.9f, %.9f, vert_col (first three): " + "%.9f, %.9f, %.9f.\n", + idx, + vert_pos.x, + vert_pos.y, + vert_pos.z, + vert_col[0], + vert_col[1], + vert_col[2]); + // Set flags to invalid for a potential early return. + id_out = -1; // Invalid ID. 
+ closest_possible_intersect_out = + MAX_FLOAT; // These spheres are sorted to the very end. + intersect_helper_out.max.x = MAX_USHORT; // No intersection possible. + intersect_helper_out.min.x = MAX_USHORT; + intersect_helper_out.max.y = MAX_USHORT; + intersect_helper_out.min.y = MAX_USHORT; + // Start processing. + /** Ball center in the camera coordinate system. */ + const float3 ball_center_cam = vert_pos - cam.eye; + /** Distance to the ball center in the camera coordinate system. */ + const float t_center = length(ball_center_cam); + /** Closest possible intersection with this ball from the camera. */ + float closest_possible_intersect; + if (cam.orthogonal_projection) { + const float3 ball_center_cam_rot = rotate( + ball_center_cam, + cam.pixel_dir_x / length(cam.pixel_dir_x), + cam.pixel_dir_y / length(cam.pixel_dir_y), + cam.sensor_dir_z); + closest_possible_intersect = ball_center_cam_rot.z - vert_rad; + } else { + closest_possible_intersect = t_center - vert_rad; + } + PULSAR_LOG_DEV( + PULSAR_LOG_CALC_SIGNATURE, + "signature %d|t_center: %f. vert_rad: %f. " + "closest_possible_intersect: %f.\n", + idx, + t_center, + vert_rad, + closest_possible_intersect); + /** + * Corner points of the enclosing projected rectangle of the ball. + * They are first calculated in the camera coordinate system, then + * converted to the pixel coordinate system. + */ + float x_1, x_2, y_1, y_2; + bool hits_screen_plane; + float3 ray_center_norm = ball_center_cam / t_center; + PASSERT(vert_rad >= 0.f); + if (closest_possible_intersect < cam.min_dist || + closest_possible_intersect > cam.max_dist) { + PULSAR_LOG_DEV( + PULSAR_LOG_CALC_SIGNATURE, + "signature %d|ignoring sphere out of min/max bounds: %.9f, " + "min: %.9f, max: %.9f.\n", + idx, + closest_possible_intersect, + cam.min_dist, + cam.max_dist); + RETURN_PARALLEL(); + } + // Find the relevant region on the screen plane. 
+ hits_screen_plane = get_screen_area( + ball_center_cam, + ray_center_norm, + vert_rad, + cam, + idx, + &x_1, + &x_2, + &y_1, + &y_2); + if (!hits_screen_plane) + RETURN_PARALLEL(); + PULSAR_LOG_DEV( + PULSAR_LOG_CALC_SIGNATURE, + "signature %d|in pixels: x_1: %f, x_2: %f, y_1: %f, y_2: %f.\n", + idx, + x_1, + x_2, + y_1, + y_2); + // Check whether the pixel coordinates are on screen. + if (FMAX(x_1, x_2) <= static_cast(cam.film_border_left) || + FMIN(x_1, x_2) >= + static_cast(cam.film_border_left + cam.film_width) - 0.5f || + FMAX(y_1, y_2) <= static_cast(cam.film_border_top) || + FMIN(y_1, y_2) > + static_cast(cam.film_border_top + cam.film_height) - 0.5f) + RETURN_PARALLEL(); + // Write results. + id_out = idx; + intersect_helper_out.min.x = static_cast( + FMAX(FMIN(x_1, x_2), static_cast(cam.film_border_left))); + intersect_helper_out.min.y = static_cast( + FMAX(FMIN(y_1, y_2), static_cast(cam.film_border_top))); + // In the following calculations, the max that needs to be stored is + // exclusive. + // That means that the calculated value needs to be `ceil`ed and incremented + // to find the correct value. + intersect_helper_out.max.x = static_cast(FMIN( + FCEIL(FMAX(x_1, x_2)) + 1, + static_cast(cam.film_border_left + cam.film_width))); + intersect_helper_out.max.y = static_cast(FMIN( + FCEIL(FMAX(y_1, y_2)) + 1, + static_cast(cam.film_border_top + cam.film_height))); + PULSAR_LOG_DEV( + PULSAR_LOG_CALC_SIGNATURE, + "signature %d|limits after refining: x_1: %u, x_2: %u, " + "y_1: %u, y_2: %u.\n", + idx, + intersect_helper_out.min.x, + intersect_helper_out.max.x, + intersect_helper_out.min.y, + intersect_helper_out.max.y); + if (intersect_helper_out.min.x == MAX_USHORT) { + id_out = -1; + RETURN_PARALLEL(); + } + PULSAR_LOG_DEV( + PULSAR_LOG_CALC_SIGNATURE, + "signature %d|writing info. closest_possible_intersect: %.9f. " + "ray_center_norm: %.9f, %.9f, %.9f. t_center: %.9f. 
radius: %.9f.\n", + idx, + closest_possible_intersect, + ray_center_norm.x, + ray_center_norm.y, + ray_center_norm.z, + t_center, + vert_rad); + closest_possible_intersect_out = closest_possible_intersect; + draw_helper_out.ray_center_norm = ray_center_norm; + draw_helper_out.t_center = t_center; + draw_helper_out.radius = vert_rad; + if (cam.n_channels <= 3) { + draw_helper_out.first_color = vert_col[0]; + for (uint c_id = 1; c_id < cam.n_channels; ++c_id) { + draw_helper_out.color_union.color[c_id - 1] = vert_col[c_id]; + } + } else { + draw_helper_out.color_union.ptr = const_cast(vert_col); + } + END_PARALLEL(); +}; + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.calc_signature.instantiate.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.calc_signature.instantiate.h new file mode 100644 index 0000000000000000000000000000000000000000..6afa95b44b161d8881b79b22e119c89aad522cc6 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.calc_signature.instantiate.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_CALC_SIGNATURE_INSTANTIATE_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_CALC_SIGNATURE_INSTANTIATE_H_ + +#include "./renderer.calc_signature.device.h" + +namespace pulsar { +namespace Renderer { +template GLOBAL void calc_signature( + Renderer renderer, + float3 const* const RESTRICT vert_poss, + float const* const RESTRICT vert_cols, + float const* const RESTRICT vert_rads, + const uint num_balls); +} +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.construct.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.construct.device.h new file mode 100644 index 0000000000000000000000000000000000000000..984f7710ba50e4dbeea8e61a2bac7ab41e608697 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.construct.device.h @@ -0,0 +1,111 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_CONSTRUCT_DEVICE_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_CONSTRUCT_DEVICE_H_ + +#include "../global.h" +#include "./camera.device.h" +#include "./commands.h" +#include "./math.h" +#include "./renderer.h" + +namespace pulsar { +namespace Renderer { + +template +HOST void construct( + Renderer* self, + const size_t& max_num_balls, + const int& width, + const int& height, + const bool& orthogonal_projection, + const bool& right_handed_system, + const float& background_normalization_depth, + const uint& n_channels, + const uint& n_track) { + ARGCHECK( + (max_num_balls > 0 && max_num_balls < MAX_INT), + 2, + ("the maximum number of balls must be >0 and <" + + std::to_string(MAX_INT) + ". Is " + std::to_string(max_num_balls) + ".") + .c_str()); + ARGCHECK(width > 1, 3, "the image width must be > 1"); + ARGCHECK(height > 1, 4, "the image height must be > 1"); + ARGCHECK( + background_normalization_depth > 0.f && + background_normalization_depth < 1.f, + 6, + "background_normalization_depth must be in ]0., 1.[."); + ARGCHECK(n_channels > 0, 7, "n_channels must be >0!"); + ARGCHECK( + n_track > 0 && n_track <= MAX_GRAD_SPHERES, + 8, + ("n_track must be >0 and <" + std::to_string(MAX_GRAD_SPHERES) + ". 
Is " + + std::to_string(n_track) + ".") + .c_str()); + self->cam.film_width = width; + self->cam.film_height = height; + self->max_num_balls = max_num_balls; + MALLOC(self->result_d, float, width* height* n_channels); + self->cam.orthogonal_projection = orthogonal_projection; + self->cam.right_handed = right_handed_system; + self->cam.background_normalization_depth = background_normalization_depth; + self->cam.n_channels = n_channels; + MALLOC(self->min_depth_d, float, max_num_balls); + MALLOC(self->min_depth_sorted_d, float, max_num_balls); + MALLOC(self->ii_d, IntersectInfo, max_num_balls); + MALLOC(self->ii_sorted_d, IntersectInfo, max_num_balls); + MALLOC(self->ids_d, int, max_num_balls); + MALLOC(self->ids_sorted_d, int, max_num_balls); + size_t sort_id_size = 0; + GET_SORT_WS_SIZE(&sort_id_size, float, int, max_num_balls); + CHECKLAUNCH(); + size_t sort_ii_size = 0; + GET_SORT_WS_SIZE(&sort_ii_size, float, IntersectInfo, max_num_balls); + CHECKLAUNCH(); + size_t sort_di_size = 0; + GET_SORT_WS_SIZE(&sort_di_size, float, DrawInfo, max_num_balls); + CHECKLAUNCH(); + size_t select_ii_size = 0; + GET_SELECT_WS_SIZE(&select_ii_size, char, IntersectInfo, max_num_balls); + size_t select_di_size = 0; + GET_SELECT_WS_SIZE(&select_di_size, char, DrawInfo, max_num_balls); + size_t sum_size = 0; + GET_SUM_WS_SIZE(&sum_size, CamGradInfo, max_num_balls); + size_t sum_cont_size = 0; + GET_SUM_WS_SIZE(&sum_cont_size, int, max_num_balls); + size_t reduce_size = 0; + GET_REDUCE_WS_SIZE( + &reduce_size, IntersectInfo, IntersectInfoMinMax(), max_num_balls); + self->workspace_size = IMAX( + IMAX(IMAX(sort_id_size, sort_ii_size), sort_di_size), + IMAX( + IMAX(select_di_size, select_ii_size), + IMAX(IMAX(sum_size, sum_cont_size), reduce_size))); + MALLOC(self->workspace_d, char, self->workspace_size); + MALLOC(self->di_d, DrawInfo, max_num_balls); + MALLOC(self->di_sorted_d, DrawInfo, max_num_balls); + MALLOC(self->region_flags_d, char, max_num_balls); + 
MALLOC(self->num_selected_d, size_t, 1); + MALLOC(self->forw_info_d, float, width* height * (3 + 2 * n_track)); + MALLOC(self->min_max_pixels_d, IntersectInfo, 1); + MALLOC(self->grad_pos_d, float3, max_num_balls); + MALLOC(self->grad_col_d, float, max_num_balls* n_channels); + MALLOC(self->grad_rad_d, float, max_num_balls); + MALLOC(self->grad_cam_d, float, 12); + MALLOC(self->grad_cam_buf_d, CamGradInfo, max_num_balls); + MALLOC(self->grad_opy_d, float, max_num_balls); + MALLOC(self->n_grad_contributions_d, int, 1); + self->n_track = static_cast(n_track); +} + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.construct.instantiate.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.construct.instantiate.h new file mode 100644 index 0000000000000000000000000000000000000000..e5ce722e29b063b04cb8efc0e880d9332bd35f23 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.construct.instantiate.h @@ -0,0 +1,29 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_CONSTRUCT_INSTANTIATE_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_CONSTRUCT_INSTANTIATE_H_ + +#include "./renderer.construct.device.h" + +namespace pulsar { +namespace Renderer { +template void construct( + Renderer* self, + const size_t& max_num_balls, + const int& width, + const int& height, + const bool& orthogonal_projection, + const bool& right_handed_system, + const float& background_normalization_depth, + const uint& n_channels, + const uint& n_track); +} +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.create_selector.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.create_selector.device.h new file mode 100644 index 0000000000000000000000000000000000000000..747ad03cd3a3a49c34d81485a1780d81a332a215 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.create_selector.device.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_CREATE_SELECTOR_DEVICE_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_CREATE_SELECTOR_DEVICE_H_ + +#include "../global.h" +#include "./commands.h" +#include "./renderer.h" + +namespace pulsar { +namespace Renderer { + +template +GLOBAL void create_selector( + IntersectInfo const* const RESTRICT ii_sorted_d, + const uint num_balls, + const int min_x, + const int max_x, + const int min_y, + const int max_y, + /* Out variables. 
*/ + char* RESTRICT region_flags_d) { + GET_PARALLEL_IDX_1D(idx, num_balls); + bool hit = (static_cast(ii_sorted_d[idx].min.x) <= max_x) && + (static_cast(ii_sorted_d[idx].max.x) > min_x) && + (static_cast(ii_sorted_d[idx].min.y) <= max_y) && + (static_cast(ii_sorted_d[idx].max.y) > min_y); + region_flags_d[idx] = hit; + END_PARALLEL_NORET(); +} + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.create_selector.instantiate.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.create_selector.instantiate.h new file mode 100644 index 0000000000000000000000000000000000000000..8e91a8bfb8e9b0f03db39c001e9363920b2eb35f --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.create_selector.instantiate.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_CREATE_SELECTOR_INSTANTIATE_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_CREATE_SELECTOR_INSTANTIATE_H_ + +#include "./renderer.create_selector.device.h" + +namespace pulsar { +namespace Renderer { + +template GLOBAL void create_selector( + IntersectInfo const* const RESTRICT ii_sorted_d, + const uint num_balls, + const int min_x, + const int max_x, + const int min_y, + const int max_y, + /* Out variables. 
*/ + char* RESTRICT region_flags_d); + +} +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.destruct.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.destruct.device.h new file mode 100644 index 0000000000000000000000000000000000000000..8520233c59be062fa72376158a9935afa50c3950 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.destruct.device.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_DESTRUCT_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_DESTRUCT_H_ + +#include "../global.h" +#include "./commands.h" +#include "./renderer.h" + +namespace pulsar { +namespace Renderer { + +template +HOST void destruct(Renderer* self) { + if (self->result_d != NULL) + FREE(self->result_d); + self->result_d = NULL; + if (self->min_depth_d != NULL) + FREE(self->min_depth_d); + self->min_depth_d = NULL; + if (self->min_depth_sorted_d != NULL) + FREE(self->min_depth_sorted_d); + self->min_depth_sorted_d = NULL; + if (self->ii_d != NULL) + FREE(self->ii_d); + self->ii_d = NULL; + if (self->ii_sorted_d != NULL) + FREE(self->ii_sorted_d); + self->ii_sorted_d = NULL; + if (self->ids_d != NULL) + FREE(self->ids_d); + self->ids_d = NULL; + if (self->ids_sorted_d != NULL) + FREE(self->ids_sorted_d); + self->ids_sorted_d = NULL; + if (self->workspace_d != NULL) + FREE(self->workspace_d); + self->workspace_d = NULL; + if (self->di_d != NULL) + FREE(self->di_d); + self->di_d = NULL; + if (self->di_sorted_d != NULL) + FREE(self->di_sorted_d); + self->di_sorted_d = NULL; + if (self->region_flags_d 
!= NULL) + FREE(self->region_flags_d); + self->region_flags_d = NULL; + if (self->num_selected_d != NULL) + FREE(self->num_selected_d); + self->num_selected_d = NULL; + if (self->forw_info_d != NULL) + FREE(self->forw_info_d); + self->forw_info_d = NULL; + if (self->min_max_pixels_d != NULL) + FREE(self->min_max_pixels_d); + self->min_max_pixels_d = NULL; + if (self->grad_pos_d != NULL) + FREE(self->grad_pos_d); + self->grad_pos_d = NULL; + if (self->grad_col_d != NULL) + FREE(self->grad_col_d); + self->grad_col_d = NULL; + if (self->grad_rad_d != NULL) + FREE(self->grad_rad_d); + self->grad_rad_d = NULL; + if (self->grad_cam_d != NULL) + FREE(self->grad_cam_d); + self->grad_cam_d = NULL; + if (self->grad_cam_buf_d != NULL) + FREE(self->grad_cam_buf_d); + self->grad_cam_buf_d = NULL; + if (self->grad_opy_d != NULL) + FREE(self->grad_opy_d); + self->grad_opy_d = NULL; + if (self->n_grad_contributions_d != NULL) + FREE(self->n_grad_contributions_d); + self->n_grad_contributions_d = NULL; +} + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.destruct.instantiate.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.destruct.instantiate.h new file mode 100644 index 0000000000000000000000000000000000000000..d41ba5a323d0bed9196dc804fab87929b2a726af --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.destruct.instantiate.h @@ -0,0 +1,20 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_DESTRUCT_INSTANTIATE_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_DESTRUCT_INSTANTIATE_H_ + +#include "./renderer.destruct.device.h" + +namespace pulsar { +namespace Renderer { +template void destruct(Renderer* self); +} +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.draw.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.draw.device.h new file mode 100644 index 0000000000000000000000000000000000000000..cb8ecabed3eefce77f7120d234fad15b0bed064c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.draw.device.h @@ -0,0 +1,846 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_CALC_SIGNATURE_DEVICE_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_CALC_SIGNATURE_DEVICE_H_ + +#include "../global.h" +#include "./camera.device.h" +#include "./commands.h" +#include "./math.h" +#include "./renderer.h" + +namespace pulsar { +namespace Renderer { + +/** + * Draw a ball into the `result`. + * + * Returns whether a hit was noticed. See README for an explanation of sphere + * points and variable notation. + */ +INLINE DEVICE bool draw( + /* In variables. */ + const DrawInfo& draw_info, /** The draw information for this ball. */ + const float& opacity, /** The sphere opacity. */ + const CamInfo& + cam, /** Camera information. Doesn't have to be normalized. */ + const float& gamma, /** 'Transparency' indicator (see paper for details). */ + const float3& ray_dir_norm, /** The direction of the ray, normalized. 
*/ + const float2& projected_ray, /** The intersection of the ray with the image + in pixel space. */ + /** Mode switches. */ + const bool& draw_only, /** Whether we are in draw vs. grad mode. */ + const bool& calc_grad_pos, /** Calculate position gradients. */ + const bool& calc_grad_col, /** Calculate color gradients. */ + const bool& calc_grad_rad, /** Calculate radius gradients. */ + const bool& calc_grad_cam, /** Calculate camera gradients. */ + const bool& calc_grad_opy, /** Calculate opacity gradients. */ + /** Position info. */ + const uint& coord_x, /** The pixel position x to draw at. */ + const uint& coord_y, /** The pixel position y to draw at. */ + const uint& idx, /** The id of the sphere to process. */ + /* Optional in variables. */ + IntersectInfo const* const RESTRICT + intersect_info, /** The intersect information for this ball. */ + float3 const* const RESTRICT ray_dir, /** The ray direction (not normalized) + to draw at. Only used for grad computation. */ + float const* const RESTRICT norm_ray_dir, /** The length of the direction + vector. Only used for grad computation. */ + float const* const RESTRICT grad_pix, /** The gradient for this pixel. Only + used for grad computation. */ + float const* const RESTRICT + ln_pad_over_1minuspad, /** Allowed percentage indicator. */ + /* In or out variables, depending on mode. */ + float* const RESTRICT sm_d, /** Normalization denominator. */ + float* const RESTRICT + sm_m, /** Maximum of normalization weight factors observed. */ + float* const RESTRICT + result, /** Result pixel color. Must be zeros initially. */ + /* Optional out variables. */ + float* const RESTRICT depth_threshold, /** The depth threshold to use. Only + used for rendering. */ + float* const RESTRICT intersection_depth_norm_out, /** The intersection + depth. Only set when rendering. */ + float3* const RESTRICT grad_pos, /** Gradient w.r.t. position. */ + float* const RESTRICT grad_col, /** Gradient w.r.t. color. 
*/ + float* const RESTRICT grad_rad, /** Gradient w.r.t. radius. */ + CamGradInfo* const RESTRICT grad_cam, /** Gradient w.r.t. camera. */ + float* const RESTRICT grad_opy /** Gradient w.r.t. opacity. */ +) { + // TODO: variable reuse? + PASSERT( + isfinite(draw_info.ray_center_norm.x) && + isfinite(draw_info.ray_center_norm.y) && + isfinite(draw_info.ray_center_norm.z)); + PASSERT(isfinite(draw_info.t_center) && draw_info.t_center >= 0.f); + PASSERT( + isfinite(draw_info.radius) && draw_info.radius >= 0.f && + draw_info.radius <= draw_info.t_center); + PASSERT(isfinite(ray_dir_norm.x)); + PASSERT(isfinite(ray_dir_norm.y)); + PASSERT(isfinite(ray_dir_norm.z)); + PASSERT(isfinite(*sm_d)); + PASSERT( + cam.orthogonal_projection && cam.focal_length == 0.f || + cam.focal_length > 0.f); + PASSERT(gamma <= 1.f && gamma >= 1e-5f); + /** The ball center in the camera coordinate system. */ + float3 center = draw_info.ray_center_norm * draw_info.t_center; + /** The vector from the reference point to the ball center. */ + float3 raydiff; + if (cam.orthogonal_projection) { + center = rotate( + center, + cam.pixel_dir_x / length(cam.pixel_dir_x), + cam.pixel_dir_y / length(cam.pixel_dir_y), + cam.sensor_dir_z); + raydiff = + make_float3( // TODO: make offset consistent with `get_screen_area`. + center.x - + (projected_ray.x - + static_cast(cam.aperture_width) * .5f) * + (2.f * cam.half_pixel_size), + center.y - + (projected_ray.y - + static_cast(cam.aperture_height) * .5f) * + (2.f * cam.half_pixel_size), + 0.f); + } else { + /** The reference point on the ray; the point in the same distance + * from the camera as the ball center, but along the ray. + */ + const float3 rayref = ray_dir_norm * draw_info.t_center; + raydiff = center - rayref; + } + /** The closeness of the reference point to ball center in world coords. + * + * In [0., radius]. + */ + const float closeness_world = length(raydiff); + /** The reciprocal radius. 
*/ + const float radius_rcp = FRCP(draw_info.radius); + /** The closeness factor normalized with the ball radius. + * + * In [0., 1.]. + */ + float closeness = FSATURATE(FMA(-closeness_world, radius_rcp, 1.f)); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_DRAW_PIX, + "drawprep %u|center: %.9f, %.9f, %.9f. raydiff: %.9f, " + "%.9f, %.9f. closeness_world: %.9f. closeness: %.9f\n", + idx, + center.x, + center.y, + center.z, + raydiff.x, + raydiff.y, + raydiff.z, + closeness_world, + closeness); + /** Whether this is the 'center pixel' for this ball, the pixel that + * is closest to its projected center. This information is used to + * make sure to draw 'tiny' spheres with less than one pixel in + * projected size. + */ + bool ray_through_center_pixel; + float projected_radius, projected_x, projected_y; + if (cam.orthogonal_projection) { + projected_x = center.x / (2.f * cam.half_pixel_size) + + (static_cast(cam.aperture_width) - 1.f) / 2.f; + projected_y = center.y / (2.f * cam.half_pixel_size) + + (static_cast(cam.aperture_height) - 1.f) / 2.f; + projected_radius = draw_info.radius / (2.f * cam.half_pixel_size); + ray_through_center_pixel = + (FABS(FSUB(projected_x, projected_ray.x)) < 0.5f + FEPS && + FABS(FSUB(projected_y, projected_ray.y)) < 0.5f + FEPS); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_DRAW_PIX, + "drawprep %u|closeness_world: %.9f. closeness: %.9f. " + "projected (x, y): %.9f, %.9f. projected_ray (x, y): " + "%.9f, %.9f. ray_through_center_pixel: %d.\n", + idx, + closeness_world, + closeness, + projected_x, + projected_y, + projected_ray.x, + projected_ray.y, + ray_through_center_pixel); + } else { + // Misusing this variable for half pixel size projected to the depth + // at which the sphere resides. Leave some slack for numerical + // inaccuracy (factor 1.5). 
+ projected_x = FMUL(cam.half_pixel_size * 1.5, draw_info.t_center) * + FRCP(cam.focal_length); + projected_radius = FMUL(draw_info.radius, cam.focal_length) * + FRCP(draw_info.t_center) / (2.f * cam.half_pixel_size); + ray_through_center_pixel = projected_x > closeness_world; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_DRAW_PIX, + "drawprep %u|closeness_world: %.9f. closeness: %.9f. " + "projected half pixel size: %.9f. " + "ray_through_center_pixel: %d.\n", + idx, + closeness_world, + closeness, + projected_x, + ray_through_center_pixel); + } + if (draw_only && draw_info.radius < closeness_world && + !ray_through_center_pixel) { + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_DRAW_PIX, + "drawprep %u|Abandoning since no hit has been detected.\n", + idx); + return false; + } else { + // This is always a hit since we are following the forward execution pass. + // p2 is the closest intersection point with the sphere. + } + if (ray_through_center_pixel && projected_radius < 1.f) { + // Make a tiny sphere visible. + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_DRAW_PIX, + "drawprep %u|Setting closeness to 1 (projected radius: %.9f).\n", + idx, + projected_radius); + closeness = 1.; + } + PASSERT(closeness >= 0.f && closeness <= 1.f); + /** Distance between the camera (`o`) and `p1`, the closest point to the + * ball center along the casted ray. + * + * In [t_center - radius, t_center]. + */ + float o__p1_; + /** The distance from ball center to p1. + * + * In [0., sqrt(t_center ^ 2 - (t_center - radius) ^ 2)]. + */ + float c__p1_; + if (cam.orthogonal_projection) { + o__p1_ = FABS(center.z); + c__p1_ = length(raydiff); + } else { + o__p1_ = dot(center, ray_dir_norm); + /** + * This is being calculated as sqrt(t_center^2 - o__p1_^2) = + * sqrt((t_center + o__p1_) * (t_center - o__p1_)) to avoid + * catastrophic cancellation in floating point representations. 
+ */ + c__p1_ = FSQRT( + (draw_info.t_center + o__p1_) * FMAX(draw_info.t_center - o__p1_, 0.f)); + // PASSERT(o__p1_ >= draw_info.t_center - draw_info.radius); + // Numerical errors lead to too large values. + o__p1_ = FMIN(o__p1_, draw_info.t_center); + // PASSERT(o__p1_ <= draw_info.t_center); + } + /** The distance from the closest point to the sphere center (p1) + * to the closest intersection point (p2). + * + * In [0., radius]. + */ + const float p1__p2_ = + FSQRT((draw_info.radius + c__p1_) * FMAX(draw_info.radius - c__p1_, 0.f)); + PASSERT(p1__p2_ >= 0.f && p1__p2_ <= draw_info.radius); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_DRAW_PIX, + "drawprep %u|o__p1_: %.9f, c__p1_: %.9f, p1__p2_: %.9f.\n", + idx, + o__p1_, + c__p1_, + p1__p2_); + /** The intersection depth of the ray with this ball. + * + * In [t_center - radius, t_center]. + */ + const float intersection_depth = (o__p1_ - p1__p2_); + PASSERT( + cam.orthogonal_projection && + (intersection_depth >= center.z - draw_info.radius && + intersection_depth <= center.z) || + intersection_depth >= draw_info.t_center - draw_info.radius && + intersection_depth <= draw_info.t_center); + /** Normalized distance of the closest intersection point; in [0., 1.]. */ + const float norm_dist = + FMUL(FSUB(intersection_depth, cam.min_dist), cam.norm_fac); + PASSERT(norm_dist >= 0.f && norm_dist <= 1.f); + /** Scaled, normalized distance in [1., 0.] (closest, farthest). */ + const float norm_dist_scaled = FSUB(1.f, norm_dist) / gamma * opacity; + PASSERT(norm_dist_scaled >= 0.f && norm_dist_scaled <= 1.f / gamma); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_DRAW_PIX, + "drawprep %u|intersection_depth: %.9f, norm_dist: %.9f, " + "norm_dist_scaled: %.9f.\n", + idx, + intersection_depth, + norm_dist, + norm_dist_scaled); + float const* const col_ptr = + cam.n_channels > 3 ? 
draw_info.color_union.ptr : &draw_info.first_color; + // The implementation for the numerically stable weighted softmax is based + // on https://arxiv.org/pdf/1805.02867.pdf . + if (draw_only) { + /** The old maximum observed value. */ + const float sm_m_old = *sm_m; + *sm_m = FMAX(*sm_m, norm_dist_scaled); + const float coeff_exp = FEXP(norm_dist_scaled - *sm_m); + PASSERT(isfinite(coeff_exp)); + /** The color coefficient for the ball color; in [0., 1.]. */ + const float coeff = closeness * coeff_exp * opacity; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_DRAW_PIX, + "draw %u|coeff: %.9f. closeness: %.9f. coeff_exp: %.9f. " + "opacity: %.9f.\n", + idx, + coeff, + closeness, + coeff_exp, + opacity); + // Rendering. + if (sm_m_old == *sm_m) { + // Use the fact that exp(0) = 1 to avoid the exp calculation for + // the case that the maximum remains the same (which it should + // most of the time). + *sm_d = FADD(*sm_d, coeff); + for (uint c_id = 0; c_id < cam.n_channels; ++c_id) { + PASSERT(isfinite(result[c_id])); + result[c_id] = FMA(coeff, col_ptr[c_id], result[c_id]); + } + } else { + const float exp_correction = FEXP(sm_m_old - *sm_m); + *sm_d = FMA(*sm_d, exp_correction, coeff); + for (uint c_id = 0; c_id < cam.n_channels; ++c_id) { + PASSERT(isfinite(result[c_id])); + result[c_id] = + FMA(coeff, col_ptr[c_id], FMUL(result[c_id], exp_correction)); + } + } + PASSERT(isfinite(*sm_d)); + *intersection_depth_norm_out = intersection_depth; + // Update the depth threshold. + *depth_threshold = + 1.f - (FLN(*sm_d + FEPS) + *ln_pad_over_1minuspad + *sm_m) * gamma; + *depth_threshold = + FMA(*depth_threshold, FSUB(cam.max_dist, cam.min_dist), cam.min_dist); + } else { + // Gradient computation. + const float coeff_exp = FEXP(norm_dist_scaled - *sm_m); + const float gamma_rcp = FRCP(gamma); + const float radius_sq = FMUL(draw_info.radius, draw_info.radius); + const float coeff = FMAX( + FMIN(closeness * coeff_exp * opacity, *sm_d - FEPS), + 0.f); // in [0., sm_d - FEPS]. 
+ PASSERT(coeff >= 0.f && coeff <= *sm_d); + const float otherw = *sm_d - coeff; // in [FEPS, sm_d]. + const float p1__p2_safe = FMAX(p1__p2_, FEPS); // in [eps, t_center]. + const float cam_range = FSUB(cam.max_dist, cam.min_dist); // in ]0, inf[ + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|pos: %.9f, %.9f, %.9f. pixeldirx: %.9f, %.9f, %.9f. " + "pixeldiry: %.9f, %.9f, %.9f. pixel00center: %.9f, %.9f, %.9f.\n", + idx, + draw_info.ray_center_norm.x * draw_info.t_center, + draw_info.ray_center_norm.y * draw_info.t_center, + draw_info.ray_center_norm.z * draw_info.t_center, + cam.pixel_dir_x.x, + cam.pixel_dir_x.y, + cam.pixel_dir_x.z, + cam.pixel_dir_y.x, + cam.pixel_dir_y.y, + cam.pixel_dir_y.z, + cam.pixel_0_0_center.x, + cam.pixel_0_0_center.y, + cam.pixel_0_0_center.z); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|ray_dir: %.9f, %.9f, %.9f. " + "ray_dir_norm: %.9f, %.9f, %.9f. " + "draw_info.ray_center_norm: %.9f, %.9f, %.9f.\n", + idx, + ray_dir->x, + ray_dir->y, + ray_dir->z, + ray_dir_norm.x, + ray_dir_norm.y, + ray_dir_norm.z, + draw_info.ray_center_norm.x, + draw_info.ray_center_norm.y, + draw_info.ray_center_norm.z); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|coeff_exp: %.9f. " + "norm_dist_scaled: %.9f. cam.norm_fac: %f.\n", + idx, + coeff_exp, + norm_dist_scaled, + cam.norm_fac); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|p1__p2_: %.9f. p1__p2_safe: %.9f.\n", + idx, + p1__p2_, + p1__p2_safe); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|o__p1_: %.9f. c__p1_: %.9f.\n", + idx, + o__p1_, + c__p1_); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|intersection_depth: %f. norm_dist: %f. " + "coeff: %.9f. closeness: %f. coeff_exp: %f. opacity: " + "%f. 
color: %f, %f, %f.\n", + idx, + intersection_depth, + norm_dist, + coeff, + closeness, + coeff_exp, + opacity, + draw_info.first_color, + draw_info.color_union.color[0], + draw_info.color_union.color[1]); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|t_center: %.9f. " + "radius: %.9f. max_dist: %f. min_dist: %f. gamma: %f.\n", + idx, + draw_info.t_center, + draw_info.radius, + cam.max_dist, + cam.min_dist, + gamma); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|sm_d: %f. sm_m: %f. grad_pix (first three): %f, %f, %f.\n", + idx, + *sm_d, + *sm_m, + grad_pix[0], + grad_pix[1], + grad_pix[2]); + PULSAR_LOG_DEV_PIX(PULSAR_LOG_GRAD, "grad %u|otherw: %f.\n", idx, otherw); + if (calc_grad_col) { + const float sm_d_norm = FRCP(FMAX(*sm_d, FEPS)); + // First do the multiplication of coeff (in [0., sm_d]) and 1/sm_d. The + // result is a factor in [0., 1.] to be multiplied with the incoming + // gradient. + for (uint c_id = 0; c_id < cam.n_channels; ++c_id) { + ATOMICADD(grad_col + c_id, grad_pix[c_id] * FMUL(coeff, sm_d_norm)); + } + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdcol.x: %f. dresDdcol.x: %f.\n", + idx, + FMUL(coeff, sm_d_norm) * grad_pix[0], + coeff * sm_d_norm); + } + // We disable the computation for too small spheres. + // The comparison is made this way to avoid subtraction of unsigned types. + if (calc_grad_cam || calc_grad_pos || calc_grad_rad || calc_grad_opy) { + //! First find dimDdcoeff. + const float n0 = + otherw * FRCP(FMAX(*sm_d * *sm_d, FEPS)); // in [0., 1. / sm_d]. + PASSERT(isfinite(n0) && n0 >= 0. && n0 <= 1. / *sm_d + 1e2f * FEPS); + // We'll aggergate dimDdcoeff over all the 'color' channels. 
+ float dimDdcoeff = 0.f; + const float otherw_safe_rcp = FRCP(FMAX(otherw, FEPS)); + float othercol; + for (uint c_id = 0; c_id < cam.n_channels; ++c_id) { + othercol = + (result[c_id] * *sm_d - col_ptr[c_id] * coeff) * otherw_safe_rcp; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|othercol[%u]: %.9f.\n", + idx, + c_id, + othercol); + dimDdcoeff += + FMUL(FMUL(grad_pix[c_id], FSUB(col_ptr[c_id], othercol)), n0); + } + PASSERT(isfinite(dimDdcoeff)); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdcoeff: %.9f, n0: %f.\n", + idx, + dimDdcoeff, + n0); + if (calc_grad_opy) { + //! dimDdopacity. + *grad_opy += dimDdcoeff * coeff_exp * closeness * + (1.f + opacity * (1.f - norm_dist) * gamma_rcp); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dcoeffDdopacity: %.9f, dimDdopacity: %.9f.\n", + idx, + coeff_exp * closeness, + dimDdcoeff * coeff_exp * closeness); + } + if (intersect_info->max.x >= intersect_info->min.x + 3 && + intersect_info->max.y >= intersect_info->min.y + 3) { + //! Now find dcoeffDdintersection_depth and dcoeffDdcloseness. + const float dcoeffDdintersection_depth = + -closeness * coeff_exp * opacity * opacity / (gamma * cam_range); + const float dcoeffDdcloseness = coeff_exp * opacity; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dcoeffDdintersection_depth: %.9f. " + "dimDdintersection_depth: %.9f. " + "dcoeffDdcloseness: %.9f. dimDdcloseness: %.9f.\n", + idx, + dcoeffDdintersection_depth, + dimDdcoeff * dcoeffDdintersection_depth, + dcoeffDdcloseness, + dimDdcoeff * dcoeffDdcloseness); + //! Here, the execution paths for orthogonal and pinyhole camera split. + if (cam.orthogonal_projection) { + if (calc_grad_rad) { + //! Find dcoeffDdrad. + float dcoeffDdrad = + dcoeffDdcloseness * (closeness_world / radius_sq) - + dcoeffDdintersection_depth * draw_info.radius / p1__p2_safe; + PASSERT(isfinite(dcoeffDdrad)); + *grad_rad += FMUL(dimDdcoeff, dcoeffDdrad); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdrad: %.9f. 
dcoeffDdrad: %.9f.\n", + idx, + FMUL(dimDdcoeff, dcoeffDdrad), + dcoeffDdrad); + } + if (calc_grad_pos || calc_grad_cam) { + float3 dimDdcenter = raydiff / + p1__p2_safe; /* making it dintersection_depthDdcenter. */ + dimDdcenter.z = sign_dir(center.z); + PASSERT(FABS(center.z) >= cam.min_dist && cam.min_dist >= FEPS); + dimDdcenter *= dcoeffDdintersection_depth; // dcoeffDdcenter + dimDdcenter -= dcoeffDdcloseness * /* dclosenessDdcenter. */ + raydiff * FRCP(FMAX(length(raydiff) * draw_info.radius, FEPS)); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dcoeffDdcenter: %.9f, %.9f, %.9f.\n", + idx, + dimDdcenter.x, + dimDdcenter.y, + dimDdcenter.z); + // Now dcoeffDdcenter is stored in dimDdcenter. + dimDdcenter *= dimDdcoeff; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdcenter: %.9f, %.9f, %.9f.\n", + idx, + dimDdcenter.x, + dimDdcenter.y, + dimDdcenter.z); + // Prepare for posglob and cam pos. + const float pixel_size = length(cam.pixel_dir_x); + // pixel_size is the same as length(pixeldiry)! + const float pixel_size_rcp = FRCP(pixel_size); + float3 dcenterDdposglob = + (cam.pixel_dir_x + cam.pixel_dir_y) * pixel_size_rcp + + cam.sensor_dir_z; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dcenterDdposglob: %.9f, %.9f, %.9f.\n", + idx, + dcenterDdposglob.x, + dcenterDdposglob.y, + dcenterDdposglob.z); + if (calc_grad_pos) { + //! dcenterDdposglob. + *grad_pos += dimDdcenter * dcenterDdposglob; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdpos: %.9f, %.9f, %.9f.\n", + idx, + dimDdcenter.x * dcenterDdposglob.x, + dimDdcenter.y * dcenterDdposglob.y, + dimDdcenter.z * dcenterDdposglob.z); + } + if (calc_grad_cam) { + //! Camera. 
+ grad_cam->cam_pos -= dimDdcenter * dcenterDdposglob; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdeye: %.9f, %.9f, %.9f.\n", + idx, + -dimDdcenter.x * dcenterDdposglob.x, + -dimDdcenter.y * dcenterDdposglob.y, + -dimDdcenter.z * dcenterDdposglob.z); + // coord_world + /* + float3 dclosenessDdcoord_world = + raydiff * FRCP(FMAX(draw_info.radius * length(raydiff), FEPS)); + float3 dintersection_depthDdcoord_world = -2.f * raydiff; + */ + float3 dimDdcoord_world = /* dcoeffDdcoord_world */ + dcoeffDdcloseness * raydiff * + FRCP(FMAX(draw_info.radius * length(raydiff), FEPS)) - + dcoeffDdintersection_depth * raydiff / p1__p2_safe; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dcoeffDdcoord_world: %.9f, %.9f, %.9f.\n", + idx, + dimDdcoord_world.x, + dimDdcoord_world.y, + dimDdcoord_world.z); + dimDdcoord_world *= dimDdcoeff; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdcoord_world: %.9f, %.9f, %.9f.\n", + idx, + dimDdcoord_world.x, + dimDdcoord_world.y, + dimDdcoord_world.z); + // The third component of dimDdcoord_world is 0! + PASSERT(dimDdcoord_world.z == 0.f); + float3 coord_world = center - raydiff; + coord_world.z = 0.f; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|coord_world: %.9f, %.9f, %.9f.\n", + idx, + coord_world.x, + coord_world.y, + coord_world.z); + // Do this component-wise to save unnecessary matmul steps. + grad_cam->pixel_dir_x += dimDdcoord_world.x * cam.pixel_dir_x * + coord_world.x * pixel_size_rcp * pixel_size_rcp; + grad_cam->pixel_dir_x += dimDdcoord_world.y * cam.pixel_dir_x * + coord_world.y * pixel_size_rcp * pixel_size_rcp; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdpixel_dir_x|coord_world: %.9f, %.9f, %.9f.\n", + idx, + grad_cam->pixel_dir_x.x, + grad_cam->pixel_dir_x.y, + grad_cam->pixel_dir_x.z); + // dcenterkDdpixel_dir_k. 
+ float3 center_in_pixels = draw_info.ray_center_norm * + draw_info.t_center * pixel_size_rcp; + grad_cam->pixel_dir_x += dimDdcenter.x * + (center_in_pixels - + outer_product_sum(cam.pixel_dir_x) * center_in_pixels * + pixel_size_rcp * pixel_size_rcp); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dcenter0dpixel_dir_x: %.9f, %.9f, %.9f.\n", + idx, + (center_in_pixels - + outer_product_sum(cam.pixel_dir_x) * center_in_pixels * + pixel_size_rcp * pixel_size_rcp) + .x, + (center_in_pixels - + outer_product_sum(cam.pixel_dir_x) * center_in_pixels * + pixel_size_rcp * pixel_size_rcp) + .y, + (center_in_pixels - + outer_product_sum(cam.pixel_dir_x) * center_in_pixels * + pixel_size_rcp * pixel_size_rcp) + .z); + grad_cam->pixel_dir_y += dimDdcenter.y * + (center_in_pixels - + outer_product_sum(cam.pixel_dir_y) * center_in_pixels * + pixel_size_rcp * pixel_size_rcp); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dcenter1dpixel_dir_y: %.9f, %.9f, %.9f.\n", + idx, + (center_in_pixels - + outer_product_sum(cam.pixel_dir_y) * center_in_pixels * + pixel_size_rcp * pixel_size_rcp) + .x, + (center_in_pixels - + outer_product_sum(cam.pixel_dir_y) * center_in_pixels * + pixel_size_rcp * pixel_size_rcp) + .y, + (center_in_pixels - + outer_product_sum(cam.pixel_dir_y) * center_in_pixels * + pixel_size_rcp * pixel_size_rcp) + .z); + // dcenterzDdpixel_dir_k. 
+ float sensordirz_norm_rcp = FRCP( + FMAX(length(cross(cam.pixel_dir_y, cam.pixel_dir_x)), FEPS)); + grad_cam->pixel_dir_x += dimDdcenter.z * + (dot(center, cam.sensor_dir_z) * + cross(cam.pixel_dir_y, cam.sensor_dir_z) - + cross(cam.pixel_dir_y, center)) * + sensordirz_norm_rcp; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dcenterzDdpixel_dir_x: %.9f, %.9f, %.9f.\n", + idx, + ((dot(center, cam.sensor_dir_z) * + cross(cam.pixel_dir_y, cam.sensor_dir_z) - + cross(cam.pixel_dir_y, center)) * + sensordirz_norm_rcp) + .x, + ((dot(center, cam.sensor_dir_z) * + cross(cam.pixel_dir_y, cam.sensor_dir_z) - + cross(cam.pixel_dir_y, center)) * + sensordirz_norm_rcp) + .y, + ((dot(center, cam.sensor_dir_z) * + cross(cam.pixel_dir_y, cam.sensor_dir_z) - + cross(cam.pixel_dir_y, center)) * + sensordirz_norm_rcp) + .z); + grad_cam->pixel_dir_y += dimDdcenter.z * + (dot(center, cam.sensor_dir_z) * + cross(cam.pixel_dir_x, cam.sensor_dir_z) - + cross(cam.pixel_dir_x, center)) * + sensordirz_norm_rcp; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dcenterzDdpixel_dir_y: %.9f, %.9f, %.9f.\n", + idx, + ((dot(center, cam.sensor_dir_z) * + cross(cam.pixel_dir_x, cam.sensor_dir_z) - + cross(cam.pixel_dir_x, center)) * + sensordirz_norm_rcp) + .x, + ((dot(center, cam.sensor_dir_z) * + cross(cam.pixel_dir_x, cam.sensor_dir_z) - + cross(cam.pixel_dir_x, center)) * + sensordirz_norm_rcp) + .y, + ((dot(center, cam.sensor_dir_z) * + cross(cam.pixel_dir_x, cam.sensor_dir_z) - + cross(cam.pixel_dir_x, center)) * + sensordirz_norm_rcp) + .z); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdpixel_dir_x: %.9f, %.9f, %.9f.\n", + idx, + grad_cam->pixel_dir_x.x, + grad_cam->pixel_dir_x.y, + grad_cam->pixel_dir_x.z); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdpixel_dir_y: %.9f, %.9f, %.9f.\n", + idx, + grad_cam->pixel_dir_y.x, + grad_cam->pixel_dir_y.y, + grad_cam->pixel_dir_y.z); + } + } + } else { + if (calc_grad_rad) { + //! Find dcoeffDdrad. 
+ float dcoeffDdrad = + dcoeffDdcloseness * (closeness_world / radius_sq) - + dcoeffDdintersection_depth * draw_info.radius / p1__p2_safe; + PASSERT(isfinite(dcoeffDdrad)); + *grad_rad += FMUL(dimDdcoeff, dcoeffDdrad); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdrad: %.9f. dcoeffDdrad: %.9f.\n", + idx, + FMUL(dimDdcoeff, dcoeffDdrad), + dcoeffDdrad); + } + if (calc_grad_pos || calc_grad_cam) { + const float3 tmp1 = center - ray_dir_norm * o__p1_; + const float3 tmp1n = tmp1 / p1__p2_safe; + const float ray_dir_normDotRaydiff = dot(ray_dir_norm, raydiff); + const float3 dcoeffDdray = dcoeffDdintersection_depth * + (tmp1 - o__p1_ * tmp1n) / *norm_ray_dir + + dcoeffDdcloseness * + (ray_dir_norm * -ray_dir_normDotRaydiff + raydiff) / + (closeness_world * draw_info.radius) * + (draw_info.t_center / *norm_ray_dir); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dcoeffDdray: %.9f, %.9f, %.9f. dimDdray: " + "%.9f, %.9f, %.9f.\n", + idx, + dcoeffDdray.x, + dcoeffDdray.y, + dcoeffDdray.z, + dimDdcoeff * dcoeffDdray.x, + dimDdcoeff * dcoeffDdray.y, + dimDdcoeff * dcoeffDdray.z); + const float3 dcoeffDdcenter = + dcoeffDdintersection_depth * (ray_dir_norm + tmp1n) + + dcoeffDdcloseness * + (draw_info.ray_center_norm * ray_dir_normDotRaydiff - + raydiff) / + (closeness_world * draw_info.radius); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dcoeffDdcenter: %.9f, %.9f, %.9f. 
" + "dimDdcenter: %.9f, %.9f, %.9f.\n", + idx, + dcoeffDdcenter.x, + dcoeffDdcenter.y, + dcoeffDdcenter.z, + dimDdcoeff * dcoeffDdcenter.x, + dimDdcoeff * dcoeffDdcenter.y, + dimDdcoeff * dcoeffDdcenter.z); + if (calc_grad_pos) { + *grad_pos += dimDdcoeff * dcoeffDdcenter; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdposglob: %.9f, %.9f, %.9f.\n", + idx, + dimDdcoeff * dcoeffDdcenter.x, + dimDdcoeff * dcoeffDdcenter.y, + dimDdcoeff * dcoeffDdcenter.z); + } + if (calc_grad_cam) { + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdeye: %.9f, %.9f, %.9f.\n", + idx, + -dimDdcoeff * (dcoeffDdcenter.x + dcoeffDdray.x), + -dimDdcoeff * (dcoeffDdcenter.y + dcoeffDdray.y), + -dimDdcoeff * (dcoeffDdcenter.z + dcoeffDdray.z)); + grad_cam->cam_pos += -dimDdcoeff * (dcoeffDdcenter + dcoeffDdray); + grad_cam->pixel_0_0_center += dimDdcoeff * dcoeffDdray; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdpixel00centerglob: %.9f, %.9f, %.9f.\n", + idx, + dimDdcoeff * dcoeffDdray.x, + dimDdcoeff * dcoeffDdray.y, + dimDdcoeff * dcoeffDdray.z); + grad_cam->pixel_dir_x += + (dimDdcoeff * static_cast(coord_x)) * dcoeffDdray; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdpixel_dir_x: %.9f, %.9f, %.9f.\n", + idx, + (dimDdcoeff * static_cast(coord_x)) * dcoeffDdray.x, + (dimDdcoeff * static_cast(coord_x)) * dcoeffDdray.y, + (dimDdcoeff * static_cast(coord_x)) * dcoeffDdray.z); + grad_cam->pixel_dir_y += + (dimDdcoeff * static_cast(coord_y)) * dcoeffDdray; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_GRAD, + "grad %u|dimDdpixel_dir_y: %.9f, %.9f, %.9f.\n", + idx, + (dimDdcoeff * static_cast(coord_y)) * dcoeffDdray.x, + (dimDdcoeff * static_cast(coord_y)) * dcoeffDdray.y, + (dimDdcoeff * static_cast(coord_y)) * dcoeffDdray.z); + } + } + } + } + } + } + return true; +}; + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git 
a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.fill_bg.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.fill_bg.device.h new file mode 100644 index 0000000000000000000000000000000000000000..2a737d3eb609781f08120eb734982987866637f4 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.fill_bg.device.h @@ -0,0 +1,62 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_FILL_BG_DEVICE_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_FILL_BG_DEVICE_H_ + +#include "../global.h" +#include "./camera.h" +#include "./commands.h" +#include "./renderer.h" + +namespace pulsar { +namespace Renderer { + +template +GLOBAL void fill_bg( + Renderer renderer, + const CamInfo cam, + float const* const bg_col_d, + const float gamma, + const uint mode) { + GET_PARALLEL_IDS_2D(coord_x, coord_y, cam.film_width, cam.film_height); + int write_loc = coord_y * cam.film_width * (3 + 2 * renderer.n_track) + + coord_x * (3 + 2 * renderer.n_track); + if (renderer.forw_info_d[write_loc + 1] // sm_d + == 0.f) { + // This location has not been processed yet. + // Write first the forw_info: + // sm_m + renderer.forw_info_d[write_loc] = + cam.background_normalization_depth / gamma; + // sm_d + renderer.forw_info_d[write_loc + 1] = 1.f; + // max_closest_possible_intersection_hit + renderer.forw_info_d[write_loc + 2] = -1.f; + // sphere IDs and intersection depths. 
+ for (int i = 0; i < renderer.n_track; ++i) { + int sphere_id = -1; + IASF(sphere_id, renderer.forw_info_d[write_loc + 3 + i * 2]); + renderer.forw_info_d[write_loc + 3 + i * 2 + 1] = -1.f; + } + if (mode == 0) { + // Image background. + for (int i = 0; i < cam.n_channels; ++i) { + renderer.result_d + [coord_y * cam.film_width * cam.n_channels + + coord_x * cam.n_channels + i] = bg_col_d[i]; + } + } + } + END_PARALLEL_2D_NORET(); +}; + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.fill_bg.instantiate.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.fill_bg.instantiate.h new file mode 100644 index 0000000000000000000000000000000000000000..02830204a6874b8223bde1615fa9ef8ffa4d318c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.fill_bg.instantiate.h @@ -0,0 +1,22 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "./renderer.fill_bg.device.h" + +namespace pulsar { +namespace Renderer { + +template GLOBAL void fill_bg( + Renderer renderer, + const CamInfo norm, + float const* const bg_col_d, + const float gamma, + const uint mode); + +} // namespace Renderer +} // namespace pulsar diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.forward.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.forward.device.h new file mode 100644 index 0000000000000000000000000000000000000000..3f0412f576de4dd77b3f2be6a27ff8ddb144ca74 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.forward.device.h @@ -0,0 +1,302 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_FORWARD_DEVICE_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_FORWARD_DEVICE_H_ + +#include "../global.h" +#include "./camera.device.h" +#include "./commands.h" +#include "./math.h" +#include "./renderer.h" + +namespace pulsar { +namespace Renderer { + +template +void forward( + Renderer* self, + const float* vert_pos, + const float* vert_col, + const float* vert_rad, + const CamInfo& cam, + const float& gamma, + float percent_allowed_difference, + const uint& max_n_hits, + const float* bg_col_d, + const float* opacity_d, + const size_t& num_balls, + const uint& mode, + cudaStream_t stream) { + ARGCHECK(gamma > 0.f && gamma <= 1.f, 6, "gamma must be in [0., 1.]"); + ARGCHECK( + percent_allowed_difference >= 0.f && percent_allowed_difference <= 1.f, + 7, + "percent_allowed_difference must be in [0., 1.]"); + ARGCHECK(max_n_hits >= 1u, 8, "max_n_hits must be >= 1"); + ARGCHECK( + num_balls > 0 && num_balls <= self->max_num_balls, + 9, + ("num_balls must be >0 and <= max num balls! (" + + std::to_string(num_balls) + " vs. " + + std::to_string(self->max_num_balls) + ")") + .c_str()); + ARGCHECK( + cam.film_width == self->cam.film_width && + cam.film_height == self->cam.film_height, + 5, + "cam result width and height must agree"); + ARGCHECK(mode <= 1, 10, "mode must be <= 1!"); + if (percent_allowed_difference > 1.f - FEPS) { + LOG(WARNING) << "percent_allowed_difference > " << (1.f - FEPS) + << "! Clamping to " << (1.f - FEPS) << "."; + percent_allowed_difference = 1.f - FEPS; + } + LOG_IF(INFO, PULSAR_LOG_RENDER) << "Rendering forward pass..."; + // Update camera and transform into a new virtual camera system with + // centered principal point and subsection rendering. 
+ self->cam.eye = cam.eye; + self->cam.pixel_0_0_center = cam.pixel_0_0_center - cam.eye; + self->cam.pixel_dir_x = cam.pixel_dir_x; + self->cam.pixel_dir_y = cam.pixel_dir_y; + self->cam.sensor_dir_z = cam.sensor_dir_z; + self->cam.half_pixel_size = cam.half_pixel_size; + self->cam.focal_length = cam.focal_length; + self->cam.aperture_width = cam.aperture_width; + self->cam.aperture_height = cam.aperture_height; + self->cam.min_dist = cam.min_dist; + self->cam.max_dist = cam.max_dist; + self->cam.norm_fac = cam.norm_fac; + self->cam.principal_point_offset_x = cam.principal_point_offset_x; + self->cam.principal_point_offset_y = cam.principal_point_offset_y; + self->cam.film_border_left = cam.film_border_left; + self->cam.film_border_top = cam.film_border_top; +#ifdef PULSAR_TIMINGS_ENABLED + START_TIME(calc_signature); +#endif + LAUNCH_MAX_PARALLEL_1D( + calc_signature, + num_balls, + stream, + *self, + reinterpret_cast(vert_pos), + vert_col, + vert_rad, + num_balls); + CHECKLAUNCH(); +#ifdef PULSAR_TIMINGS_ENABLED + STOP_TIME(calc_signature); + START_TIME(sort); +#endif + SORT_ASCENDING_WS( + self->min_depth_d, + self->min_depth_sorted_d, + self->ids_d, + self->ids_sorted_d, + num_balls, + self->workspace_d, + self->workspace_size, + stream); + CHECKLAUNCH(); + SORT_ASCENDING_WS( + self->min_depth_d, + self->min_depth_sorted_d, + self->ii_d, + self->ii_sorted_d, + num_balls, + self->workspace_d, + self->workspace_size, + stream); + CHECKLAUNCH(); + SORT_ASCENDING_WS( + self->min_depth_d, + self->min_depth_sorted_d, + self->di_d, + self->di_sorted_d, + num_balls, + self->workspace_d, + self->workspace_size, + stream); + CHECKLAUNCH(); +#ifdef PULSAR_TIMINGS_ENABLED + STOP_TIME(sort); + START_TIME(minmax); +#endif + IntersectInfo pixel_minmax; + pixel_minmax.min.x = MAX_USHORT; + pixel_minmax.min.y = MAX_USHORT; + pixel_minmax.max.x = 0; + pixel_minmax.max.y = 0; + REDUCE_WS( + self->ii_sorted_d, + self->min_max_pixels_d, + num_balls, + IntersectInfoMinMax(), + 
pixel_minmax, + self->workspace_d, + self->workspace_size, + stream); + COPY_DEV_HOST(&pixel_minmax, self->min_max_pixels_d, IntersectInfo, 1); + LOG_IF(INFO, PULSAR_LOG_RENDER) + << "Region with pixels to render: " << pixel_minmax.min.x << ":" + << pixel_minmax.max.x << " (x), " << pixel_minmax.min.y << ":" + << pixel_minmax.max.y << " (y)."; +#ifdef PULSAR_TIMINGS_ENABLED + STOP_TIME(minmax); + START_TIME(render); +#endif + MEMSET( + self->result_d, + 0, + float, + self->cam.film_width * self->cam.film_height * self->cam.n_channels, + stream); + MEMSET( + self->forw_info_d, + 0, + float, + self->cam.film_width * self->cam.film_height * (3 + 2 * self->n_track), + stream); + if (pixel_minmax.max.y > pixel_minmax.min.y && + pixel_minmax.max.x > pixel_minmax.min.x) { + PASSERT( + pixel_minmax.min.x >= static_cast(self->cam.film_border_left) && + pixel_minmax.min.x < + static_cast( + self->cam.film_border_left + self->cam.film_width) && + pixel_minmax.max.x <= + static_cast( + self->cam.film_border_left + self->cam.film_width) && + pixel_minmax.min.y >= static_cast(self->cam.film_border_top) && + pixel_minmax.min.y < + static_cast( + self->cam.film_border_top + self->cam.film_height) && + pixel_minmax.max.y <= + static_cast( + self->cam.film_border_top + self->cam.film_height)); + // Cut the image in 3x3 regions. + int y_step = RENDER_BLOCK_SIZE * + iDivCeil(pixel_minmax.max.y - pixel_minmax.min.y, + 3u * RENDER_BLOCK_SIZE); + int x_step = RENDER_BLOCK_SIZE * + iDivCeil(pixel_minmax.max.x - pixel_minmax.min.x, + 3u * RENDER_BLOCK_SIZE); + LOG_IF(INFO, PULSAR_LOG_RENDER) << "Using image slices of size " << x_step + << ", " << y_step << " (W, H)."; + for (int y_min = pixel_minmax.min.y; y_min < pixel_minmax.max.y; + y_min += y_step) { + for (int x_min = pixel_minmax.min.x; x_min < pixel_minmax.max.x; + x_min += x_step) { + // Create region selection. 
+ LAUNCH_MAX_PARALLEL_1D( + create_selector, + num_balls, + stream, + self->ii_sorted_d, + num_balls, + x_min, + x_min + x_step, + y_min, + y_min + y_step, + self->region_flags_d); + CHECKLAUNCH(); + SELECT_FLAGS_WS( + self->region_flags_d, + self->ii_sorted_d, + self->ii_d, + self->num_selected_d, + num_balls, + self->workspace_d, + self->workspace_size, + stream); + CHECKLAUNCH(); + SELECT_FLAGS_WS( + self->region_flags_d, + self->di_sorted_d, + self->di_d, + self->num_selected_d, + num_balls, + self->workspace_d, + self->workspace_size, + stream); + CHECKLAUNCH(); + SELECT_FLAGS_WS( + self->region_flags_d, + self->ids_sorted_d, + self->ids_d, + self->num_selected_d, + num_balls, + self->workspace_d, + self->workspace_size, + stream); + CHECKLAUNCH(); + LAUNCH_PARALLEL_2D( + render, + x_step, + y_step, + RENDER_BLOCK_SIZE, + RENDER_BLOCK_SIZE, + stream, + self->num_selected_d, + self->ii_d, + self->di_d, + self->min_depth_d, + self->ids_d, + opacity_d, + self->cam, + gamma, + percent_allowed_difference, + max_n_hits, + bg_col_d, + mode, + x_min, + y_min, + x_step, + y_step, + self->result_d, + self->forw_info_d, + self->n_track); + CHECKLAUNCH(); + } + } + } + if (mode == 0) { + LAUNCH_MAX_PARALLEL_2D( + fill_bg, + static_cast(self->cam.film_width), + static_cast(self->cam.film_height), + stream, + *self, + self->cam, + bg_col_d, + gamma, + mode); + CHECKLAUNCH(); + } +#ifdef PULSAR_TIMINGS_ENABLED + STOP_TIME(render); + float time_ms; + // This blocks the result and prevents batch-processing from parallelizing. 
+ GET_TIME(calc_signature, &time_ms); + std::cout << "Time for signature calculation: " << time_ms << " ms" + << std::endl; + GET_TIME(sort, &time_ms); + std::cout << "Time for sorting: " << time_ms << " ms" << std::endl; + GET_TIME(minmax, &time_ms); + std::cout << "Time for minmax pixel calculation: " << time_ms << " ms" + << std::endl; + GET_TIME(render, &time_ms); + std::cout << "Time for rendering: " << time_ms << " ms" << std::endl; +#endif + LOG_IF(INFO, PULSAR_LOG_RENDER) << "Forward pass complete."; +} + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.forward.instantiate.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.forward.instantiate.h new file mode 100644 index 0000000000000000000000000000000000000000..7f57bc8681b7c7f1356f3c3e134595ab2d1955f0 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.forward.instantiate.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "./renderer.forward.device.h" + +namespace pulsar { +namespace Renderer { + +template void forward( + Renderer* self, + const float* vert_pos, + const float* vert_col, + const float* vert_rad, + const CamInfo& cam, + const float& gamma, + float percent_allowed_difference, + const uint& max_n_hits, + const float* bg_col_d, + const float* opacity_d, + const size_t& num_balls, + const uint& mode, + cudaStream_t stream); + +} // namespace Renderer +} // namespace pulsar diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.get_screen_area.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.get_screen_area.device.h new file mode 100644 index 0000000000000000000000000000000000000000..1a85a1bd20cfa0773e395163871ea5a7a8b39347 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.get_screen_area.device.h @@ -0,0 +1,144 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_GET_SCREEN_AREA_DEVICE_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_GET_SCREEN_AREA_DEVICE_H_ + +#include "../global.h" +#include "./camera.device.h" +#include "./commands.h" +#include "./math.h" + +namespace pulsar { +namespace Renderer { + +/** + * Find the closest enclosing screen area rectangle in pixels that encloses a + * ball. + * + * The method returns the two x and the two y values of the boundaries. They + * are not ordered yet and you need to find min and max for the left/right and + * lower/upper boundary. + * + * The return values are floats and need to be rounded appropriately. 
+ */ +INLINE DEVICE bool get_screen_area( + const float3& ball_center_cam, + const float3& ray_center_norm, + const float& vert_rad, + const CamInfo& cam, + const uint& idx, + /* Out variables. */ + float* x_1, + float* x_2, + float* y_1, + float* y_2) { + float cos_alpha = dot(cam.sensor_dir_z, ray_center_norm); + float2 o__c_, alpha, theta; + if (cos_alpha < EPS) { + PULSAR_LOG_DEV( + PULSAR_LOG_CALC_SIGNATURE, + "signature %d|ball not visible. cos_alpha: %.9f.\n", + idx, + cos_alpha); + // No intersection, ball won't be visible. + return false; + } + // Multiply the direction vector with the camera rotation matrix + // to have the optical axis being the canonical z vector (0, 0, 1). + // TODO: optimize. + const float3 ball_center_cam_rot = rotate( + ball_center_cam, + cam.pixel_dir_x / length(cam.pixel_dir_x), + cam.pixel_dir_y / length(cam.pixel_dir_y), + cam.sensor_dir_z); + PULSAR_LOG_DEV( + PULSAR_LOG_CALC_SIGNATURE, + "signature %d|ball_center_cam_rot: %f, %f, %f.\n", + idx, + ball_center_cam.x, + ball_center_cam.y, + ball_center_cam.z); + const float pixel_size_norm_fac = FRCP(2.f * cam.half_pixel_size); + const float optical_offset_x = + (static_cast(cam.aperture_width) - 1.f) * .5f; + const float optical_offset_y = + (static_cast(cam.aperture_height) - 1.f) * .5f; + if (cam.orthogonal_projection) { + *x_1 = + FMA(ball_center_cam_rot.x - vert_rad, + pixel_size_norm_fac, + optical_offset_x); + *x_2 = + FMA(ball_center_cam_rot.x + vert_rad, + pixel_size_norm_fac, + optical_offset_x); + *y_1 = + FMA(ball_center_cam_rot.y - vert_rad, + pixel_size_norm_fac, + optical_offset_y); + *y_2 = + FMA(ball_center_cam_rot.y + vert_rad, + pixel_size_norm_fac, + optical_offset_y); + return true; + } else { + o__c_.x = FMAX( + FSQRT( + ball_center_cam_rot.x * ball_center_cam_rot.x + + ball_center_cam_rot.z * ball_center_cam_rot.z), + FEPS); + o__c_.y = FMAX( + FSQRT( + ball_center_cam_rot.y * ball_center_cam_rot.y + + ball_center_cam_rot.z * ball_center_cam_rot.z), + 
FEPS); + PULSAR_LOG_DEV( + PULSAR_LOG_CALC_SIGNATURE, + "signature %d|o__c_: %f, %f.\n", + idx, + o__c_.x, + o__c_.y); + alpha.x = sign_dir(ball_center_cam_rot.x) * + acos(FMIN(FMAX(ball_center_cam_rot.z / o__c_.x, -1.f), 1.f)); + alpha.y = -sign_dir(ball_center_cam_rot.y) * + acos(FMIN(FMAX(ball_center_cam_rot.z / o__c_.y, -1.f), 1.f)); + theta.x = asin(FMIN(FMAX(vert_rad / o__c_.x, -1.f), 1.f)); + theta.y = asin(FMIN(FMAX(vert_rad / o__c_.y, -1.f), 1.f)); + PULSAR_LOG_DEV( + PULSAR_LOG_CALC_SIGNATURE, + "signature %d|alpha.x: %f, alpha.y: %f, theta.x: %f, theta.y: %f.\n", + idx, + alpha.x, + alpha.y, + theta.x, + theta.y); + *x_1 = tan(alpha.x - theta.x) * cam.focal_length; + *x_2 = tan(alpha.x + theta.x) * cam.focal_length; + *y_1 = tan(alpha.y - theta.y) * cam.focal_length; + *y_2 = tan(alpha.y + theta.y) * cam.focal_length; + PULSAR_LOG_DEV( + PULSAR_LOG_CALC_SIGNATURE, + "signature %d|in sensor plane: x_1: %f, x_2: %f, y_1: %f, y_2: %f.\n", + idx, + *x_1, + *x_2, + *y_1, + *y_2); + *x_1 = FMA(*x_1, pixel_size_norm_fac, optical_offset_x); + *x_2 = FMA(*x_2, pixel_size_norm_fac, optical_offset_x); + *y_1 = FMA(*y_1, -pixel_size_norm_fac, optical_offset_y); + *y_2 = FMA(*y_2, -pixel_size_norm_fac, optical_offset_y); + return true; + } +}; + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.h new file mode 100644 index 0000000000000000000000000000000000000000..d6755ee91887b8f6316563b03cee9c524a6f7315 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.h @@ -0,0 +1,468 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. 
+ * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_H_ + +#include + +#include "../global.h" +#include "./camera.h" + +namespace pulsar { +namespace Renderer { + +//! Remember to order struct members from larger size to smaller size +//! to avoid padding (for more info, see for example here: +//! http://www.catb.org/esr/structure-packing/). + +/** + * This is the information that's needed to do a fast screen point + * intersection with one of the balls. + * + * Aim to keep this below 8 bytes (256 bytes per cache-line / 32 threads in a + * warp = 8 bytes per thread). + */ +struct IntersectInfo { + ushort2 min; /** minimum x, y in pixel coordinates. */ + ushort2 max; /** maximum x, y in pixel coordinates. */ +}; +static_assert( + sizeof(IntersectInfo) == 8, + "The compiled size of `IntersectInfo` is wrong."); + +/** + * Reduction operation to find the limits of multiple IntersectInfo objects. + */ +struct IntersectInfoMinMax { + IHD IntersectInfo + operator()(const IntersectInfo& a, const IntersectInfo& b) const { + // Treat the special case of an invalid intersect info object or one for + // a ball out of bounds. + if (b.max.x == MAX_USHORT && b.min.x == MAX_USHORT && + b.max.y == MAX_USHORT && b.min.y == MAX_USHORT) { + return a; + } + if (a.max.x == MAX_USHORT && a.min.x == MAX_USHORT && + a.max.y == MAX_USHORT && a.min.y == MAX_USHORT) { + return b; + } + IntersectInfo result; + result.min.x = std::min(a.min.x, b.min.x); + result.min.y = std::min(a.min.y, b.min.y); + result.max.x = std::max(a.max.x, b.max.x); + result.max.y = std::max(a.max.y, b.max.y); + return result; + } +}; + +/** + * All information that's needed to draw a ball. + * + * It's necessary to keep this information in float (not half) format, + * because the loss in accuracy would be too high and lead to artifacts. 
+ */ +struct DrawInfo { + float3 ray_center_norm; /** Ray to the ball center, normalized. */ + /** Ball color. + * + * This might be the full color in the case of n_channels <= 3. Otherwise, + * a pointer to the original 'color' data is stored in the following union. + */ + float first_color; + union { + float color[2]; + float* ptr; + } color_union; + float t_center; /** Distance from the camera to the ball center. */ + float radius; /** Ball radius. */ +}; +static_assert( + sizeof(DrawInfo) == 8 * 4, + "The compiled size of `DrawInfo` is wrong."); + +/** + * An object to collect all associated data with the renderer. + * + * The `_d` suffixed pointers point to memory 'on-device', potentially on the + * GPU. All other variables are expected to point to CPU memory. + */ +struct Renderer { + /** Dummy initializer to make sure all pointers are set to NULL to + * be safe for the device-specific 'construct' and 'destruct' methods. + */ + inline Renderer() { + max_num_balls = 0; + result_d = NULL; + min_depth_d = NULL; + min_depth_sorted_d = NULL; + ii_d = NULL; + ii_sorted_d = NULL; + ids_d = NULL; + ids_sorted_d = NULL; + workspace_d = NULL; + di_d = NULL; + di_sorted_d = NULL; + region_flags_d = NULL; + num_selected_d = NULL; + forw_info_d = NULL; + grad_pos_d = NULL; + grad_col_d = NULL; + grad_rad_d = NULL; + grad_cam_d = NULL; + grad_opy_d = NULL; + grad_cam_buf_d = NULL; + n_grad_contributions_d = NULL; + }; + /** The camera for this renderer. In world-coordinates. */ + CamInfo cam; + /** + * The maximum amount of balls the renderer can handle. Resources are + * pre-allocated to account for this size. Less than this amount of balls + * can be rendered, but not more. + */ + int max_num_balls; + /** The result buffer. */ + float* result_d; + /** Closest possible intersection depth per sphere w.r.t. the camera. */ + float* min_depth_d; + /** Closest possible intersection depth per sphere, ordered ascending. 
*/ + float* min_depth_sorted_d; + /** The intersect infos per sphere. */ + IntersectInfo* ii_d; + /** The intersect infos per sphere, ordered by their closest possible + * intersection depth (asc.). */ + IntersectInfo* ii_sorted_d; + /** Original sphere IDs. */ + int* ids_d; + /** Original sphere IDs, ordered by their closest possible intersection depth + * (asc.). */ + int* ids_sorted_d; + /** Workspace for CUB routines. */ + char* workspace_d; + /** Workspace size for CUB routines. */ + size_t workspace_size; + /** The draw information structures for each sphere. */ + DrawInfo* di_d; + /** The draw information structures sorted by closest possible intersection + * depth (asc.). */ + DrawInfo* di_sorted_d; + /** Region association buffer. */ + char* region_flags_d; + /** Num spheres in the current region. */ + size_t* num_selected_d; + /** Pointer to information from the forward pass. */ + float* forw_info_d; + /** Struct containing information about the min max pixels that contain + * rendered information in the image. */ + IntersectInfo* min_max_pixels_d; + /** Gradients w.r.t. position. */ + float3* grad_pos_d; + /** Gradients w.r.t. color. */ + float* grad_col_d; + /** Gradients w.r.t. radius. */ + float* grad_rad_d; + /** Gradients w.r.t. camera parameters. */ + float* grad_cam_d; + /** Gradients w.r.t. opacity. */ + float* grad_opy_d; + /** Camera gradient information by sphere. + * + * Here, every sphere's contribution to the camera gradients is stored. It is + * aggregated and written to grad_cam_d in a separate step. This avoids write + * conflicts when processing the spheres. + */ + CamGradInfo* grad_cam_buf_d; + /** Total of all gradient contributions for this image. */ + int* n_grad_contributions_d; + /** The number of spheres to track for backpropagation. */ + int n_track; +}; + +inline bool operator==(const Renderer& a, const Renderer& b) { + return a.cam == b.cam && a.max_num_balls == b.max_num_balls; +} + +/** + * Construct a renderer. 
+ */ +template +void construct( + Renderer* self, + const size_t& max_num_balls, + const int& width, + const int& height, + const bool& orthogonal_projection, + const bool& right_handed_system, + const float& background_normalization_depth, + const uint& n_channels, + const uint& n_track); + +/** + * Destruct the renderer and free the associated memory. + */ +template +void destruct(Renderer* self); + +/** + * Create a selection of points inside a rectangle. + * + * This write boolen values into `region_flags_d', which can + * for example be used by a CUB function to extract the selection. + */ +template +GLOBAL void create_selector( + IntersectInfo const* const RESTRICT ii_sorted_d, + const uint num_balls, + const int min_x, + const int max_x, + const int min_y, + const int max_y, + /* Out variables. */ + char* RESTRICT region_flags_d); + +/** + * Calculate a signature for a ball. + * + * Populate the `ids_d`, `ii_d`, `di_d` and `min_depth_d` fields of the + * renderer. For spheres not visible in the image, sets the id field to -1, + * min_depth_d to MAX_FLOAT and the ii_d.min.x fields to MAX_USHORT. + */ +template +GLOBAL void calc_signature( + Renderer renderer, + float3 const* const RESTRICT vert_poss, + float const* const RESTRICT vert_cols, + float const* const RESTRICT vert_rads, + const uint num_balls); + +/** + * The block size for rendering. + * + * This should be as large as possible, but is limited due to the amount + * of variables we use and the memory required per thread. + */ +#define RENDER_BLOCK_SIZE 16 +/** + * The buffer size of spheres to be loaded and analyzed for relevance. + * + * This must be at least RENDER_BLOCK_SIZE * RENDER_BLOCK_SIZE so that + * for every iteration through the loading loop every thread could add a + * 'hit' to the buffer. + */ +#define RENDER_BUFFER_SIZE RENDER_BLOCK_SIZE* RENDER_BLOCK_SIZE * 2 +/** + * The threshold after which the spheres that are in the render buffer + * are rendered and the buffer is flushed. 
+ * + * Must be less than RENDER_BUFFER_SIZE. + */ +#define RENDER_BUFFER_LOAD_THRESH 16 * 4 + +/** + * The render function. + * + * Assumptions: + * * the focal length is appropriately chosen, + * * ray_dir_norm.z is > EPS. + * * to be completed... + */ +template +GLOBAL void render( + size_t const* const RESTRICT + num_balls, /** Number of balls relevant for this pass. */ + IntersectInfo const* const RESTRICT ii_d, /** Intersect information. */ + DrawInfo const* const RESTRICT di_d, /** Draw information. */ + float const* const RESTRICT min_depth_d, /** Minimum depth per sphere. */ + int const* const RESTRICT id_d, /** IDs. */ + float const* const RESTRICT op_d, /** Opacity. */ + const CamInfo cam_norm, /** Camera normalized with all vectors to be in the + * camera coordinate system. + */ + const float gamma, /** Transparency parameter. **/ + const float percent_allowed_difference, /** Maximum allowed + error in color. */ + const uint max_n_hits, + const float* bg_col_d, + const uint mode, + const int x_min, + const int y_min, + const int x_step, + const int y_step, + // Out variables. + float* const RESTRICT result_d, /** The result image. */ + float* const RESTRICT forw_info_d, /** Additional information needed for the + grad computation. */ + // Infrastructure. + const int n_track /** The number of spheres to track. */ +); + +/** + * Makes sure to paint background information. + * + * This is required as a separate post-processing step because certain + * pixels may not be processed during the forward pass if there is no + * possibility for a sphere to be present at their location. + */ +template +GLOBAL void fill_bg( + Renderer renderer, + const CamInfo norm, + float const* const bg_col_d, + const float gamma, + const uint mode); + +/** + * Rendering forward pass. + * + * Takes a renderer and sphere data as inputs and creates a rendering. 
+ */ +template +void forward( + Renderer* self, + const float* vert_pos, + const float* vert_col, + const float* vert_rad, + const CamInfo& cam, + const float& gamma, + float percent_allowed_difference, + const uint& max_n_hits, + const float* bg_col_d, + const float* opacity_d, + const size_t& num_balls, + const uint& mode, + cudaStream_t stream); + +/** + * Normalize the camera gradients by the number of spheres that contributed. + */ +template +GLOBAL void norm_cam_gradients(Renderer renderer); + +/** + * Normalize the sphere gradients. + * + * We're assuming that the samples originate from a Monte Carlo + * sampling process and normalize by number and sphere area. + */ +template +GLOBAL void norm_sphere_gradients(Renderer renderer, const int num_balls); + +#define GRAD_BLOCK_SIZE 16 +/** Calculate the gradients. + */ +template +GLOBAL void calc_gradients( + const CamInfo cam, /** Camera in world coordinates. */ + float const* const RESTRICT grad_im, /** The gradient image. */ + const float + gamma, /** The transparency parameter used in the forward pass. */ + float3 const* const RESTRICT vert_poss, /** Vertex position vector. */ + float const* const RESTRICT vert_cols, /** Vertex color vector. */ + float const* const RESTRICT vert_rads, /** Vertex radius vector. */ + float const* const RESTRICT opacity, /** Vertex opacity. */ + const uint num_balls, /** Number of balls. */ + float const* const RESTRICT result_d, /** Result image. */ + float const* const RESTRICT forw_info_d, /** Forward pass info. */ + DrawInfo const* const RESTRICT di_d, /** Draw information. */ + IntersectInfo const* const RESTRICT ii_d, /** Intersect information. */ + // Mode switches. + const bool calc_grad_pos, + const bool calc_grad_col, + const bool calc_grad_rad, + const bool calc_grad_cam, + const bool calc_grad_opy, + // Out variables. + float* const RESTRICT grad_rad_d, /** Radius gradients. */ + float* const RESTRICT grad_col_d, /** Color gradients. 
*/ + float3* const RESTRICT grad_pos_d, /** Position gradients. */ + CamGradInfo* const RESTRICT grad_cam_buf_d, /** Camera gradient buffer. */ + float* const RESTRICT grad_opy_d, /** Opacity gradient buffer. */ + int* const RESTRICT + grad_contributed_d, /** Gradient contribution counter. */ + // Infrastructure. + const int n_track, + const uint offs_x = 0, + const uint offs_y = 0); + +/** + * A full backward pass. + * + * Creates the gradients for the given gradient_image and the spheres. + */ +template +void backward( + Renderer* self, + const float* grad_im, + const float* image, + const float* forw_info, + const float* vert_pos, + const float* vert_col, + const float* vert_rad, + const CamInfo& cam, + const float& gamma, + float percent_allowed_difference, + const uint& max_n_hits, + const float* vert_opy, + const size_t& num_balls, + const uint& mode, + const bool& dif_pos, + const bool& dif_col, + const bool& dif_rad, + const bool& dif_cam, + const bool& dif_opy, + cudaStream_t stream); + +/** + * A debug backward pass. + * + * This is a function to debug the gradient calculation. It calculates the + * gradients for exactly one pixel (set with pos_x and pos_y) without averaging. 
+ * + * *Uses only the first sphere for camera gradient calculation!* + */ +template +void backward_dbg( + Renderer* self, + const float* grad_im, + const float* image, + const float* forw_info, + const float* vert_pos, + const float* vert_col, + const float* vert_rad, + const CamInfo& cam, + const float& gamma, + float percent_allowed_difference, + const uint& max_n_hits, + const float* vert_opy, + const size_t& num_balls, + const uint& mode, + const bool& dif_pos, + const bool& dif_col, + const bool& dif_rad, + const bool& dif_cam, + const bool& dif_opy, + const uint& pos_x, + const uint& pos_y, + cudaStream_t stream); + +template +void nn( + const float* ref_ptr, + const float* tar_ptr, + const uint& k, + const uint& d, + const uint& n, + float* dist_ptr, + int32_t* inds_ptr, + cudaStream_t stream); + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.norm_cam_gradients.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.norm_cam_gradients.device.h new file mode 100644 index 0000000000000000000000000000000000000000..e1dfd55d0b1363c1d8d38709460e00a75efeef5a --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.norm_cam_gradients.device.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_NORM_CAM_GRADIENTS_DEVICE_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_NORM_CAM_GRADIENTS_DEVICE_H_ + +#include "../global.h" +#include "./camera.device.h" +#include "./commands.h" +#include "./math.h" +#include "./renderer.h" + +namespace pulsar { +namespace Renderer { + +/** + * Normalize the camera gradients by the number of spheres that contributed. + */ +template +GLOBAL void norm_cam_gradients(Renderer renderer) { + GET_PARALLEL_IDX_1D(idx, 1); + CamGradInfo* cgi = reinterpret_cast(renderer.grad_cam_d); + *cgi = *cgi * FRCP(static_cast(*renderer.n_grad_contributions_d)); + END_PARALLEL_NORET(); +}; + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.norm_cam_gradients.instantiate.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.norm_cam_gradients.instantiate.h new file mode 100644 index 0000000000000000000000000000000000000000..98e05a67e470237a9328d7a441e7b700a7ce675d --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.norm_cam_gradients.instantiate.h @@ -0,0 +1,17 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "./renderer.norm_cam_gradients.device.h" + +namespace pulsar { +namespace Renderer { + +template GLOBAL void norm_cam_gradients(Renderer renderer); + +} // namespace Renderer +} // namespace pulsar diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.norm_sphere_gradients.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.norm_sphere_gradients.device.h new file mode 100644 index 0000000000000000000000000000000000000000..37e0eb00a5179911216a5d2827feb83ade487755 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.norm_sphere_gradients.device.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_NORM_SPHERE_GRADIENTS_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_NORM_SPHERE_GRADIENTS_H_ + +#include "../global.h" +#include "./commands.h" +#include "./math.h" +#include "./renderer.h" + +namespace pulsar { +namespace Renderer { + +/** + * Normalize the sphere gradients. + * + * We're assuming that the samples originate from a Monte Carlo + * sampling process and normalize by number and sphere area. + */ +template +GLOBAL void norm_sphere_gradients(Renderer renderer, const int num_balls) { + GET_PARALLEL_IDX_1D(idx, num_balls); + float norm_fac = 0.f; + IntersectInfo ii; + if (renderer.ids_sorted_d[idx] > 0) { + ii = renderer.ii_d[idx]; + // Normalize the sphere gradients as averages. + // This avoids the case that there are small spheres in a scene with still + // un-converged colors whereas the big spheres already converged, just + // because their integrated learning rate is 'higher'. 
+ norm_fac = FRCP(static_cast(renderer.ids_sorted_d[idx])); + } + PULSAR_LOG_DEV_NODE( + PULSAR_LOG_NORMALIZE, + "ids_sorted_d[idx]: %d, norm_fac: %.9f.\n", + renderer.ids_sorted_d[idx], + norm_fac); + renderer.grad_rad_d[idx] *= norm_fac; + for (uint c_idx = 0; c_idx < renderer.cam.n_channels; ++c_idx) { + renderer.grad_col_d[idx * renderer.cam.n_channels + c_idx] *= norm_fac; + } + renderer.grad_pos_d[idx] *= norm_fac; + renderer.grad_opy_d[idx] *= norm_fac; + + if (renderer.ids_sorted_d[idx] > 0) { + // For the camera, we need to be more correct and have the gradients + // be proportional to the area they cover in the image. + // This leads to a formulation very much like in monte carlo integration: + norm_fac = FRCP(static_cast(renderer.ids_sorted_d[idx])) * + (static_cast(ii.max.x) - static_cast(ii.min.x)) * + (static_cast(ii.max.y) - static_cast(ii.min.y)) * + 1e-3f; // for better numerics. + } + renderer.grad_cam_buf_d[idx].cam_pos *= norm_fac; + renderer.grad_cam_buf_d[idx].pixel_0_0_center *= norm_fac; + renderer.grad_cam_buf_d[idx].pixel_dir_x *= norm_fac; + renderer.grad_cam_buf_d[idx].pixel_dir_y *= norm_fac; + // The sphere only contributes to the camera gradients if it is + // large enough in screen space. 
+ if (renderer.ids_sorted_d[idx] > 0 && ii.max.x >= ii.min.x + 3 && + ii.max.y >= ii.min.y + 3) + renderer.ids_sorted_d[idx] = 1; + END_PARALLEL_NORET(); +}; + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.render.device.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.render.device.h new file mode 100644 index 0000000000000000000000000000000000000000..ab13c66d0002f08ece06b38d386019660168d32c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.render.device.h @@ -0,0 +1,422 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_RENDER_DEVICE_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_RENDER_DEVICE_H_ + +#include "../global.h" +#include "./camera.device.h" +#include "./commands.h" +#include "./math.h" +#include "./renderer.h" + +#include "./closest_sphere_tracker.device.h" +#include "./renderer.draw.device.h" + +namespace pulsar { +namespace Renderer { + +template +GLOBAL void render( + size_t const* const RESTRICT + num_balls, /** Number of balls relevant for this pass. */ + IntersectInfo const* const RESTRICT ii_d, /** Intersect information. */ + DrawInfo const* const RESTRICT di_d, /** Draw information. */ + float const* const RESTRICT min_depth_d, /** Minimum depth per sphere. */ + int const* const RESTRICT ids_d, /** IDs. */ + float const* const RESTRICT op_d, /** Opacity. */ + const CamInfo cam_norm, /** Camera normalized with all vectors to be in the + * camera coordinate system. + */ + const float gamma, /** Transparency parameter. 
**/ + const float percent_allowed_difference, /** Maximum allowed + error in color. */ + const uint max_n_hits, + const float* bg_col, + const uint mode, + const int x_min, + const int y_min, + const int x_step, + const int y_step, + // Out variables. + float* const RESTRICT result_d, /** The result image. */ + float* const RESTRICT forw_info_d, /** Additional information needed for the + grad computation. */ + const int n_track /** The number of spheres to track for backprop. */ +) { + // Do not early stop threads in this block here. They can all contribute to + // the scanning process, we just have to prevent from writing their result. + GET_PARALLEL_IDS_2D(offs_x, offs_y, x_step, y_step); + // Variable declarations and const initializations. + const float ln_pad_over_1minuspad = + FLN(percent_allowed_difference / (1.f - percent_allowed_difference)); + /** A facility to track the closest spheres to the camera + (in preparation for gradient calculation). */ + ClosestSphereTracker tracker(n_track); + const uint coord_x = x_min + offs_x; /** Ray coordinate x. */ + const uint coord_y = y_min + offs_y; /** Ray coordinate y. */ + float3 ray_dir_norm; /** Ray cast through the pixel, normalized. */ + float2 projected_ray; /** Ray intersection with the sensor. */ + if (cam_norm.orthogonal_projection) { + ray_dir_norm = cam_norm.sensor_dir_z; + projected_ray.x = static_cast(coord_x); + projected_ray.y = static_cast(coord_y); + } else { + ray_dir_norm = normalize( + cam_norm.pixel_0_0_center + coord_x * cam_norm.pixel_dir_x + + coord_y * cam_norm.pixel_dir_y); + // This is a reasonable assumption for normal focal lengths and image sizes. + PASSERT(FABS(ray_dir_norm.z) > FEPS); + projected_ray.x = ray_dir_norm.x / ray_dir_norm.z * cam_norm.focal_length; + projected_ray.y = ray_dir_norm.y / ray_dir_norm.z * cam_norm.focal_length; + } + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_RENDER_PIX, + "render|ray_dir_norm: %.9f, %.9f, %.9f. 
projected_ray: %.9f, %.9f.\n", + ray_dir_norm.x, + ray_dir_norm.y, + ray_dir_norm.z, + projected_ray.x, + projected_ray.y); + // Set up shared infrastructure. + /** This entire thread block. */ + cg::thread_block thread_block = cg::this_thread_block(); + /** The collaborators within a warp. */ + cg::coalesced_group thread_warp = cg::coalesced_threads(); + /** The number of loaded balls in the load buffer di_l. */ + SHARED uint n_loaded; + /** Draw information buffer. */ + SHARED DrawInfo di_l[RENDER_BUFFER_SIZE]; + /** The original sphere id of each loaded sphere. */ + SHARED uint sphere_id_l[RENDER_BUFFER_SIZE]; + /** The number of pixels in this block that are done. */ + SHARED int n_pixels_done; + /** Whether loading of balls is completed. */ + SHARED bool loading_done; + /** The number of balls loaded overall (just for statistics). */ + [[maybe_unused]] SHARED int n_balls_loaded; + /** The area this thread block covers. */ + SHARED IntersectInfo block_area; + if (thread_block.thread_rank() == 0) { + // Initialize the shared variables. + n_loaded = 0; + block_area.min.x = static_cast(coord_x); + block_area.max.x = static_cast(IMIN( + coord_x + blockDim.x, cam_norm.film_border_left + cam_norm.film_width)); + block_area.min.y = static_cast(coord_y); + block_area.max.y = static_cast(IMIN( + coord_y + blockDim.y, cam_norm.film_border_top + cam_norm.film_height)); + n_pixels_done = 0; + loading_done = false; + n_balls_loaded = 0; + } + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_RENDER_PIX, + "render|block_area.min: %d, %d. block_area.max: %d, %d.\n", + block_area.min.x, + block_area.min.y, + block_area.max.x, + block_area.max.y); + // Initialization of the pixel with the background color. + /** + * The result of this very pixel. + * the offset calculation might overflow if this thread is out of + * bounds of the film. However, in this case result is not + * accessed, so this is fine. 
+ */ + float* result = result_d + + (coord_y - cam_norm.film_border_top) * cam_norm.film_width * + cam_norm.n_channels + + (coord_x - cam_norm.film_border_left) * cam_norm.n_channels; + if (coord_x >= cam_norm.film_border_left && + coord_x < cam_norm.film_border_left + cam_norm.film_width && + coord_y >= cam_norm.film_border_top && + coord_y < cam_norm.film_border_top + cam_norm.film_height) { + // Initialize the result. + if (mode == 0u) { + for (uint c_id = 0; c_id < cam_norm.n_channels; ++c_id) + result[c_id] = bg_col[c_id]; + } else { + result[0] = 0.f; + } + } + /** Normalization denominator. */ + float sm_d = 1.f; + /** Normalization tracker for stable softmax. The maximum observed value. */ + float sm_m = cam_norm.background_normalization_depth / gamma; + /** Whether this pixel has had all information needed for drawing. */ + bool done = + (coord_x < cam_norm.film_border_left || + coord_x >= cam_norm.film_border_left + cam_norm.film_width || + coord_y < cam_norm.film_border_top || + coord_y >= cam_norm.film_border_top + cam_norm.film_height); + /** The depth threshold for a new point to have at least + * `percent_allowed_difference` influence on the result color. All points that + * are further away than this are ignored. + */ + float depth_threshold = done ? -1.f : MAX_FLOAT; + /** The closest intersection possible of a ball that was hit by this pixel + * ray. */ + float max_closest_possible_intersection_hit = -1.f; + bool hit; /** Whether a sphere was hit. */ + float intersection_depth; /** The intersection_depth for a sphere at this + pixel. */ + float closest_possible_intersection; /** The closest possible intersection + for this sphere. */ + float max_closest_possible_intersection; + // Sync up threads so that everyone is similarly initialized. + thread_block.sync(); + //! Coalesced loading and intersection analysis of balls. 
+ for (uint ball_idx = thread_block.thread_rank(); + ball_idx < iDivCeil(static_cast(*num_balls), thread_block.size()) * + thread_block.size() && + !loading_done && n_pixels_done < thread_block.size(); + ball_idx += thread_block.size()) { + if (ball_idx < static_cast(*num_balls)) { // Account for overflow. + const IntersectInfo& ii = ii_d[ball_idx]; + hit = (ii.min.x <= block_area.max.x) && (ii.max.x > block_area.min.x) && + (ii.min.y <= block_area.max.y) && (ii.max.y > block_area.min.y); + if (hit) { + uint write_idx = ATOMICADD_B(&n_loaded, 1u); + di_l[write_idx] = di_d[ball_idx]; + sphere_id_l[write_idx] = static_cast(ids_d[ball_idx]); + PULSAR_LOG_DEV_PIXB( + PULSAR_LOG_RENDER_PIX, + "render|found intersection with sphere %u.\n", + sphere_id_l[write_idx]); + } + if (ii.min.x == MAX_USHORT) + // This is an invalid sphere (out of image). These spheres have + // maximum depth. Since we ordered the spheres by earliest possible + // intersection depth we re certain that there will no other sphere + // that is relevant after this one. + loading_done = true; + } + // Reset n_pixels_done. + n_pixels_done = 0; + thread_block.sync(); // Make sure n_loaded is updated. + if (n_loaded > RENDER_BUFFER_LOAD_THRESH) { + // The load buffer is full enough. Draw. + if (thread_block.thread_rank() == 0) + n_balls_loaded += n_loaded; + max_closest_possible_intersection = 0.f; + // This excludes threads outside of the image boundary. Also, it reduces + // block artifacts. + if (!done) { + for (uint draw_idx = 0; draw_idx < n_loaded; ++draw_idx) { + intersection_depth = 0.f; + if (cam_norm.orthogonal_projection) { + // The closest possible intersection is the distance to the camera + // plane. 
+ closest_possible_intersection = min_depth_d[sphere_id_l[draw_idx]]; + } else { + closest_possible_intersection = + di_l[draw_idx].t_center - di_l[draw_idx].radius; + } + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_RENDER_PIX, + "render|drawing sphere %u (depth: %f, " + "closest possible intersection: %f).\n", + sphere_id_l[draw_idx], + di_l[draw_idx].t_center, + closest_possible_intersection); + hit = draw( + di_l[draw_idx], // Sphere to draw. + op_d == NULL ? 1.f : op_d[sphere_id_l[draw_idx]], // Opacity. + cam_norm, // Cam. + gamma, // Gamma. + ray_dir_norm, // Ray direction. + projected_ray, // Ray intersection with the image. + // Mode switches. + true, // Draw. + false, + false, + false, + false, + false, // No gradients. + // Position info. + coord_x, + coord_y, + sphere_id_l[draw_idx], + // Optional in variables. + NULL, // intersect information. + NULL, // ray_dir. + NULL, // norm_ray_dir. + NULL, // grad_pix. + &ln_pad_over_1minuspad, + // in/out variables + &sm_d, + &sm_m, + result, + // Optional out. + &depth_threshold, + &intersection_depth, + NULL, + NULL, + NULL, + NULL, + NULL // gradients. 
+ ); + if (hit) { + max_closest_possible_intersection_hit = FMAX( + max_closest_possible_intersection_hit, + closest_possible_intersection); + tracker.track( + sphere_id_l[draw_idx], intersection_depth, coord_x, coord_y); + } + max_closest_possible_intersection = FMAX( + max_closest_possible_intersection, closest_possible_intersection); + } + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_RENDER_PIX, + "render|max_closest_possible_intersection: %f, " + "depth_threshold: %f.\n", + max_closest_possible_intersection, + depth_threshold); + } + done = done || + (percent_allowed_difference > 0.f && + max_closest_possible_intersection > depth_threshold) || + tracker.get_n_hits() >= max_n_hits; +#if defined(__CUDACC__) && defined(__HIP_PLATFORM_AMD__) + unsigned long long warp_done = __ballot(done); + int warp_done_bit_cnt = __popcll(warp_done); +#else + uint warp_done = thread_warp.ballot(done); + int warp_done_bit_cnt = POPC(warp_done); +#endif //__CUDACC__ && __HIP_PLATFORM_AMD__ + if (thread_warp.thread_rank() == 0) + ATOMICADD_B(&n_pixels_done, warp_done_bit_cnt); + // This sync is necessary to keep n_loaded until all threads are done with + // painting. + thread_block.sync(); + n_loaded = 0; + } + thread_block.sync(); + } + if (thread_block.thread_rank() == 0) + n_balls_loaded += n_loaded; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_RENDER_PIX, + "render|loaded %d balls in total.\n", + n_balls_loaded); + if (!done) { + for (uint draw_idx = 0; draw_idx < n_loaded; ++draw_idx) { + intersection_depth = 0.f; + if (cam_norm.orthogonal_projection) { + // The closest possible intersection is the distance to the camera + // plane. 
+ closest_possible_intersection = min_depth_d[sphere_id_l[draw_idx]]; + } else { + closest_possible_intersection = + di_l[draw_idx].t_center - di_l[draw_idx].radius; + } + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_RENDER_PIX, + "render|drawing sphere %u (depth: %f, " + "closest possible intersection: %f).\n", + sphere_id_l[draw_idx], + di_l[draw_idx].t_center, + closest_possible_intersection); + hit = draw( + di_l[draw_idx], // Sphere to draw. + op_d == NULL ? 1.f : op_d[sphere_id_l[draw_idx]], // Opacity. + cam_norm, // Cam. + gamma, // Gamma. + ray_dir_norm, // Ray direction. + projected_ray, // Ray intersection with the image. + // Mode switches. + true, // Draw. + false, + false, + false, + false, + false, // No gradients. + // Logging info. + coord_x, + coord_y, + sphere_id_l[draw_idx], + // Optional in variables. + NULL, // intersect information. + NULL, // ray_dir. + NULL, // norm_ray_dir. + NULL, // grad_pix. + &ln_pad_over_1minuspad, + // in/out variables + &sm_d, + &sm_m, + result, + // Optional out. + &depth_threshold, + &intersection_depth, + NULL, + NULL, + NULL, + NULL, + NULL // gradients. + ); + if (hit) { + max_closest_possible_intersection_hit = FMAX( + max_closest_possible_intersection_hit, + closest_possible_intersection); + tracker.track( + sphere_id_l[draw_idx], intersection_depth, coord_x, coord_y); + } + } + } + if (coord_x < cam_norm.film_border_left || + coord_y < cam_norm.film_border_top || + coord_x >= cam_norm.film_border_left + cam_norm.film_width || + coord_y >= cam_norm.film_border_top + cam_norm.film_height) { + RETURN_PARALLEL(); + } + if (mode == 1u) { + // The subtractions, for example coord_y - cam_norm.film_border_left, are + // safe even though both components are uints. We checked their relation + // just above. 
+ result_d + [(coord_y - cam_norm.film_border_top) * cam_norm.film_width * + cam_norm.n_channels + + (coord_x - cam_norm.film_border_left) * cam_norm.n_channels] = + static_cast(tracker.get_n_hits()); + } else { + float sm_d_normfac = FRCP(FMAX(sm_d, FEPS)); + for (uint c_id = 0; c_id < cam_norm.n_channels; ++c_id) + result[c_id] *= sm_d_normfac; + int write_loc = (coord_y - cam_norm.film_border_top) * cam_norm.film_width * + (3 + 2 * n_track) + + (coord_x - cam_norm.film_border_left) * (3 + 2 * n_track); + forw_info_d[write_loc] = sm_m; + forw_info_d[write_loc + 1] = sm_d; + forw_info_d[write_loc + 2] = max_closest_possible_intersection_hit; + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_RENDER_PIX, + "render|writing the %d most important ball infos.\n", + IMIN(n_track, tracker.get_n_hits())); + for (int i = 0; i < n_track; ++i) { + int sphere_id = tracker.get_closest_sphere_id(i); + IASF(sphere_id, forw_info_d[write_loc + 3 + i * 2]); + forw_info_d[write_loc + 3 + i * 2 + 1] = + tracker.get_closest_sphere_depth(i) == MAX_FLOAT + ? -1.f + : tracker.get_closest_sphere_depth(i); + PULSAR_LOG_DEV_PIX( + PULSAR_LOG_RENDER_PIX, + "render|writing %d most important: id: %d, normalized depth: %f.\n", + i, + tracker.get_closest_sphere_id(i), + tracker.get_closest_sphere_depth(i)); + } + } + END_PARALLEL_2D(); +} + +} // namespace Renderer +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.render.instantiate.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.render.instantiate.h new file mode 100644 index 0000000000000000000000000000000000000000..9c1f326e63b8b4860137d9f0d0f440896adb2a88 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/include/renderer.render.instantiate.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. 
+ * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_INCLUDE_RENDERER_RENDER_INSTANTIATE_H_ +#define PULSAR_NATIVE_INCLUDE_RENDERER_RENDER_INSTANTIATE_H_ + +#include "./renderer.render.device.h" + +namespace pulsar { +namespace Renderer { +template GLOBAL void render( + size_t const* const RESTRICT + num_balls, /** Number of balls relevant for this pass. */ + IntersectInfo const* const RESTRICT ii_d, /** Intersect information. */ + DrawInfo const* const RESTRICT di_d, /** Draw information. */ + float const* const RESTRICT min_depth_d, /** Minimum depth per sphere. */ + int const* const RESTRICT id_d, /** IDs. */ + float const* const RESTRICT op_d, /** Opacity. */ + const CamInfo cam_norm, /** Camera normalized with all vectors to be in the + * camera coordinate system. + */ + const float gamma, /** Transparency parameter. **/ + const float percent_allowed_difference, /** Maximum allowed + error in color. */ + const uint max_n_hits, + const float* bg_col_d, + const uint mode, + const int x_min, + const int y_min, + const int x_step, + const int y_step, + // Out variables. + float* const RESTRICT result_d, /** The result image. */ + float* const RESTRICT forw_info_d, /** Additional information needed for the + grad computation. */ + const int n_track /** The number of spheres to track for backprop. 
*/ +); +} +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/logging.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/logging.h new file mode 100644 index 0000000000000000000000000000000000000000..63d472257671287156ccf77531c6897beff1fcd2 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/logging.h @@ -0,0 +1,115 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_LOGGING_H_ +#define PULSAR_LOGGING_H_ + +// #define PULSAR_LOGGING_ENABLED +/** + * Enable detailed per-operation timings. + * + * This timing scheme is not appropriate to measure batched calculations. + * Use `PULSAR_TIMINGS_BATCHED_ENABLED` for that. + */ +// #define PULSAR_TIMINGS_ENABLED +/** + * Time batched operations. + */ +// #define PULSAR_TIMINGS_BATCHED_ENABLED +#if defined(PULSAR_TIMINGS_BATCHED_ENABLED) && defined(PULSAR_TIMINGS_ENABLED) +#pragma message("Pulsar|batched and unbatched timings enabled. This will not") +#pragma message("Pulsar|create meaningful results.") +#endif + +#ifdef PULSAR_LOGGING_ENABLED + +// Control logging. +// 0: INFO, 1: WARNING, 2: ERROR, 3: FATAL (Abort after logging). 
+#define CAFFE2_LOG_THRESHOLD 0 +#define PULSAR_LOG_INIT false +#define PULSAR_LOG_FORWARD false +#define PULSAR_LOG_CALC_SIGNATURE false +#define PULSAR_LOG_RENDER false +#define PULSAR_LOG_RENDER_PIX false +#define PULSAR_LOG_RENDER_PIX_X 428 +#define PULSAR_LOG_RENDER_PIX_Y 669 +#define PULSAR_LOG_RENDER_PIX_ALL false +#define PULSAR_LOG_TRACKER_PIX false +#define PULSAR_LOG_TRACKER_PIX_X 428 +#define PULSAR_LOG_TRACKER_PIX_Y 669 +#define PULSAR_LOG_TRACKER_PIX_ALL false +#define PULSAR_LOG_DRAW_PIX false +#define PULSAR_LOG_DRAW_PIX_X 428 +#define PULSAR_LOG_DRAW_PIX_Y 669 +#define PULSAR_LOG_DRAW_PIX_ALL false +#define PULSAR_LOG_BACKWARD false +#define PULSAR_LOG_GRAD false +#define PULSAR_LOG_GRAD_X 509 +#define PULSAR_LOG_GRAD_Y 489 +#define PULSAR_LOG_GRAD_ALL false +#define PULSAR_LOG_NORMALIZE false +#define PULSAR_LOG_NORMALIZE_X 0 +#define PULSAR_LOG_NORMALIZE_ALL false + +#define PULSAR_LOG_DEV(ID, ...) \ + if ((ID)) { \ + printf(__VA_ARGS__); \ + } +#define PULSAR_LOG_DEV_APIX(ID, MSG, ...) \ + if ((ID) && (film_coord_x == (ID##_X) && film_coord_y == (ID##_Y)) || \ + ID##_ALL) { \ + printf( \ + "%u %u (ap %u %u)|" MSG, \ + film_coord_x, \ + film_coord_y, \ + ap_coord_x, \ + ap_coord_y, \ + __VA_ARGS__); \ + } +#define PULSAR_LOG_DEV_PIX(ID, MSG, ...) \ + if ((ID) && (coord_x == (ID##_X) && coord_y == (ID##_Y)) || ID##_ALL) { \ + printf("%u %u|" MSG, coord_x, coord_y, __VA_ARGS__); \ + } +#ifdef __CUDACC__ +#define PULSAR_LOG_DEV_PIXB(ID, MSG, ...) \ + if ((ID) && static_cast(block_area.min.x) <= (ID##_X) && \ + static_cast(block_area.max.x) > (ID##_X) && \ + static_cast(block_area.min.y) <= (ID##_Y) && \ + static_cast(block_area.max.y) > (ID##_Y)) { \ + printf("%u %u|" MSG, coord_x, coord_y, __VA_ARGS__); \ + } +#else +#define PULSAR_LOG_DEV_PIXB(ID, MSG, ...) \ + if ((ID) && coord_x == (ID##_X) && coord_y == (ID##_Y)) { \ + printf("%u %u|" MSG, coord_x, coord_y, __VA_ARGS__); \ + } +#endif +#define PULSAR_LOG_DEV_NODE(ID, MSG, ...) 
\ + if ((ID) && idx == (ID##_X) || (ID##_ALL)) { \ + printf("%u|" MSG, idx, __VA_ARGS__); \ + } + +#else + +#define CAFFE2_LOG_THRESHOLD 2 + +#define PULSAR_LOG_RENDER false +#define PULSAR_LOG_INIT false +#define PULSAR_LOG_FORWARD false +#define PULSAR_LOG_BACKWARD false +#define PULSAR_LOG_TRACKER_PIX false + +#define PULSAR_LOG_DEV(...) +#define PULSAR_LOG_DEV_APIX(...) +#define PULSAR_LOG_DEV_PIX(...) +#define PULSAR_LOG_DEV_PIXB(...) +#define PULSAR_LOG_DEV_NODE(...) + +#endif + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/pytorch/camera.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/pytorch/camera.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c3794e7edf90f4af50632ea91bc131bd87fd751f --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/pytorch/camera.cpp @@ -0,0 +1,70 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#include "./camera.h" +#include "../include/math.h" + +namespace pulsar { +namespace pytorch { + +CamInfo cam_info_from_params( + const torch::Tensor& cam_pos, + const torch::Tensor& pixel_0_0_center, + const torch::Tensor& pixel_vec_x, + const torch::Tensor& pixel_vec_y, + const torch::Tensor& principal_point_offset, + const float& focal_length, + const uint& width, + const uint& height, + const float& min_dist, + const float& max_dist, + const bool& right_handed) { + CamInfo res; + fill_cam_vecs( + cam_pos.detach().cpu(), + pixel_0_0_center.detach().cpu(), + pixel_vec_x.detach().cpu(), + pixel_vec_y.detach().cpu(), + principal_point_offset.detach().cpu(), + right_handed, + &res); + res.half_pixel_size = 0.5f * length(res.pixel_dir_x); + if (length(res.pixel_dir_y) * 0.5f - res.half_pixel_size > EPS) { + throw std::runtime_error("Pixel sizes must agree in x and y direction!"); + } + res.focal_length = focal_length; + res.aperture_width = + width + 2u * static_cast(abs(res.principal_point_offset_x)); + res.aperture_height = + height + 2u * static_cast(abs(res.principal_point_offset_y)); + res.pixel_0_0_center -= + res.pixel_dir_x * static_cast(abs(res.principal_point_offset_x)); + res.pixel_0_0_center -= + res.pixel_dir_y * static_cast(abs(res.principal_point_offset_y)); + res.film_width = width; + res.film_height = height; + res.film_border_left = + static_cast(std::max(0, 2 * res.principal_point_offset_x)); + res.film_border_top = + static_cast(std::max(0, 2 * res.principal_point_offset_y)); + LOG_IF(INFO, PULSAR_LOG_INIT) + << "Aperture width, height: " << res.aperture_width << ", " + << res.aperture_height; + LOG_IF(INFO, PULSAR_LOG_INIT) + << "Film width, height: " << res.film_width << ", " << res.film_height; + LOG_IF(INFO, PULSAR_LOG_INIT) + << "Film border left, top: " << res.film_border_left << ", " + << res.film_border_top; + res.min_dist = min_dist; + res.max_dist = max_dist; + res.norm_fac = 1.f / (max_dist - min_dist); + return res; +}; + +} // 
namespace pytorch +} // namespace pulsar diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/pytorch/camera.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/pytorch/camera.h new file mode 100644 index 0000000000000000000000000000000000000000..9ecd95353ad76efd2760a4a634493917fda7b468 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/pytorch/camera.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_CAMERA_H_ +#define PULSAR_NATIVE_CAMERA_H_ + +#include +#include "../global.h" + +#include "../include/camera.h" + +namespace pulsar { +namespace pytorch { + +inline void fill_cam_vecs( + const torch::Tensor& pos_vec, + const torch::Tensor& pixel_0_0_center, + const torch::Tensor& pixel_dir_x, + const torch::Tensor& pixel_dir_y, + const torch::Tensor& principal_point_offset, + const bool& right_handed, + CamInfo* res) { + res->eye.x = pos_vec.data_ptr()[0]; + res->eye.y = pos_vec.data_ptr()[1]; + res->eye.z = pos_vec.data_ptr()[2]; + res->pixel_0_0_center.x = pixel_0_0_center.data_ptr()[0]; + res->pixel_0_0_center.y = pixel_0_0_center.data_ptr()[1]; + res->pixel_0_0_center.z = pixel_0_0_center.data_ptr()[2]; + res->pixel_dir_x.x = pixel_dir_x.data_ptr()[0]; + res->pixel_dir_x.y = pixel_dir_x.data_ptr()[1]; + res->pixel_dir_x.z = pixel_dir_x.data_ptr()[2]; + res->pixel_dir_y.x = pixel_dir_y.data_ptr()[0]; + res->pixel_dir_y.y = pixel_dir_y.data_ptr()[1]; + res->pixel_dir_y.z = pixel_dir_y.data_ptr()[2]; + auto sensor_dir_z = pixel_dir_y.cross(pixel_dir_x, -1); + sensor_dir_z /= sensor_dir_z.norm(); + if (right_handed) { + sensor_dir_z *= -1.f; + } + res->sensor_dir_z.x = 
sensor_dir_z.data_ptr()[0]; + res->sensor_dir_z.y = sensor_dir_z.data_ptr()[1]; + res->sensor_dir_z.z = sensor_dir_z.data_ptr()[2]; + res->principal_point_offset_x = principal_point_offset.data_ptr()[0]; + res->principal_point_offset_y = principal_point_offset.data_ptr()[1]; +} + +CamInfo cam_info_from_params( + const torch::Tensor& cam_pos, + const torch::Tensor& pixel_0_0_center, + const torch::Tensor& pixel_vec_x, + const torch::Tensor& pixel_vec_y, + const torch::Tensor& principal_point_offset, + const float& focal_length, + const uint& width, + const uint& height, + const float& min_dist, + const float& max_dist, + const bool& right_handed); + +} // namespace pytorch +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/pytorch/renderer.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/pytorch/renderer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..018ca1ad792923e22949d96087d32600d185d1c3 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/pytorch/renderer.cpp @@ -0,0 +1,1599 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include "./renderer.h" +#include "../include/commands.h" +#include "./camera.h" +#include "./util.h" + +#include +#ifdef WITH_CUDA +#include +#include +#endif + +#ifndef TORCH_CHECK_ARG +// torch <= 1.10 +#define TORCH_CHECK_ARG(cond, argN, ...) 
\ + TORCH_CHECK(cond, "invalid argument ", argN, ": ", __VA_ARGS__) +#endif + +namespace PRE = ::pulsar::Renderer; + +namespace pulsar { +namespace pytorch { + +Renderer::Renderer( + const unsigned int& width, + const unsigned int& height, + const unsigned int& max_n_balls, + const bool& orthogonal_projection, + const bool& right_handed_system, + const float& background_normalization_depth, + const uint& n_channels, + const uint& n_track) { + LOG_IF(INFO, PULSAR_LOG_INIT) << "Initializing renderer."; + TORCH_CHECK_ARG(width > 0, 1, "image width must be > 0!"); + TORCH_CHECK_ARG(height > 0, 2, "image height must be > 0!"); + TORCH_CHECK_ARG(max_n_balls > 0, 3, "max_n_balls must be > 0!"); + TORCH_CHECK_ARG( + background_normalization_depth > 0.f && + background_normalization_depth < 1.f, + 5, + "background_normalization_depth must be in ]0., 1.["); + TORCH_CHECK_ARG(n_channels > 0, 6, "n_channels must be > 0"); + TORCH_CHECK_ARG( + n_track > 0 && n_track <= MAX_GRAD_SPHERES, + 7, + ("n_track must be > 0 and <" + std::to_string(MAX_GRAD_SPHERES) + + ". Is " + std::to_string(n_track) + ".") + .c_str()); + LOG_IF(INFO, PULSAR_LOG_INIT) + << "Image width: " << width << ", height: " << height; + this->renderer_vec.emplace_back(); + this->device_type = c10::DeviceType::CPU; + this->device_index = -1; + PRE::construct( + this->renderer_vec.data(), + max_n_balls, + width, + height, + orthogonal_projection, + right_handed_system, + background_normalization_depth, + n_channels, + n_track); + this->device_tracker = torch::zeros(1); +}; + +Renderer::~Renderer() { + if (this->device_type == c10::DeviceType::CUDA) { +// Can't happen in the case that not compiled with CUDA. 
+#ifdef WITH_CUDA + at::cuda::CUDAGuard device_guard(this->device_tracker.device()); + for (auto nrend : this->renderer_vec) { + PRE::destruct(&nrend); + } +#endif + } else { + for (auto nrend : this->renderer_vec) { + PRE::destruct(&nrend); + } + } +} + +bool Renderer::operator==(const Renderer& rhs) const { + LOG_IF(INFO, PULSAR_LOG_INIT) << "Equality check."; + bool renderer_agrees = (this->renderer_vec[0] == rhs.renderer_vec[0]); + LOG_IF(INFO, PULSAR_LOG_INIT) << " Renderer agrees: " << renderer_agrees; + bool device_agrees = + (this->device_tracker.device() == rhs.device_tracker.device()); + LOG_IF(INFO, PULSAR_LOG_INIT) << " Device agrees: " << device_agrees; + return (renderer_agrees && device_agrees); +}; + +void Renderer::ensure_on_device(torch::Device device, bool /*non_blocking*/) { + TORCH_CHECK_ARG( + device.type() == c10::DeviceType::CUDA || + device.type() == c10::DeviceType::CPU, + 1, + "Only CPU and CUDA device types are supported."); + if (device.type() != this->device_type || + device.index() != this->device_index) { +#ifdef WITH_CUDA + LOG_IF(INFO, PULSAR_LOG_INIT) + << "Transferring render buffers between devices."; + int prev_active; + cudaGetDevice(&prev_active); + if (this->device_type == c10::DeviceType::CUDA) { + LOG_IF(INFO, PULSAR_LOG_INIT) << " Destructing on CUDA."; + cudaSetDevice(this->device_index); + for (auto& nrend : this->renderer_vec) { + PRE::destruct(&nrend); + } + } else { + LOG_IF(INFO, PULSAR_LOG_INIT) << " Destructing on CPU."; + for (auto& nrend : this->renderer_vec) { + PRE::destruct(&nrend); + } + } + if (device.type() == c10::DeviceType::CUDA) { + LOG_IF(INFO, PULSAR_LOG_INIT) << " Constructing on CUDA."; + cudaSetDevice(device.index()); + for (auto& nrend : this->renderer_vec) { + PRE::construct( + &nrend, + this->renderer_vec[0].max_num_balls, + this->renderer_vec[0].cam.film_width, + this->renderer_vec[0].cam.film_height, + this->renderer_vec[0].cam.orthogonal_projection, + this->renderer_vec[0].cam.right_handed, 
+ this->renderer_vec[0].cam.background_normalization_depth, + this->renderer_vec[0].cam.n_channels, + this->n_track()); + } + } else { + LOG_IF(INFO, PULSAR_LOG_INIT) << " Constructing on CPU."; + for (auto& nrend : this->renderer_vec) { + PRE::construct( + &nrend, + this->renderer_vec[0].max_num_balls, + this->renderer_vec[0].cam.film_width, + this->renderer_vec[0].cam.film_height, + this->renderer_vec[0].cam.orthogonal_projection, + this->renderer_vec[0].cam.right_handed, + this->renderer_vec[0].cam.background_normalization_depth, + this->renderer_vec[0].cam.n_channels, + this->n_track()); + } + } + cudaSetDevice(prev_active); + this->device_type = device.type(); + this->device_index = device.index(); +#else + throw std::runtime_error( + "pulsar was built without CUDA " + "but a device move to a CUDA device was initiated."); +#endif + } +}; + +void Renderer::ensure_n_renderers_gte(const size_t& batch_size) { + if (this->renderer_vec.size() < batch_size) { + ptrdiff_t diff = batch_size - this->renderer_vec.size(); + LOG_IF(INFO, PULSAR_LOG_INIT) + << "Increasing render buffers by " << diff + << " to account for batch size " << batch_size; + for (ptrdiff_t i = 0; i < diff; ++i) { + this->renderer_vec.emplace_back(); + if (this->device_type == c10::DeviceType::CUDA) { +#ifdef WITH_CUDA + PRE::construct( + &this->renderer_vec[this->renderer_vec.size() - 1], + this->max_num_balls(), + this->width(), + this->height(), + this->renderer_vec[0].cam.orthogonal_projection, + this->renderer_vec[0].cam.right_handed, + this->renderer_vec[0].cam.background_normalization_depth, + this->renderer_vec[0].cam.n_channels, + this->n_track()); +#endif + } else { + PRE::construct( + &this->renderer_vec[this->renderer_vec.size() - 1], + this->max_num_balls(), + this->width(), + this->height(), + this->renderer_vec[0].cam.orthogonal_projection, + this->renderer_vec[0].cam.right_handed, + this->renderer_vec[0].cam.background_normalization_depth, + this->renderer_vec[0].cam.n_channels, + 
this->n_track()); + } + } + } +} + +std::tuple Renderer::arg_check( + const torch::Tensor& vert_pos, + const torch::Tensor& vert_col, + const torch::Tensor& vert_radii, + const torch::Tensor& cam_pos, + const torch::Tensor& pixel_0_0_center, + const torch::Tensor& pixel_vec_x, + const torch::Tensor& pixel_vec_y, + const torch::Tensor& focal_length, + const torch::Tensor& principal_point_offsets, + const float& gamma, + const float& max_depth, + float& min_depth, + const std::optional& bg_col, + const std::optional& opacity, + const float& percent_allowed_difference, + const uint& max_n_hits, + const uint& mode) { + LOG_IF(INFO, PULSAR_LOG_FORWARD || PULSAR_LOG_BACKWARD) << "Arg check."; + size_t batch_size = 1; + size_t n_points; + bool batch_processing = false; + if (vert_pos.ndimension() == 3) { + // Check all parameters adhere batch size. + batch_processing = true; + batch_size = vert_pos.size(0); + TORCH_CHECK_ARG( + vert_col.ndimension() == 3 && + vert_col.size(0) == static_cast(batch_size), + 2, + "vert_col needs to have batch size."); + TORCH_CHECK_ARG( + vert_radii.ndimension() == 2 && + vert_radii.size(0) == static_cast(batch_size), + 3, + "vert_radii must be specified per batch."); + TORCH_CHECK_ARG( + cam_pos.ndimension() == 2 && + cam_pos.size(0) == static_cast(batch_size), + 4, + "cam_pos must be specified per batch and have the correct batch size."); + TORCH_CHECK_ARG( + pixel_0_0_center.ndimension() == 2 && + pixel_0_0_center.size(0) == static_cast(batch_size), + 5, + "pixel_0_0_center must be specified per batch."); + TORCH_CHECK_ARG( + pixel_vec_x.ndimension() == 2 && + pixel_vec_x.size(0) == static_cast(batch_size), + 6, + "pixel_vec_x must be specified per batch."); + TORCH_CHECK_ARG( + pixel_vec_y.ndimension() == 2 && + pixel_vec_y.size(0) == static_cast(batch_size), + 7, + "pixel_vec_y must be specified per batch."); + TORCH_CHECK_ARG( + focal_length.ndimension() == 1 && + focal_length.size(0) == static_cast(batch_size), + 8, + "focal_length 
must be specified per batch."); + TORCH_CHECK_ARG( + principal_point_offsets.ndimension() == 2 && + principal_point_offsets.size(0) == static_cast(batch_size), + 9, + "principal_point_offsets must be specified per batch."); + if (opacity.has_value()) { + TORCH_CHECK_ARG( + opacity.value().ndimension() == 2 && + opacity.value().size(0) == static_cast(batch_size), + 13, + "Opacity needs to be specified batch-wise."); + } + // Check all parameters are for a matching number of points. + n_points = vert_pos.size(1); + TORCH_CHECK_ARG( + vert_col.size(1) == static_cast(n_points), + 2, + ("The number of points for vertex positions (" + + std::to_string(n_points) + ") and vertex colors (" + + std::to_string(vert_col.size(1)) + ") doesn't agree.") + .c_str()); + TORCH_CHECK_ARG( + vert_radii.size(1) == static_cast(n_points), + 3, + ("The number of points for vertex positions (" + + std::to_string(n_points) + ") and vertex radii (" + + std::to_string(vert_col.size(1)) + ") doesn't agree.") + .c_str()); + if (opacity.has_value()) { + TORCH_CHECK_ARG( + opacity.value().size(1) == static_cast(n_points), + 13, + "Opacity needs to be specified per point."); + } + // Check all parameters have the correct last dimension size. 
+ TORCH_CHECK_ARG( + vert_pos.size(2) == 3, + 1, + ("Vertex positions must be 3D (have shape " + + std::to_string(vert_pos.size(2)) + ")!") + .c_str()); + TORCH_CHECK_ARG( + vert_col.size(2) == this->renderer_vec[0].cam.n_channels, + 2, + ("Vertex colors must have the right number of channels (have shape " + + std::to_string(vert_col.size(2)) + ", need " + + std::to_string(this->renderer_vec[0].cam.n_channels) + ")!") + .c_str()); + TORCH_CHECK_ARG( + cam_pos.size(1) == 3, + 4, + ("Camera position must be 3D (has shape " + + std::to_string(cam_pos.size(1)) + ")!") + .c_str()); + TORCH_CHECK_ARG( + pixel_0_0_center.size(1) == 3, + 5, + ("pixel_0_0_center must be 3D (has shape " + + std::to_string(pixel_0_0_center.size(1)) + ")!") + .c_str()); + TORCH_CHECK_ARG( + pixel_vec_x.size(1) == 3, + 6, + ("pixel_vec_x must be 3D (has shape " + + std::to_string(pixel_vec_x.size(1)) + ")!") + .c_str()); + TORCH_CHECK_ARG( + pixel_vec_y.size(1) == 3, + 7, + ("pixel_vec_y must be 3D (has shape " + + std::to_string(pixel_vec_y.size(1)) + ")!") + .c_str()); + TORCH_CHECK_ARG( + principal_point_offsets.size(1) == 2, + 9, + "principal_point_offsets must contain x and y offsets."); + // Ensure enough renderers are available for the batch. + ensure_n_renderers_gte(batch_size); + } else { + // Check all parameters are of correct dimension. 
+ TORCH_CHECK_ARG( + vert_col.ndimension() == 2, 2, "vert_col needs to have dimension 2."); + TORCH_CHECK_ARG( + vert_radii.ndimension() == 1, 3, "vert_radii must have dimension 1."); + TORCH_CHECK_ARG( + cam_pos.ndimension() == 1, 4, "cam_pos must have dimension 1."); + TORCH_CHECK_ARG( + pixel_0_0_center.ndimension() == 1, + 5, + "pixel_0_0_center must have dimension 1."); + TORCH_CHECK_ARG( + pixel_vec_x.ndimension() == 1, 6, "pixel_vec_x must have dimension 1."); + TORCH_CHECK_ARG( + pixel_vec_y.ndimension() == 1, 7, "pixel_vec_y must have dimension 1."); + TORCH_CHECK_ARG( + focal_length.ndimension() == 0, + 8, + "focal_length must have dimension 0."); + TORCH_CHECK_ARG( + principal_point_offsets.ndimension() == 1, + 9, + "principal_point_offsets must have dimension 1."); + if (opacity.has_value()) { + TORCH_CHECK_ARG( + opacity.value().ndimension() == 1, + 13, + "Opacity needs to be specified per sample."); + } + // Check each. + n_points = vert_pos.size(0); + TORCH_CHECK_ARG( + vert_col.size(0) == static_cast(n_points), + 2, + ("The number of points for vertex positions (" + + std::to_string(n_points) + ") and vertex colors (" + + std::to_string(vert_col.size(0)) + ") doesn't agree.") + .c_str()); + TORCH_CHECK_ARG( + vert_radii.size(0) == static_cast(n_points), + 3, + ("The number of points for vertex positions (" + + std::to_string(n_points) + ") and vertex radii (" + + std::to_string(vert_col.size(0)) + ") doesn't agree.") + .c_str()); + if (opacity.has_value()) { + TORCH_CHECK_ARG( + opacity.value().size(0) == static_cast(n_points), + 12, + "Opacity needs to be specified per point."); + } + // Check all parameters have the correct last dimension size. 
+ TORCH_CHECK_ARG( + vert_pos.size(1) == 3, + 1, + ("Vertex positions must be 3D (have shape " + + std::to_string(vert_pos.size(1)) + ")!") + .c_str()); + TORCH_CHECK_ARG( + vert_col.size(1) == this->renderer_vec[0].cam.n_channels, + 2, + ("Vertex colors must have the right number of channels (have shape " + + std::to_string(vert_col.size(1)) + ", need " + + std::to_string(this->renderer_vec[0].cam.n_channels) + ")!") + .c_str()); + TORCH_CHECK_ARG( + cam_pos.size(0) == 3, + 4, + ("Camera position must be 3D (has shape " + + std::to_string(cam_pos.size(0)) + ")!") + .c_str()); + TORCH_CHECK_ARG( + pixel_0_0_center.size(0) == 3, + 5, + ("pixel_0_0_center must be 3D (has shape " + + std::to_string(pixel_0_0_center.size(0)) + ")!") + .c_str()); + TORCH_CHECK_ARG( + pixel_vec_x.size(0) == 3, + 6, + ("pixel_vec_x must be 3D (has shape " + + std::to_string(pixel_vec_x.size(0)) + ")!") + .c_str()); + TORCH_CHECK_ARG( + pixel_vec_y.size(0) == 3, + 7, + ("pixel_vec_y must be 3D (has shape " + + std::to_string(pixel_vec_y.size(0)) + ")!") + .c_str()); + TORCH_CHECK_ARG( + principal_point_offsets.size(0) == 2, + 9, + "principal_point_offsets must have x and y component."); + } + // Check device placement. + auto dev = torch::device_of(vert_pos).value(); + TORCH_CHECK_ARG( + dev.type() == this->device_type && dev.index() == this->device_index, + 1, + ("Vertex positions must be stored on device " + + c10::DeviceTypeName(this->device_type) + ", index " + + std::to_string(this->device_index) + "! Are stored on " + + c10::DeviceTypeName(dev.type()) + ", index " + + std::to_string(dev.index()) + ".") + .c_str()); + dev = torch::device_of(vert_col).value(); + TORCH_CHECK_ARG( + dev.type() == this->device_type && dev.index() == this->device_index, + 2, + ("Vertex colors must be stored on device " + + c10::DeviceTypeName(this->device_type) + ", index " + + std::to_string(this->device_index) + "! 
Are stored on " + + c10::DeviceTypeName(dev.type()) + ", index " + + std::to_string(dev.index()) + ".") + .c_str()); + dev = torch::device_of(vert_radii).value(); + TORCH_CHECK_ARG( + dev.type() == this->device_type && dev.index() == this->device_index, + 3, + ("Vertex radii must be stored on device " + + c10::DeviceTypeName(this->device_type) + ", index " + + std::to_string(this->device_index) + "! Are stored on " + + c10::DeviceTypeName(dev.type()) + ", index " + + std::to_string(dev.index()) + ".") + .c_str()); + dev = torch::device_of(cam_pos).value(); + TORCH_CHECK_ARG( + dev.type() == this->device_type && dev.index() == this->device_index, + 4, + ("Camera position must be stored on device " + + c10::DeviceTypeName(this->device_type) + ", index " + + std::to_string(this->device_index) + "! Are stored on " + + c10::DeviceTypeName(dev.type()) + ", index " + + std::to_string(dev.index()) + ".") + .c_str()); + dev = torch::device_of(pixel_0_0_center).value(); + TORCH_CHECK_ARG( + dev.type() == this->device_type && dev.index() == this->device_index, + 5, + ("pixel_0_0_center must be stored on device " + + c10::DeviceTypeName(this->device_type) + ", index " + + std::to_string(this->device_index) + "! Are stored on " + + c10::DeviceTypeName(dev.type()) + ", index " + + std::to_string(dev.index()) + ".") + .c_str()); + dev = torch::device_of(pixel_vec_x).value(); + TORCH_CHECK_ARG( + dev.type() == this->device_type && dev.index() == this->device_index, + 6, + ("pixel_vec_x must be stored on device " + + c10::DeviceTypeName(this->device_type) + ", index " + + std::to_string(this->device_index) + "! 
Are stored on " + + c10::DeviceTypeName(dev.type()) + ", index " + + std::to_string(dev.index()) + ".") + .c_str()); + dev = torch::device_of(pixel_vec_y).value(); + TORCH_CHECK_ARG( + dev.type() == this->device_type && dev.index() == this->device_index, + 7, + ("pixel_vec_y must be stored on device " + + c10::DeviceTypeName(this->device_type) + ", index " + + std::to_string(this->device_index) + "! Are stored on " + + c10::DeviceTypeName(dev.type()) + ", index " + + std::to_string(dev.index()) + ".") + .c_str()); + dev = torch::device_of(principal_point_offsets).value(); + TORCH_CHECK_ARG( + dev.type() == this->device_type && dev.index() == this->device_index, + 9, + ("principal_point_offsets must be stored on device " + + c10::DeviceTypeName(this->device_type) + ", index " + + std::to_string(this->device_index) + "! Are stored on " + + c10::DeviceTypeName(dev.type()) + ", index " + + std::to_string(dev.index()) + ".") + .c_str()); + if (opacity.has_value()) { + dev = torch::device_of(opacity.value()).value(); + TORCH_CHECK_ARG( + dev.type() == this->device_type && dev.index() == this->device_index, + 13, + ("opacity must be stored on device " + + c10::DeviceTypeName(this->device_type) + ", index " + + std::to_string(this->device_index) + "! Is stored on " + + c10::DeviceTypeName(dev.type()) + ", index " + + std::to_string(dev.index()) + ".") + .c_str()); + } + // Type checks. 
+ TORCH_CHECK_ARG( + vert_pos.scalar_type() == c10::kFloat, 1, "pulsar requires float types."); + TORCH_CHECK_ARG( + vert_col.scalar_type() == c10::kFloat, 2, "pulsar requires float types."); + TORCH_CHECK_ARG( + vert_radii.scalar_type() == c10::kFloat, + 3, + "pulsar requires float types."); + TORCH_CHECK_ARG( + cam_pos.scalar_type() == c10::kFloat, 4, "pulsar requires float types."); + TORCH_CHECK_ARG( + pixel_0_0_center.scalar_type() == c10::kFloat, + 5, + "pulsar requires float types."); + TORCH_CHECK_ARG( + pixel_vec_x.scalar_type() == c10::kFloat, + 6, + "pulsar requires float types."); + TORCH_CHECK_ARG( + pixel_vec_y.scalar_type() == c10::kFloat, + 7, + "pulsar requires float types."); + TORCH_CHECK_ARG( + focal_length.scalar_type() == c10::kFloat, + 8, + "pulsar requires float types."); + TORCH_CHECK_ARG( + // Unfortunately, the PyTorch interface is inconsistent for + // Int32: in Python, there exists an explicit int32 type, in + // C++ this is currently `c10::kInt`. + principal_point_offsets.scalar_type() == c10::kInt, + 9, + "principal_point_offsets must be provided as int32."); + if (opacity.has_value()) { + TORCH_CHECK_ARG( + opacity.value().scalar_type() == c10::kFloat, + 13, + "opacity must be a float type."); + } + // Content checks. 
+ TORCH_CHECK_ARG( + (vert_radii > FEPS).all().item(), + 3, + ("Vertex radii must be > FEPS (min is " + + std::to_string(vert_radii.min().item()) + ").") + .c_str()); + if (this->orthogonal()) { + TORCH_CHECK_ARG( + (focal_length == 0.f).all().item(), + 8, + ("for an orthogonal projection focal length must be zero (abs max: " + + std::to_string(focal_length.abs().max().item()) + ").") + .c_str()); + } else { + TORCH_CHECK_ARG( + (focal_length > FEPS).all().item(), + 8, + ("for a perspective projection focal length must be > FEPS (min " + + std::to_string(focal_length.min().item()) + ").") + .c_str()); + } + TORCH_CHECK_ARG( + gamma <= 1.f && gamma >= 1E-5f, + 10, + ("gamma must be in [1E-5, 1] (" + std::to_string(gamma) + ").").c_str()); + if (min_depth == 0.f) { + min_depth = focal_length.max().item() + 2.f * FEPS; + } + TORCH_CHECK_ARG( + min_depth > focal_length.max().item(), + 12, + ("min_depth must be > focal_length (" + std::to_string(min_depth) + + " vs. " + std::to_string(focal_length.max().item()) + ").") + .c_str()); + TORCH_CHECK_ARG( + max_depth > min_depth + FEPS, + 11, + ("max_depth must be > min_depth + FEPS (" + std::to_string(max_depth) + + " vs. 
" + std::to_string(min_depth + FEPS) + ").") + .c_str()); + TORCH_CHECK_ARG( + percent_allowed_difference >= 0.f && percent_allowed_difference < 1.f, + 14, + ("percent_allowed_difference must be in [0., 1.[ (" + + std::to_string(percent_allowed_difference) + ").") + .c_str()); + TORCH_CHECK_ARG(max_n_hits > 0, 14, "max_n_hits must be > 0!"); + TORCH_CHECK_ARG(mode < 2, 15, "mode must be in {0, 1}."); + torch::Tensor real_bg_col; + if (bg_col.has_value()) { + TORCH_CHECK_ARG( + bg_col.value().device().type() == this->device_type && + bg_col.value().device().index() == this->device_index, + 13, + "bg_col must be stored on the renderer device!"); + TORCH_CHECK_ARG( + bg_col.value().ndimension() == 1 && + bg_col.value().size(0) == renderer_vec[0].cam.n_channels, + 13, + "bg_col must have the same number of channels as the image,)."); + real_bg_col = bg_col.value(); + } else { + real_bg_col = torch::ones( + {renderer_vec[0].cam.n_channels}, + c10::Device(this->device_type, this->device_index)) + .to(c10::kFloat); + } + if (opacity.has_value()) { + TORCH_CHECK_ARG( + (opacity.value() >= 0.f).all().item(), + 13, + "opacity must be >= 0."); + TORCH_CHECK_ARG( + (opacity.value() <= 1.f).all().item(), + 13, + "opacity must be <= 1."); + } + LOG_IF(INFO, PULSAR_LOG_FORWARD || PULSAR_LOG_BACKWARD) + << " batch_size: " << batch_size; + LOG_IF(INFO, PULSAR_LOG_FORWARD || PULSAR_LOG_BACKWARD) + << " n_points: " << n_points; + LOG_IF(INFO, PULSAR_LOG_FORWARD || PULSAR_LOG_BACKWARD) + << " batch_processing: " << batch_processing; + return std::tuple( + batch_size, n_points, batch_processing, real_bg_col); +} + +std::tuple Renderer::forward( + const torch::Tensor& vert_pos, + const torch::Tensor& vert_col, + const torch::Tensor& vert_radii, + const torch::Tensor& cam_pos, + const torch::Tensor& pixel_0_0_center, + const torch::Tensor& pixel_vec_x, + const torch::Tensor& pixel_vec_y, + const torch::Tensor& focal_length, + const torch::Tensor& principal_point_offsets, + const float& 
gamma, + const float& max_depth, + float min_depth, + const std::optional& bg_col, + const std::optional& opacity, + const float& percent_allowed_difference, + const uint& max_n_hits, + const uint& mode) { + // Parameter checks. + this->ensure_on_device(this->device_tracker.device()); + size_t batch_size; + size_t n_points; + bool batch_processing; + torch::Tensor real_bg_col; + std::tie(batch_size, n_points, batch_processing, real_bg_col) = + this->arg_check( + vert_pos, + vert_col, + vert_radii, + cam_pos, + pixel_0_0_center, + pixel_vec_x, + pixel_vec_y, + focal_length, + principal_point_offsets, + gamma, + max_depth, + min_depth, + bg_col, + opacity, + percent_allowed_difference, + max_n_hits, + mode); + LOG_IF(INFO, PULSAR_LOG_FORWARD) << "Extracting camera objects..."; + // Create the camera information. + std::vector cam_infos(batch_size); + if (batch_processing) { + for (size_t batch_i = 0; batch_i < batch_size; ++batch_i) { + cam_infos[batch_i] = cam_info_from_params( + cam_pos[batch_i], + pixel_0_0_center[batch_i], + pixel_vec_x[batch_i], + pixel_vec_y[batch_i], + principal_point_offsets[batch_i], + focal_length[batch_i].item(), + this->renderer_vec[0].cam.film_width, + this->renderer_vec[0].cam.film_height, + min_depth, + max_depth, + this->renderer_vec[0].cam.right_handed); + } + } else { + cam_infos[0] = cam_info_from_params( + cam_pos, + pixel_0_0_center, + pixel_vec_x, + pixel_vec_y, + principal_point_offsets, + focal_length.item(), + this->renderer_vec[0].cam.film_width, + this->renderer_vec[0].cam.film_height, + min_depth, + max_depth, + this->renderer_vec[0].cam.right_handed); + } + LOG_IF(INFO, PULSAR_LOG_FORWARD) << "Processing..."; + // Let's go! + // Contiguous version of opacity, if available. We need to create this object + // in scope to keep it alive. 
+ torch::Tensor opacity_contiguous; + float const* opacity_ptr = nullptr; + if (opacity.has_value()) { + opacity_contiguous = opacity.value().contiguous(); + opacity_ptr = opacity_contiguous.data_ptr(); + } + if (this->device_type == c10::DeviceType::CUDA) { +// No else check necessary - if not compiled with CUDA +// we can't even reach this code (the renderer can't be +// moved to a CUDA device). +#ifdef WITH_CUDA + int prev_active; + cudaGetDevice(&prev_active); + cudaSetDevice(this->device_index); +#ifdef PULSAR_TIMINGS_BATCHED_ENABLED + START_TIME_CU(batch_forward); +#endif + if (batch_processing) { + for (size_t batch_i = 0; batch_i < batch_size; ++batch_i) { + // These calls are non-blocking and just kick off the computations. + PRE::forward( + &this->renderer_vec[batch_i], + vert_pos[batch_i].contiguous().data_ptr(), + vert_col[batch_i].contiguous().data_ptr(), + vert_radii[batch_i].contiguous().data_ptr(), + cam_infos[batch_i], + gamma, + percent_allowed_difference, + max_n_hits, + real_bg_col.contiguous().data_ptr(), + opacity_ptr, + n_points, + mode, + at::cuda::getCurrentCUDAStream()); + } + } else { + PRE::forward( + this->renderer_vec.data(), + vert_pos.contiguous().data_ptr(), + vert_col.contiguous().data_ptr(), + vert_radii.contiguous().data_ptr(), + cam_infos[0], + gamma, + percent_allowed_difference, + max_n_hits, + real_bg_col.contiguous().data_ptr(), + opacity_ptr, + n_points, + mode, + at::cuda::getCurrentCUDAStream()); + } +#ifdef PULSAR_TIMINGS_BATCHED_ENABLED + STOP_TIME_CU(batch_forward); + float time_ms; + GET_TIME_CU(batch_forward, &time_ms); + std::cout << "Forward render batched time per example: " + << time_ms / static_cast(batch_size) << "ms" << std::endl; +#endif + cudaSetDevice(prev_active); +#endif + } else { +#ifdef PULSAR_TIMINGS_BATCHED_ENABLED + START_TIME(batch_forward); +#endif + if (batch_processing) { + for (size_t batch_i = 0; batch_i < batch_size; ++batch_i) { + // These calls are non-blocking and just kick off the 
computations. + PRE::forward( + &this->renderer_vec[batch_i], + vert_pos[batch_i].contiguous().data_ptr(), + vert_col[batch_i].contiguous().data_ptr(), + vert_radii[batch_i].contiguous().data_ptr(), + cam_infos[batch_i], + gamma, + percent_allowed_difference, + max_n_hits, + real_bg_col.contiguous().data_ptr(), + opacity_ptr, + n_points, + mode, + nullptr); + } + } else { + PRE::forward( + this->renderer_vec.data(), + vert_pos.contiguous().data_ptr(), + vert_col.contiguous().data_ptr(), + vert_radii.contiguous().data_ptr(), + cam_infos[0], + gamma, + percent_allowed_difference, + max_n_hits, + real_bg_col.contiguous().data_ptr(), + opacity_ptr, + n_points, + mode, + nullptr); + } +#ifdef PULSAR_TIMINGS_BATCHED_ENABLED + STOP_TIME(batch_forward); + float time_ms; + GET_TIME(batch_forward, &time_ms); + std::cout << "Forward render batched time per example: " + << time_ms / static_cast(batch_size) << "ms" << std::endl; +#endif + } + LOG_IF(INFO, PULSAR_LOG_FORWARD) << "Extracting results..."; + // Create the results. + std::vector results(batch_size); + std::vector forw_infos(batch_size); + for (size_t batch_i = 0; batch_i < batch_size; ++batch_i) { + results[batch_i] = from_blob( + this->renderer_vec[batch_i].result_d, + {this->renderer_vec[0].cam.film_height, + this->renderer_vec[0].cam.film_width, + this->renderer_vec[0].cam.n_channels}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + if (mode == 1) + results[batch_i] = results[batch_i].slice(2, 0, 1, 1); + forw_infos[batch_i] = from_blob( + this->renderer_vec[batch_i].forw_info_d, + {this->renderer_vec[0].cam.film_height, + this->renderer_vec[0].cam.film_width, + 3 + 2 * this->n_track()}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? 
at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + } + LOG_IF(INFO, PULSAR_LOG_FORWARD) << "Forward render complete."; + if (batch_processing) { + return std::tuple( + torch::stack(results), torch::stack(forw_infos)); + } else { + return std::tuple(results[0], forw_infos[0]); + } +}; + +std::tuple< + std::optional, + std::optional, + std::optional, + std::optional, + std::optional, + std::optional, + std::optional, + std::optional> +Renderer::backward( + const torch::Tensor& grad_im, + const torch::Tensor& image, + const torch::Tensor& forw_info, + const torch::Tensor& vert_pos, + const torch::Tensor& vert_col, + const torch::Tensor& vert_radii, + const torch::Tensor& cam_pos, + const torch::Tensor& pixel_0_0_center, + const torch::Tensor& pixel_vec_x, + const torch::Tensor& pixel_vec_y, + const torch::Tensor& focal_length, + const torch::Tensor& principal_point_offsets, + const float& gamma, + const float& max_depth, + float min_depth, + const std::optional& bg_col, + const std::optional& opacity, + const float& percent_allowed_difference, + const uint& max_n_hits, + const uint& mode, + const bool& dif_pos, + const bool& dif_col, + const bool& dif_rad, + const bool& dif_cam, + const bool& dif_opy, + const std::optional>& dbg_pos) { + this->ensure_on_device(this->device_tracker.device()); + size_t batch_size; + size_t n_points; + bool batch_processing; + torch::Tensor real_bg_col; + std::tie(batch_size, n_points, batch_processing, real_bg_col) = + this->arg_check( + vert_pos, + vert_col, + vert_radii, + cam_pos, + pixel_0_0_center, + pixel_vec_x, + pixel_vec_y, + focal_length, + principal_point_offsets, + gamma, + max_depth, + min_depth, + bg_col, + opacity, + percent_allowed_difference, + max_n_hits, + mode); + // Additional checks for the gradient computation. 
+ TORCH_CHECK_ARG( + (grad_im.ndimension() == 3 + batch_processing && + static_cast(grad_im.size(0 + batch_processing)) == + this->height() && + static_cast(grad_im.size(1 + batch_processing)) == this->width() && + static_cast(grad_im.size(2 + batch_processing)) == + this->renderer_vec[0].cam.n_channels), + 1, + "The gradient image size is not correct."); + TORCH_CHECK_ARG( + (image.ndimension() == 3 + batch_processing && + static_cast(image.size(0 + batch_processing)) == this->height() && + static_cast(image.size(1 + batch_processing)) == this->width() && + static_cast(image.size(2 + batch_processing)) == + this->renderer_vec[0].cam.n_channels), + 2, + "The result image size is not correct."); + TORCH_CHECK_ARG( + grad_im.scalar_type() == c10::kFloat, + 1, + "The gradient image must be of float type."); + TORCH_CHECK_ARG( + image.scalar_type() == c10::kFloat, + 2, + "The image must be of float type."); + if (dif_opy) { + TORCH_CHECK_ARG( + opacity.has_value(), 13, "dif_opy set requires opacity values."); + } + if (batch_processing) { + TORCH_CHECK_ARG( + grad_im.size(0) == static_cast(batch_size), + 1, + "Gradient image batch size must agree."); + TORCH_CHECK_ARG( + image.size(0) == static_cast(batch_size), + 2, + "Image batch size must agree."); + TORCH_CHECK_ARG( + forw_info.size(0) == static_cast(batch_size), + 3, + "forward info must have batch size."); + } + TORCH_CHECK_ARG( + (forw_info.ndimension() == 3 + batch_processing && + static_cast(forw_info.size(0 + batch_processing)) == + this->height() && + static_cast(forw_info.size(1 + batch_processing)) == + this->width() && + static_cast(forw_info.size(2 + batch_processing)) == + 3 + 2 * this->n_track()), + 3, + "The forward info image size is not correct."); + TORCH_CHECK_ARG( + forw_info.scalar_type() == c10::kFloat, + 3, + "The forward info must be of float type."); + // Check device. 
+ auto dev = torch::device_of(grad_im).value(); + TORCH_CHECK_ARG( + dev.type() == this->device_type && dev.index() == this->device_index, + 1, + ("grad_im must be stored on device " + + c10::DeviceTypeName(this->device_type) + ", index " + + std::to_string(this->device_index) + "! Are stored on " + + c10::DeviceTypeName(dev.type()) + ", index " + + std::to_string(dev.index()) + ".") + .c_str()); + dev = torch::device_of(image).value(); + TORCH_CHECK_ARG( + dev.type() == this->device_type && dev.index() == this->device_index, + 2, + ("image must be stored on device " + + c10::DeviceTypeName(this->device_type) + ", index " + + std::to_string(this->device_index) + "! Are stored on " + + c10::DeviceTypeName(dev.type()) + ", index " + + std::to_string(dev.index()) + ".") + .c_str()); + dev = torch::device_of(forw_info).value(); + TORCH_CHECK_ARG( + dev.type() == this->device_type && dev.index() == this->device_index, + 3, + ("forw_info must be stored on device " + + c10::DeviceTypeName(this->device_type) + ", index " + + std::to_string(this->device_index) + "! Are stored on " + + c10::DeviceTypeName(dev.type()) + ", index " + + std::to_string(dev.index()) + ".") + .c_str()); + if (dbg_pos.has_value()) { + TORCH_CHECK_ARG( + dbg_pos.value().first < this->width() && + dbg_pos.value().second < this->height(), + 23, + "The debug position must be within image bounds."); + } + // Prepare the return value. + std::tuple< + std::optional, + std::optional, + std::optional, + std::optional, + std::optional, + std::optional, + std::optional, + std::optional> + ret; + if (mode == 1 || (!dif_pos && !dif_col && !dif_rad && !dif_cam && !dif_opy)) { + return ret; + } + // Create the camera information. 
+ std::vector cam_infos(batch_size); + if (batch_processing) { + for (size_t batch_i = 0; batch_i < batch_size; ++batch_i) { + cam_infos[batch_i] = cam_info_from_params( + cam_pos[batch_i], + pixel_0_0_center[batch_i], + pixel_vec_x[batch_i], + pixel_vec_y[batch_i], + principal_point_offsets[batch_i], + focal_length[batch_i].item(), + this->renderer_vec[0].cam.film_width, + this->renderer_vec[0].cam.film_height, + min_depth, + max_depth, + this->renderer_vec[0].cam.right_handed); + } + } else { + cam_infos[0] = cam_info_from_params( + cam_pos, + pixel_0_0_center, + pixel_vec_x, + pixel_vec_y, + principal_point_offsets, + focal_length.item(), + this->renderer_vec[0].cam.film_width, + this->renderer_vec[0].cam.film_height, + min_depth, + max_depth, + this->renderer_vec[0].cam.right_handed); + } + // Let's go! + // Contiguous version of opacity, if available. We need to create this object + // in scope to keep it alive. + torch::Tensor opacity_contiguous; + float const* opacity_ptr = nullptr; + if (opacity.has_value()) { + opacity_contiguous = opacity.value().contiguous(); + opacity_ptr = opacity_contiguous.data_ptr(); + } + if (this->device_type == c10::DeviceType::CUDA) { +// No else check necessary - it's not possible to move +// the renderer to a CUDA device if not built with CUDA. +#ifdef WITH_CUDA + int prev_active; + cudaGetDevice(&prev_active); + cudaSetDevice(this->device_index); +#ifdef PULSAR_TIMINGS_BATCHED_ENABLED + START_TIME_CU(batch_backward); +#endif + if (batch_processing) { + for (size_t batch_i = 0; batch_i < batch_size; ++batch_i) { + // These calls are non-blocking and just kick off the computations. 
+ if (dbg_pos.has_value()) { + PRE::backward_dbg( + &this->renderer_vec[batch_i], + grad_im[batch_i].contiguous().data_ptr(), + image[batch_i].contiguous().data_ptr(), + forw_info[batch_i].contiguous().data_ptr(), + vert_pos[batch_i].contiguous().data_ptr(), + vert_col[batch_i].contiguous().data_ptr(), + vert_radii[batch_i].contiguous().data_ptr(), + cam_infos[batch_i], + gamma, + percent_allowed_difference, + max_n_hits, + opacity_ptr, + n_points, + mode, + dif_pos, + dif_col, + dif_rad, + dif_cam, + dif_opy, + dbg_pos.value().first, + dbg_pos.value().second, + at::cuda::getCurrentCUDAStream()); + } else { + PRE::backward( + &this->renderer_vec[batch_i], + grad_im[batch_i].contiguous().data_ptr(), + image[batch_i].contiguous().data_ptr(), + forw_info[batch_i].contiguous().data_ptr(), + vert_pos[batch_i].contiguous().data_ptr(), + vert_col[batch_i].contiguous().data_ptr(), + vert_radii[batch_i].contiguous().data_ptr(), + cam_infos[batch_i], + gamma, + percent_allowed_difference, + max_n_hits, + opacity_ptr, + n_points, + mode, + dif_pos, + dif_col, + dif_rad, + dif_cam, + dif_opy, + at::cuda::getCurrentCUDAStream()); + } + } + } else { + if (dbg_pos.has_value()) { + PRE::backward_dbg( + this->renderer_vec.data(), + grad_im.contiguous().data_ptr(), + image.contiguous().data_ptr(), + forw_info.contiguous().data_ptr(), + vert_pos.contiguous().data_ptr(), + vert_col.contiguous().data_ptr(), + vert_radii.contiguous().data_ptr(), + cam_infos[0], + gamma, + percent_allowed_difference, + max_n_hits, + opacity_ptr, + n_points, + mode, + dif_pos, + dif_col, + dif_rad, + dif_cam, + dif_opy, + dbg_pos.value().first, + dbg_pos.value().second, + at::cuda::getCurrentCUDAStream()); + } else { + PRE::backward( + this->renderer_vec.data(), + grad_im.contiguous().data_ptr(), + image.contiguous().data_ptr(), + forw_info.contiguous().data_ptr(), + vert_pos.contiguous().data_ptr(), + vert_col.contiguous().data_ptr(), + vert_radii.contiguous().data_ptr(), + cam_infos[0], + gamma, + 
percent_allowed_difference, + max_n_hits, + opacity_ptr, + n_points, + mode, + dif_pos, + dif_col, + dif_rad, + dif_cam, + dif_opy, + at::cuda::getCurrentCUDAStream()); + } + } + cudaSetDevice(prev_active); +#ifdef PULSAR_TIMINGS_BATCHED_ENABLED + STOP_TIME_CU(batch_backward); + float time_ms; + GET_TIME_CU(batch_backward, &time_ms); + std::cout << "Backward render batched time per example: " + << time_ms / static_cast(batch_size) << "ms" << std::endl; +#endif +#endif // WITH_CUDA + } else { +#ifdef PULSAR_TIMINGS_BATCHED_ENABLED + START_TIME(batch_backward); +#endif + if (batch_processing) { + for (size_t batch_i = 0; batch_i < batch_size; ++batch_i) { + // These calls are non-blocking and just kick off the computations. + if (dbg_pos.has_value()) { + PRE::backward_dbg( + &this->renderer_vec[batch_i], + grad_im[batch_i].contiguous().data_ptr(), + image[batch_i].contiguous().data_ptr(), + forw_info[batch_i].contiguous().data_ptr(), + vert_pos[batch_i].contiguous().data_ptr(), + vert_col[batch_i].contiguous().data_ptr(), + vert_radii[batch_i].contiguous().data_ptr(), + cam_infos[batch_i], + gamma, + percent_allowed_difference, + max_n_hits, + opacity_ptr, + n_points, + mode, + dif_pos, + dif_col, + dif_rad, + dif_cam, + dif_opy, + dbg_pos.value().first, + dbg_pos.value().second, + nullptr); + } else { + PRE::backward( + &this->renderer_vec[batch_i], + grad_im[batch_i].contiguous().data_ptr(), + image[batch_i].contiguous().data_ptr(), + forw_info[batch_i].contiguous().data_ptr(), + vert_pos[batch_i].contiguous().data_ptr(), + vert_col[batch_i].contiguous().data_ptr(), + vert_radii[batch_i].contiguous().data_ptr(), + cam_infos[batch_i], + gamma, + percent_allowed_difference, + max_n_hits, + opacity_ptr, + n_points, + mode, + dif_pos, + dif_col, + dif_rad, + dif_cam, + dif_opy, + nullptr); + } + } + } else { + if (dbg_pos.has_value()) { + PRE::backward_dbg( + this->renderer_vec.data(), + grad_im.contiguous().data_ptr(), + image.contiguous().data_ptr(), + 
forw_info.contiguous().data_ptr(), + vert_pos.contiguous().data_ptr(), + vert_col.contiguous().data_ptr(), + vert_radii.contiguous().data_ptr(), + cam_infos[0], + gamma, + percent_allowed_difference, + max_n_hits, + opacity_ptr, + n_points, + mode, + dif_pos, + dif_col, + dif_rad, + dif_cam, + dif_opy, + dbg_pos.value().first, + dbg_pos.value().second, + nullptr); + } else { + PRE::backward( + this->renderer_vec.data(), + grad_im.contiguous().data_ptr(), + image.contiguous().data_ptr(), + forw_info.contiguous().data_ptr(), + vert_pos.contiguous().data_ptr(), + vert_col.contiguous().data_ptr(), + vert_radii.contiguous().data_ptr(), + cam_infos[0], + gamma, + percent_allowed_difference, + max_n_hits, + opacity_ptr, + n_points, + mode, + dif_pos, + dif_col, + dif_rad, + dif_cam, + dif_opy, + nullptr); + } + } +#ifdef PULSAR_TIMINGS_BATCHED_ENABLED + STOP_TIME(batch_backward); + float time_ms; + GET_TIME(batch_backward, &time_ms); + std::cout << "Backward render batched time per example: " + << time_ms / static_cast(batch_size) << "ms" << std::endl; +#endif + } + if (dif_pos) { + if (batch_processing) { + std::vector results(batch_size); + for (size_t batch_i = 0; batch_i < batch_size; ++batch_i) { + results[batch_i] = from_blob( + reinterpret_cast(this->renderer_vec[batch_i].grad_pos_d), + {static_cast(n_points), 3}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + } + std::get<0>(ret) = torch::stack(results); + } else { + std::get<0>(ret) = from_blob( + reinterpret_cast(this->renderer_vec[0].grad_pos_d), + {static_cast(n_points), 3}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? 
(cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + } + } + if (dif_col) { + if (batch_processing) { + std::vector results(batch_size); + for (size_t batch_i = 0; batch_i < batch_size; ++batch_i) { + results[batch_i] = from_blob( + reinterpret_cast(this->renderer_vec[batch_i].grad_col_d), + {static_cast(n_points), + this->renderer_vec[0].cam.n_channels}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + } + std::get<1>(ret) = torch::stack(results); + } else { + std::get<1>(ret) = from_blob( + reinterpret_cast(this->renderer_vec[0].grad_col_d), + {static_cast(n_points), + this->renderer_vec[0].cam.n_channels}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + } + } + if (dif_rad) { + if (batch_processing) { + std::vector results(batch_size); + for (size_t batch_i = 0; batch_i < batch_size; ++batch_i) { + results[batch_i] = from_blob( + reinterpret_cast(this->renderer_vec[batch_i].grad_rad_d), + {static_cast(n_points)}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + } + std::get<2>(ret) = torch::stack(results); + } else { + std::get<2>(ret) = from_blob( + reinterpret_cast(this->renderer_vec[0].grad_rad_d), + {static_cast(n_points)}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? 
(cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + } + } + if (dif_cam) { + if (batch_processing) { + std::vector res_p1(batch_size); + std::vector res_p2(batch_size); + std::vector res_p3(batch_size); + std::vector res_p4(batch_size); + for (size_t batch_i = 0; batch_i < batch_size; ++batch_i) { + res_p1[batch_i] = from_blob( + reinterpret_cast(this->renderer_vec[batch_i].grad_cam_d), + {3}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + res_p2[batch_i] = from_blob( + reinterpret_cast( + this->renderer_vec[batch_i].grad_cam_d + 3), + {3}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + res_p3[batch_i] = from_blob( + reinterpret_cast( + this->renderer_vec[batch_i].grad_cam_d + 6), + {3}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + res_p4[batch_i] = from_blob( + reinterpret_cast( + this->renderer_vec[batch_i].grad_cam_d + 9), + {3}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? 
(cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + } + std::get<3>(ret) = torch::stack(res_p1); + std::get<4>(ret) = torch::stack(res_p2); + std::get<5>(ret) = torch::stack(res_p3); + std::get<6>(ret) = torch::stack(res_p4); + } else { + std::get<3>(ret) = from_blob( + reinterpret_cast(this->renderer_vec[0].grad_cam_d), + {3}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + std::get<4>(ret) = from_blob( + reinterpret_cast(this->renderer_vec[0].grad_cam_d + 3), + {3}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + std::get<5>(ret) = from_blob( + reinterpret_cast(this->renderer_vec[0].grad_cam_d + 6), + {3}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + std::get<6>(ret) = from_blob( + reinterpret_cast(this->renderer_vec[0].grad_cam_d + 9), + {3}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + } + } + if (dif_opy) { + if (batch_processing) { + std::vector results(batch_size); + for (size_t batch_i = 0; batch_i < batch_size; ++batch_i) { + results[batch_i] = from_blob( + reinterpret_cast(this->renderer_vec[batch_i].grad_opy_d), + {static_cast(n_points)}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? 
(cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + } + std::get<7>(ret) = torch::stack(results); + } else { + std::get<7>(ret) = from_blob( + reinterpret_cast(this->renderer_vec[0].grad_opy_d), + {static_cast(n_points)}, + this->device_type, + this->device_index, + torch::kFloat, + this->device_type == c10::DeviceType::CUDA +#ifdef WITH_CUDA + ? at::cuda::getCurrentCUDAStream() +#else + ? (cudaStream_t) nullptr +#endif + : (cudaStream_t) nullptr); + } + } + return ret; +}; + +} // namespace pytorch +} // namespace pulsar diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/pytorch/renderer.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/pytorch/renderer.h new file mode 100644 index 0000000000000000000000000000000000000000..90bc3c8248c2eaeb8b63636adc92ccf4bc185379 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/pytorch/renderer.h @@ -0,0 +1,174 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifndef PULSAR_NATIVE_PYTORCH_RENDERER_H_ +#define PULSAR_NATIVE_PYTORCH_RENDERER_H_ + +#include "../global.h" +#include "../include/renderer.h" + +namespace pulsar { +namespace pytorch { + +struct Renderer { + public: + /** + * Pytorch Pulsar differentiable rendering module. 
+ */ + explicit Renderer( + const unsigned int& width, + const unsigned int& height, + const uint& max_n_balls, + const bool& orthogonal_projection, + const bool& right_handed_system, + const float& background_normalization_depth, + const uint& n_channels, + const uint& n_track); + ~Renderer(); + + std::tuple forward( + const torch::Tensor& vert_pos, + const torch::Tensor& vert_col, + const torch::Tensor& vert_radii, + const torch::Tensor& cam_pos, + const torch::Tensor& pixel_0_0_center, + const torch::Tensor& pixel_vec_x, + const torch::Tensor& pixel_vec_y, + const torch::Tensor& focal_length, + const torch::Tensor& principal_point_offsets, + const float& gamma, + const float& max_depth, + float min_depth, + const std::optional& bg_col, + const std::optional& opacity, + const float& percent_allowed_difference, + const uint& max_n_hits, + const uint& mode); + + std::tuple< + std::optional, + std::optional, + std::optional, + std::optional, + std::optional, + std::optional, + std::optional, + std::optional> + backward( + const torch::Tensor& grad_im, + const torch::Tensor& image, + const torch::Tensor& forw_info, + const torch::Tensor& vert_pos, + const torch::Tensor& vert_col, + const torch::Tensor& vert_radii, + const torch::Tensor& cam_pos, + const torch::Tensor& pixel_0_0_center, + const torch::Tensor& pixel_vec_x, + const torch::Tensor& pixel_vec_y, + const torch::Tensor& focal_length, + const torch::Tensor& principal_point_offsets, + const float& gamma, + const float& max_depth, + float min_depth, + const std::optional& bg_col, + const std::optional& opacity, + const float& percent_allowed_difference, + const uint& max_n_hits, + const uint& mode, + const bool& dif_pos, + const bool& dif_col, + const bool& dif_rad, + const bool& dif_cam, + const bool& dif_opy, + const std::optional>& dbg_pos); + + // Infrastructure. + /** + * Ensure that the renderer is placed on this device. + * Is nearly a no-op if the device is correct. 
+ */ + void ensure_on_device(torch::Device device, bool non_blocking = false); + + /** + * Ensure that at least n renderers are available. + */ + void ensure_n_renderers_gte(const size_t& batch_size); + + /** + * Check the parameters. + */ + std::tuple arg_check( + const torch::Tensor& vert_pos, + const torch::Tensor& vert_col, + const torch::Tensor& vert_radii, + const torch::Tensor& cam_pos, + const torch::Tensor& pixel_0_0_center, + const torch::Tensor& pixel_vec_x, + const torch::Tensor& pixel_vec_y, + const torch::Tensor& focal_length, + const torch::Tensor& principal_point_offsets, + const float& gamma, + const float& max_depth, + float& min_depth, + const std::optional& bg_col, + const std::optional& opacity, + const float& percent_allowed_difference, + const uint& max_n_hits, + const uint& mode); + + bool operator==(const Renderer& rhs) const; + inline friend std::ostream& operator<<( + std::ostream& stream, + const Renderer& self) { + stream << "pulsar::Renderer["; + // Device info. + stream << self.device_type; + if (self.device_index != -1) + stream << ", ID " << self.device_index; + stream << "]"; + return stream; + } + + inline uint width() const { + return this->renderer_vec[0].cam.film_width; + } + inline uint height() const { + return this->renderer_vec[0].cam.film_height; + } + inline int max_num_balls() const { + return this->renderer_vec[0].max_num_balls; + } + inline bool orthogonal() const { + return this->renderer_vec[0].cam.orthogonal_projection; + } + inline bool right_handed() const { + return this->renderer_vec[0].cam.right_handed; + } + inline uint n_track() const { + return static_cast(this->renderer_vec[0].n_track); + } + + /** A tensor that is registered as a buffer with this Module to track its + * device placement. Unfortunately, pytorch doesn't offer tracking Module + * device placement in a better way as of now. + */ + torch::Tensor device_tracker; + + protected: + /** The device type for this renderer. 
*/ + c10::DeviceType device_type; + /** The device index for this renderer. */ + c10::DeviceIndex device_index; + /** Pointer to the underlying pulsar renderers. */ + std::vector renderer_vec; +}; + +} // namespace pytorch +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/pytorch/tensor_util.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/pytorch/tensor_util.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b372e0bf3ba4407d532ae17da92bf3422871bbc9 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/pytorch/tensor_util.cpp @@ -0,0 +1,64 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#ifdef WITH_CUDA +#include +#include +#include +#endif +#include + +#include "./tensor_util.h" + +namespace pulsar { +namespace pytorch { + +torch::Tensor sphere_ids_from_result_info_nograd( + const torch::Tensor& forw_info) { + torch::Tensor result = torch::zeros( + {forw_info.size(0), + forw_info.size(1), + forw_info.size(2), + (forw_info.size(3) - 3) / 2}, + torch::TensorOptions().device(forw_info.device()).dtype(torch::kInt32)); + // Get the relevant slice, contiguous. 
+ torch::Tensor tmp = + forw_info + .slice( + /*dim=*/3, /*start=*/3, /*end=*/forw_info.size(3), /*step=*/2) + .contiguous(); + if (forw_info.device().type() == c10::DeviceType::CUDA) { +#ifdef WITH_CUDA + C10_CUDA_CHECK(cudaMemcpyAsync( + result.data_ptr(), + tmp.data_ptr(), + sizeof(uint32_t) * tmp.size(0) * tmp.size(1) * tmp.size(2) * + tmp.size(3), + cudaMemcpyDeviceToDevice, + at::cuda::getCurrentCUDAStream())); +#else + throw std::runtime_error( + "Copy on CUDA device initiated but built " + "without CUDA support."); +#endif + } else { + memcpy( + result.data_ptr(), + tmp.data_ptr(), + sizeof(uint32_t) * tmp.size(0) * tmp.size(1) * tmp.size(2) * + tmp.size(3)); + } + // `tmp` is freed after this, the memory might get reallocated. However, + // only kernels in the same stream should ever be able to write to this + // memory, which are executed only after the memcpy is complete. That's + // why we can just continue. + return result; +} + +} // namespace pytorch +} // namespace pulsar diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/pytorch/tensor_util.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/pytorch/tensor_util.h new file mode 100644 index 0000000000000000000000000000000000000000..9f1d677cbfd4377f27224e05abc66085a06aa60c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/pytorch/tensor_util.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
+ */ + +#ifndef PULSAR_NATIVE_PYTORCH_TENSOR_UTIL_H_ +#define PULSAR_NATIVE_PYTORCH_TENSOR_UTIL_H_ + +#include + +namespace pulsar { +namespace pytorch { + +torch::Tensor sphere_ids_from_result_info_nograd( + const torch::Tensor& forw_info); + +} +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/pytorch/util.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/pytorch/util.cpp new file mode 100644 index 0000000000000000000000000000000000000000..87eb8815f45731672aa0e306d68758d25bc5c447 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/pytorch/util.cpp @@ -0,0 +1,36 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. 
/**
 * Asynchronously copy `size` bytes between two CUDA device buffers.
 *
 * Thin wrapper around cudaMemcpyAsync(..., cudaMemcpyDeviceToDevice, ...)
 * that routes errors through C10_CUDA_CHECK. The copy is enqueued on
 * `stream`; it is NOT synchronized here — callers must order any dependent
 * work on the same stream or synchronize explicitly.
 */
void cudaDevToDev(
    void* trg,
    const void* src,
    const int& size,
    const cudaStream_t& stream) {
  C10_CUDA_CHECK(
      cudaMemcpyAsync(trg, src, size, cudaMemcpyDeviceToDevice, stream));
}

/**
 * Asynchronously copy `size` bytes from a CUDA device buffer to host memory.
 *
 * Same contract as cudaDevToDev, but with cudaMemcpyDeviceToHost. Because the
 * copy is async, the host buffer is only valid after the stream reaches this
 * operation (NOTE(review): for async D2H, CUDA only overlaps when the host
 * buffer is pinned — confirm callers' allocation if overlap is expected).
 */
void cudaDevToHost(
    void* trg,
    const void* src,
    const int& size,
    const cudaStream_t& stream) {
  C10_CUDA_CHECK(
      cudaMemcpyAsync(trg, src, size, cudaMemcpyDeviceToHost, stream));
}
Most of the rendering operations should stay + * local to the respective GPU anyways, so unmanaged pointers are + * preferred. + */ +template +torch::Tensor from_blob( + const T* ptr, + const torch::IntArrayRef& shape, + const c10::DeviceType& device_type, + const c10::DeviceIndex& device_index, + const torch::Dtype& dtype, + const cudaStream_t& stream) { + torch::Tensor ret = torch::zeros( + shape, torch::device({device_type, device_index}).dtype(dtype)); + const int num_elements = + std::accumulate(shape.begin(), shape.end(), 1, std::multiplies{}); + if (device_type == c10::DeviceType::CUDA) { +#ifdef WITH_CUDA + cudaDevToDev( + ret.data_ptr(), + static_cast(ptr), + sizeof(T) * num_elements, + stream); +#else + throw std::runtime_error( + "Initiating devToDev copy on a build without CUDA."); +#endif + // TODO: check for synchronization. + } else { + memcpy(ret.data_ptr(), ptr, sizeof(T) * num_elements); + } + return ret; +}; + +} // namespace pytorch +} // namespace pulsar + +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/warnings.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/warnings.cpp new file mode 100644 index 0000000000000000000000000000000000000000..281ca509df96693bb20e2322132e57b500b7a7c1 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/pulsar/warnings.cpp @@ -0,0 +1,18 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +/** + * A compilation unit to provide warnings about the code and avoid + * repeated messages. 
// A BitMask represents a bool array of shape (H, W, N). We pack values into
// the bits of unsigned ints; a single unsigned int has B = 32 bits, so to hold
// all values we use H * W * (N / B) = H * W * D values. We want to store
// BitMasks in shared memory, so we assume that the memory has already been
// allocated for it elsewhere.
class BitMask {
 public:
  // Wrap preallocated storage `data` of H * W * D unsigned ints and have the
  // whole thread block zero it (block_clear contains a __syncthreads, so
  // construction must be reached by all threads of the block).
  __device__ BitMask(unsigned int* data, int H, int W, int N)
      : data(data), H(H), W(W), B(8 * sizeof(unsigned int)), D(N / B) {
    // TODO: check if the data is null.
    // NOTE(review): this statement is dead — `N` is a by-value parameter that
    // is never read again, and ceilf of the integer `N % 32` rounds nothing.
    // It looks like the intent was a ceiling division for D (so a trailing
    // partial word of bits is kept when N % 32 != 0); as written, D = N / B
    // truncates. Confirm that callers always pass N as a multiple of 32.
    N = ceilf(N % 32); // take ceil incase N % 32 != 0
    block_clear(); // clear the data
  }

  // Use all threads in the current block to clear all bits of this BitMask
  __device__ void block_clear() {
    for (auto i = threadIdx.x; i < H * W * D; i += blockDim.x) {
      data[i] = 0;
    }
    __syncthreads();
  }

  // Index of the unsigned int word holding bit (y, x, d).
  __device__ int _get_elem_idx(int y, int x, int d) {
    return y * W * D + x * D + d / B;
  }

  // Position of bit d within its word.
  __device__ int _get_bit_idx(int d) {
    return d % B;
  }

  // Turn on a single bit (y, x, d)
  __device__ void set(int y, int x, int d) {
    int elem_idx = _get_elem_idx(y, x, d);
    int bit_idx = _get_bit_idx(d);
    const unsigned int mask = 1U << bit_idx;
    // atomicOr: safe against concurrent set/unset on the same word.
    atomicOr(data + elem_idx, mask);
  }

  // Turn off a single bit (y, x, d)
  __device__ void unset(int y, int x, int d) {
    int elem_idx = _get_elem_idx(y, x, d);
    int bit_idx = _get_bit_idx(d);
    const unsigned int mask = ~(1U << bit_idx);
    atomicAnd(data + elem_idx, mask);
  }

  // Check whether the bit (y, x, d) is on or off
  // (non-atomic read; callers must order this after writers themselves).
  __device__ bool get(int y, int x, int d) {
    int elem_idx = _get_elem_idx(y, x, d);
    int bit_idx = _get_bit_idx(d);
    return (data[elem_idx] >> bit_idx) & 1U;
  }

  // Compute the number of bits set in the row (y, x, :)
  __device__ int count(int y, int x) {
    int total = 0;
    for (int i = 0; i < D; ++i) {
      int elem_idx = y * W * D + x * D + i;
      unsigned int elem = data[elem_idx];
      // __popc counts set bits of one 32-bit word in hardware.
      total += __popc(elem);
    }
    return total;
  }

 private:
  unsigned int* data; // not owned; caller-allocated (typically shared memory)
  int H, W, B, D;
};
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/rasterize_coarse/rasterize_coarse.cu @@ -0,0 +1,388 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include +#include +#include +#include "rasterize_coarse/bitmask.cuh" +#include "rasterize_points/rasterization_utils.cuh" +#include "utils/float_math.cuh" +#include "utils/geometry_utils.cuh" // For kEpsilon -- gross + +__global__ void TriangleBoundingBoxKernel( + const float* face_verts, // (F, 3, 3) + const int F, + const float blur_radius, + float* bboxes, // (4, F) + bool* skip_face) { // (F,) + const auto tid = blockIdx.x * blockDim.x + threadIdx.x; + const auto num_threads = blockDim.x * gridDim.x; + const float sqrt_radius = sqrt(blur_radius); + for (int f = tid; f < F; f += num_threads) { + const float v0x = face_verts[f * 9 + 0 * 3 + 0]; + const float v0y = face_verts[f * 9 + 0 * 3 + 1]; + const float v0z = face_verts[f * 9 + 0 * 3 + 2]; + const float v1x = face_verts[f * 9 + 1 * 3 + 0]; + const float v1y = face_verts[f * 9 + 1 * 3 + 1]; + const float v1z = face_verts[f * 9 + 1 * 3 + 2]; + const float v2x = face_verts[f * 9 + 2 * 3 + 0]; + const float v2y = face_verts[f * 9 + 2 * 3 + 1]; + const float v2z = face_verts[f * 9 + 2 * 3 + 2]; + const float xmin = FloatMin3(v0x, v1x, v2x) - sqrt_radius; + const float xmax = FloatMax3(v0x, v1x, v2x) + sqrt_radius; + const float ymin = FloatMin3(v0y, v1y, v2y) - sqrt_radius; + const float ymax = FloatMax3(v0y, v1y, v2y) + sqrt_radius; + const float zmin = FloatMin3(v0z, v1z, v2z); + const bool skip = zmin < kEpsilon; + bboxes[0 * F + f] = xmin; + bboxes[1 * F + f] = xmax; + bboxes[2 * F + f] = ymin; + bboxes[3 * F + f] = ymax; + skip_face[f] = skip; + } +} + +__global__ void PointBoundingBoxKernel( + const float* 
points, // (P, 3) + const float* radius, // (P,) + const int P, + float* bboxes, // (4, P) + bool* skip_points) { + const auto tid = blockIdx.x * blockDim.x + threadIdx.x; + const auto num_threads = blockDim.x * gridDim.x; + for (int p = tid; p < P; p += num_threads) { + const float x = points[p * 3 + 0]; + const float y = points[p * 3 + 1]; + const float z = points[p * 3 + 2]; + const float r = radius[p]; + // TODO: change to kEpsilon to match triangles? + const bool skip = z < 0; + bboxes[0 * P + p] = x - r; + bboxes[1 * P + p] = x + r; + bboxes[2 * P + p] = y - r; + bboxes[3 * P + p] = y + r; + skip_points[p] = skip; + } +} + +__global__ void RasterizeCoarseCudaKernel( + const float* bboxes, // (4, E) (xmin, xmax, ymin, ymax) + const bool* should_skip, // (E,) + const int64_t* elem_first_idxs, + const int64_t* elems_per_batch, + const int N, + const int E, + const int H, + const int W, + const int bin_size, + const int chunk_size, + const int max_elem_per_bin, + int* elems_per_bin, + int* bin_elems) { + extern __shared__ char sbuf[]; + const int M = max_elem_per_bin; + // Integer divide round up + const int num_bins_x = 1 + (W - 1) / bin_size; + const int num_bins_y = 1 + (H - 1) / bin_size; + + // NDC range depends on the ratio of W/H + // The shorter side from (H, W) is given an NDC range of 2.0 and + // the other side is scaled by the ratio of H:W. + const float NDC_x_half_range = NonSquareNdcRange(W, H) / 2.0f; + const float NDC_y_half_range = NonSquareNdcRange(H, W) / 2.0f; + + // Size of half a pixel in NDC units is the NDC half range + // divided by the corresponding image dimension + const float half_pix_x = NDC_x_half_range / W; + const float half_pix_y = NDC_y_half_range / H; + + // This is a boolean array of shape (num_bins_y, num_bins_x, chunk_size) + // stored in shared memory that will track whether each elem in the chunk + // falls into each bin of the image. 
// Coarse rasterization: bin elements (faces or points) into image tiles.
// Each CUDA block walks chunks of `chunk_size` elements; a shared-memory
// BitMask records which elements of the chunk overlap which bin, then each
// thread flushes one bin's count and indices to global memory.
__global__ void RasterizeCoarseCudaKernel(
    const float* bboxes, // (4, E) (xmin, xmax, ymin, ymax)
    const bool* should_skip, // (E,)
    const int64_t* elem_first_idxs, // (N,) first elem of each batch entry
    const int64_t* elems_per_batch, // (N,) number of elems per batch entry
    const int N,
    const int E,
    const int H,
    const int W,
    const int bin_size,
    const int chunk_size,
    const int max_elem_per_bin,
    int* elems_per_bin, // (N, num_bins_y, num_bins_x) output counts
    int* bin_elems) { // (N, num_bins_y, num_bins_x, M) output indices
  extern __shared__ char sbuf[];
  const int M = max_elem_per_bin;
  // Integer divide round up
  const int num_bins_x = 1 + (W - 1) / bin_size;
  const int num_bins_y = 1 + (H - 1) / bin_size;

  // NDC range depends on the ratio of W/H
  // The shorter side from (H, W) is given an NDC range of 2.0 and
  // the other side is scaled by the ratio of H:W.
  const float NDC_x_half_range = NonSquareNdcRange(W, H) / 2.0f;
  const float NDC_y_half_range = NonSquareNdcRange(H, W) / 2.0f;

  // Size of half a pixel in NDC units is the NDC half range
  // divided by the corresponding image dimension
  const float half_pix_x = NDC_x_half_range / W;
  const float half_pix_y = NDC_y_half_range / H;

  // This is a boolean array of shape (num_bins_y, num_bins_x, chunk_size)
  // stored in shared memory that will track whether each elem in the chunk
  // falls into each bin of the image.
  BitMask binmask((unsigned int*)sbuf, num_bins_y, num_bins_x, chunk_size);

  // Have each block handle a chunk of elements
  const int chunks_per_batch = 1 + (E - 1) / chunk_size;
  const int num_chunks = N * chunks_per_batch;

  for (auto chunk = blockIdx.x; chunk < num_chunks; chunk += gridDim.x) {
    const int batch_idx = chunk / chunks_per_batch; // batch index
    const int chunk_idx = chunk % chunks_per_batch;
    const int elem_chunk_start_idx = chunk_idx * chunk_size;

    binmask.block_clear(); // includes a __syncthreads()
    const int64_t elem_start_idx = elem_first_idxs[batch_idx];
    const int64_t elem_stop_idx = elem_start_idx + elems_per_batch[batch_idx];

    // Have each thread handle a different face within the chunk
    for (auto e = threadIdx.x; e < chunk_size; e += blockDim.x) {
      const int e_idx = elem_chunk_start_idx + e;

      // Check that we are still within the same element of the batch
      if (e_idx >= elem_stop_idx || e_idx < elem_start_idx) {
        continue;
      }

      if (should_skip[e_idx]) {
        continue;
      }
      const float xmin = bboxes[0 * E + e_idx];
      const float xmax = bboxes[1 * E + e_idx];
      const float ymin = bboxes[2 * E + e_idx];
      const float ymax = bboxes[3 * E + e_idx];

      // Brute-force search over all bins; TODO(T54294966) something smarter.
      for (int by = 0; by < num_bins_y; ++by) {
        // Y coordinate of the top and bottom of the bin.
        // PixToNdc gives the location of the center of each pixel, so we
        // need to add/subtract a half pixel to get the true extent of the bin.
        // Reverse ordering of Y axis so that +Y is upwards in the image.
        const float bin_y_min =
            PixToNonSquareNdc(by * bin_size, H, W) - half_pix_y;
        const float bin_y_max =
            PixToNonSquareNdc((by + 1) * bin_size - 1, H, W) + half_pix_y;
        const bool y_overlap = (ymin <= bin_y_max) && (bin_y_min < ymax);

        for (int bx = 0; bx < num_bins_x; ++bx) {
          // X coordinate of the left and right of the bin.
          // Reverse ordering of x axis so that +X is left.
          const float bin_x_max =
              PixToNonSquareNdc((bx + 1) * bin_size - 1, W, H) + half_pix_x;
          const float bin_x_min =
              PixToNonSquareNdc(bx * bin_size, W, H) - half_pix_x;

          const bool x_overlap = (xmin <= bin_x_max) && (bin_x_min < xmax);
          if (y_overlap && x_overlap) {
            binmask.set(by, bx, e);
          }
        }
      }
    }
    // All bits for this chunk must be visible before counting.
    __syncthreads();
    // Now we have processed every elem in the current chunk. We need to
    // count the number of elems in each bin so we can write the indices
    // out to global memory. We have each thread handle a different bin.
    for (auto byx = threadIdx.x; byx < num_bins_y * num_bins_x;
         byx += blockDim.x) {
      const int by = byx / num_bins_x;
      const int bx = byx % num_bins_x;
      const int count = binmask.count(by, bx);
      const int elems_per_bin_idx =
          batch_idx * num_bins_y * num_bins_x + by * num_bins_x + bx;

      // This atomically increments the (global) number of elems found
      // in the current bin, and gets the previous value of the counter;
      // this effectively allocates space in the bin_faces array for the
      // elems in the current chunk that fall into this bin.
      const int start = atomicAdd(elems_per_bin + elems_per_bin_idx, count);
      if (start + count > M) {
        // The number of elems in this bin is so big that they won't fit.
        // We print a warning using CUDA's printf. This may be invisible
        // to notebook users, but apparent to others. It would be nice to
        // also have a Python-friendly warning, but it is not obvious
        // how to do this without slowing down the normal case.
        const char* warning =
            "Bin size was too small in the coarse rasterization phase. "
            "This caused an overflow, meaning output may be incomplete. "
            "To solve, "
            "try increasing max_faces_per_bin / max_points_per_bin, "
            "decreasing bin_size, "
            "or setting bin_size to 0 to use the naive rasterization.";
        printf(warning);
        continue;
      }

      // Now loop over the binmask and write the active bits for this bin
      // out to bin_faces.
      int next_idx = batch_idx * num_bins_y * num_bins_x * M +
          by * num_bins_x * M + bx * M + start;
      for (int e = 0; e < chunk_size; ++e) {
        if (binmask.get(by, bx, e)) {
          // TODO(T54296346) find the correct method for handling errors in
          // CUDA. Throw an error if num_faces_per_bin > max_faces_per_bin.
          // Either decrease bin size or increase max_faces_per_bin
          bin_elems[next_idx] = elem_chunk_start_idx + e;
          next_idx++;
        }
      }
    }
    // Bins must be fully flushed before the next chunk clears the binmask.
    __syncthreads();
  }
}
+ int next_idx = batch_idx * num_bins_y * num_bins_x * M + + by * num_bins_x * M + bx * M + start; + for (int e = 0; e < chunk_size; ++e) { + if (binmask.get(by, bx, e)) { + // TODO(T54296346) find the correct method for handling errors in + // CUDA. Throw an error if num_faces_per_bin > max_faces_per_bin. + // Either decrease bin size or increase max_faces_per_bin + bin_elems[next_idx] = elem_chunk_start_idx + e; + next_idx++; + } + } + } + __syncthreads(); + } +} + +at::Tensor RasterizeCoarseCuda( + const at::Tensor& bboxes, + const at::Tensor& should_skip, + const at::Tensor& elem_first_idxs, + const at::Tensor& elems_per_batch, + const std::tuple image_size, + const int bin_size, + const int max_elems_per_bin) { + // Set the device for the kernel launch based on the device of the input + at::cuda::CUDAGuard device_guard(bboxes.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + const int H = std::get<0>(image_size); + const int W = std::get<1>(image_size); + + const int E = bboxes.size(1); + const int N = elems_per_batch.size(0); + const int M = max_elems_per_bin; + + // Integer divide round up + const int num_bins_y = 1 + (H - 1) / bin_size; + const int num_bins_x = 1 + (W - 1) / bin_size; + + if (num_bins_y >= kMaxItemsPerBin || num_bins_x >= kMaxItemsPerBin) { + std::stringstream ss; + ss << "In RasterizeCoarseCuda got num_bins_y: " << num_bins_y + << ", num_bins_x: " << num_bins_x << ", " << "; that's too many!"; + AT_ERROR(ss.str()); + } + auto opts = elems_per_batch.options().dtype(at::kInt); + at::Tensor elems_per_bin = at::zeros({N, num_bins_y, num_bins_x}, opts); + at::Tensor bin_elems = at::full({N, num_bins_y, num_bins_x, M}, -1, opts); + + if (bin_elems.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return bin_elems; + } + + const int chunk_size = 512; + const size_t shared_size = num_bins_y * num_bins_x * chunk_size / 8; + const size_t blocks = 64; + const size_t threads = 512; + + RasterizeCoarseCudaKernel<<>>( + 
bboxes.contiguous().data_ptr(), + should_skip.contiguous().data_ptr(), + elem_first_idxs.contiguous().data_ptr(), + elems_per_batch.contiguous().data_ptr(), + N, + E, + H, + W, + bin_size, + chunk_size, + M, + elems_per_bin.data_ptr(), + bin_elems.data_ptr()); + + AT_CUDA_CHECK(cudaGetLastError()); + return bin_elems; +} + +at::Tensor RasterizeMeshesCoarseCuda( + const at::Tensor& face_verts, + const at::Tensor& mesh_to_face_first_idx, + const at::Tensor& num_faces_per_mesh, + const std::tuple image_size, + const float blur_radius, + const int bin_size, + const int max_faces_per_bin) { + TORCH_CHECK( + face_verts.ndimension() == 3 && face_verts.size(1) == 3 && + face_verts.size(2) == 3, + "face_verts must have dimensions (num_faces, 3, 3)"); + + // Check inputs are on the same device + at::TensorArg face_verts_t{face_verts, "face_verts", 1}, + mesh_to_face_first_idx_t{ + mesh_to_face_first_idx, "mesh_to_face_first_idx", 2}, + num_faces_per_mesh_t{num_faces_per_mesh, "num_faces_per_mesh", 3}; + at::CheckedFrom c = "RasterizeMeshesCoarseCuda"; + at::checkAllSameGPU( + c, {face_verts_t, mesh_to_face_first_idx_t, num_faces_per_mesh_t}); + + // Set the device for the kernel launch based on the device of the input + at::cuda::CUDAGuard device_guard(face_verts.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // Allocate tensors for bboxes and should_skip + const int F = face_verts.size(0); + auto float_opts = face_verts.options().dtype(at::kFloat); + auto bool_opts = face_verts.options().dtype(at::kBool); + at::Tensor bboxes = at::empty({4, F}, float_opts); + at::Tensor should_skip = at::empty({F}, bool_opts); + + // Launch kernel to compute triangle bboxes + const size_t blocks = 128; + const size_t threads = 256; + TriangleBoundingBoxKernel<<>>( + face_verts.contiguous().data_ptr(), + F, + blur_radius, + bboxes.contiguous().data_ptr(), + should_skip.contiguous().data_ptr()); + AT_CUDA_CHECK(cudaGetLastError()); + + return RasterizeCoarseCuda( + 
bboxes, + should_skip, + mesh_to_face_first_idx, + num_faces_per_mesh, + image_size, + bin_size, + max_faces_per_bin); +} + +at::Tensor RasterizePointsCoarseCuda( + const at::Tensor& points, // (P, 3) + const at::Tensor& cloud_to_packed_first_idx, // (N,) + const at::Tensor& num_points_per_cloud, // (N,) + const std::tuple image_size, + const at::Tensor& radius, + const int bin_size, + const int max_points_per_bin) { + TORCH_CHECK( + points.ndimension() == 2 && points.size(1) == 3, + "points must have dimensions (num_points, 3)"); + + // Check inputs are on the same device + at::TensorArg points_t{points, "points", 1}, + cloud_to_packed_first_idx_t{ + cloud_to_packed_first_idx, "cloud_to_packed_first_idx", 2}, + num_points_per_cloud_t{num_points_per_cloud, "num_points_per_cloud", 3}; + at::CheckedFrom c = "RasterizePointsCoarseCuda"; + at::checkAllSameGPU( + c, {points_t, cloud_to_packed_first_idx_t, num_points_per_cloud_t}); + + // Set the device for the kernel launch based on the device of the input + at::cuda::CUDAGuard device_guard(points.device()); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + // Allocate tensors for bboxes and should_skip + const int P = points.size(0); + auto float_opts = points.options().dtype(at::kFloat); + auto bool_opts = points.options().dtype(at::kBool); + at::Tensor bboxes = at::empty({4, P}, float_opts); + at::Tensor should_skip = at::empty({P}, bool_opts); + + // Launch kernel to compute point bboxes + const size_t blocks = 128; + const size_t threads = 256; + PointBoundingBoxKernel<<>>( + points.contiguous().data_ptr(), + radius.contiguous().data_ptr(), + P, + bboxes.contiguous().data_ptr(), + should_skip.contiguous().data_ptr()); + AT_CUDA_CHECK(cudaGetLastError()); + + return RasterizeCoarseCuda( + bboxes, + should_skip, + cloud_to_packed_first_idx, + num_points_per_cloud, + image_size, + bin_size, + max_points_per_bin); +} diff --git 
a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/rasterize_coarse/rasterize_coarse.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/rasterize_coarse/rasterize_coarse.h new file mode 100644 index 0000000000000000000000000000000000000000..858407cb66b2a252f1b2b223f2adaa2ce8074543 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/rasterize_coarse/rasterize_coarse.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +#include +#include + +// Arguments are the same as RasterizeMeshesCoarse from +// rasterize_meshes/rasterize_meshes.h +#ifdef WITH_CUDA +torch::Tensor RasterizeMeshesCoarseCuda( + const torch::Tensor& face_verts, + const torch::Tensor& mesh_to_face_first_idx, + const torch::Tensor& num_faces_per_mesh, + const std::tuple image_size, + const float blur_radius, + const int bin_size, + const int max_faces_per_bin); +#endif + +// Arguments are the same as RasterizePointsCoarse from +// rasterize_points/rasterize_points.h +#ifdef WITH_CUDA +torch::Tensor RasterizePointsCoarseCuda( + const torch::Tensor& points, + const torch::Tensor& cloud_to_packed_first_idx, + const torch::Tensor& num_points_per_cloud, + const std::tuple image_size, + const torch::Tensor& radius, + const int bin_size, + const int max_points_per_bin); +#endif diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/rasterize_meshes/rasterize_meshes.cu b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/rasterize_meshes/rasterize_meshes.cu new file mode 100644 index 0000000000000000000000000000000000000000..28c546c6f8b0e98304c22fe96fc7f8486dc7cf96 --- 
/dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/rasterize_meshes/rasterize_meshes.cu @@ -0,0 +1,823 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "rasterize_points/rasterization_utils.cuh" +#include "utils/float_math.cuh" +#include "utils/geometry_utils.cuh" + +namespace { +// A structure for holding details about a pixel. +struct Pixel { + float z; + int64_t idx; // idx of face + float dist; // abs distance of pixel to face + float3 bary; +}; + +__device__ bool operator<(const Pixel& a, const Pixel& b) { + return a.z < b.z || (a.z == b.z && a.idx < b.idx); +} + +// Get the xyz coordinates of the three vertices for the face given by the +// index face_idx into face_verts. +__device__ thrust::tuple GetSingleFaceVerts( + const float* face_verts, + int face_idx) { + const float x0 = face_verts[face_idx * 9 + 0]; + const float y0 = face_verts[face_idx * 9 + 1]; + const float z0 = face_verts[face_idx * 9 + 2]; + const float x1 = face_verts[face_idx * 9 + 3]; + const float y1 = face_verts[face_idx * 9 + 4]; + const float z1 = face_verts[face_idx * 9 + 5]; + const float x2 = face_verts[face_idx * 9 + 6]; + const float y2 = face_verts[face_idx * 9 + 7]; + const float z2 = face_verts[face_idx * 9 + 8]; + + const float3 v0xyz = make_float3(x0, y0, z0); + const float3 v1xyz = make_float3(x1, y1, z1); + const float3 v2xyz = make_float3(x2, y2, z2); + + return thrust::make_tuple(v0xyz, v1xyz, v2xyz); +} + +// Get the min/max x/y/z values for the face given by vertices v0, v1, v2. 
+__device__ thrust::tuple +GetFaceBoundingBox(float3 v0, float3 v1, float3 v2) { + const float xmin = FloatMin3(v0.x, v1.x, v2.x); + const float ymin = FloatMin3(v0.y, v1.y, v2.y); + const float zmin = FloatMin3(v0.z, v1.z, v2.z); + const float xmax = FloatMax3(v0.x, v1.x, v2.x); + const float ymax = FloatMax3(v0.y, v1.y, v2.y); + const float zmax = FloatMax3(v0.z, v1.z, v2.z); + + return thrust::make_tuple( + make_float2(xmin, xmax), + make_float2(ymin, ymax), + make_float2(zmin, zmax)); +} + +// Check if the point (px, py) lies outside the face bounding box face_bbox. +// Return true if the point is outside. +__device__ bool CheckPointOutsideBoundingBox( + float3 v0, + float3 v1, + float3 v2, + float blur_radius, + float2 pxy) { + const auto bbox = GetFaceBoundingBox(v0, v1, v2); + const float2 xlims = thrust::get<0>(bbox); + const float2 ylims = thrust::get<1>(bbox); + const float2 zlims = thrust::get<2>(bbox); + + const float x_min = xlims.x - blur_radius; + const float y_min = ylims.x - blur_radius; + const float x_max = xlims.y + blur_radius; + const float y_max = ylims.y + blur_radius; + + // Faces with at least one vertex behind the camera won't render correctly + // and should be removed or clipped before calling the rasterizer + const bool z_invalid = zlims.x < kEpsilon; + + // Check if the current point is oustside the triangle bounding box. + return ( + pxy.x > x_max || pxy.x < x_min || pxy.y > y_max || pxy.y < y_min || + z_invalid); +} + +// This function checks if a pixel given by xy location pxy lies within the +// face with index face_idx in face_verts. One of the inputs is a list (q) +// which contains Pixel structs with the indices of the faces which intersect +// with this pixel sorted by closest z distance. If the point pxy lies in the +// face, the list (q) is updated and re-orderered in place. In addition +// the auxiliary variables q_size, q_max_z and q_max_idx are also modified. 
// This function checks if a pixel given by xy location pxy lies within the
// face with index face_idx in face_verts. One of the inputs is a list (q)
// which contains Pixel structs with the indices of the faces which intersect
// with this pixel sorted by closest z distance. If the point pxy lies in the
// face, the list (q) is updated and re-orderered in place. In addition
// the auxiliary variables q_size, q_max_z and q_max_idx are also modified.
// This code is shared between RasterizeMeshesNaiveCudaKernel and
// RasterizeMeshesFineCudaKernel.
// NOTE(review): the template parameter list was lost in extraction
// (`template` with no brackets); restored as <typename FaceQ>.
template <typename FaceQ>
__device__ void CheckPixelInsideFace(
    const float* face_verts, // (F, 3, 3)
    const int64_t* clipped_faces_neighbor_idx, // (F,)
    const int face_idx,
    int& q_size,
    float& q_max_z,
    int& q_max_idx,
    FaceQ& q,
    const float blur_radius,
    const float2 pxy, // Coordinates of the pixel
    const int K,
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces) {
  const auto v012 = GetSingleFaceVerts(face_verts, face_idx);
  const float3 v0 = thrust::get<0>(v012);
  const float3 v1 = thrust::get<1>(v012);
  const float3 v2 = thrust::get<2>(v012);

  // Only need xy for barycentric coordinates and distance calculations.
  const float2 v0xy = make_float2(v0.x, v0.y);
  const float2 v1xy = make_float2(v1.x, v1.y);
  const float2 v2xy = make_float2(v2.x, v2.y);

  // Perform checks and skip if:
  // 1. the face is behind the camera
  // 2. the face is facing away from the camera
  // 3. the face has very small face area
  // 4. the pixel is outside the face bbox
  const float zmax = FloatMax3(v0.z, v1.z, v2.z);
  const bool outside_bbox = CheckPointOutsideBoundingBox(
      v0, v1, v2, sqrt(blur_radius), pxy); // use sqrt of blur for bbox
  const float face_area = EdgeFunctionForward(v0xy, v1xy, v2xy);
  // Check if the face is visible to the camera.
  const bool back_face = face_area < 0.0;
  const bool zero_face_area =
      (face_area <= kEpsilon && face_area >= -1.0f * kEpsilon);

  if (zmax < 0 || (cull_backfaces && back_face) || outside_bbox ||
      zero_face_area) {
    return;
  }

  // Calculate barycentric coords and euclidean dist to triangle.
  const float3 p_bary0 = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy);
  const float3 p_bary = !perspective_correct
      ? p_bary0
      : BarycentricPerspectiveCorrectionForward(p_bary0, v0.z, v1.z, v2.z);
  const float3 p_bary_clip =
      !clip_barycentric_coords ? p_bary : BarycentricClipForward(p_bary);

  const float pz =
      p_bary_clip.x * v0.z + p_bary_clip.y * v1.z + p_bary_clip.z * v2.z;

  if (pz < 0) {
    return; // Face is behind the image plane.
  }

  // Get abs squared distance
  const float dist = PointTriangleDistanceForward(pxy, v0xy, v1xy, v2xy);

  // Use the unclipped bary coordinates to determine if the point is inside the
  // face.
  const bool inside = p_bary.x > 0.0f && p_bary.y > 0.0f && p_bary.z > 0.0f;
  const float signed_dist = inside ? -dist : dist;
  // Check if pixel is outside blur region
  if (!inside && dist >= blur_radius) {
    return;
  }

  // Handle the case where a face (f) partially behind the image plane is
  // clipped to a quadrilateral and then split into two faces (t1, t2). In this
  // case we:
  // 1. Find the index of the neighboring face (e.g. for t1 need index of t2)
  // 2. Check if the neighboring face (t2) is already in the top K faces
  // 3. If yes, compare the distance of the pixel to t1 with the distance to t2.
  // 4. If dist_t1 < dist_t2, overwrite the values for t2 in the top K faces.
  const int neighbor_idx = clipped_faces_neighbor_idx[face_idx];
  int neighbor_idx_top_k = -1;

  // Check if neighboring face is already in the top K.
  // -1 is the fill value in clipped_faces_neighbor_idx
  if (neighbor_idx != -1) {
    // Only need to loop until q_size.
    for (int i = 0; i < q_size; i++) {
      if (q[i].idx == neighbor_idx) {
        neighbor_idx_top_k = i;
        break;
      }
    }
  }
  // If neighbor idx is not -1 then it is in the top K struct.
  if (neighbor_idx_top_k != -1) {
    // If dist of current face is less than neighbor then overwrite the
    // neighbor face values in the top K struct.
    float neighbor_dist = abs(q[neighbor_idx_top_k].dist);
    if (dist < neighbor_dist) {
      // Overwrite the neighbor face values
      q[neighbor_idx_top_k] = {pz, face_idx, signed_dist, p_bary_clip};

      // If pz > q_max then overwrite the max values and index of the max.
      // q_size stays the same.
      if (pz > q_max_z) {
        q_max_z = pz;
        q_max_idx = neighbor_idx_top_k;
      }
    }
  } else {
    // Handle as a normal face
    if (q_size < K) {
      // Just insert it.
      q[q_size] = {pz, face_idx, signed_dist, p_bary_clip};
      if (pz > q_max_z) {
        q_max_z = pz;
        q_max_idx = q_size;
      }
      q_size++;
    } else if (pz < q_max_z) {
      // Overwrite the old max, and find the new max.
      q[q_max_idx] = {pz, face_idx, signed_dist, p_bary_clip};
      q_max_z = pz;
      for (int i = 0; i < K; i++) {
        if (q[i].z > q_max_z) {
          q_max_z = q[i].z;
          q_max_idx = i;
        }
      }
    }
  }
}
} // namespace

// ****************************************************************************
// *                          NAIVE RASTERIZATION                             *
// ****************************************************************************
// One thread per output pixel; each thread scans every face of its mesh and
// keeps the K closest intersecting faces (tracked via an unsorted array plus
// a running max -- see comment below), then writes them out sorted by depth.
__global__ void RasterizeMeshesNaiveCudaKernel(
    const float* face_verts,
    const int64_t* mesh_to_face_first_idx,
    const int64_t* num_faces_per_mesh,
    const int64_t* clipped_faces_neighbor_idx,
    const float blur_radius,
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces,
    const int N,
    const int H,
    const int W,
    const int K,
    int64_t* face_idxs, // (N, H, W, K)
    float* zbuf, // (N, H, W, K)
    float* pix_dists, // (N, H, W, K)
    float* bary) { // (N, H, W, K, 3)
  // Simple version: One thread per output pixel
  auto num_threads = gridDim.x * blockDim.x;
  auto tid = blockDim.x * blockIdx.x + threadIdx.x;

  for (int i = tid; i < N * H * W; i += num_threads) {
    // Convert linear index to 3D index
    const int n = i / (H * W); // batch index.
    const int pix_idx = i % (H * W);

    // Reverse ordering of X and Y axes
    const int yi = H - 1 - pix_idx / W;
    const int xi = W - 1 - pix_idx % W;

    // screen coordinates to ndc coordinates of pixel.
    const float xf = PixToNonSquareNdc(xi, W, H);
    const float yf = PixToNonSquareNdc(yi, H, W);
    const float2 pxy = make_float2(xf, yf);

    // For keeping track of the K closest points we want a data structure
    // that (1) gives O(1) access to the closest point for easy comparisons,
    // and (2) allows insertion of new elements. In the CPU version we use
    // std::priority_queue; then (2) is O(log K). We can't use STL
    // containers in CUDA; we could roll our own max heap in an array, but
    // that would likely have a lot of warp divergence so we do something
    // simpler instead: keep the elements in an unsorted array, but keep
    // track of the max value and the index of the max value. Then (1) is
    // still O(1) time, while (2) is O(K) with a clean loop. Since K <= 8
    // this should be fast enough for our purposes.
    Pixel q[kMaxPointsPerPixel];
    int q_size = 0;
    float q_max_z = -1000;
    int q_max_idx = -1;

    // Using the batch index of the thread get the start and stop
    // indices for the faces.
    const int64_t face_start_idx = mesh_to_face_first_idx[n];
    const int64_t face_stop_idx = face_start_idx + num_faces_per_mesh[n];

    // Loop through the faces in the mesh.
    for (int f = face_start_idx; f < face_stop_idx; ++f) {
      // Check if the pixel pxy is inside the face bounding box and if it is,
      // update q, q_size, q_max_z and q_max_idx in place.

      CheckPixelInsideFace(
          face_verts,
          clipped_faces_neighbor_idx,
          f,
          q_size,
          q_max_z,
          q_max_idx,
          q,
          blur_radius,
          pxy,
          K,
          perspective_correct,
          clip_barycentric_coords,
          cull_backfaces);
    }

    // TODO: make sorting an option as only top k is needed, not sorted values.
    BubbleSort(q, q_size);
    int idx = n * H * W * K + pix_idx * K;

    for (int k = 0; k < q_size; ++k) {
      face_idxs[idx + k] = q[k].idx;
      zbuf[idx + k] = q[k].z;
      pix_dists[idx + k] = q[k].dist;
      bary[(idx + k) * 3 + 0] = q[k].bary.x;
      bary[(idx + k) * 3 + 1] = q[k].bary.y;
      bary[(idx + k) * 3 + 2] = q[k].bary.z;
    }
  }
}
// Host entry for naive (non-binned) mesh rasterization. Allocates the
// per-pixel output tensors (face indices, z-buffer, barycentrics, distances,
// each filled with -1) and launches RasterizeMeshesNaiveCudaKernel.
// Returns (face_idxs, zbuf, bary, pix_dists).
// NOTE(review): the return-type tuple, image_size tuple args, launch
// configuration and data_ptr element types were lost in extraction; restored
// here. Also fixed "save size" -> "same size" in the two check messages.
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor>
RasterizeMeshesNaiveCuda(
    const at::Tensor& face_verts,
    const at::Tensor& mesh_to_faces_packed_first_idx,
    const at::Tensor& num_faces_per_mesh,
    const at::Tensor& clipped_faces_neighbor_idx,
    const std::tuple<int, int> image_size,
    const float blur_radius,
    const int num_closest,
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces) {
  TORCH_CHECK(
      face_verts.ndimension() == 3 && face_verts.size(1) == 3 &&
          face_verts.size(2) == 3,
      "face_verts must have dimensions (num_faces, 3, 3)");

  TORCH_CHECK(
      num_faces_per_mesh.size(0) == mesh_to_faces_packed_first_idx.size(0),
      "num_faces_per_mesh must have same size first dimension as mesh_to_faces_packed_first_idx");

  TORCH_CHECK(
      clipped_faces_neighbor_idx.size(0) == face_verts.size(0),
      "clipped_faces_neighbor_idx must have same size first dimension as face_verts");

  if (num_closest > kMaxPointsPerPixel) {
    std::stringstream ss;
    ss << "Must have points_per_pixel <= " << kMaxPointsPerPixel;
    AT_ERROR(ss.str());
  }

  // Check inputs are on the same device
  at::TensorArg face_verts_t{face_verts, "face_verts", 1},
      mesh_to_faces_packed_first_idx_t{
          mesh_to_faces_packed_first_idx, "mesh_to_faces_packed_first_idx", 2},
      num_faces_per_mesh_t{num_faces_per_mesh, "num_faces_per_mesh", 3},
      clipped_faces_neighbor_idx_t{
          clipped_faces_neighbor_idx, "clipped_faces_neighbor_idx", 4};
  at::CheckedFrom c = "RasterizeMeshesNaiveCuda";
  at::checkAllSameGPU(
      c,
      {face_verts_t,
       mesh_to_faces_packed_first_idx_t,
       num_faces_per_mesh_t,
       clipped_faces_neighbor_idx_t});

  // Set the device for the kernel launch based on the device of the input
  at::cuda::CUDAGuard device_guard(face_verts.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  const int N = num_faces_per_mesh.size(0); // batch size.
  const int H = std::get<0>(image_size);
  const int W = std::get<1>(image_size);
  const int K = num_closest;

  auto long_opts = num_faces_per_mesh.options().dtype(at::kLong);
  auto float_opts = face_verts.options().dtype(at::kFloat);

  at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts);
  at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts);
  at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts);
  at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts);

  if (face_idxs.numel() == 0) {
    AT_CUDA_CHECK(cudaGetLastError());
    return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
  }

  const size_t blocks = 1024;
  const size_t threads = 64;

  RasterizeMeshesNaiveCudaKernel<<<blocks, threads, 0, stream>>>(
      face_verts.contiguous().data_ptr<float>(),
      mesh_to_faces_packed_first_idx.contiguous().data_ptr<int64_t>(),
      num_faces_per_mesh.contiguous().data_ptr<int64_t>(),
      clipped_faces_neighbor_idx.contiguous().data_ptr<int64_t>(),
      blur_radius,
      perspective_correct,
      clip_barycentric_coords,
      cull_backfaces,
      N,
      H,
      W,
      K,
      face_idxs.data_ptr<int64_t>(),
      zbuf.data_ptr<float>(),
      pix_dists.data_ptr<float>(),
      bary.data_ptr<float>());

  AT_CUDA_CHECK(cudaGetLastError());
  return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
}

// ****************************************************************************
// *                            BACKWARD PASS                                 *
// ****************************************************************************
// TODO: benchmark parallelizing over faces_verts instead of over pixels.
__global__ void RasterizeMeshesBackwardCudaKernel(
    const float* face_verts, // (F, 3, 3)
    const int64_t* pix_to_face, // (N, H, W, K)
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const int N,
    const int H,
    const int W,
    const int K,
    const float* grad_zbuf, // (N, H, W, K)
    const float* grad_bary, // (N, H, W, K, 3)
    const float* grad_dists, // (N, H, W, K)
    float* grad_face_verts) { // (F, 3, 3)
  // Parallelize over each pixel in images of
  // size H * W, for each image in the batch of size N.
  const auto num_threads = gridDim.x * blockDim.x;
  const auto tid = blockIdx.x * blockDim.x + threadIdx.x;

  for (int t_i = tid; t_i < N * H * W; t_i += num_threads) {
    // Convert linear index to 3D index
    const int n = t_i / (H * W); // batch index.
    const int pix_idx = t_i % (H * W);

    // Reverse ordering of X and Y axes.
    const int yi = H - 1 - pix_idx / W;
    const int xi = W - 1 - pix_idx % W;

    const float xf = PixToNonSquareNdc(xi, W, H);
    const float yf = PixToNonSquareNdc(yi, H, W);
    const float2 pxy = make_float2(xf, yf);

    // Loop over all the faces for this pixel.
    for (int k = 0; k < K; k++) {
      // Index into (N, H, W, K, :) grad tensors
      // pixel index + top k index
      int i = n * H * W * K + pix_idx * K + k;

      const int f = pix_to_face[i];
      if (f < 0) {
        continue; // padded face.
      }
      // Get xyz coordinates of the three face vertices.
      const auto v012 = GetSingleFaceVerts(face_verts, f);
      const float3 v0 = thrust::get<0>(v012);
      const float3 v1 = thrust::get<1>(v012);
      const float3 v2 = thrust::get<2>(v012);

      // Only need xy for barycentric coordinate and distance calculations.
      const float2 v0xy = make_float2(v0.x, v0.y);
      const float2 v1xy = make_float2(v1.x, v1.y);
      const float2 v2xy = make_float2(v2.x, v2.y);

      // Get upstream gradients for the face.
      const float grad_dist_upstream = grad_dists[i];
      const float grad_zbuf_upstream = grad_zbuf[i];
      const float grad_bary_upstream_w0 = grad_bary[i * 3 + 0];
      const float grad_bary_upstream_w1 = grad_bary[i * 3 + 1];
      const float grad_bary_upstream_w2 = grad_bary[i * 3 + 2];
      const float3 grad_bary_upstream = make_float3(
          grad_bary_upstream_w0, grad_bary_upstream_w1, grad_bary_upstream_w2);

      const float3 b_w = BarycentricCoordsForward(pxy, v0xy, v1xy, v2xy);
      const float3 b_pp = !perspective_correct
          ? b_w
          : BarycentricPerspectiveCorrectionForward(b_w, v0.z, v1.z, v2.z);

      const float3 b_w_clip =
          !clip_barycentric_coords ? b_pp : BarycentricClipForward(b_pp);

      const bool inside = b_pp.x > 0.0f && b_pp.y > 0.0f && b_pp.z > 0.0f;
      // Distances are signed: negative inside the face, positive outside, so
      // the upstream distance gradient flips sign for interior pixels.
      const float sign = inside ? -1.0f : 1.0f;

      auto grad_dist_f = PointTriangleDistanceBackward(
          pxy, v0xy, v1xy, v2xy, sign * grad_dist_upstream);
      const float2 ddist_d_v0 = thrust::get<1>(grad_dist_f);
      const float2 ddist_d_v1 = thrust::get<2>(grad_dist_f);
      const float2 ddist_d_v2 = thrust::get<3>(grad_dist_f);

      // Upstream gradient for barycentric coords from zbuf calculation:
      // zbuf = bary_w0 * z0 + bary_w1 * z1 + bary_w2 * z2
      // Therefore
      // d_zbuf/d_bary_w0 = z0
      // d_zbuf/d_bary_w1 = z1
      // d_zbuf/d_bary_w2 = z2
      const float3 d_zbuf_d_bwclip = make_float3(v0.z, v1.z, v2.z);

      // Total upstream barycentric gradients are the sum of
      // external upstream gradients and contribution from zbuf.
      const float3 grad_bary_f_sum =
          (grad_bary_upstream + grad_zbuf_upstream * d_zbuf_d_bwclip);

      float3 grad_bary0 = grad_bary_f_sum;

      if (clip_barycentric_coords) {
        grad_bary0 = BarycentricClipBackward(b_w, grad_bary_f_sum);
      }

      float dz0_persp = 0.0f, dz1_persp = 0.0f, dz2_persp = 0.0f;
      if (perspective_correct) {
        auto perspective_grads = BarycentricPerspectiveCorrectionBackward(
            b_w, v0.z, v1.z, v2.z, grad_bary0);
        grad_bary0 = thrust::get<0>(perspective_grads);
        dz0_persp = thrust::get<1>(perspective_grads);
        dz1_persp = thrust::get<2>(perspective_grads);
        dz2_persp = thrust::get<3>(perspective_grads);
      }

      auto grad_bary_f =
          BarycentricCoordsBackward(pxy, v0xy, v1xy, v2xy, grad_bary0);
      const float2 dbary_d_v0 = thrust::get<1>(grad_bary_f);
      const float2 dbary_d_v1 = thrust::get<2>(grad_bary_f);
      const float2 dbary_d_v2 = thrust::get<3>(grad_bary_f);

      // Accumulate into the (F, 3, 3) gradient buffer. Multiple pixels may hit
      // the same face, hence atomicAdd. Layout: f * 9 + vertex * 3 + coord.
      atomicAdd(grad_face_verts + f * 9 + 0, dbary_d_v0.x + ddist_d_v0.x);
      atomicAdd(grad_face_verts + f * 9 + 1, dbary_d_v0.y + ddist_d_v0.y);
      atomicAdd(
          grad_face_verts + f * 9 + 2,
          grad_zbuf_upstream * b_w_clip.x + dz0_persp);
      atomicAdd(grad_face_verts + f * 9 + 3, dbary_d_v1.x + ddist_d_v1.x);
      atomicAdd(grad_face_verts + f * 9 + 4, dbary_d_v1.y + ddist_d_v1.y);
      atomicAdd(
          grad_face_verts + f * 9 + 5,
          grad_zbuf_upstream * b_w_clip.y + dz1_persp);
      atomicAdd(grad_face_verts + f * 9 + 6, dbary_d_v2.x + ddist_d_v2.x);
      atomicAdd(grad_face_verts + f * 9 + 7, dbary_d_v2.y + ddist_d_v2.y);
      atomicAdd(
          grad_face_verts + f * 9 + 8,
          grad_zbuf_upstream * b_w_clip.z + dz2_persp);
    }
  }
}

at::Tensor RasterizeMeshesBackwardCuda(
    const at::Tensor& face_verts, // (F, 3, 3)
    const at::Tensor& pix_to_face, // (N, H, W, K)
    const at::Tensor& grad_zbuf, // (N, H, W, K)
    const at::Tensor& grad_bary, // (N, H, W, K, 3)
    const at::Tensor& grad_dists, // (N, H, W, K)
    const bool perspective_correct,
    const bool clip_barycentric_coords) {
  // Check inputs are on the same device
  at::TensorArg face_verts_t{face_verts, "face_verts", 1},
      pix_to_face_t{pix_to_face, "pix_to_face", 2},
      grad_zbuf_t{grad_zbuf, "grad_zbuf", 3},
      grad_bary_t{grad_bary, "grad_bary", 4},
      grad_dists_t{grad_dists, "grad_dists", 5};
  at::CheckedFrom c = "RasterizeMeshesBackwardCuda";
  at::checkAllSameGPU(
      c, {face_verts_t, pix_to_face_t, grad_zbuf_t, grad_bary_t, grad_dists_t});
  at::checkAllSameType(
      c, {face_verts_t, grad_zbuf_t, grad_bary_t, grad_dists_t});

  // This is nondeterministic because atomicAdd
  at::globalContext().alertNotDeterministic("RasterizeMeshesBackwardCuda");

  // Set the device for the kernel launch based on the device of the input
  at::cuda::CUDAGuard device_guard(face_verts.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  const int F = face_verts.size(0);
  const int N = pix_to_face.size(0);
  const int H = pix_to_face.size(1);
  const int W = pix_to_face.size(2);
  const int K = pix_to_face.size(3);

  at::Tensor grad_face_verts = at::zeros({F, 3, 3}, face_verts.options());

  if (grad_face_verts.numel() == 0) {
    AT_CUDA_CHECK(cudaGetLastError());
    return grad_face_verts;
  }

  const size_t blocks = 1024;
  const size_t threads = 64;

  RasterizeMeshesBackwardCudaKernel<<<blocks, threads, 0, stream>>>(
      face_verts.contiguous().data_ptr<float>(),
      pix_to_face.contiguous().data_ptr<int64_t>(),
      perspective_correct,
      clip_barycentric_coords,
      N,
      H,
      W,
      K,
      grad_zbuf.contiguous().data_ptr<float>(),
      grad_bary.contiguous().data_ptr<float>(),
      grad_dists.contiguous().data_ptr<float>(),
      grad_face_verts.data_ptr<float>());

  AT_CUDA_CHECK(cudaGetLastError());
  return grad_face_verts;
}

// ****************************************************************************
// *                          FINE RASTERIZATION                              *
// ****************************************************************************
__global__ void RasterizeMeshesFineCudaKernel(
    const float* face_verts, // (F, 3, 3)
    const int32_t* bin_faces, // (N, BH, BW, T)
    const int64_t* clipped_faces_neighbor_idx, // (F,)
    const float blur_radius,
    const int bin_size,
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces,
    const int N,
    const int BH,
    const int BW,
    const int M,
    const int H,
    const int W,
    const int K,
    int64_t* face_idxs, // (N, H, W, K)
    float* zbuf, // (N, H, W, K)
    float* pix_dists, // (N, H, W, K)
    float* bary // (N, H, W, K, 3)
) {
  // This can be more than H * W if H or W are not divisible by bin_size.
  int num_pixels = N * BH * BW * bin_size * bin_size;
  auto num_threads = gridDim.x * blockDim.x;
  auto tid = blockIdx.x * blockDim.x + threadIdx.x;

  for (int pid = tid; pid < num_pixels; pid += num_threads) {
    // Convert linear index into bin and pixel indices. We make the within
    // block pixel ids move the fastest, so that adjacent threads will fall
    // into the same bin; this should give them coalesced memory reads when
    // they read from faces and bin_faces.
    int i = pid;
    const int n = i / (BH * BW * bin_size * bin_size);
    i %= BH * BW * bin_size * bin_size;
    // bin index y
    const int by = i / (BW * bin_size * bin_size);
    i %= BW * bin_size * bin_size;
    // bin index x
    const int bx = i / (bin_size * bin_size);
    // pixel within the bin
    i %= bin_size * bin_size;

    // Pixel x, y indices
    const int yi = i / bin_size + by * bin_size;
    const int xi = i % bin_size + bx * bin_size;

    // Bins on the right/bottom image edge may overhang past the image.
    if (yi >= H || xi >= W)
      continue;

    const float xf = PixToNonSquareNdc(xi, W, H);
    const float yf = PixToNonSquareNdc(yi, H, W);

    const float2 pxy = make_float2(xf, yf);

    // This part looks like the naive rasterization kernel, except we use
    // bin_faces to only look at a subset of faces already known to fall
    // in this bin. TODO abstract out this logic into some data structure
    // that is shared by both kernels?
    Pixel q[kMaxPointsPerPixel];
    int q_size = 0;
    float q_max_z = -1000;
    int q_max_idx = -1;

    for (int m = 0; m < M; m++) {
      const int f = bin_faces[n * BH * BW * M + by * BW * M + bx * M + m];
      if (f < 0) {
        continue; // bin_faces uses -1 as a sentinel value.
      }
      // Check if the pixel pxy is inside the face bounding box and if it is,
      // update q, q_size, q_max_z and q_max_idx in place.
      CheckPixelInsideFace(
          face_verts,
          clipped_faces_neighbor_idx,
          f,
          q_size,
          q_max_z,
          q_max_idx,
          q,
          blur_radius,
          pxy,
          K,
          perspective_correct,
          clip_barycentric_coords,
          cull_backfaces);
    }

    // Now we've looked at all the faces for this bin, so we can write
    // output for the current pixel.
    // TODO: make sorting an option as only top k is needed, not sorted values.
    BubbleSort(q, q_size);

    // Reverse ordering of the X and Y axis so that
    // in the image +Y is pointing up and +X is pointing left.
    const int yidx = H - 1 - yi;
    const int xidx = W - 1 - xi;

    const int pix_idx = n * H * W * K + yidx * W * K + xidx * K;
    for (int k = 0; k < q_size; k++) {
      face_idxs[pix_idx + k] = q[k].idx;
      zbuf[pix_idx + k] = q[k].z;
      pix_dists[pix_idx + k] = q[k].dist;
      bary[(pix_idx + k) * 3 + 0] = q[k].bary.x;
      bary[(pix_idx + k) * 3 + 1] = q[k].bary.y;
      bary[(pix_idx + k) * 3 + 2] = q[k].bary.z;
    }
  }
}

std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor>
RasterizeMeshesFineCuda(
    const at::Tensor& face_verts,
    const at::Tensor& bin_faces,
    const at::Tensor& clipped_faces_neighbor_idx,
    const std::tuple<int, int> image_size,
    const float blur_radius,
    const int bin_size,
    const int faces_per_pixel,
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces) {
  TORCH_CHECK(
      face_verts.ndimension() == 3 && face_verts.size(1) == 3 &&
          face_verts.size(2) == 3,
      "face_verts must have dimensions (num_faces, 3, 3)");
  TORCH_CHECK(bin_faces.ndimension() == 4, "bin_faces must have 4 dimensions");
  TORCH_CHECK(
      clipped_faces_neighbor_idx.size(0) == face_verts.size(0),
      "clipped_faces_neighbor_idx must have the same first dimension as face_verts");

  // Check inputs are on the same device
  at::TensorArg face_verts_t{face_verts, "face_verts", 1},
      bin_faces_t{bin_faces, "bin_faces", 2},
      clipped_faces_neighbor_idx_t{
          clipped_faces_neighbor_idx, "clipped_faces_neighbor_idx", 3};
  at::CheckedFrom c = "RasterizeMeshesFineCuda";
  at::checkAllSameGPU(
      c, {face_verts_t, bin_faces_t, clipped_faces_neighbor_idx_t});

  // Set the device for the kernel launch based on the device of the input
  at::cuda::CUDAGuard device_guard(face_verts.device());
  cudaStream_t stream = at::cuda::getCurrentCUDAStream();

  // bin_faces shape (N, BH, BW, M)
  const int N = bin_faces.size(0);
  const int BH = bin_faces.size(1);
  const int BW = bin_faces.size(2);
  const int M = bin_faces.size(3);
  const int K = faces_per_pixel;

  const int H = std::get<0>(image_size);
  const int W = std::get<1>(image_size);

  if (K > kMaxPointsPerPixel) {
    AT_ERROR("Must have num_closest <= 150");
  }
  auto long_opts = bin_faces.options().dtype(at::kLong);
  auto float_opts = face_verts.options().dtype(at::kFloat);

  at::Tensor face_idxs = at::full({N, H, W, K}, -1, long_opts);
  at::Tensor zbuf = at::full({N, H, W, K}, -1, float_opts);
  at::Tensor pix_dists = at::full({N, H, W, K}, -1, float_opts);
  at::Tensor bary = at::full({N, H, W, K, 3}, -1, float_opts);

  if (face_idxs.numel() == 0) {
    AT_CUDA_CHECK(cudaGetLastError());
    return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
  }

  const size_t blocks = 1024;
  const size_t threads = 64;

  RasterizeMeshesFineCudaKernel<<<blocks, threads, 0, stream>>>(
      face_verts.contiguous().data_ptr<float>(),
      bin_faces.contiguous().data_ptr<int32_t>(),
      clipped_faces_neighbor_idx.contiguous().data_ptr<int64_t>(),
      blur_radius,
      bin_size,
      perspective_correct,
      clip_barycentric_coords,
      cull_backfaces,
      N,
      BH,
      BW,
      M,
      H,
      W,
      K,
      face_idxs.data_ptr<int64_t>(),
      zbuf.data_ptr<float>(),
      pix_dists.data_ptr<float>(),
      bary.data_ptr<float>());

  // Surface any launch error, consistent with the other launches in this file
  // (the original returned without checking).
  AT_CUDA_CHECK(cudaGetLastError());
  return std::make_tuple(face_idxs, zbuf, bary, pix_dists);
}

// ============================================================================
// File: pytorch3d/csrc/rasterize_meshes/rasterize_meshes.h
// ============================================================================

/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#pragma once
// NOTE(review): the angle-bracket include targets were stripped by the
// extraction; restored from the names this header uses — confirm upstream.
#include <torch/extension.h>
#include <cstdio>
#include <tuple>
#include "rasterize_coarse/rasterize_coarse.h"
#include "utils/pytorch3d_cutils.h"

// ****************************************************************************
// *                           FORWARD PASS                                   *
// ****************************************************************************

std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
RasterizeMeshesNaiveCpu(
    const torch::Tensor& face_verts,
    const torch::Tensor& mesh_to_face_first_idx,
    const torch::Tensor& num_faces_per_mesh,
    const torch::Tensor& clipped_faces_neighbor_idx,
    const std::tuple<int, int> image_size,
    const float blur_radius,
    const int faces_per_pixel,
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces);

#ifdef WITH_CUDA
std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor>
RasterizeMeshesNaiveCuda(
    const at::Tensor& face_verts,
    const at::Tensor& mesh_to_face_first_idx,
    const at::Tensor& num_faces_per_mesh,
    const torch::Tensor& clipped_faces_neighbor_idx,
    const std::tuple<int, int> image_size,
    const float blur_radius,
    const int num_closest,
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces);
#endif
// Forward pass for rasterizing a batch of meshes.
//
// Args:
//  face_verts: Tensor of shape (F, 3, 3) giving (packed) vertex positions for
//              faces in all the meshes in the batch. Concretely,
//              face_verts[f, i] = [x, y, z] gives the coordinates for the
//              ith vertex of the fth face. These vertices are expected to be
//              in NDC coordinates in the range [-1, 1].
//  mesh_to_face_first_idx: LongTensor of shape (N) giving the index in
//                          faces_verts of the first face in each mesh in
//                          the batch where N is the batch size.
//  num_faces_per_mesh: LongTensor of shape (N) giving the number of faces
//                      for each mesh in the batch.
//  clipped_faces_neighbor_idx: LongTensor of shape (F,) giving the
//    index of the neighboring face for each face which was clipped to a
//    quadrilateral and then divided into two triangles.
//    e.g. for a face f partially behind the image plane which is split into
//    two triangles (t1, t2): clipped_faces_neighbor_idx[t1_idx] = t2_idx
//    Faces which are not clipped and subdivided are set to -1.
//  image_size: Tuple (H, W) giving the size in pixels of the output
//              image to be rasterized.
//  blur_radius: float distance in NDC coordinates used to expand the face
//               bounding boxes for the rasterization. Set to 0.0 if no blur
//               is required.
//  faces_per_pixel: the number of closest faces to rasterize per pixel.
//  perspective_correct: Whether to apply perspective correction when
//                       computing barycentric coordinates. If true, returns
//                       world-space barycentric coordinates for each pixel;
//                       if false, returns screen-space barycentric
//                       coordinates instead.
//  clip_barycentric_coords: Whether, after any perspective correction
//    is applied but before the depth is calculated (e.g. for z clipping),
//    to "correct" a location outside the face (i.e. with a negative
//    barycentric coordinate) to a position on the edge of the face.
//  cull_backfaces: Whether to only rasterize mesh faces which are visible to
//                  the camera. This assumes vertices of front-facing
//                  triangles are ordered anti-clockwise, so back-facing
//                  triangles appear clockwise from the current view. NOTE:
//                  only works if faces are consistently counter-clockwise
//                  when viewed from the outside.
//
// Returns:
//  A 4 element tuple of:
//  pix_to_face: int64 tensor of shape (N, H, W, K) giving the face index of
//               each of the closest faces to the pixel in the rasterized
//               image, or -1 for pixels not covered by any face.
//  zbuf: float32 Tensor of shape (N, H, W, K) giving the depth of each of
//        the closest faces for each pixel.
//  barycentric_coords: float tensor of shape (N, H, W, K, 3) giving
//                      barycentric coordinates of the pixel with respect to
//                      each of the closest faces along the z axis, padded
//                      with -1 for pixels hit by fewer than faces_per_pixel
//                      faces.
//  dists: float tensor of shape (N, H, W, K) giving the euclidean distance
//         in the (NDC) x/y plane between each pixel and its K closest faces
//         along the z axis, padded with -1 for pixels hit by fewer than
//         faces_per_pixel faces.
inline std::tuple<torch::Tensor, torch::Tensor, torch::Tensor, torch::Tensor>
RasterizeMeshesNaive(
    const torch::Tensor& face_verts,
    const torch::Tensor& mesh_to_face_first_idx,
    const torch::Tensor& num_faces_per_mesh,
    const torch::Tensor& clipped_faces_neighbor_idx,
    const std::tuple<int, int> image_size,
    const float blur_radius,
    const int faces_per_pixel,
    const bool perspective_correct,
    const bool clip_barycentric_coords,
    const bool cull_backfaces) {
  // TODO: Better type checking.
  if (face_verts.is_cuda()) {
#ifdef WITH_CUDA
    CHECK_CUDA(face_verts);
    CHECK_CUDA(mesh_to_face_first_idx);
    CHECK_CUDA(num_faces_per_mesh);
    return RasterizeMeshesNaiveCuda(
        face_verts,
        mesh_to_face_first_idx,
        num_faces_per_mesh,
        clipped_faces_neighbor_idx,
        image_size,
        blur_radius,
        faces_per_pixel,
        perspective_correct,
        clip_barycentric_coords,
        cull_backfaces);
#else
    AT_ERROR("Not compiled with GPU support");
#endif
  } else {
    CHECK_CPU(face_verts);
    CHECK_CPU(mesh_to_face_first_idx);
    CHECK_CPU(num_faces_per_mesh);
    return RasterizeMeshesNaiveCpu(
        face_verts,
        mesh_to_face_first_idx,
        num_faces_per_mesh,
        clipped_faces_neighbor_idx,
        image_size,
        blur_radius,
        faces_per_pixel,
        perspective_correct,
        clip_barycentric_coords,
        cull_backfaces);
  }
}

// ****************************************************************************
// *                          BACKWARD PASS                                   *
// ****************************************************************************

torch::Tensor RasterizeMeshesBackwardCpu(
    const torch::Tensor& face_verts,
    const torch::Tensor& pix_to_face,
    const torch::Tensor& grad_zbuf,
    const torch::Tensor& grad_bary,
    const torch::Tensor& grad_dists,
    const bool perspective_correct,
    const bool clip_barycentric_coords);

#ifdef WITH_CUDA
torch::Tensor RasterizeMeshesBackwardCuda(
    const torch::Tensor& face_verts,
    const torch::Tensor& pix_to_face,
    const torch::Tensor& grad_zbuf,
    const torch::Tensor& grad_bary,
    const torch::Tensor& grad_dists,
    const bool perspective_correct,
    const bool clip_barycentric_coords);
#endif

// Args:
//  face_verts: float32 Tensor of shape (F, 3, 3) (from forward pass) giving
//              (packed) vertex positions for faces in all the meshes in
//              the batch.
//  pix_to_face: int64 tensor of shape (N, H, W, K) giving the face index of
//               each of the closest faces to the pixel in the rasterized
//               image, or -1 for pixels that are not covered by any face.
//  grad_zbuf: Tensor of shape (N, H, W, K) giving upstream gradients
//             d(loss)/d(zbuf) of the zbuf tensor from the forward pass.
//  grad_bary: Tensor of shape (N, H, W, K, 3) giving upstream gradients
//             d(loss)/d(bary) of the barycentric_coords tensor returned by
//             the forward pass.
//  grad_dists: Tensor of shape (N, H, W, K) giving upstream gradients
//              d(loss)/d(dists) of the dists tensor from the forward pass.
//  perspective_correct, clip_barycentric_coords: see RasterizeMeshesNaive.
//
// Returns:
//  grad_face_verts: float32 Tensor of shape (F, 3, 3) giving downstream
//                   gradients for the face vertices.
+torch::Tensor RasterizeMeshesBackward( + const torch::Tensor& face_verts, + const torch::Tensor& pix_to_face, + const torch::Tensor& grad_zbuf, + const torch::Tensor& grad_bary, + const torch::Tensor& grad_dists, + const bool perspective_correct, + const bool clip_barycentric_coords) { + if (face_verts.is_cuda()) { +#ifdef WITH_CUDA + CHECK_CUDA(face_verts); + CHECK_CUDA(pix_to_face); + CHECK_CUDA(grad_zbuf); + CHECK_CUDA(grad_bary); + CHECK_CUDA(grad_dists); + return RasterizeMeshesBackwardCuda( + face_verts, + pix_to_face, + grad_zbuf, + grad_bary, + grad_dists, + perspective_correct, + clip_barycentric_coords); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } else { + CHECK_CPU(face_verts); + CHECK_CPU(pix_to_face); + CHECK_CPU(grad_zbuf); + CHECK_CPU(grad_bary); + CHECK_CPU(grad_dists); + return RasterizeMeshesBackwardCpu( + face_verts, + pix_to_face, + grad_zbuf, + grad_bary, + grad_dists, + perspective_correct, + clip_barycentric_coords); + } +} + +// **************************************************************************** +// * COARSE RASTERIZATION * +// **************************************************************************** + +// RasterizeMeshesCoarseCuda in rasterize_coarse/rasterize_coarse.h + +torch::Tensor RasterizeMeshesCoarseCpu( + const torch::Tensor& face_verts, + const at::Tensor& mesh_to_face_first_idx, + const at::Tensor& num_faces_per_mesh, + const std::tuple image_size, + const float blur_radius, + const int bin_size, + const int max_faces_per_bin); + +// Args: +// face_verts: Tensor of shape (F, 3, 3) giving (packed) vertex positions for +// faces in all the meshes in the batch. Concretely, +// face_verts[f, i] = [x, y, z] gives the coordinates for the +// ith vertex of the fth face. These vertices are expected to be +// in NDC coordinates in the range [-1, 1]. 
+// mesh_to_face_first_idx: LongTensor of shape (N) giving the index in +// faces_verts of the first face in each mesh in +// the batch where N is the batch size. +// num_faces_per_mesh: LongTensor of shape (N) giving the number of faces +// for each mesh in the batch. +// image_size: Tuple (H, W) giving the size in pixels of the output +// image to be rasterized. +// blur_radius: float distance in NDC coordinates uses to expand the face +// bounding boxes for the rasterization. Set to 0.0 if no blur +// is required. +// bin_size: Size of each bin within the image (in pixels) +// max_faces_per_bin: Maximum number of faces to count in each bin. +// +// Returns: +// bin_face_idxs: Tensor of shape (N, num_bins, num_bins, K) giving the +// indices of faces that fall into each bin. + +torch::Tensor RasterizeMeshesCoarse( + const torch::Tensor& face_verts, + const torch::Tensor& mesh_to_face_first_idx, + const torch::Tensor& num_faces_per_mesh, + const std::tuple image_size, + const float blur_radius, + const int bin_size, + const int max_faces_per_bin) { + if (face_verts.is_cuda()) { +#ifdef WITH_CUDA + CHECK_CUDA(face_verts); + CHECK_CUDA(mesh_to_face_first_idx); + CHECK_CUDA(num_faces_per_mesh); + return RasterizeMeshesCoarseCuda( + face_verts, + mesh_to_face_first_idx, + num_faces_per_mesh, + image_size, + blur_radius, + bin_size, + max_faces_per_bin); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } else { + CHECK_CPU(face_verts); + CHECK_CPU(mesh_to_face_first_idx); + CHECK_CPU(num_faces_per_mesh); + return RasterizeMeshesCoarseCpu( + face_verts, + mesh_to_face_first_idx, + num_faces_per_mesh, + image_size, + blur_radius, + bin_size, + max_faces_per_bin); + } +} + +// **************************************************************************** +// * FINE RASTERIZATION * +// **************************************************************************** + +#ifdef WITH_CUDA +std::tuple +RasterizeMeshesFineCuda( + const torch::Tensor& face_verts, + const 
torch::Tensor& bin_faces, + const torch::Tensor& clipped_faces_neighbor_idx, + const std::tuple image_size, + const float blur_radius, + const int bin_size, + const int faces_per_pixel, + const bool perspective_correct, + const bool clip_barycentric_coords, + const bool cull_backfaces); +#endif +// Args: +// face_verts: Tensor of shape (F, 3, 3) giving (packed) vertex positions for +// faces in all the meshes in the batch. Concretely, +// face_verts[f, i] = [x, y, z] gives the coordinates for the +// ith vertex of the fth face. These vertices are expected to be +// in NDC coordinates in the range [-1, 1]. +// bin_faces: int32 Tensor of shape (N, B, B, M) giving the indices of faces +// that fall into each bin (output from coarse rasterization). +// clipped_faces_neighbor_idx: LongTensor of shape (F,) giving the +// index of the neighboring face for each face which was clipped to a +// quadrilateral and then divided into two triangles. +// e.g. for a face f partially behind the image plane which is split into +// two triangles (t1, t2): clipped_faces_neighbor_idx[t1_idx] = t2_idx +// Faces which are not clipped and subdivided are set to -1. +// image_size: Tuple (H, W) giving the size in pixels of the output +// image to be rasterized. +// blur_radius: float distance in NDC coordinates uses to expand the face +// bounding boxes for the rasterization. Set to 0.0 if no blur +// is required. +// bin_size: Size of each bin within the image (in pixels) +// faces_per_pixel: the number of closeset faces to rasterize per pixel. +// perspective_correct: Whether to apply perspective correction when +// computing barycentric coordinates. If this is True, +// then this function returns world-space barycentric +// coordinates for each pixel; if this is False then +// this function instead returns screen-space +// barycentric coordinates for each pixel. +// clip_barycentric_coords: Whether, after any perspective correction +// is applied but before the depth is calculated (e.g. 
for +// z clipping), to "correct" a location outside the face (i.e. with +// a negative barycentric coordinate) to a position on the edge of the +// face. +// cull_backfaces: Bool, Whether to only rasterize mesh faces which are +// visible to the camera. This assumes that vertices of +// front-facing triangles are ordered in an anti-clockwise +// fashion, and triangles that face away from the camera are +// in a clockwise order relative to the current view +// direction. NOTE: This will only work if the mesh faces are +// consistently defined with counter-clockwise ordering when +// viewed from the outside. +// +// Returns (same as rasterize_meshes): +// A 4 element tuple of: +// pix_to_face: int64 tensor of shape (N, H, W, K) giving the face index of +// each of the closest faces to the pixel in the rasterized +// image, or -1 for pixels that are not covered by any face. +// zbuf: float32 Tensor of shape (N, H, W, K) giving the depth of each of +// the closest faces for each pixel. +// barycentric_coords: float tensor of shape (N, H, W, K, 3) giving +// barycentric coordinates of the pixel with respect to +// each of the closest faces along the z axis, padded +// with -1 for pixels hit by fewer than +// faces_per_pixel faces. +// dists: float tensor of shape (N, H, W, K) giving the euclidean distance +// in the (NDC) x/y plane between each pixel and its K closest +// faces along the z axis padded with -1 for pixels hit by fewer than +// faces_per_pixel faces. 
+std::tuple +RasterizeMeshesFine( + const torch::Tensor& face_verts, + const torch::Tensor& bin_faces, + const torch::Tensor& clipped_faces_neighbor_idx, + const std::tuple image_size, + const float blur_radius, + const int bin_size, + const int faces_per_pixel, + const bool perspective_correct, + const bool clip_barycentric_coords, + const bool cull_backfaces) { + if (face_verts.is_cuda()) { +#ifdef WITH_CUDA + CHECK_CUDA(face_verts); + CHECK_CUDA(bin_faces); + return RasterizeMeshesFineCuda( + face_verts, + bin_faces, + clipped_faces_neighbor_idx, + image_size, + blur_radius, + bin_size, + faces_per_pixel, + perspective_correct, + clip_barycentric_coords, + cull_backfaces); +#else + AT_ERROR("Not compiled with GPU support"); +#endif + } else { + CHECK_CPU(face_verts); + CHECK_CPU(bin_faces); + AT_ERROR("NOT IMPLEMENTED"); + } +} + +// **************************************************************************** +// * MAIN ENTRY POINT * +// **************************************************************************** + +// This is the main entry point for the forward pass of the mesh rasterizer; +// it uses either naive or coarse-to-fine rasterization based on bin_size. +// +// Args: +// face_verts: Tensor of shape (F, 3, 3) giving (packed) vertex positions for +// faces in all the meshes in the batch. Concretely, +// face_verts[f, i] = [x, y, z] gives the coordinates for the +// ith vertex of the fth face. These vertices are expected to be +// in NDC coordinates in the range [-1, 1]. +// mesh_to_face_first_idx: LongTensor of shape (N) giving the index in +// faces_verts of the first face in each mesh in +// the batch where N is the batch size. +// num_faces_per_mesh: LongTensor of shape (N) giving the number of faces +// for each mesh in the batch. +// clipped_faces_neighbor_idx: LongTensor of shape (F,) giving the +// index of the neighboring face for each face which was clipped to a +// quadrilateral and then divided into two triangles. +// e.g. 
for a face f partially behind the image plane which is split into +// two triangles (t1, t2): clipped_faces_neighbor_idx[t1_idx] = t2_idx +// Faces which are not clipped and subdivided are set to -1. +// image_size: Tuple (H, W) giving the size in pixels of the output +// image to be rasterized. +// blur_radius: float distance in NDC coordinates uses to expand the face +// bounding boxes for the rasterization. Set to 0.0 if no blur +// is required. +// faces_per_pixel: the number of closeset faces to rasterize per pixel. +// bin_size: Bin size (in pixels) for coarse-to-fine rasterization. Setting +// bin_size=0 uses naive rasterization instead. +// max_faces_per_bin: The maximum number of faces allowed to fall into each +// bin when using coarse-to-fine rasterization. +// perspective_correct: Whether to apply perspective correction when +// computing barycentric coordinates. If this is True, +// then this function returns world-space barycentric +// coordinates for each pixel; if this is False then +// this function instead returns screen-space +// barycentric coordinates for each pixel. +// clip_barycentric_coords: Whether, after any perspective correction +// is applied but before the depth is calculated (e.g. for +// z clipping), to "correct" a location outside the face (i.e. with +// a negative barycentric coordinate) to a position on the edge of the +// face. +// cull_backfaces: Bool, Whether to only rasterize mesh faces which are +// visible to the camera. This assumes that vertices of +// front-facing triangles are ordered in an anti-clockwise +// fashion, and triangles that face away from the camera are +// in a clockwise order relative to the current view +// direction. NOTE: This will only work if the mesh faces are +// consistently defined with counter-clockwise ordering when +// viewed from the outside. 
+// +// Returns: +// A 4 element tuple of: +// pix_to_face: int64 tensor of shape (N, H, W, K) giving the face index of +// each of the closest faces to the pixel in the rasterized +// image, or -1 for pixels that are not covered by any face. +// zbuf: float32 Tensor of shape (N, H, W, K) giving the depth of each of +// the closest faces for each pixel. +// barycentric_coords: float tensor of shape (N, H, W, K, 3) giving +// barycentric coordinates of the pixel with respect to +// each of the closest faces along the z axis, padded +// with -1 for pixels hit by fewer than +// faces_per_pixel faces. +// dists: float tensor of shape (N, H, W, K) giving the euclidean distance +// in the (NDC) x/y plane between each pixel and its K closest +// faces along the z axis padded with -1 for pixels hit by fewer than +// faces_per_pixel faces. +std::tuple +RasterizeMeshes( + const torch::Tensor& face_verts, + const torch::Tensor& mesh_to_face_first_idx, + const torch::Tensor& num_faces_per_mesh, + const torch::Tensor& clipped_faces_neighbor_idx, + const std::tuple image_size, + const float blur_radius, + const int faces_per_pixel, + const int bin_size, + const int max_faces_per_bin, + const bool perspective_correct, + const bool clip_barycentric_coords, + const bool cull_backfaces) { + if (bin_size > 0 && max_faces_per_bin > 0) { + // Use coarse-to-fine rasterization + at::Tensor bin_faces = RasterizeMeshesCoarse( + face_verts, + mesh_to_face_first_idx, + num_faces_per_mesh, + image_size, + blur_radius, + bin_size, + max_faces_per_bin); + return RasterizeMeshesFine( + face_verts, + bin_faces, + clipped_faces_neighbor_idx, + image_size, + blur_radius, + bin_size, + faces_per_pixel, + perspective_correct, + clip_barycentric_coords, + cull_backfaces); + } else { + // Use the naive per-pixel implementation + return RasterizeMeshesNaive( + face_verts, + mesh_to_face_first_idx, + num_faces_per_mesh, + clipped_faces_neighbor_idx, + image_size, + blur_radius, + faces_per_pixel, + 
perspective_correct, + clip_barycentric_coords, + cull_backfaces); + } +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/rasterize_meshes/rasterize_meshes_cpu.cpp b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/rasterize_meshes/rasterize_meshes_cpu.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1a2d652e1b842a029204e3a2769f29fd3341c406 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/rasterize_meshes/rasterize_meshes_cpu.cpp @@ -0,0 +1,639 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include +#include +#include +#include "ATen/core/TensorAccessor.h" +#include "rasterize_points/rasterization_utils.h" +#include "utils/geometry_utils.h" +#include "utils/vec2.h" +#include "utils/vec3.h" + +// Get (x, y, z) values for vertex from (3, 3) tensor face. +template +auto ExtractVerts(const Face& face, const int vertex_index) { + return std::make_tuple( + face[vertex_index][0], face[vertex_index][1], face[vertex_index][2]); +} + +// Compute min/max x/y for each face. 
+auto ComputeFaceBoundingBoxes(const torch::Tensor& face_verts) { + const int total_F = face_verts.size(0); + auto float_opts = face_verts.options().dtype(torch::kFloat32); + auto face_verts_a = face_verts.accessor(); + torch::Tensor face_bboxes = torch::full({total_F, 6}, -2.0, float_opts); + + // Loop through all the faces + for (int f = 0; f < total_F; ++f) { + const auto& face = face_verts_a[f]; + float x0, x1, x2, y0, y1, y2, z0, z1, z2; + std::tie(x0, y0, z0) = ExtractVerts(face, 0); + std::tie(x1, y1, z1) = ExtractVerts(face, 1); + std::tie(x2, y2, z2) = ExtractVerts(face, 2); + + const float x_min = std::min(x0, std::min(x1, x2)); + const float y_min = std::min(y0, std::min(y1, y2)); + const float x_max = std::max(x0, std::max(x1, x2)); + const float y_max = std::max(y0, std::max(y1, y2)); + const float z_min = std::min(z0, std::min(z1, z2)); + const float z_max = std::max(z0, std::max(z1, z2)); + + face_bboxes[f][0] = x_min; + face_bboxes[f][1] = y_min; + face_bboxes[f][2] = x_max; + face_bboxes[f][3] = y_max; + face_bboxes[f][4] = z_min; + face_bboxes[f][5] = z_max; + } + + return face_bboxes; +} + +// Check if the point (px, py) lies inside the face bounding box face_bbox. +// Return true if the point is outside. +template +bool CheckPointOutsideBoundingBox( + const Face& face_bbox, + float blur_radius, + float px, + float py) { + // Read triangle bbox coordinates and expand by blur radius. + float x_min = face_bbox[0] - blur_radius; + float y_min = face_bbox[1] - blur_radius; + float x_max = face_bbox[2] + blur_radius; + float y_max = face_bbox[3] + blur_radius; + + // Faces with at least one vertex behind the camera won't render correctly + // and should be removed or clipped before calling the rasterizer + const bool z_invalid = face_bbox[4] < kEpsilon; + + // Check if the current point is within the triangle bounding box. + return (px > x_max || px < x_min || py > y_max || py < y_min || z_invalid); +} + +// Calculate areas of all faces. 
Returns a tensor of shape (total_faces, 1) +// where faces with zero area have value -1. +auto ComputeFaceAreas(const torch::Tensor& face_verts) { + const int total_F = face_verts.size(0); + auto float_opts = face_verts.options().dtype(torch::kFloat32); + auto face_verts_a = face_verts.accessor(); + torch::Tensor face_areas = torch::full({total_F}, -1, float_opts); + + // Loop through all the faces + for (int f = 0; f < total_F; ++f) { + const auto& face = face_verts_a[f]; + float x0, x1, x2, y0, y1, y2, z0, z1, z2; + std::tie(x0, y0, z0) = ExtractVerts(face, 0); + std::tie(x1, y1, z1) = ExtractVerts(face, 1); + std::tie(x2, y2, z2) = ExtractVerts(face, 2); + + const vec2 v0(x0, y0); + const vec2 v1(x1, y1); + const vec2 v2(x2, y2); + + const float face_area = EdgeFunctionForward(v0, v1, v2); + face_areas[f] = face_area; + } + + return face_areas; +} + +// Helper function to use with std::find_if to find the index of any +// values in the top k struct which match a given idx. +struct IsNeighbor { + IsNeighbor(int neighbor_idx) { + this->neighbor_idx = neighbor_idx; + } + bool operator()(std::tuple elem) { + return (std::get<1>(elem) == neighbor_idx); + } + int neighbor_idx; +}; + +namespace { +void RasterizeMeshesNaiveCpu_worker( + const int start_yi, + const int end_yi, + const torch::Tensor& mesh_to_face_first_idx, + const torch::Tensor& num_faces_per_mesh, + const float blur_radius, + const bool perspective_correct, + const bool clip_barycentric_coords, + const bool cull_backfaces, + const int32_t N, + const int H, + const int W, + const int K, + at::TensorAccessor& face_verts_a, + at::TensorAccessor& face_areas_a, + at::TensorAccessor& face_bboxes_a, + at::TensorAccessor& neighbor_idx_a, + at::TensorAccessor& zbuf_a, + at::TensorAccessor& face_idxs_a, + at::TensorAccessor& pix_dists_a, + at::TensorAccessor& barycentric_coords_a) { + for (int n = 0; n < N; ++n) { + // Loop through each mesh in the batch. 
+ // Get the start index of the faces in faces_packed and the num faces + // in the mesh to avoid having to loop through all the faces. + const int face_start_idx = mesh_to_face_first_idx[n].item().to(); + const int face_stop_idx = + (face_start_idx + num_faces_per_mesh[n].item().to()); + + // Iterate through the horizontal lines of the image from top to bottom. + for (int yi = start_yi; yi < end_yi; ++yi) { + // Reverse the order of yi so that +Y is pointing upwards in the image. + const int yidx = H - 1 - yi; + + // Y coordinate of the top of the pixel. + const float yf = PixToNonSquareNdc(yidx, H, W); + // Iterate through pixels on this horizontal line, left to right. + for (int xi = 0; xi < W; ++xi) { + // Reverse the order of xi so that +X is pointing to the left in the + // image. + const int xidx = W - 1 - xi; + + // X coordinate of the left of the pixel. + const float xf = PixToNonSquareNdc(xidx, W, H); + + // Use a deque to hold values: + // (z, idx, r, bary.x, bary.y. bary.z) + // Sort the deque as needed to mimic a priority queue. + std::deque> q; + + // Loop through the faces in the mesh. + for (int f = face_start_idx; f < face_stop_idx; ++f) { + // Get coordinates of three face vertices. + const auto& face = face_verts_a[f]; + float x0, x1, x2, y0, y1, y2, z0, z1, z2; + std::tie(x0, y0, z0) = ExtractVerts(face, 0); + std::tie(x1, y1, z1) = ExtractVerts(face, 1); + std::tie(x2, y2, z2) = ExtractVerts(face, 2); + + const vec2 v0(x0, y0); + const vec2 v1(x1, y1); + const vec2 v2(x2, y2); + + const float face_area = face_areas_a[f]; + const bool back_face = face_area < 0.0; + // Check if the face is visible to the camera. + if (cull_backfaces && back_face) { + continue; + } + // Skip faces with zero area. + if (face_area <= kEpsilon && face_area >= -1.0f * kEpsilon) { + continue; + } + + // Skip if point is outside the face bounding box. 
+ const auto face_bbox = face_bboxes_a[f]; + const bool outside_bbox = CheckPointOutsideBoundingBox( + face_bbox, std::sqrt(blur_radius), xf, yf); + if (outside_bbox) { + continue; + } + + // Compute barycentric coordinates and use this to get the + // depth of the point on the triangle. + const vec2 pxy(xf, yf); + const vec3 bary0 = + BarycentricCoordinatesForward(pxy, v0, v1, v2); + const vec3 bary = !perspective_correct + ? bary0 + : BarycentricPerspectiveCorrectionForward(bary0, z0, z1, z2); + + const vec3 bary_clip = + !clip_barycentric_coords ? bary : BarycentricClipForward(bary); + + // Use barycentric coordinates to get the depth of the current pixel + const float pz = + (bary_clip.x * z0 + bary_clip.y * z1 + bary_clip.z * z2); + + if (pz < 0) { + continue; // Point is behind the image plane so ignore. + } + + // Compute squared distance of the point to the triangle. + const float dist = PointTriangleDistanceForward(pxy, v0, v1, v2); + + // Use the bary coordinates to determine if the point is + // inside the face. + const bool inside = bary.x > 0.0f && bary.y > 0.0f && bary.z > 0.0f; + + // If the point is inside the triangle then signed_dist + // is negative. + const float signed_dist = inside ? -dist : dist; + + // Check if pixel is outside blur region + if (!inside && dist >= blur_radius) { + continue; + } + + // Handle the case where a face (f) partially behind the image plane + // is clipped to a quadrilateral and then split into two faces (t1, + // t2). In this case we: + // 1. Find the index of the neighbor (e.g. for t1 need index of t2) + // 2. Check if the neighbor (t2) is already in the top K faces + // 3. If yes, compare the distance of the pixel to t1 with the + // distance to t2. + // 4. If dist_t1 < dist_t2, overwrite the values for t2 in the top K + // faces. + const int neighbor_idx = neighbor_idx_a[f]; + int idx_top_k = -1; + + // Check if neighboring face is already in the top K. 
+ if (neighbor_idx != -1) { + const auto it = + std::find_if(q.begin(), q.end(), IsNeighbor(neighbor_idx)); + // Get the index of the element from the iterator + idx_top_k = (it != q.end()) ? it - q.begin() : idx_top_k; + } + + // If idx_top_k idx is not -1 then it is in the top K struct. + if (idx_top_k != -1) { + // If dist of current face is less than neighbor, overwrite + // the neighbor face values in the top K struct. + const auto neighbor = q[idx_top_k]; + const float dist_neighbor = std::abs(std::get<2>(neighbor)); + if (dist < dist_neighbor) { + // Overwrite the neighbor face values. + q[idx_top_k] = std::make_tuple( + pz, f, signed_dist, bary_clip.x, bary_clip.y, bary_clip.z); + } + } else { + // Handle as a normal face. + // The current pixel lies inside the current face. + // Add at the end of the deque. + q.emplace_back( + pz, f, signed_dist, bary_clip.x, bary_clip.y, bary_clip.z); + } + + // Sort the deque inplace based on the z distance + // to mimic using a priority queue. 
+ std::sort(q.begin(), q.end()); + if (static_cast(q.size()) > K) { + // remove the last value + q.pop_back(); + } + } + while (!q.empty()) { + // Loop through and add values to the output tensors + auto t = q.back(); + q.pop_back(); + const int i = q.size(); + zbuf_a[n][yi][xi][i] = std::get<0>(t); + face_idxs_a[n][yi][xi][i] = std::get<1>(t); + pix_dists_a[n][yi][xi][i] = std::get<2>(t); + barycentric_coords_a[n][yi][xi][i][0] = std::get<3>(t); + barycentric_coords_a[n][yi][xi][i][1] = std::get<4>(t); + barycentric_coords_a[n][yi][xi][i][2] = std::get<5>(t); + } + } + } + } +} +} // namespace + +std::tuple +RasterizeMeshesNaiveCpu( + const torch::Tensor& face_verts, + const torch::Tensor& mesh_to_face_first_idx, + const torch::Tensor& num_faces_per_mesh, + const torch::Tensor& clipped_faces_neighbor_idx, + const std::tuple image_size, + const float blur_radius, + const int faces_per_pixel, + const bool perspective_correct, + const bool clip_barycentric_coords, + const bool cull_backfaces) { + if (face_verts.ndimension() != 3 || face_verts.size(1) != 3 || + face_verts.size(2) != 3) { + AT_ERROR("face_verts must have dimensions (num_faces, 3, 3)"); + } + if (num_faces_per_mesh.size(0) != mesh_to_face_first_idx.size(0)) { + AT_ERROR( + "num_faces_per_mesh must have save size first dimension as mesh_to_face_first_idx"); + } + + const int32_t N = mesh_to_face_first_idx.size(0); // batch_size. + const int H = std::get<0>(image_size); + const int W = std::get<1>(image_size); + const int K = faces_per_pixel; + + auto long_opts = num_faces_per_mesh.options().dtype(torch::kInt64); + auto float_opts = face_verts.options().dtype(torch::kFloat32); + + // Initialize output tensors. 
+ torch::Tensor face_idxs = torch::full({N, H, W, K}, -1, long_opts); + torch::Tensor zbuf = torch::full({N, H, W, K}, -1, float_opts); + torch::Tensor pix_dists = torch::full({N, H, W, K}, -1, float_opts); + torch::Tensor barycentric_coords = + torch::full({N, H, W, K, 3}, -1, float_opts); + + auto face_verts_a = face_verts.accessor(); + auto face_idxs_a = face_idxs.accessor(); + auto zbuf_a = zbuf.accessor(); + auto pix_dists_a = pix_dists.accessor(); + auto barycentric_coords_a = barycentric_coords.accessor(); + auto neighbor_idx_a = clipped_faces_neighbor_idx.accessor(); + + auto face_bboxes = ComputeFaceBoundingBoxes(face_verts); + auto face_bboxes_a = face_bboxes.accessor(); + auto face_areas = ComputeFaceAreas(face_verts); + auto face_areas_a = face_areas.accessor(); + + const int64_t n_threads = at::get_num_threads(); + std::vector threads; + threads.reserve(n_threads); + const int chunk_size = 1 + (H - 1) / n_threads; + int start_yi = 0; + for (int iThread = 0; iThread < n_threads; ++iThread) { + const int64_t end_yi = std::min(start_yi + chunk_size, H); + threads.emplace_back( + RasterizeMeshesNaiveCpu_worker, + start_yi, + end_yi, + mesh_to_face_first_idx, + num_faces_per_mesh, + blur_radius, + perspective_correct, + clip_barycentric_coords, + cull_backfaces, + N, + H, + W, + K, + std::ref(face_verts_a), + std::ref(face_areas_a), + std::ref(face_bboxes_a), + std::ref(neighbor_idx_a), + std::ref(zbuf_a), + std::ref(face_idxs_a), + std::ref(pix_dists_a), + std::ref(barycentric_coords_a)); + start_yi += chunk_size; + } + for (auto&& thread : threads) { + thread.join(); + } + + return std::make_tuple(face_idxs, zbuf, barycentric_coords, pix_dists); +} + +torch::Tensor RasterizeMeshesBackwardCpu( + const torch::Tensor& face_verts, // (F, 3, 3) + const torch::Tensor& pix_to_face, // (N, H, W, K) + const torch::Tensor& grad_zbuf, // (N, H, W, K) + const torch::Tensor& grad_bary, // (N, H, W, K, 3) + const torch::Tensor& grad_dists, // (N, H, W, K) + const bool 
perspective_correct, + const bool clip_barycentric_coords) { + const int F = face_verts.size(0); + const int N = pix_to_face.size(0); + const int H = pix_to_face.size(1); + const int W = pix_to_face.size(2); + const int K = pix_to_face.size(3); + + torch::Tensor grad_face_verts = torch::zeros({F, 3, 3}, face_verts.options()); + auto face_verts_a = face_verts.accessor(); + auto pix_to_face_a = pix_to_face.accessor(); + auto grad_dists_a = grad_dists.accessor(); + auto grad_zbuf_a = grad_zbuf.accessor(); + auto grad_bary_a = grad_bary.accessor(); + + for (int n = 0; n < N; ++n) { + // Iterate through the horizontal lines of the image from top to bottom. + for (int y = 0; y < H; ++y) { + // Reverse the order of yi so that +Y is pointing upwards in the image. + const int yidx = H - 1 - y; + + // Y coordinate of the top of the pixel. + const float yf = PixToNonSquareNdc(yidx, H, W); + // Iterate through pixels on this horizontal line, left to right. + for (int x = 0; x < W; ++x) { + // Reverse the order of xi so that +X is pointing to the left in the + // image. + const int xidx = W - 1 - x; + + // X coordinate of the left of the pixel. + const float xf = PixToNonSquareNdc(xidx, W, H); + const vec2 pxy(xf, yf); + + // Iterate through the faces that hit this pixel. + for (int k = 0; k < K; ++k) { + // Get face index from forward pass output. + const int f = pix_to_face_a[n][y][x][k]; + if (f < 0) { + continue; // padded face. + } + // Get coordinates of the three face vertices. 
+ const auto face_verts_f = face_verts_a[f]; + const float x0 = face_verts_f[0][0]; + const float y0 = face_verts_f[0][1]; + const float z0 = face_verts_f[0][2]; + const float x1 = face_verts_f[1][0]; + const float y1 = face_verts_f[1][1]; + const float z1 = face_verts_f[1][2]; + const float x2 = face_verts_f[2][0]; + const float y2 = face_verts_f[2][1]; + const float z2 = face_verts_f[2][2]; + const vec2 v0xy(x0, y0); + const vec2 v1xy(x1, y1); + const vec2 v2xy(x2, y2); + + // Get upstream gradients for the face. + const float grad_dist_upstream = grad_dists_a[n][y][x][k]; + const float grad_zbuf_upstream = grad_zbuf_a[n][y][x][k]; + const auto grad_bary_upstream_w012 = grad_bary_a[n][y][x][k]; + const float grad_bary_upstream_w0 = grad_bary_upstream_w012[0]; + const float grad_bary_upstream_w1 = grad_bary_upstream_w012[1]; + const float grad_bary_upstream_w2 = grad_bary_upstream_w012[2]; + const vec3 grad_bary_upstream( + grad_bary_upstream_w0, + grad_bary_upstream_w1, + grad_bary_upstream_w2); + + const vec3 bary0 = + BarycentricCoordinatesForward(pxy, v0xy, v1xy, v2xy); + const vec3 bary = !perspective_correct + ? bary0 + : BarycentricPerspectiveCorrectionForward(bary0, z0, z1, z2); + const vec3 bary_clip = + !clip_barycentric_coords ? bary : BarycentricClipForward(bary); + + // Distances inside the face are negative so get the + // correct sign to apply to the upstream gradient. + const bool inside = bary.x > 0.0f && bary.y > 0.0f && bary.z > 0.0f; + const float sign = inside ? 
-1.0f : 1.0f; + + const auto grad_dist_f = PointTriangleDistanceBackward( + pxy, v0xy, v1xy, v2xy, sign * grad_dist_upstream); + const auto ddist_d_v0 = std::get<1>(grad_dist_f); + const auto ddist_d_v1 = std::get<2>(grad_dist_f); + const auto ddist_d_v2 = std::get<3>(grad_dist_f); + + // Upstream gradient for barycentric coords from zbuf calculation: + // zbuf = bary_w0 * z0 + bary_w1 * z1 + bary_w2 * z2 + // Therefore + // d_zbuf/d_bary_w0 = z0 + // d_zbuf/d_bary_w1 = z1 + // d_zbuf/d_bary_w2 = z2 + const vec3 d_zbuf_d_baryclip(z0, z1, z2); + + // Total upstream barycentric gradients are the sum of + // external upstream gradients and contribution from zbuf. + const vec3 grad_bary_f_sum = + (grad_bary_upstream + grad_zbuf_upstream * d_zbuf_d_baryclip); + + vec3 grad_bary0 = grad_bary_f_sum; + + if (clip_barycentric_coords) { + grad_bary0 = BarycentricClipBackward(bary, grad_bary0); + } + + if (perspective_correct) { + auto perspective_grads = BarycentricPerspectiveCorrectionBackward( + bary0, z0, z1, z2, grad_bary0); + grad_bary0 = std::get<0>(perspective_grads); + grad_face_verts[f][0][2] += std::get<1>(perspective_grads); + grad_face_verts[f][1][2] += std::get<2>(perspective_grads); + grad_face_verts[f][2][2] += std::get<3>(perspective_grads); + } + + auto grad_bary_f = + BarycentricCoordsBackward(pxy, v0xy, v1xy, v2xy, grad_bary0); + const vec2 dbary_d_v0 = std::get<1>(grad_bary_f); + const vec2 dbary_d_v1 = std::get<2>(grad_bary_f); + const vec2 dbary_d_v2 = std::get<3>(grad_bary_f); + + // Update output gradient buffer. 
+ grad_face_verts[f][0][0] += dbary_d_v0.x + ddist_d_v0.x; + grad_face_verts[f][0][1] += dbary_d_v0.y + ddist_d_v0.y; + grad_face_verts[f][0][2] += grad_zbuf_upstream * bary_clip.x; + grad_face_verts[f][1][0] += dbary_d_v1.x + ddist_d_v1.x; + grad_face_verts[f][1][1] += dbary_d_v1.y + ddist_d_v1.y; + grad_face_verts[f][1][2] += grad_zbuf_upstream * bary_clip.y; + grad_face_verts[f][2][0] += dbary_d_v2.x + ddist_d_v2.x; + grad_face_verts[f][2][1] += dbary_d_v2.y + ddist_d_v2.y; + grad_face_verts[f][2][2] += grad_zbuf_upstream * bary_clip.z; + } + } + } + } + return grad_face_verts; +} + +torch::Tensor RasterizeMeshesCoarseCpu( + const torch::Tensor& face_verts, + const torch::Tensor& mesh_to_face_first_idx, + const torch::Tensor& num_faces_per_mesh, + const std::tuple image_size, + const float blur_radius, + const int bin_size, + const int max_faces_per_bin) { + if (face_verts.ndimension() != 3 || face_verts.size(1) != 3 || + face_verts.size(2) != 3) { + AT_ERROR("face_verts must have dimensions (num_faces, 3, 3)"); + } + if (num_faces_per_mesh.ndimension() != 1) { + AT_ERROR("num_faces_per_mesh can only have one dimension"); + } + + const int N = num_faces_per_mesh.size(0); // batch size. + const int M = max_faces_per_bin; + + const float H = std::get<0>(image_size); + const float W = std::get<1>(image_size); + + // Integer division round up. + const int BH = 1 + (H - 1) / bin_size; + const int BW = 1 + (W - 1) / bin_size; + + auto opts = num_faces_per_mesh.options().dtype(torch::kInt32); + torch::Tensor faces_per_bin = torch::zeros({N, BH, BW}, opts); + torch::Tensor bin_faces = torch::full({N, BH, BW, M}, -1, opts); + auto bin_faces_a = bin_faces.accessor(); + + // Precompute all face bounding boxes. 
+ auto face_bboxes = ComputeFaceBoundingBoxes(face_verts); + auto face_bboxes_a = face_bboxes.accessor(); + + const float ndc_x_range = NonSquareNdcRange(W, H); + const float pixel_width_x = ndc_x_range / W; + const float bin_width_x = pixel_width_x * bin_size; + + const float ndc_y_range = NonSquareNdcRange(H, W); + const float pixel_width_y = ndc_y_range / H; + const float bin_width_y = pixel_width_y * bin_size; + + // Iterate through the meshes in the batch. + for (int n = 0; n < N; ++n) { + const int face_start_idx = mesh_to_face_first_idx[n].item().to(); + const int face_stop_idx = + (face_start_idx + num_faces_per_mesh[n].item().to()); + + float bin_y_min = -1.0f; + float bin_y_max = bin_y_min + bin_width_y; + + // Iterate through the horizontal bins from top to bottom. + for (int by = 0; by < BH; ++by) { + float bin_x_min = -1.0f; + float bin_x_max = bin_x_min + bin_width_x; + + // Iterate through bins on this horizontal line, left to right. + for (int bx = 0; bx < BW; ++bx) { + int32_t faces_hit = 0; + + for (int32_t f = face_start_idx; f < face_stop_idx; ++f) { + // Get bounding box and expand by blur radius. + float face_x_min = face_bboxes_a[f][0] - std::sqrt(blur_radius); + float face_y_min = face_bboxes_a[f][1] - std::sqrt(blur_radius); + float face_x_max = face_bboxes_a[f][2] + std::sqrt(blur_radius); + float face_y_max = face_bboxes_a[f][3] + std::sqrt(blur_radius); + float face_z_min = face_bboxes_a[f][4]; + + // Faces with at least one vertex behind the camera won't render + // correctly and should be removed or clipped before calling the + // rasterizer + if (face_z_min < kEpsilon) { + continue; + } + + // Use a half-open interval so that faces exactly on the + // boundary between bins will fall into exactly one bin. 
+ bool x_overlap = + (face_x_min <= bin_x_max) && (bin_x_min < face_x_max); + bool y_overlap = + (face_y_min <= bin_y_max) && (bin_y_min < face_y_max); + + if (x_overlap && y_overlap) { + // Got too many faces for this bin, so throw an error. + if (faces_hit >= max_faces_per_bin) { + AT_ERROR("Got too many faces per bin"); + } + // The current point falls in the current bin, so + // record it. + bin_faces_a[n][by][bx][faces_hit] = f; + faces_hit++; + } + } + + // Shift the bin to the right for the next loop iteration + bin_x_min = bin_x_max; + bin_x_max = bin_x_min + bin_width_x; + } + // Shift the bin down for the next loop iteration + bin_y_min = bin_y_max; + bin_y_max = bin_y_min + bin_width_y; + } + } + return bin_faces; +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/sample_pdf/sample_pdf.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/sample_pdf/sample_pdf.h new file mode 100644 index 0000000000000000000000000000000000000000..a369705311080f033803871635e5278fc2911393 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/sample_pdf/sample_pdf.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once +#include +#include +#include +#include +#include "utils/pytorch3d_cutils.h" + +// **************************************************************************** +// * SamplePdf * +// **************************************************************************** + +// Samples a probability density functions defined by bin edges `bins` and +// the non-negative per-bin probabilities `weights`. + +// Args: +// bins: FloatTensor of shape `(batch_size, n_bins+1)` denoting the edges +// of the sampling bins. 
+ +// weights: FloatTensor of shape `(batch_size, n_bins)` containing +// non-negative numbers representing the probability of sampling the +// corresponding bin. + +// uniforms: The quantiles to draw, FloatTensor of shape +// `(batch_size, n_samples)`. + +// outputs: On call, this contains the quantiles to draw. It is overwritten +// with the drawn samples. FloatTensor of shape +// `(batch_size, n_samples), where `n_samples are drawn from each +// distribution. + +// eps: A constant preventing division by zero in case empty bins are +// present. + +// Not differentiable + +#ifdef WITH_CUDA +void SamplePdfCuda( + const torch::Tensor& bins, + const torch::Tensor& weights, + const torch::Tensor& outputs, + float eps); +#endif + +void SamplePdfCpu( + const torch::Tensor& bins, + const torch::Tensor& weights, + const torch::Tensor& outputs, + float eps); + +inline void SamplePdf( + const torch::Tensor& bins, + const torch::Tensor& weights, + const torch::Tensor& outputs, + float eps) { + if (bins.is_cuda()) { +#ifdef WITH_CUDA + CHECK_CUDA(weights); + CHECK_CONTIGUOUS_CUDA(outputs); + torch::autograd::increment_version(outputs); + SamplePdfCuda(bins, weights, outputs, eps); + return; +#else + AT_ERROR("Not compiled with GPU support."); +#endif + } + CHECK_CPU(weights); + CHECK_CPU(outputs); + CHECK_CONTIGUOUS(outputs); + SamplePdfCpu(bins, weights, outputs, eps); +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/dispatch.cuh b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/dispatch.cuh new file mode 100644 index 0000000000000000000000000000000000000000..83f3d69ff40907c396e3d175402d5cf4561142b5 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/dispatch.cuh @@ -0,0 +1,357 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. 
+ * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +// This file provides utilities for dispatching to specialized versions of +// functions. This is especially useful for CUDA kernels, since specializing +// them to particular input sizes can often allow the compiler to unroll loops +// and place arrays into registers, which can give huge performance speedups. +// +// As an example, suppose we have the following function which is specialized +// based on a compile-time int64_t value: +// +// template +// struct SquareOffset { +// static void run(T y) { +// T val = x * x + y; +// std::cout << val << std::endl; +// } +// } +// +// This function takes one compile-time argument x, and one run-time argument y. +// We might want to compile specialized versions of this for x=0, x=1, etc and +// then dispatch to the correct one based on the runtime value of x. +// One simple way to achieve this is with a lookup table: +// +// template +// void DispatchSquareOffset(const int64_t x, T y) { +// if (x == 0) { +// SquareOffset::run(y); +// } else if (x == 1) { +// SquareOffset::run(y); +// } else if (x == 2) { +// SquareOffset::run(y); +// } +// } +// +// This function takes both x and y as run-time arguments, and dispatches to +// different specialized versions of SquareOffset based on the run-time value +// of x. This works, but it's tedious and error-prone. If we want to change the +// set of x values for which we provide compile-time specializations, then we +// will need to do a lot of tedius editing of the dispatch function. Also, if we +// want to provide compile-time specializations for another function other than +// SquareOffset, we will need to duplicate the entire lookup table. 
+// +// To solve these problems, we can use the DispatchKernel1D function provided by +// this file instead: +// +// template +// void DispatchSquareOffset(const int64_t x, T y) { +// constexpr int64_t xmin = 0; +// constexpr int64_t xmax = 2; +// DispatchKernel1D(x, y); +// } +// +// DispatchKernel1D uses template metaprogramming to compile specialized +// versions of SquareOffset for all values of x with xmin <= x <= xmax, and +// then dispatches to the correct one based on the run-time value of x. If we +// want to change the range of x values for which SquareOffset is specialized +// at compile-time, then all we have to do is change the values of the +// compile-time constants xmin and xmax. +// +// This file also allows us to similarly dispatch functions that depend on two +// compile-time int64_t values, using the DispatchKernel2D function like this: +// +// template +// struct Sum { +// static void run(T z, T w) { +// T val = x + y + z + w; +// std::cout << val << std::endl; +// } +// } +// +// template +// void DispatchSum(const int64_t x, const int64_t y, int z, int w) { +// constexpr int64_t xmin = 1; +// constexpr int64_t xmax = 3; +// constexpr int64_t ymin = 2; +// constexpr int64_t ymax = 5; +// DispatchKernel2D(x, y, z, w); +// } +// +// Like its 1D counterpart, DispatchKernel2D uses template metaprogramming to +// compile specialized versions of sum for all values of (x, y) with +// xmin <= x <= xmax and ymin <= y <= ymax, then dispatches to the correct +// specialized version based on the runtime values of x and y. + +// Define some helper structs in an anonymous namespace. +namespace { + +// 1D dispatch: general case. +// Kernel is the function we want to dispatch to; it should take a typename and +// an int64_t as template args, and it should define a static void function +// run which takes any number of arguments of any type. 
+// In order to dispatch, we will take an additional template argument curN,
+// and increment it via template recursion until it is equal to the run-time
+// argument N.
+template <
+    template <typename, int64_t>
+    class Kernel,
+    typename T,
+    int64_t minN,
+    int64_t maxN,
+    int64_t curN,
+    typename... Args>
+struct DispatchKernelHelper1D {
+  static void run(const int64_t N, Args... args) {
+    if (curN == N) {
+      // The compile-time value curN is equal to the run-time value N, so we
+      // can dispatch to the run method of the Kernel.
+      Kernel<T, curN>::run(args...);
+    } else if (curN < N) {
+      // Increment curN via template recursion
+      DispatchKernelHelper1D<Kernel, T, minN, maxN, curN + 1, Args...>::run(
+          N, args...);
+    }
+    // We shouldn't get here -- throw an error?
+  }
+};
+
+// 1D dispatch: Specialization when curN == maxN
+// We need this base case to avoid infinite template recursion.
+template <
+    template <typename, int64_t>
+    class Kernel,
+    typename T,
+    int64_t minN,
+    int64_t maxN,
+    typename... Args>
+struct DispatchKernelHelper1D<Kernel, T, minN, maxN, maxN, Args...> {
+  static void run(const int64_t N, Args... args) {
+    if (N == maxN) {
+      Kernel<T, maxN>::run(args...);
+    }
+    // We shouldn't get here -- throw an error?
+  }
+};
+
+// 2D dispatch, general case.
+// This is similar to the 1D case: we take additional template args curN and
+// curM, and increment them via template recursion until they are equal to
+// the run-time values of N and M, at which point we dispatch to the run
+// method of the kernel.
+template <
+    template <typename, int64_t, int64_t>
+    class Kernel,
+    typename T,
+    int64_t minN,
+    int64_t maxN,
+    int64_t curN,
+    int64_t minM,
+    int64_t maxM,
+    int64_t curM,
+    typename... Args>
+struct DispatchKernelHelper2D {
+  static void run(const int64_t N, const int64_t M, Args... args) {
+    if (curN == N && curM == M) {
+      Kernel<T, curN, curM>::run(args...);
+    } else if (curN < N && curM < M) {
+      // Increment both curN and curM. This isn't strictly necessary; we could
+      // just increment one or the other at each step. But this helps to cut
+      // on the number of recursive calls we make.
+      DispatchKernelHelper2D<
+          Kernel,
+          T,
+          minN,
+          maxN,
+          curN + 1,
+          minM,
+          maxM,
+          curM + 1,
+          Args...>::run(N, M, args...);
+    } else if (curN < N) {
+      // Increment curN only
+      DispatchKernelHelper2D<
+          Kernel,
+          T,
+          minN,
+          maxN,
+          curN + 1,
+          minM,
+          maxM,
+          curM,
+          Args...>::run(N, M, args...);
+    } else if (curM < M) {
+      // Increment curM only
+      DispatchKernelHelper2D<
+          Kernel,
+          T,
+          minN,
+          maxN,
+          curN,
+          minM,
+          maxM,
+          curM + 1,
+          Args...>::run(N, M, args...);
+    }
+  }
+};
+
+// 2D dispatch, specialization for curN == maxN
+template <
+    template <typename, int64_t, int64_t>
+    class Kernel,
+    typename T,
+    int64_t minN,
+    int64_t maxN,
+    int64_t minM,
+    int64_t maxM,
+    int64_t curM,
+    typename... Args>
+struct DispatchKernelHelper2D<
+    Kernel,
+    T,
+    minN,
+    maxN,
+    maxN,
+    minM,
+    maxM,
+    curM,
+    Args...> {
+  static void run(const int64_t N, const int64_t M, Args... args) {
+    if (maxN == N && curM == M) {
+      Kernel<T, maxN, curM>::run(args...);
+    } else if (curM < maxM) {
+      DispatchKernelHelper2D<
+          Kernel,
+          T,
+          minN,
+          maxN,
+          maxN,
+          minM,
+          maxM,
+          curM + 1,
+          Args...>::run(N, M, args...);
+    }
+    // We should not get here -- throw an error?
+  }
+};
+
+// 2D dispatch, specialization for curM == maxM
+template <
+    template <typename, int64_t, int64_t>
+    class Kernel,
+    typename T,
+    int64_t minN,
+    int64_t maxN,
+    int64_t curN,
+    int64_t minM,
+    int64_t maxM,
+    typename... Args>
+struct DispatchKernelHelper2D<
+    Kernel,
+    T,
+    minN,
+    maxN,
+    curN,
+    minM,
+    maxM,
+    maxM,
+    Args...> {
+  static void run(const int64_t N, const int64_t M, Args... args) {
+    if (curN == N && maxM == M) {
+      Kernel<T, curN, maxM>::run(args...);
+    } else if (curN < maxN) {
+      DispatchKernelHelper2D<
+          Kernel,
+          T,
+          minN,
+          maxN,
+          curN + 1,
+          minM,
+          maxM,
+          maxM,
+          Args...>::run(N, M, args...);
+    }
+    // We should not get here -- throw an error?
+  }
+};
+
+// 2D dispatch, specialization for curN == maxN, curM == maxM
+template <
+    template <typename, int64_t, int64_t>
+    class Kernel,
+    typename T,
+    int64_t minN,
+    int64_t maxN,
+    int64_t minM,
+    int64_t maxM,
+    typename... Args>
+struct DispatchKernelHelper2D<
+    Kernel,
+    T,
+    minN,
+    maxN,
+    maxN,
+    minM,
+    maxM,
+    maxM,
+    Args...> {
+  static void run(const int64_t N, const int64_t M, Args... args) {
+    if (maxN == N && maxM == M) {
+      Kernel<T, maxN, maxM>::run(args...);
+    }
+    // We should not get here -- throw an error?
+  }
+};
+
+} // namespace
+
+// This is the function we expect users to call to dispatch to 1D functions
+template <
+    template <typename, int64_t>
+    class Kernel,
+    typename T,
+    int64_t minN,
+    int64_t maxN,
+    typename... Args>
+void DispatchKernel1D(const int64_t N, Args... args) {
+  if (minN <= N && N <= maxN) {
+    // Kick off the template recursion by calling the Helper with curN = minN
+    DispatchKernelHelper1D<Kernel, T, minN, maxN, minN, Args...>::run(
+        N, args...);
+  }
+  // Maybe throw an error if we tried to dispatch outside the allowed range?
+}
+
+// This is the function we expect users to call to dispatch to 2D functions
+template <
+    template <typename, int64_t, int64_t>
+    class Kernel,
+    typename T,
+    int64_t minN,
+    int64_t maxN,
+    int64_t minM,
+    int64_t maxM,
+    typename... Args>
+void DispatchKernel2D(const int64_t N, const int64_t M, Args... args) {
+  if (minN <= N && N <= maxN && minM <= M && M <= maxM) {
+    // Kick off the template recursion by calling the Helper with curN = minN
+    // and curM = minM
+    DispatchKernelHelper2D<
+        Kernel,
+        T,
+        minN,
+        maxN,
+        minN,
+        minM,
+        maxM,
+        minM,
+        Args...>::run(N, M, args...);
+  }
+  // Maybe throw an error if we tried to dispatch outside the specified range?
+}
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/float_math.cuh b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/float_math.cuh
new file mode 100644
index 0000000000000000000000000000000000000000..2a0e3e3856895f1c14061d270d1d4fb777a427f8
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/float_math.cuh
@@ -0,0 +1,156 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+#pragma once
+#include <thrust/tuple.h>
+
+// Set epsilon
+#ifdef _MSC_VER
+#define vEpsilon 1e-8f
+#else
+const auto vEpsilon = 1e-8;
+#endif
+
+// Common functions and operators for float2.
+
+// Complex arithmetic is already defined for AMD.
+#if !defined(USE_ROCM)
+__device__ inline float2 operator-(const float2& a, const float2& b) {
+  return make_float2(a.x - b.x, a.y - b.y);
+}
+
+__device__ inline float2 operator+(const float2& a, const float2& b) {
+  return make_float2(a.x + b.x, a.y + b.y);
+}
+
+__device__ inline float2 operator/(const float2& a, const float2& b) {
+  return make_float2(a.x / b.x, a.y / b.y);
+}
+
+__device__ inline float2 operator/(const float2& a, const float b) {
+  return make_float2(a.x / b, a.y / b);
+}
+
+__device__ inline float2 operator*(const float2& a, const float2& b) {
+  return make_float2(a.x * b.x, a.y * b.y);
+}
+
+__device__ inline float2 operator*(const float a, const float2& b) {
+  return make_float2(a * b.x, a * b.y);
+}
+#endif
+
+__device__ inline float FloatMin3(const float a, const float b, const float c) {
+  return fminf(a, fminf(b, c));
+}
+
+__device__ inline float FloatMax3(const float a, const float b, const float c) {
+  return fmaxf(a, fmaxf(b, c));
+}
+
+__device__ inline float dot(const float2& a, const float2& b) {
+  return a.x * b.x + a.y * b.y;
+}
+
+// Backward pass for the dot product.
+// Args:
+//     a, b: Coordinates of two points.
+//     grad_dot: Upstream gradient for the output.
+//
+// Returns:
+//     tuple of gradients for each of the input points:
+//       (float2 grad_a, float2 grad_b)
+//
+__device__ inline thrust::tuple<float2, float2>
+DotBackward(const float2& a, const float2& b, const float& grad_dot) {
+  return thrust::make_tuple(grad_dot * b, grad_dot * a);
+}
+
+__device__ inline float sum(const float2& a) {
+  return a.x + a.y;
+}
+
+// Common functions and operators for float3.
+
+__device__ inline float3 operator-(const float3& a, const float3& b) {
+  return make_float3(a.x - b.x, a.y - b.y, a.z - b.z);
+}
+
+__device__ inline float3 operator+(const float3& a, const float3& b) {
+  return make_float3(a.x + b.x, a.y + b.y, a.z + b.z);
+}
+
+__device__ inline float3 operator/(const float3& a, const float3& b) {
+  return make_float3(a.x / b.x, a.y / b.y, a.z / b.z);
+}
+
+__device__ inline float3 operator/(const float3& a, const float b) {
+  return make_float3(a.x / b, a.y / b, a.z / b);
+}
+
+__device__ inline float3 operator*(const float3& a, const float3& b) {
+  return make_float3(a.x * b.x, a.y * b.y, a.z * b.z);
+}
+
+__device__ inline float3 operator*(const float a, const float3& b) {
+  return make_float3(a * b.x, a * b.y, a * b.z);
+}
+
+__device__ inline float dot(const float3& a, const float3& b) {
+  return a.x * b.x + a.y * b.y + a.z * b.z;
+}
+
+__device__ inline float sum(const float3& a) {
+  return a.x + a.y + a.z;
+}
+
+__device__ inline float3 cross(const float3& a, const float3& b) {
+  return make_float3(
+      a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x);
+}
+
+// Backward pass for the cross product: propagates grad_cross onto both
+// operands of cross(a, b).
+__device__ inline thrust::tuple<float3, float3>
+cross_backward(const float3& a, const float3& b, const float3& grad_cross) {
+  const float grad_ax = -grad_cross.y * b.z + grad_cross.z * b.y;
+  const float grad_ay = grad_cross.x * b.z - grad_cross.z * b.x;
+  const float grad_az = -grad_cross.x * b.y + grad_cross.y * b.x;
+  const float3 grad_a = make_float3(grad_ax, grad_ay, grad_az);
+
+  const float grad_bx = grad_cross.y * a.z - grad_cross.z * a.y;
+  const float grad_by = -grad_cross.x * a.z + grad_cross.z * a.x;
+  const float grad_bz = grad_cross.x * a.y - grad_cross.y * a.x;
+  const float3 grad_b = make_float3(grad_bx, grad_by, grad_bz);
+
+  return thrust::make_tuple(grad_a, grad_b);
+}
+
+__device__ inline float norm(const float3& a) {
+  return sqrt(dot(a, a));
+}
+
+// vEpsilon in the denominator guards against division by zero for the
+// zero vector.
+__device__ inline float3 normalize(const float3& a) {
+  return a / (norm(a) + vEpsilon);
+}
+
+// Backward pass for normalize(a): applies the Jacobian of a / |a| to
+// grad_normz.
+__device__ inline float3 normalize_backward(
+    const float3& a,
+    const float3& grad_normz) {
+  const float a_norm = norm(a) + vEpsilon;
+  const float3 out = a / a_norm;
+
+  const float grad_ax = grad_normz.x * (1.0f - out.x * out.x) / a_norm +
+      grad_normz.y * (-out.x * out.y) / a_norm +
+      grad_normz.z * (-out.x * out.z) / a_norm;
+  const float grad_ay = grad_normz.x * (-out.x * out.y) / a_norm +
+      grad_normz.y * (1.0f - out.y * out.y) / a_norm +
+      grad_normz.z * (-out.y * out.z) / a_norm;
+  const float grad_az = grad_normz.x * (-out.x * out.z) / a_norm +
+      grad_normz.y * (-out.y * out.z) / a_norm +
+      grad_normz.z * (1.0f - out.z * out.z) / a_norm;
+  return make_float3(grad_ax, grad_ay, grad_az);
+}
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/geometry_utils.cuh b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/geometry_utils.cuh
new file mode 100644
index 0000000000000000000000000000000000000000..b18bb5420104674f6b3f1f5e058e2d8e978f7dda
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/geometry_utils.cuh
@@ -0,0 +1,790 @@
+/*
+ * Copyright (c) Meta Platforms, Inc. and affiliates.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree.
+ */
+
+#include <float.h>
+#include <math.h>
+#include <thrust/tuple.h>
+#include "float_math.cuh"
+
+// Set epsilon for preventing floating point errors and division by 0.
+#ifdef _MSC_VER
+#define kEpsilon 1e-8f
+#else
+const auto kEpsilon = 1e-8;
+#endif
+
+// ************************************************************* //
+//                          vec2 utils                           //
+// ************************************************************* //
+
+// Determines whether a point p is on the right side of a 2D line segment
+// given by the end points v0, v1.
+//
+// Args:
+//     p: vec2 Coordinates of a point.
+//     v0, v1: vec2 Coordinates of the end points of the edge.
+//
+// Returns:
+//     area: The signed area of the parallelogram given by the vectors
+//
+//         A = p - v0
+//         B = v1 - v0
+//
+__device__ inline float
+EdgeFunctionForward(const float2& p, const float2& v0, const float2& v1) {
+  return (p.x - v0.x) * (v1.y - v0.y) - (p.y - v0.y) * (v1.x - v0.x);
+}
+
+// Backward pass for the edge function returning partial derivatives for each
+// of the input points.
+//
+// Args:
+//     p: vec2 Coordinates of a point.
+//     v0, v1: vec2 Coordinates of the end points of the edge.
+//     grad_edge: Upstream gradient for output from edge function.
+//
+// Returns:
+//     tuple of gradients for each of the input points:
+//     (float2 d_edge_dp, float2 d_edge_dv0, float2 d_edge_dv1)
+//
+__device__ inline thrust::tuple<float2, float2, float2> EdgeFunctionBackward(
+    const float2& p,
+    const float2& v0,
+    const float2& v1,
+    const float& grad_edge) {
+  const float2 dedge_dp = make_float2(v1.y - v0.y, v0.x - v1.x);
+  const float2 dedge_dv0 = make_float2(p.y - v1.y, v1.x - p.x);
+  const float2 dedge_dv1 = make_float2(v0.y - p.y, p.x - v0.x);
+  return thrust::make_tuple(
+      grad_edge * dedge_dp, grad_edge * dedge_dv0, grad_edge * dedge_dv1);
+}
+
+// The forward pass for computing the barycentric coordinates of a point
+// relative to a triangle.
+//
+// Args:
+//     p: Coordinates of a point.
+//     v0, v1, v2: Coordinates of the triangle vertices.
+//
+// Returns
+//     bary: (w0, w1, w2) barycentric coordinates in the range [0, 1].
+//
+__device__ inline float3 BarycentricCoordsForward(
+    const float2& p,
+    const float2& v0,
+    const float2& v1,
+    const float2& v2) {
+  const float area = EdgeFunctionForward(v2, v0, v1) + kEpsilon;
+  const float w0 = EdgeFunctionForward(p, v1, v2) / area;
+  const float w1 = EdgeFunctionForward(p, v2, v0) / area;
+  const float w2 = EdgeFunctionForward(p, v0, v1) / area;
+  return make_float3(w0, w1, w2);
+}
+
+// The backward pass for computing the barycentric coordinates of a point
+// relative to a triangle.
+//
+// Args:
+//     p: Coordinates of a point.
+//     v0, v1, v2: (x, y) coordinates of the triangle vertices.
+//     grad_bary_upstream: vec3 Upstream gradient for each of the
+//                         barycentric coordinates [grad_w0, grad_w1, grad_w2].
+//
+// Returns
+//     tuple of gradients for each of the triangle vertices:
+//     (float2 grad_v0, float2 grad_v1, float2 grad_v2)
+//
+__device__ inline thrust::tuple<float2, float2, float2, float2>
+BarycentricCoordsBackward(
+    const float2& p,
+    const float2& v0,
+    const float2& v1,
+    const float2& v2,
+    const float3& grad_bary_upstream) {
+  const float area = EdgeFunctionForward(v2, v0, v1) + kEpsilon;
+  const float area2 = pow(area, 2.0f);
+  const float e0 = EdgeFunctionForward(p, v1, v2);
+  const float e1 = EdgeFunctionForward(p, v2, v0);
+  const float e2 = EdgeFunctionForward(p, v0, v1);
+
+  const float grad_w0 = grad_bary_upstream.x;
+  const float grad_w1 = grad_bary_upstream.y;
+  const float grad_w2 = grad_bary_upstream.z;
+
+  // Calculate component of the gradient from each of w0, w1 and w2.
+  // e.g. for w0:
+  // dloss/dw0_v = dl/dw0 * dw0/dw0_top * dw0_top/dv
+  //       + dl/dw0 * dw0/dw0_bot * dw0_bot/dv
+  const float dw0_darea = -e0 / (area2);
+  const float dw0_e0 = 1 / area;
+  const float dloss_d_w0area = grad_w0 * dw0_darea;
+  const float dloss_e0 = grad_w0 * dw0_e0;
+  auto de0_dv = EdgeFunctionBackward(p, v1, v2, dloss_e0);
+  auto dw0area_dv = EdgeFunctionBackward(v2, v0, v1, dloss_d_w0area);
+  const float2 dw0_p = thrust::get<0>(de0_dv);
+  const float2 dw0_dv0 = thrust::get<1>(dw0area_dv);
+  const float2 dw0_dv1 = thrust::get<1>(de0_dv) + thrust::get<2>(dw0area_dv);
+  const float2 dw0_dv2 = thrust::get<2>(de0_dv) + thrust::get<0>(dw0area_dv);
+
+  const float dw1_darea = -e1 / (area2);
+  const float dw1_e1 = 1 / area;
+  const float dloss_d_w1area = grad_w1 * dw1_darea;
+  const float dloss_e1 = grad_w1 * dw1_e1;
+  auto de1_dv = EdgeFunctionBackward(p, v2, v0, dloss_e1);
+  auto dw1area_dv = EdgeFunctionBackward(v2, v0, v1, dloss_d_w1area);
+  const float2 dw1_p = thrust::get<0>(de1_dv);
+  const float2 dw1_dv0 = thrust::get<2>(de1_dv) + thrust::get<1>(dw1area_dv);
+  const float2 dw1_dv1 = thrust::get<2>(dw1area_dv);
+  const float2 dw1_dv2 = thrust::get<1>(de1_dv) + thrust::get<0>(dw1area_dv);
+
+  const float dw2_darea = -e2 / (area2);
+  const float dw2_e2 = 1 / area;
+  const float dloss_d_w2area = grad_w2 * dw2_darea;
+  const float dloss_e2 = grad_w2 * dw2_e2;
+  auto de2_dv = EdgeFunctionBackward(p, v0, v1, dloss_e2);
+  auto dw2area_dv = EdgeFunctionBackward(v2, v0, v1, dloss_d_w2area);
+  const float2 dw2_p = thrust::get<0>(de2_dv);
+  const float2 dw2_dv0 = thrust::get<1>(de2_dv) + thrust::get<1>(dw2area_dv);
+  const float2 dw2_dv1 = thrust::get<2>(de2_dv) + thrust::get<2>(dw2area_dv);
+  const float2 dw2_dv2 = thrust::get<0>(dw2area_dv);
+
+  const float2 dbary_p = dw0_p + dw1_p + dw2_p;
+  const float2 dbary_dv0 = dw0_dv0 + dw1_dv0 + dw2_dv0;
+  const float2 dbary_dv1 = dw0_dv1 + dw1_dv1 + dw2_dv1;
+  const float2 dbary_dv2 = dw0_dv2 + dw1_dv2 + dw2_dv2;
+
+  return thrust::make_tuple(dbary_p, dbary_dv0, dbary_dv1, dbary_dv2);
+}
+
+// Forward pass for applying perspective correction to barycentric coordinates.
+//
+// Args:
+//     bary: Screen-space barycentric coordinates for a point
+//     z0, z1, z2: Camera-space z-coordinates of the triangle vertices
+//
+// Returns
+//     World-space barycentric coordinates
+//
+__device__ inline float3 BarycentricPerspectiveCorrectionForward(
+    const float3& bary,
+    const float z0,
+    const float z1,
+    const float z2) {
+  const float w0_top = bary.x * z1 * z2;
+  const float w1_top = z0 * bary.y * z2;
+  const float w2_top = z0 * z1 * bary.z;
+  const float denom = fmaxf(w0_top + w1_top + w2_top, kEpsilon);
+  const float w0 = w0_top / denom;
+  const float w1 = w1_top / denom;
+  const float w2 = w2_top / denom;
+  return make_float3(w0, w1, w2);
+}
+
+// Backward pass for applying perspective correction to barycentric coordinates.
+//
+// Args:
+//     bary: Screen-space barycentric coordinates for a point
+//     z0, z1, z2: Camera-space z-coordinates of the triangle vertices
+//     grad_out: Upstream gradient of the loss with respect to the corrected
+//               barycentric coordinates.
+//
+// Returns a tuple of:
+//     grad_bary: Downstream gradient of the loss with respect to the
+//                uncorrected barycentric coordinates.
+//     grad_z0, grad_z1, grad_z2: Downstream gradient of the loss with respect
+//                                to the z-coordinates of the triangle verts
+__device__ inline thrust::tuple<float3, float, float, float>
+BarycentricPerspectiveCorrectionBackward(
+    const float3& bary,
+    const float z0,
+    const float z1,
+    const float z2,
+    const float3& grad_out) {
+  // Recompute forward pass
+  const float w0_top = bary.x * z1 * z2;
+  const float w1_top = z0 * bary.y * z2;
+  const float w2_top = z0 * z1 * bary.z;
+  const float denom = fmaxf(w0_top + w1_top + w2_top, kEpsilon);
+
+  // Now do backward pass
+  const float grad_denom_top =
+      -w0_top * grad_out.x - w1_top * grad_out.y - w2_top * grad_out.z;
+  const float grad_denom = grad_denom_top / (denom * denom);
+  const float grad_w0_top = grad_denom + grad_out.x / denom;
+  const float grad_w1_top = grad_denom + grad_out.y / denom;
+  const float grad_w2_top = grad_denom + grad_out.z / denom;
+  const float grad_bary_x = grad_w0_top * z1 * z2;
+  const float grad_bary_y = grad_w1_top * z0 * z2;
+  const float grad_bary_z = grad_w2_top * z0 * z1;
+  const float3 grad_bary = make_float3(grad_bary_x, grad_bary_y, grad_bary_z);
+  const float grad_z0 = grad_w1_top * bary.y * z2 + grad_w2_top * bary.z * z1;
+  const float grad_z1 = grad_w0_top * bary.x * z2 + grad_w2_top * bary.z * z0;
+  const float grad_z2 = grad_w0_top * bary.x * z1 + grad_w1_top * bary.y * z0;
+  return thrust::make_tuple(grad_bary, grad_z0, grad_z1, grad_z2);
+}
+
+// Clip negative barycentric coordinates to 0.0 and renormalize so
+// the barycentric coordinates for a point sum to 1. When the blur_radius
+// is greater than 0, a face will still be recorded as overlapping a pixel
+// if the pixel is outside the face. In this case at least one of the
+// barycentric coordinates for the pixel relative to the face will be negative.
+// Clipping will ensure that the texture and z buffer are interpolated
+// correctly.
+//
+// Args
+//     bary: (w0, w1, w2) barycentric coordinates which can be outside the
+//           range [0, 1].
+//
+// Returns
+//     bary: (w0, w1, w2) barycentric coordinates in the range [0, 1] which
+//     satisfy the condition: sum(w0, w1, w2) = 1.0.
+//
+__device__ inline float3 BarycentricClipForward(const float3 bary) {
+  float3 w = make_float3(0.0f, 0.0f, 0.0f);
+  // Clamp lower bound only
+  w.x = max(bary.x, 0.0);
+  w.y = max(bary.y, 0.0);
+  w.z = max(bary.z, 0.0);
+  float w_sum = w.x + w.y + w.z;
+  w_sum = fmaxf(w_sum, 1e-5);
+  w.x /= w_sum;
+  w.y /= w_sum;
+  w.z /= w_sum;
+
+  return w;
+}
+
+// Backward pass for barycentric coordinate clipping.
+//
+// Args
+//     bary: (w0, w1, w2) barycentric coordinates which can be outside the
+//           range [0, 1].
+//     grad_baryclip_upstream: vec3 Upstream gradient for each of the clipped
+//                             barycentric coordinates [grad_w0, grad_w1, grad_w2].
+//
+// Returns
+//     vec3 of gradients for the unclipped barycentric coordinates:
+//     (grad_w0, grad_w1, grad_w2)
+//
+__device__ inline float3 BarycentricClipBackward(
+    const float3 bary,
+    const float3 grad_baryclip_upstream) {
+  // Redo some of the forward pass calculations
+  float3 w = make_float3(0.0f, 0.0f, 0.0f);
+  // Clamp lower bound only
+  w.x = max(bary.x, 0.0);
+  w.y = max(bary.y, 0.0);
+  w.z = max(bary.z, 0.0);
+  float w_sum = w.x + w.y + w.z;
+
+  float3 grad_bary = make_float3(1.0f, 1.0f, 1.0f);
+  float3 grad_clip = make_float3(1.0f, 1.0f, 1.0f);
+  float3 grad_sum = make_float3(1.0f, 1.0f, 1.0f);
+
+  // Check if sum was clipped.
+  float grad_sum_clip = 1.0f;
+  if (w_sum < 1e-5) {
+    grad_sum_clip = 0.0f;
+    w_sum = 1e-5;
+  }
+
+  // Check if any of bary values have been clipped.
+  if (bary.x < 0.0f) {
+    grad_clip.x = 0.0f;
+  }
+  if (bary.y < 0.0f) {
+    grad_clip.y = 0.0f;
+  }
+  if (bary.z < 0.0f) {
+    grad_clip.z = 0.0f;
+  }
+
+  // Gradients of the sum.
+  grad_sum.x = -w.x / (pow(w_sum, 2.0f)) * grad_sum_clip;
+  grad_sum.y = -w.y / (pow(w_sum, 2.0f)) * grad_sum_clip;
+  grad_sum.z = -w.z / (pow(w_sum, 2.0f)) * grad_sum_clip;
+
+  // Gradients for each of the bary coordinates including the cross terms
+  // from the sum.
+  grad_bary.x = grad_clip.x *
+      (grad_baryclip_upstream.x * (1.0f / w_sum + grad_sum.x) +
+       grad_baryclip_upstream.y * (grad_sum.y) +
+       grad_baryclip_upstream.z * (grad_sum.z));
+
+  grad_bary.y = grad_clip.y *
+      (grad_baryclip_upstream.y * (1.0f / w_sum + grad_sum.y) +
+       grad_baryclip_upstream.x * (grad_sum.x) +
+       grad_baryclip_upstream.z * (grad_sum.z));
+
+  grad_bary.z = grad_clip.z *
+      (grad_baryclip_upstream.z * (1.0f / w_sum + grad_sum.z) +
+       grad_baryclip_upstream.x * (grad_sum.x) +
+       grad_baryclip_upstream.y * (grad_sum.y));
+
+  return grad_bary;
+}
+
+// Return minimum distance between line segment (v1 - v0) and point p.
+//
+// Args:
+//     p: Coordinates of a point.
+//     v0, v1: Coordinates of the end points of the line segment.
+//
+// Returns:
+//     squared distance to the boundary of the triangle.
+//
+__device__ inline float
+PointLineDistanceForward(const float2& p, const float2& a, const float2& b) {
+  const float2 ba = b - a;
+  float l2 = dot(ba, ba);
+  // Guard the degenerate segment (a == b) before dividing by l2.
+  if (l2 <= kEpsilon) {
+    return dot(p - b, p - b);
+  }
+  float t = dot(ba, p - a) / l2;
+  t = __saturatef(t); // clamp to the interval [+0.0, 1.0]
+  const float2 p_proj = a + t * ba;
+  const float2 d = (p_proj - p);
+  return dot(d, d); // squared distance
+}
+
+// Backward pass for point to line distance in 2D.
+//
+// Args:
+//     p: Coordinates of a point.
+//     v0, v1: Coordinates of the end points of the line segment.
+//     grad_dist: Upstream gradient for the distance.
+//
+// Returns:
+//     tuple of gradients for each of the input points:
+//     (float2 grad_p, float2 grad_v0, float2 grad_v1)
+//
+__device__ inline thrust::tuple<float2, float2, float2>
+PointLineDistanceBackward(
+    const float2& p,
+    const float2& v0,
+    const float2& v1,
+    const float& grad_dist) {
+  // Redo some of the forward pass calculations.
+  const float2 v1v0 = v1 - v0;
+  const float2 pv0 = p - v0;
+  const float t_bot = dot(v1v0, v1v0);
+  const float t_top = dot(v1v0, pv0);
+  float tt = t_top / t_bot;
+  tt = __saturatef(tt);
+  const float2 p_proj = (1.0f - tt) * v0 + tt * v1;
+
+  const float2 grad_p = -1.0f * grad_dist * 2.0f * (p_proj - p);
+  const float2 grad_v0 = grad_dist * (1.0f - tt) * 2.0f * (p_proj - p);
+  const float2 grad_v1 = grad_dist * tt * 2.0f * (p_proj - p);
+
+  return thrust::make_tuple(grad_p, grad_v0, grad_v1);
+}
+
+// The forward pass for calculating the shortest distance between a point
+// and a triangle.
+//
+// Args:
+//     p: Coordinates of a point.
+//     v0, v1, v2: Coordinates of the three triangle vertices.
+//
+// Returns:
+//     shortest squared distance from a point to a triangle.
+//
+__device__ inline float PointTriangleDistanceForward(
+    const float2& p,
+    const float2& v0,
+    const float2& v1,
+    const float2& v2) {
+  // Compute distance to all 3 edges of the triangle and return the min.
+  const float e01_dist = PointLineDistanceForward(p, v0, v1);
+  const float e02_dist = PointLineDistanceForward(p, v0, v2);
+  const float e12_dist = PointLineDistanceForward(p, v1, v2);
+  const float edge_dist = fminf(fminf(e01_dist, e02_dist), e12_dist);
+  return edge_dist;
+}
+
+// Backward pass for point triangle distance.
+//
+// Args:
+//     p: Coordinates of a point.
+//     v0, v1, v2: Coordinates of the three triangle vertices.
+//     grad_dist: Upstream gradient for the distance.
+//
+// Returns:
+//     tuple of gradients for each of the triangle vertices:
+//     (float2 grad_v0, float2 grad_v1, float2 grad_v2)
+//
+__device__ inline thrust::tuple<float2, float2, float2, float2>
+PointTriangleDistanceBackward(
+    const float2& p,
+    const float2& v0,
+    const float2& v1,
+    const float2& v2,
+    const float& grad_dist) {
+  // Compute distance to all 3 edges of the triangle.
+  const float e01_dist = PointLineDistanceForward(p, v0, v1);
+  const float e02_dist = PointLineDistanceForward(p, v0, v2);
+  const float e12_dist = PointLineDistanceForward(p, v1, v2);
+
+  // Initialize output tensors.
+  float2 grad_v0 = make_float2(0.0f, 0.0f);
+  float2 grad_v1 = make_float2(0.0f, 0.0f);
+  float2 grad_v2 = make_float2(0.0f, 0.0f);
+  float2 grad_p = make_float2(0.0f, 0.0f);
+
+  // Find which edge is the closest and return PointLineDistanceBackward for
+  // that edge.
+  if (e01_dist <= e02_dist && e01_dist <= e12_dist) {
+    // Closest edge is v1 - v0.
+    auto grad_e01 = PointLineDistanceBackward(p, v0, v1, grad_dist);
+    grad_p = thrust::get<0>(grad_e01);
+    grad_v0 = thrust::get<1>(grad_e01);
+    grad_v1 = thrust::get<2>(grad_e01);
+  } else if (e02_dist <= e01_dist && e02_dist <= e12_dist) {
+    // Closest edge is v2 - v0.
+    auto grad_e02 = PointLineDistanceBackward(p, v0, v2, grad_dist);
+    grad_p = thrust::get<0>(grad_e02);
+    grad_v0 = thrust::get<1>(grad_e02);
+    grad_v2 = thrust::get<2>(grad_e02);
+  } else if (e12_dist <= e01_dist && e12_dist <= e02_dist) {
+    // Closest edge is v2 - v1.
+    auto grad_e12 = PointLineDistanceBackward(p, v1, v2, grad_dist);
+    grad_p = thrust::get<0>(grad_e12);
+    grad_v1 = thrust::get<1>(grad_e12);
+    grad_v2 = thrust::get<2>(grad_e12);
+  }
+
+  return thrust::make_tuple(grad_p, grad_v0, grad_v1, grad_v2);
+}
+
+// ************************************************************* //
+//                          vec3 utils                           //
+// ************************************************************* //
+
+// Computes the area of a triangle (v0, v1, v2).
+//
+// Args:
+//     v0, v1, v2: vec3 coordinates of the triangle vertices
+//
+// Returns
+//     area: float: The area of the triangle
+//
+__device__ inline float
+AreaOfTriangle(const float3& v0, const float3& v1, const float3& v2) {
+  float3 p0 = v1 - v0;
+  float3 p1 = v2 - v0;
+
+  // compute the hypotenuse of the cross product (p0 x p1)
+  float dd = hypot(
+      p0.y * p1.z - p0.z * p1.y,
+      hypot(p0.z * p1.x - p0.x * p1.z, p0.x * p1.y - p0.y * p1.x));
+
+  return dd / 2.0;
+}
+
+// Computes the barycentric coordinates of a point p relative
+// to a triangle (v0, v1, v2), i.e. p = w0 * v0 + w1 * v1 + w2 * v2
+// s.t. w0 + w1 + w2 = 1.0
+//
+// NOTE that this function assumes that p lives on the space spanned
+// by (v0, v1, v2).
+// TODO(gkioxari) explicitly check whether p is coplanar with (v0, v1, v2)
+// and throw an error if check fails
+//
+// Args:
+//     p: vec3 coordinates of a point
+//     v0, v1, v2: vec3 coordinates of the triangle vertices
+//
+// Returns
+//     bary: (w0, w1, w2) barycentric coordinates
+//
+__device__ inline float3 BarycentricCoords3Forward(
+    const float3& p,
+    const float3& v0,
+    const float3& v1,
+    const float3& v2) {
+  float3 p0 = v1 - v0;
+  float3 p1 = v2 - v0;
+  float3 p2 = p - v0;
+
+  const float d00 = dot(p0, p0);
+  const float d01 = dot(p0, p1);
+  const float d11 = dot(p1, p1);
+  const float d20 = dot(p2, p0);
+  const float d21 = dot(p2, p1);
+
+  const float denom = d00 * d11 - d01 * d01 + kEpsilon;
+  const float w1 = (d11 * d20 - d01 * d21) / denom;
+  const float w2 = (d00 * d21 - d01 * d20) / denom;
+  const float w0 = 1.0f - w1 - w2;
+
+  return make_float3(w0, w1, w2);
+}
+
+// Checks whether the point p is inside the triangle (v0, v1, v2).
+// A point is inside the triangle, if all barycentric coordinates
+// wrt the triangle are >= 0 & <= 1.
+// If the triangle is degenerate, aka line or point, then return False.
+//
+// NOTE that this function assumes that p lives on the space spanned
+// by (v0, v1, v2).
+// TODO(gkioxari) explicitly check whether p is coplanar with (v0, v1, v2)
+// and throw an error if check fails
+//
+// Args:
+//     p: vec3 coordinates of a point
+//     v0, v1, v2: vec3 coordinates of the triangle vertices
+//     min_triangle_area: triangles less than this size are considered
+//     points/lines, IsInsideTriangle returns False
+//
+// Returns:
+//     inside: bool indicating whether p is inside triangle
+//
+__device__ inline bool IsInsideTriangle(
+    const float3& p,
+    const float3& v0,
+    const float3& v1,
+    const float3& v2,
+    const double min_triangle_area) {
+  bool inside;
+  if (AreaOfTriangle(v0, v1, v2) < min_triangle_area) {
+    inside = false;
+  } else {
+    float3 bary = BarycentricCoords3Forward(p, v0, v1, v2);
+    bool x_in = 0.0f <= bary.x && bary.x <= 1.0f;
+    bool y_in = 0.0f <= bary.y && bary.y <= 1.0f;
+    bool z_in = 0.0f <= bary.z && bary.z <= 1.0f;
+    inside = x_in && y_in && z_in;
+  }
+  return inside;
+}
+
+// Computes the minimum squared Euclidean distance between the point p
+// and the segment spanned by (v0, v1).
+// To find this we parametrize p as: x(t) = v0 + t * (v1 - v0)
+// and find t which minimizes (x(t) - p) ^ 2.
+// Note that p does not need to live in the space spanned by (v0, v1)
+//
+// Args:
+//     p: vec3 coordinates of a point
+//     v0, v1: vec3 coordinates of start and end of segment
+//
+// Returns:
+//     dist: the minimum squared distance of p from segment (v0, v1)
+//
+
+__device__ inline float
+PointLine3DistanceForward(const float3& p, const float3& v0, const float3& v1) {
+  const float3 v1v0 = v1 - v0;
+  const float3 pv0 = p - v0;
+  const float t_bot = dot(v1v0, v1v0);
+  const float t_top = dot(pv0, v1v0);
+  // if t_bot small, then v0 == v1, set tt to 0.
+  float tt = (t_bot < kEpsilon) ? 0.0f : (t_top / t_bot);
+
+  tt = __saturatef(tt); // clamps to [0, 1]
+
+  const float3 p_proj = v0 + tt * v1v0;
+  const float3 diff = p - p_proj;
+  const float dist = dot(diff, diff);
+  return dist;
+}
+
+// Backward function of the minimum squared Euclidean distance between the point
+// p and the line segment (v0, v1).
+//
+// Args:
+//     p: vec3 coordinates of a point
+//     v0, v1: vec3 coordinates of start and end of segment
+//     grad_dist: Float of the gradient wrt dist
+//
+// Returns:
+//     tuple of gradients for the point and line segment (v0, v1):
+//     (float3 grad_p, float3 grad_v0, float3 grad_v1)
+
+__device__ inline thrust::tuple<float3, float3, float3>
+PointLine3DistanceBackward(
+    const float3& p,
+    const float3& v0,
+    const float3& v1,
+    const float& grad_dist) {
+  const float3 v1v0 = v1 - v0;
+  const float3 pv0 = p - v0;
+  const float t_bot = dot(v1v0, v1v0);
+  const float t_top = dot(v1v0, pv0);
+
+  float3 grad_p = make_float3(0.0f, 0.0f, 0.0f);
+  float3 grad_v0 = make_float3(0.0f, 0.0f, 0.0f);
+  float3 grad_v1 = make_float3(0.0f, 0.0f, 0.0f);
+
+  const float tt = t_top / t_bot;
+
+  if (t_bot < kEpsilon) {
+    // if t_bot small, then v0 == v1,
+    // and dist = 0.5 * dot(pv0, pv0) + 0.5 * dot(pv1, pv1)
+    grad_p = grad_dist * 2.0f * pv0;
+    grad_v0 = -0.5f * grad_p;
+    grad_v1 = grad_v0;
+  } else if (tt < 0.0f) {
+    grad_p = grad_dist * 2.0f * pv0;
+    grad_v0 = -1.0f * grad_p;
+    // no gradients wrt v1
+  } else if (tt > 1.0f) {
+    grad_p = grad_dist * 2.0f * (p - v1);
+    grad_v1 = -1.0f * grad_p;
+    // no gradients wrt v0
+  } else {
+    const float3 p_proj = v0 + tt * v1v0;
+    const float3 diff = p - p_proj;
+    const float3 grad_base = grad_dist * 2.0f * diff;
+    grad_p = grad_base - dot(grad_base, v1v0) * v1v0 / t_bot;
+    const float3 dtt_v0 = (-1.0f * v1v0 - pv0 + 2.0f * tt * v1v0) / t_bot;
+    grad_v0 = (-1.0f + tt) * grad_base - dot(grad_base, v1v0) * dtt_v0;
+    const float3 dtt_v1 = (pv0 - 2.0f * tt * v1v0) / t_bot;
+    grad_v1 = -dot(grad_base, v1v0) * dtt_v1 - tt * grad_base;
+  }
+
+  return thrust::make_tuple(grad_p, grad_v0, grad_v1);
+}
+
+// Computes the squared distance of a point p relative to a triangle (v0, v1,
+// v2). If the point's projection p0 on the plane spanned by (v0, v1, v2) is
+// inside the triangle with vertices (v0, v1, v2), then the returned value is
+// the squared distance of p to its projection p0. Otherwise, the returned value
+// is the smallest squared distance of p from the line segments (v0, v1), (v0,
+// v2) and (v1, v2).
+//
+// Args:
+//     p: vec3 coordinates of a point
+//     v0, v1, v2: vec3 coordinates of the triangle vertices
+//     min_triangle_area: triangles less than this size are considered
+//     points/lines, IsInsideTriangle returns False
+//
+// Returns:
+//     dist: Float of the squared distance
+//
+
+__device__ inline float PointTriangle3DistanceForward(
+    const float3& p,
+    const float3& v0,
+    const float3& v1,
+    const float3& v2,
+    const double min_triangle_area) {
+  float3 normal = cross(v2 - v0, v1 - v0);
+  const float norm_normal = norm(normal);
+  normal = normalize(normal);
+
+  // p0 is the projection of p on the plane spanned by (v0, v1, v2)
+  // i.e. p0 = p + t * normal, s.t. (p0 - v0) is orthogonal to normal
+  const float t = dot(v0 - p, normal);
+  const float3 p0 = p + t * normal;
+
+  bool is_inside = IsInsideTriangle(p0, v0, v1, v2, min_triangle_area);
+  float dist = 0.0f;
+
+  if ((is_inside) && (norm_normal > kEpsilon)) {
+    // if projection p0 is inside triangle spanned by (v0, v1, v2)
+    // then distance is equal to norm(p0 - p)^2
+    dist = t * t;
+  } else {
+    const float e01 = PointLine3DistanceForward(p, v0, v1);
+    const float e02 = PointLine3DistanceForward(p, v0, v2);
+    const float e12 = PointLine3DistanceForward(p, v1, v2);
+
+    dist = (e01 > e02) ? e02 : e01;
+    dist = (dist > e12) ? e12 : dist;
+  }
+
+  return dist;
+}
+
+// The backward pass for computing the squared distance of a point
+// to the triangle (v0, v1, v2).
//
// Args:
//     p: xyz coordinates of a point
//     v0, v1, v2: xyz coordinates of the triangle vertices
//     grad_dist: Float of the gradient wrt dist
//     min_triangle_area: triangles less than this size are considered
//     points/lines, IsInsideTriangle returns False
//
// Returns:
//     tuple of gradients for the point and triangle:
//        (float3 grad_p, float3 grad_v0, float3 grad_v1, float3 grad_v2)
//

__device__ inline thrust::tuple<float3, float3, float3, float3>
PointTriangle3DistanceBackward(
    const float3& p,
    const float3& v0,
    const float3& v1,
    const float3& v2,
    const float& grad_dist,
    const double min_triangle_area) {
  const float3 v2v0 = v2 - v0;
  const float3 v1v0 = v1 - v0;
  const float3 v0p = v0 - p;
  // raw_normal is the unnormalized triangle normal; its length is used
  // below as a degeneracy check (it equals twice the triangle area).
  float3 raw_normal = cross(v2v0, v1v0);
  const float norm_normal = norm(raw_normal);
  float3 normal = normalize(raw_normal);

  // p0 is the projection of p on the plane spanned by (v0, v1, v2)
  // i.e. p0 = p + t * normal, s.t. (p0 - v0) is orthogonal to normal
  const float t = dot(v0 - p, normal);
  const float3 p0 = p + t * normal;
  const float3 diff = t * normal;

  bool is_inside = IsInsideTriangle(p0, v0, v1, v2, min_triangle_area);

  float3 grad_p = make_float3(0.0f, 0.0f, 0.0f);
  float3 grad_v0 = make_float3(0.0f, 0.0f, 0.0f);
  float3 grad_v1 = make_float3(0.0f, 0.0f, 0.0f);
  float3 grad_v2 = make_float3(0.0f, 0.0f, 0.0f);

  if ((is_inside) && (norm_normal > kEpsilon)) {
    // Projection lies inside the triangle: forward dist = t^2, so
    // differentiate through t = dot(v0 - p, normal) and normalization.
    // derivative of dist wrt p
    grad_p = -2.0f * grad_dist * t * normal;
    // derivative of dist wrt normal
    const float3 grad_normal = 2.0f * grad_dist * t * (v0p + diff);
    // derivative of dist wrt raw_normal
    const float3 grad_raw_normal = normalize_backward(raw_normal, grad_normal);
    // derivative of dist wrt v2v0 and v1v0
    const auto grad_cross = cross_backward(v2v0, v1v0, grad_raw_normal);
    const float3 grad_cross_v2v0 = thrust::get<0>(grad_cross);
    const float3 grad_cross_v1v0 = thrust::get<1>(grad_cross);
    grad_v0 =
        grad_dist * 2.0f * t * normal - (grad_cross_v2v0 + grad_cross_v1v0);
    grad_v1 = grad_cross_v1v0;
    grad_v2 = grad_cross_v2v0;
  } else {
    // Degenerate triangle or projection outside: the forward pass took the
    // minimum of three point-to-edge distances, so route the gradient
    // through the closest edge only (matching forward's tie-breaking).
    const float e01 = PointLine3DistanceForward(p, v0, v1);
    const float e02 = PointLine3DistanceForward(p, v0, v2);
    const float e12 = PointLine3DistanceForward(p, v1, v2);

    if ((e01 <= e02) && (e01 <= e12)) {
      // e01 is smallest
      const auto grads = PointLine3DistanceBackward(p, v0, v1, grad_dist);
      grad_p = thrust::get<0>(grads);
      grad_v0 = thrust::get<1>(grads);
      grad_v1 = thrust::get<2>(grads);
    } else if ((e02 <= e01) && (e02 <= e12)) {
      // e02 is smallest
      const auto grads = PointLine3DistanceBackward(p, v0, v2, grad_dist);
      grad_p = thrust::get<0>(grads);
      grad_v0 = thrust::get<1>(grads);
      grad_v2 = thrust::get<2>(grads);
    } else if ((e12 <= e01) && (e12 <= e02)) {
      // e12 is smallest
      const auto grads = PointLine3DistanceBackward(p, v1, v2, grad_dist);
      grad_p = thrust::get<0>(grads);
      grad_v1 = thrust::get<1>(grads);
      grad_v2 = thrust::get<2>(grads);
    }
  }

  return thrust::make_tuple(grad_p, grad_v0, grad_v1, grad_v2);
}
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/geometry_utils.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/geometry_utils.h
new file mode 100644
index 0000000000000000000000000000000000000000..ad9f7ff3f34dde7b119ea708eb0901cb826794d7
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/geometry_utils.h
@@ -0,0 +1,823 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

// NOTE(review): the angle-bracket targets of the first four includes were
// lost in extraction; restored to the standard headers this file's code
// needs (std::min/std::max, pow/hypot/fmaxf, std::tuple) — confirm
// against upstream.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <tuple>
#include "vec2.h"
#include "vec3.h"

// Set epsilon for preventing floating point errors and division by 0.
+const auto kEpsilon = 1e-8; + +// Determines whether a point p is on the right side of a 2D line segment +// given by the end points v0, v1. +// +// Args: +// p: vec2 Coordinates of a point. +// v0, v1: vec2 Coordinates of the end points of the edge. +// +// Returns: +// area: The signed area of the parallelogram given by the vectors +// A = p - v0 +// B = v1 - v0 +// +// v1 ________ +// /\ / +// A / \ / +// / \ / +// v0 /______\/ +// B p +// +// The area can also be interpreted as the cross product A x B. +// If the sign of the area is positive, the point p is on the +// right side of the edge. Negative area indicates the point is on +// the left side of the edge. i.e. for an edge v1 - v0: +// +// v1 +// / +// / +// - / + +// / +// / +// v0 +// +template +T EdgeFunctionForward(const vec2& p, const vec2& v0, const vec2& v1) { + const T edge = (p.x - v0.x) * (v1.y - v0.y) - (p.y - v0.y) * (v1.x - v0.x); + return edge; +} + +// Backward pass for the edge function returning partial dervivatives for each +// of the input points. +// +// Args: +// p: vec2 Coordinates of a point. +// v0, v1: vec2 Coordinates of the end points of the edge. +// grad_edge: Upstream gradient for output from edge function. +// +// Returns: +// tuple of gradients for each of the input points: +// (vec2 d_edge_dp, vec2 d_edge_dv0, vec2 d_edge_dv1) +// +template +inline std::tuple, vec2, vec2> EdgeFunctionBackward( + const vec2& p, + const vec2& v0, + const vec2& v1, + const T grad_edge) { + const vec2 dedge_dp(v1.y - v0.y, v0.x - v1.x); + const vec2 dedge_dv0(p.y - v1.y, v1.x - p.x); + const vec2 dedge_dv1(v0.y - p.y, p.x - v0.x); + return std::make_tuple( + grad_edge * dedge_dp, grad_edge * dedge_dv0, grad_edge * dedge_dv1); +} + +// The forward pass for computing the barycentric coordinates of a point +// relative to a triangle. 
// Ref:
// https://www.scratchapixel.com/lessons/3d-basic-rendering/ray-tracing-rendering-a-triangle/barycentric-coordinates
//
// Args:
//     p: Coordinates of a point.
//     v0, v1, v2: Coordinates of the triangle vertices.
//
// Returns
//     bary: (w0, w1, w2) barycentric coordinates in the range [0, 1].
//
template <typename T>
vec3<T> BarycentricCoordinatesForward(
    const vec2<T>& p,
    const vec2<T>& v0,
    const vec2<T>& v1,
    const vec2<T>& v2) {
  // Each coordinate is the ratio of a sub-triangle's signed area to the
  // full triangle's area; kEpsilon guards the division for degenerate
  // (zero-area) triangles.
  const T area = EdgeFunctionForward(v2, v0, v1) + kEpsilon;
  const T w0 = EdgeFunctionForward(p, v1, v2) / area;
  const T w1 = EdgeFunctionForward(p, v2, v0) / area;
  const T w2 = EdgeFunctionForward(p, v0, v1) / area;
  return vec3<T>(w0, w1, w2);
}

// The backward pass for computing the barycentric coordinates of a point
// relative to a triangle.
//
// Args:
//     p: Coordinates of a point.
//     v0, v1, v2: (x, y) coordinates of the triangle vertices.
//     grad_bary_upstream: vec3<T> Upstream gradient for each of the
//                         barycentric coordinates [grad_w0, grad_w1, grad_w2].
//
// Returns
//     tuple of gradients for each of the triangle vertices:
//     (vec2<T> grad_v0, vec2<T> grad_v1, vec2<T> grad_v2)
//
template <typename T>
inline std::tuple<vec2<T>, vec2<T>, vec2<T>, vec2<T>> BarycentricCoordsBackward(
    const vec2<T>& p,
    const vec2<T>& v0,
    const vec2<T>& v1,
    const vec2<T>& v2,
    const vec3<T>& grad_bary_upstream) {
  // Recompute the forward quantities: full-triangle area and the three
  // sub-areas e0, e1, e2 (each w_i = e_i / area).
  const T area = EdgeFunctionForward(v2, v0, v1) + kEpsilon;
  const T area2 = pow(area, 2.0f);
  const T area_inv = 1.0f / area;
  const T e0 = EdgeFunctionForward(p, v1, v2);
  const T e1 = EdgeFunctionForward(p, v2, v0);
  const T e2 = EdgeFunctionForward(p, v0, v1);

  const T grad_w0 = grad_bary_upstream.x;
  const T grad_w1 = grad_bary_upstream.y;
  const T grad_w2 = grad_bary_upstream.z;

  // Calculate component of the gradient from each of w0, w1 and w2.
  // e.g. for w0:
  // dloss/dw0_v = dl/dw0 * dw0/dw0_top * dw0_top/dv
  //       + dl/dw0 * dw0/dw0_bot * dw0_bot/dv
  // (w_i = numerator e_i over denominator area; both depend on the verts.)
  const T dw0_darea = -e0 / (area2);
  const T dw0_e0 = area_inv;
  const T dloss_d_w0area = grad_w0 * dw0_darea;
  const T dloss_e0 = grad_w0 * dw0_e0;
  auto de0_dv = EdgeFunctionBackward(p, v1, v2, dloss_e0);
  auto dw0area_dv = EdgeFunctionBackward(v2, v0, v1, dloss_d_w0area);
  // Note the index shuffling: EdgeFunctionBackward returns grads in
  // (p, first-vertex, second-vertex) order, which must be mapped back to
  // (v0, v1, v2) for each permuted call.
  const vec2<T> dw0_p = std::get<0>(de0_dv);
  const vec2<T> dw0_dv0 = std::get<1>(dw0area_dv);
  const vec2<T> dw0_dv1 = std::get<1>(de0_dv) + std::get<2>(dw0area_dv);
  const vec2<T> dw0_dv2 = std::get<2>(de0_dv) + std::get<0>(dw0area_dv);

  const T dw1_darea = -e1 / (area2);
  const T dw1_e1 = area_inv;
  const T dloss_d_w1area = grad_w1 * dw1_darea;
  const T dloss_e1 = grad_w1 * dw1_e1;
  auto de1_dv = EdgeFunctionBackward(p, v2, v0, dloss_e1);
  auto dw1area_dv = EdgeFunctionBackward(v2, v0, v1, dloss_d_w1area);
  const vec2<T> dw1_p = std::get<0>(de1_dv);
  const vec2<T> dw1_dv0 = std::get<2>(de1_dv) + std::get<1>(dw1area_dv);
  const vec2<T> dw1_dv1 = std::get<2>(dw1area_dv);
  const vec2<T> dw1_dv2 = std::get<1>(de1_dv) + std::get<0>(dw1area_dv);

  const T dw2_darea = -e2 / (area2);
  const T dw2_e2 = area_inv;
  const T dloss_d_w2area = grad_w2 * dw2_darea;
  const T dloss_e2 = grad_w2 * dw2_e2;
  auto de2_dv = EdgeFunctionBackward(p, v0, v1, dloss_e2);
  auto dw2area_dv = EdgeFunctionBackward(v2, v0, v1, dloss_d_w2area);
  const vec2<T> dw2_p = std::get<0>(de2_dv);
  const vec2<T> dw2_dv0 = std::get<1>(de2_dv) + std::get<1>(dw2area_dv);
  const vec2<T> dw2_dv1 = std::get<2>(de2_dv) + std::get<2>(dw2area_dv);
  const vec2<T> dw2_dv2 = std::get<0>(dw2area_dv);

  // Sum the three per-coordinate contributions for each input.
  const vec2<T> dbary_p = dw0_p + dw1_p + dw2_p;
  const vec2<T> dbary_dv0 = dw0_dv0 + dw1_dv0 + dw2_dv0;
  const vec2<T> dbary_dv1 = dw0_dv1 + dw1_dv1 + dw2_dv1;
  const vec2<T> dbary_dv2 = dw0_dv2 + dw1_dv2 + dw2_dv2;

  return std::make_tuple(dbary_p, dbary_dv0, dbary_dv1, dbary_dv2);
}

// Forward pass for applying perspective correction to barycentric
// coordinates.
//
// Args:
//     bary: Screen-space barycentric coordinates for a point
//     z0, z1, z2: Camera-space z-coordinates of the triangle vertices
//
// Returns
//     World-space barycentric coordinates
//
template <typename T>
inline vec3<T> BarycentricPerspectiveCorrectionForward(
    const vec3<T>& bary,
    const T z0,
    const T z1,
    const T z2) {
  // Weight each screen-space coordinate by the product of the OTHER two
  // vertices' depths, then renormalize so the three sum to 1.
  const T w0_top = bary.x * z1 * z2;
  const T w1_top = bary.y * z0 * z2;
  const T w2_top = bary.z * z0 * z1;
  // kEpsilon floor prevents division by zero for degenerate depths.
  const T denom = std::max<T>(w0_top + w1_top + w2_top, kEpsilon);
  const T w0 = w0_top / denom;
  const T w1 = w1_top / denom;
  const T w2 = w2_top / denom;
  return vec3<T>(w0, w1, w2);
}

// Backward pass for applying perspective correction to barycentric
// coordinates.
//
// Args:
//     bary: Screen-space barycentric coordinates for a point
//     z0, z1, z2: Camera-space z-coordinates of the triangle vertices
//     grad_out: Upstream gradient of the loss with respect to the corrected
//               barycentric coordinates.
//
// Returns a tuple of:
//     grad_bary: Downstream gradient of the loss with respect to the the
//                uncorrected barycentric coordinates.
//     grad_z0, grad_z1, grad_z2: Downstream gradient of the loss with respect
//                                to the z-coordinates of the triangle verts
template <typename T>
inline std::tuple<vec3<T>, T, T, T> BarycentricPerspectiveCorrectionBackward(
    const vec3<T>& bary,
    const T z0,
    const T z1,
    const T z2,
    const vec3<T>& grad_out) {
  // Recompute forward pass
  const T w0_top = bary.x * z1 * z2;
  const T w1_top = bary.y * z0 * z2;
  const T w2_top = bary.z * z0 * z1;
  const T denom = std::max<T>(w0_top + w1_top + w2_top, kEpsilon);

  // Now do backward pass
  // Each output w_i = w_i_top / denom, and denom is itself a function of
  // all three numerators, hence the shared grad_denom term below.
  const T grad_denom_top =
      -w0_top * grad_out.x - w1_top * grad_out.y - w2_top * grad_out.z;
  const T grad_denom = grad_denom_top / (denom * denom);
  const T grad_w0_top = grad_denom + grad_out.x / denom;
  const T grad_w1_top = grad_denom + grad_out.y / denom;
  const T grad_w2_top = grad_denom + grad_out.z / denom;
  const T grad_bary_x = grad_w0_top * z1 * z2;
  const T grad_bary_y = grad_w1_top * z0 * z2;
  const T grad_bary_z = grad_w2_top * z0 * z1;
  const vec3<T> grad_bary(grad_bary_x, grad_bary_y, grad_bary_z);
  // Each z_i appears in the numerators of the other two coordinates.
  const T grad_z0 = grad_w1_top * bary.y * z2 + grad_w2_top * bary.z * z1;
  const T grad_z1 = grad_w0_top * bary.x * z2 + grad_w2_top * bary.z * z0;
  const T grad_z2 = grad_w0_top * bary.x * z1 + grad_w1_top * bary.y * z0;
  return std::make_tuple(grad_bary, grad_z0, grad_z1, grad_z2);
}

// Clip negative barycentric coordinates to 0.0 and renormalize so
// the barycentric coordinates for a point sum to 1. When the blur_radius
// is greater than 0, a face will still be recorded as overlapping a pixel
// if the pixel is outside the face. In this case at least one of the
// barycentric coordinates for the pixel relative to the face will be negative.
// Clipping will ensure that the texture and z buffer are interpolated
// correctly.
//
// Args
//     bary: (w0, w1, w2) barycentric coordinates which can contain values < 0.
//
// Returns
//     bary: (w0, w1, w2) barycentric coordinates in the range [0, 1] which
//           satisfy the condition: sum(w0, w1, w2) = 1.0.
//
template <typename T>
vec3<T> BarycentricClipForward(const vec3<T> bary) {
  vec3<T> w(0.0f, 0.0f, 0.0f);
  // Only clamp negative values to 0.0.
  // No need to clamp values > 1.0 as they will be renormalized.
  w.x = std::max(bary.x, 0.0f);
  w.y = std::max(bary.y, 0.0f);
  w.z = std::max(bary.z, 0.0f);
  float w_sum = w.x + w.y + w.z;
  // Floor the normalizer so a fully-clipped (all-negative) input does not
  // divide by zero.
  w_sum = std::fmaxf(w_sum, 1e-5);
  w.x /= w_sum;
  w.y /= w_sum;
  w.z /= w_sum;
  return w;
}

// Backward pass for barycentric coordinate clipping.
//
// Args
//     bary: (w0, w1, w2) barycentric coordinates which can contain values < 0.
//     grad_baryclip_upstream: vec3<T> Upstream gradient for each of the clipped
//                             barycentric coordinates [grad_w0, grad_w1, grad_w2].
//
// Returns
//     vec3<T> of gradients for the unclipped barycentric coordinates:
//     (grad_w0, grad_w1, grad_w2)
//
template <typename T>
vec3<T> BarycentricClipBackward(
    const vec3<T> bary,
    const vec3<T> grad_baryclip_upstream) {
  // Redo some of the forward pass calculations
  vec3<T> w(0.0f, 0.0f, 0.0f);
  w.x = std::max(bary.x, 0.0f);
  w.y = std::max(bary.y, 0.0f);
  w.z = std::max(bary.z, 0.0f);
  float w_sum = w.x + w.y + w.z;

  vec3<T> grad_bary(1.0f, 1.0f, 1.0f);
  vec3<T> grad_clip(1.0f, 1.0f, 1.0f);
  vec3<T> grad_sum(1.0f, 1.0f, 1.0f);

  // Check if the sum was clipped; if so the normalizer is a constant and
  // carries no gradient.
  float grad_sum_clip = 1.0f;
  if (w_sum < 1e-5) {
    grad_sum_clip = 0.0f;
    w_sum = 1e-5;
  }

  // Check if any of the bary coordinates have been clipped.
  // Only negative values are clamped to 0.0; a clamped coordinate gets
  // zero gradient.
  if (bary.x < 0.0f) {
    grad_clip.x = 0.0f;
  }
  if (bary.y < 0.0f) {
    grad_clip.y = 0.0f;
  }
  if (bary.z < 0.0f) {
    grad_clip.z = 0.0f;
  }

  // Gradients of the sum (each output w_i / w_sum depends on every input
  // through the normalizer).
  grad_sum.x = -w.x / (pow(w_sum, 2.0f)) * grad_sum_clip;
  grad_sum.y = -w.y / (pow(w_sum, 2.0f)) * grad_sum_clip;
  grad_sum.z = -w.z / (pow(w_sum, 2.0f)) * grad_sum_clip;

  // Gradients for each of the bary coordinates including the cross terms
  // from the sum.
  grad_bary.x = grad_clip.x *
      (grad_baryclip_upstream.x * (1.0f / w_sum + grad_sum.x) +
       grad_baryclip_upstream.y * (grad_sum.y) +
       grad_baryclip_upstream.z * (grad_sum.z));

  grad_bary.y = grad_clip.y *
      (grad_baryclip_upstream.y * (1.0f / w_sum + grad_sum.y) +
       grad_baryclip_upstream.x * (grad_sum.x) +
       grad_baryclip_upstream.z * (grad_sum.z));

  grad_bary.z = grad_clip.z *
      (grad_baryclip_upstream.z * (1.0f / w_sum + grad_sum.z) +
       grad_baryclip_upstream.x * (grad_sum.x) +
       grad_baryclip_upstream.y * (grad_sum.y));

  return grad_bary;
}

// Calculate minimum distance between a line segment (v1 - v0) and point p.
//
// Args:
//     p: Coordinates of a point.
//     v0, v1: Coordinates of the end points of the line segment.
//
// Returns:
//     squared distance of the point to the line.
//
// Consider the line extending the segment - this can be parameterized as:
// v0 + t (v1 - v0).
//
// First find the projection of point p onto the line. It falls where:
// t = [(p - v0) . (v1 - v0)] / |v1 - v0|^2
// where . is the dot product.
//
// The parameter t is clamped from [0, 1] to handle points outside the
// segment (v1 - v0).
//
// Once the projection of the point on the segment is known, the distance from
// p to the projection gives the minimum distance to the segment.
//
template <typename T>
T PointLineDistanceForward(
    const vec2<T>& p,
    const vec2<T>& v0,
    const vec2<T>& v1) {
  const vec2<T> v1v0 = v1 - v0;
  const T l2 = dot(v1v0, v1v0);
  if (l2 <= kEpsilon) {
    // Degenerate segment (v0 == v1): distance to the single point.
    return dot(p - v1, p - v1);
  }

  const T t = dot(v1v0, p - v0) / l2;
  // Clamp the projection parameter so the closest point stays on the
  // segment, not the infinite line.
  const T tt = std::min(std::max(t, 0.00f), 1.00f);
  const vec2<T> p_proj = v0 + tt * v1v0;
  return dot(p - p_proj, p - p_proj);
}

// 3D analogue of PointLineDistanceForward: squared distance from point p
// to segment (v0, v1).
template <typename T>
T PointLine3DistanceForward(
    const vec3<T>& p,
    const vec3<T>& v0,
    const vec3<T>& v1) {
  const vec3<T> v1v0 = v1 - v0;
  const T l2 = dot(v1v0, v1v0);
  if (l2 <= kEpsilon) {
    // Degenerate segment (v0 == v1): distance to the single point.
    return dot(p - v1, p - v1);
  }

  const T t = dot(v1v0, p - v0) / l2;
  const T tt = std::min(std::max(t, 0.00f), 1.00f);
  const vec3<T> p_proj = v0 + tt * v1v0;
  return dot(p - p_proj, p - p_proj);
}

// Backward pass for point to line distance in 2D.
//
// Args:
//     p: Coordinates of a point.
//     v0, v1: Coordinates of the end points of the line segment.
//     grad_dist: Upstream gradient for the distance.
//
// Returns:
//     tuple of gradients for each of the input points:
//     (vec2<T> grad_p, vec2<T> grad_v0, vec2<T> grad_v1)
//
template <typename T>
inline std::tuple<vec2<T>, vec2<T>, vec2<T>> PointLineDistanceBackward(
    const vec2<T>& p,
    const vec2<T>& v0,
    const vec2<T>& v1,
    const T& grad_dist) {
  // Redo some of the forward pass calculations.
  const vec2<T> v1v0 = v1 - v0;
  const vec2<T> pv0 = p - v0;
  const T t_bot = dot(v1v0, v1v0);
  const T t_top = dot(v1v0, pv0);
  const T t = t_top / t_bot;
  const T tt = std::min(std::max(t, 0.00f), 1.00f);
  const vec2<T> p_proj = (1.0f - tt) * v0 + tt * v1;

  // Treats tt as a constant (sub-gradient of the clamp); gradient splits
  // between v0 and v1 according to the interpolation weight.
  const vec2<T> grad_v0 = grad_dist * (1.0f - tt) * 2.0f * (p_proj - p);
  const vec2<T> grad_v1 = grad_dist * tt * 2.0f * (p_proj - p);
  const vec2<T> grad_p = -1.0f * grad_dist * 2.0f * (p_proj - p);

  return std::make_tuple(grad_p, grad_v0, grad_v1);
}

// Backward pass for point to line distance in 3D; unlike the 2D version
// this differentiates through the projection parameter t as well.
template <typename T>
std::tuple<vec3<T>, vec3<T>, vec3<T>> PointLine3DistanceBackward(
    const vec3<T>& p,
    const vec3<T>& v0,
    const vec3<T>& v1,
    const T& grad_dist) {
  const vec3<T> v1v0 = v1 - v0;
  const vec3<T> pv0 = p - v0;
  const T t_bot = dot(v1v0, v1v0);
  const T t_top = dot(v1v0, pv0);

  vec3<T> grad_p{0.0f, 0.0f, 0.0f};
  vec3<T> grad_v0{0.0f, 0.0f, 0.0f};
  vec3<T> grad_v1{0.0f, 0.0f, 0.0f};

  const T tt = t_top / t_bot;

  if (t_bot < kEpsilon) {
    // if t_bot small, then v0 == v1,
    // and dist = 0.5 * dot(pv0, pv0) + 0.5 * dot(pv1, pv1)
    // NOTE(review): the forward pass returns dot(p - v1, p - v1) in this
    // case, not the symmetric average assumed here — confirm against the
    // intended subgradient convention upstream.
    grad_p = grad_dist * 2.0f * pv0;
    grad_v0 = -0.5f * grad_p;
    grad_v1 = grad_v0;
  } else if (tt < 0.0f) {
    // Closest point is the endpoint v0.
    grad_p = grad_dist * 2.0f * pv0;
    grad_v0 = -1.0f * grad_p;
    // no gradients wrt v1
  } else if (tt > 1.0f) {
    // Closest point is the endpoint v1.
    grad_p = grad_dist * 2.0f * (p - v1);
    grad_v1 = -1.0f * grad_p;
    // no gradients wrt v0
  } else {
    // Closest point is interior to the segment: differentiate through
    // both the projection point and tt itself.
    const vec3<T> p_proj = v0 + tt * v1v0;
    const vec3<T> diff = p - p_proj;
    const vec3<T> grad_base = grad_dist * 2.0f * diff;
    grad_p = grad_base - dot(grad_base, v1v0) * v1v0 / t_bot;
    const vec3<T> dtt_v0 = (-1.0f * v1v0 - pv0 + 2.0f * tt * v1v0) / t_bot;
    grad_v0 = (-1.0f + tt) * grad_base - dot(grad_base, v1v0) * dtt_v0;
    const vec3<T> dtt_v1 = (pv0 - 2.0f * tt * v1v0) / t_bot;
    grad_v1 = -dot(grad_base, v1v0) * dtt_v1 - tt * grad_base;
  }

  return std::make_tuple(grad_p, grad_v0, grad_v1);
}

// The forward pass for calculating the shortest distance between a point
// and a triangle.
// Ref: https://www.randygaul.net/2014/07/23/distance-point-to-line-segment/
//
// Args:
//     p: Coordinates of a point.
//     v0, v1, v2: Coordinates of the three triangle vertices.
//
// Returns:
//     shortest squared distance from a point to a triangle.
//
//
template <typename T>
T PointTriangleDistanceForward(
    const vec2<T>& p,
    const vec2<T>& v0,
    const vec2<T>& v1,
    const vec2<T>& v2) {
  // Compute distance of point to 3 edges of the triangle and return the
  // minimum value.
  const T e01_dist = PointLineDistanceForward(p, v0, v1);
  const T e02_dist = PointLineDistanceForward(p, v0, v2);
  const T e12_dist = PointLineDistanceForward(p, v1, v2);
  const T edge_dist = std::min(std::min(e01_dist, e02_dist), e12_dist);

  return edge_dist;
}

// Backward pass for point triangle distance.
//
// Args:
//     p: Coordinates of a point.
//     v0, v1, v2: Coordinates of the three triangle vertices.
//     grad_dist: Upstream gradient for the distance.
//
// Returns:
//     tuple of gradients for each of the triangle vertices:
//     (vec2<T> grad_v0, vec2<T> grad_v1, vec2<T> grad_v2)
//
template <typename T>
inline std::tuple<vec2<T>, vec2<T>, vec2<T>, vec2<T>>
PointTriangleDistanceBackward(
    const vec2<T>& p,
    const vec2<T>& v0,
    const vec2<T>& v1,
    const vec2<T>& v2,
    const T& grad_dist) {
  // Compute distance to all 3 edges of the triangle.
  const T e01_dist = PointLineDistanceForward(p, v0, v1);
  const T e02_dist = PointLineDistanceForward(p, v0, v2);
  const T e12_dist = PointLineDistanceForward(p, v1, v2);

  // Initialize output tensors.
  vec2<T> grad_v0(0.0f, 0.0f);
  vec2<T> grad_v1(0.0f, 0.0f);
  vec2<T> grad_v2(0.0f, 0.0f);
  vec2<T> grad_p(0.0f, 0.0f);

  // Find which edge is the closest and return PointLineDistanceBackward for
  // that edge; only the closest edge receives gradient, matching the min
  // taken in the forward pass.
  if (e01_dist <= e02_dist && e01_dist <= e12_dist) {
    // Closest edge is v1 - v0.
    auto grad_e01 = PointLineDistanceBackward(p, v0, v1, grad_dist);
    grad_p = std::get<0>(grad_e01);
    grad_v0 = std::get<1>(grad_e01);
    grad_v1 = std::get<2>(grad_e01);
  } else if (e02_dist <= e01_dist && e02_dist <= e12_dist) {
    // Closest edge is v2 - v0.
    auto grad_e02 = PointLineDistanceBackward(p, v0, v2, grad_dist);
    grad_p = std::get<0>(grad_e02);
    grad_v0 = std::get<1>(grad_e02);
    grad_v2 = std::get<2>(grad_e02);
  } else if (e12_dist <= e01_dist && e12_dist <= e02_dist) {
    // Closest edge is v2 - v1.
    auto grad_e12 = PointLineDistanceBackward(p, v1, v2, grad_dist);
    grad_p = std::get<0>(grad_e12);
    grad_v1 = std::get<1>(grad_e12);
    grad_v2 = std::get<2>(grad_e12);
  }

  return std::make_tuple(grad_p, grad_v0, grad_v1, grad_v2);
}

// Computes the area of a triangle (v0, v1, v2).
// Args:
//     v0, v1, v2: vec3 coordinates of the triangle vertices
//
// Returns:
//     area: float: the area of the triangle
//
template <typename T>
T AreaOfTriangle(const vec3<T>& v0, const vec3<T>& v1, const vec3<T>& v2) {
  vec3<T> p0 = v1 - v0;
  vec3<T> p1 = v2 - v0;

  // compute the hypotenuse of the cross product (p0 x p1); its magnitude
  // equals twice the triangle area. Nested hypot keeps the computation
  // robust against overflow/underflow of the squared terms.
  float dd = std::hypot(
      p0.y * p1.z - p0.z * p1.y,
      std::hypot(p0.z * p1.x - p0.x * p1.z, p0.x * p1.y - p0.y * p1.x));

  return dd / 2.0;
}

// Computes the squared distance of a point p relative to a triangle (v0, v1,
// v2). If the point's projection p0 on the plane spanned by (v0, v1, v2) is
// inside the triangle with vertices (v0, v1, v2), then the returned value is
// the squared distance of p to its projection p0. Otherwise, the returned value
// is the smallest squared distance of p from the line segments (v0, v1), (v0,
// v2) and (v1, v2).
+// +// Args: +// p: vec3 coordinates of a point +// v0, v1, v2: vec3 coordinates of the triangle vertices +// +// Returns: +// dist: Float of the squared distance +// + +const float vEpsilon = 1e-8; + +template +vec3 BarycentricCoords3Forward( + const vec3& p, + const vec3& v0, + const vec3& v1, + const vec3& v2) { + vec3 p0 = v1 - v0; + vec3 p1 = v2 - v0; + vec3 p2 = p - v0; + + const T d00 = dot(p0, p0); + const T d01 = dot(p0, p1); + const T d11 = dot(p1, p1); + const T d20 = dot(p2, p0); + const T d21 = dot(p2, p1); + + const T denom = d00 * d11 - d01 * d01 + kEpsilon; + const T w1 = (d11 * d20 - d01 * d21) / denom; + const T w2 = (d00 * d21 - d01 * d20) / denom; + const T w0 = 1.0f - w1 - w2; + + return vec3(w0, w1, w2); +} + +// Checks whether the point p is inside the triangle (v0, v1, v2). +// A point is inside the triangle, if all barycentric coordinates +// wrt the triangle are >= 0 & <= 1. +// If the triangle is degenerate, aka line or point, then return False. +// +// NOTE that this function assumes that p lives on the space spanned +// by (v0, v1, v2). 
// TODO(gkioxari) explicitly check whether p is coplanar with (v0, v1, v2)
// and throw an error if check fails
//
// Args:
//     p: vec3 coordinates of a point
//     v0, v1, v2: vec3 coordinates of the triangle vertices
//     min_triangle_area: triangles less than this size are considered
//     points/lines, IsInsideTriangle returns False
//
// Returns:
//     inside: bool indicating whether p is inside triangle
//
template <typename T>
static bool IsInsideTriangle(
    const vec3<T>& p,
    const vec3<T>& v0,
    const vec3<T>& v1,
    const vec3<T>& v2,
    const double min_triangle_area) {
  bool inside;
  if (AreaOfTriangle(v0, v1, v2) < min_triangle_area) {
    // Degenerate triangle: containment is undefined, report outside.
    inside = 0;
  } else {
    vec3<T> bary = BarycentricCoords3Forward(p, v0, v1, v2);
    bool x_in = 0.0f <= bary.x && bary.x <= 1.0f;
    bool y_in = 0.0f <= bary.y && bary.y <= 1.0f;
    bool z_in = 0.0f <= bary.z && bary.z <= 1.0f;
    inside = x_in && y_in && z_in;
  }
  return inside;
}

// Forward pass: squared distance from point p to triangle (v0, v1, v2).
// Uses the plane projection when it falls inside the triangle, otherwise
// falls back to the minimum of the three point-to-edge distances.
template <typename T>
T PointTriangle3DistanceForward(
    const vec3<T>& p,
    const vec3<T>& v0,
    const vec3<T>& v1,
    const vec3<T>& v2,
    const double min_triangle_area) {
  vec3<T> normal = cross(v2 - v0, v1 - v0);
  const T norm_normal = norm(normal);
  // vEpsilon keeps the normalization finite for degenerate triangles.
  normal = normal / (norm_normal + vEpsilon);

  // p0 is the projection of p on the plane spanned by (v0, v1, v2)
  // i.e. p0 = p + t * normal, s.t. (p0 - v0) is orthogonal to normal
  const T t = dot(v0 - p, normal);
  const vec3<T> p0 = p + t * normal;

  bool is_inside = IsInsideTriangle(p0, v0, v1, v2, min_triangle_area);
  T dist = 0.0f;

  if ((is_inside) && (norm_normal > kEpsilon)) {
    // if projection p0 is inside triangle spanned by (v0, v1, v2)
    // then distance is equal to norm(p0 - p)^2
    dist = t * t;
  } else {
    const float e01 = PointLine3DistanceForward(p, v0, v1);
    const float e02 = PointLine3DistanceForward(p, v0, v2);
    const float e12 = PointLine3DistanceForward(p, v1, v2);

    // Take the minimum of the three edge distances.
    dist = (e01 > e02) ? e02 : e01;
    dist = (dist > e12) ? e12 : dist;
  }

  return dist;
}

// Backward of the cross product c = a x b: given upstream grad_cross,
// returns (dloss/da, dloss/db).
template <typename T>
std::tuple<vec3<T>, vec3<T>>
cross_backward(const vec3<T>& a, const vec3<T>& b, const vec3<T>& grad_cross) {
  const float grad_ax = -grad_cross.y * b.z + grad_cross.z * b.y;
  const float grad_ay = grad_cross.x * b.z - grad_cross.z * b.x;
  const float grad_az = -grad_cross.x * b.y + grad_cross.y * b.x;
  const vec3<T> grad_a = vec3<T>(grad_ax, grad_ay, grad_az);

  const float grad_bx = grad_cross.y * a.z - grad_cross.z * a.y;
  const float grad_by = -grad_cross.x * a.z + grad_cross.z * a.x;
  const float grad_bz = grad_cross.x * a.y - grad_cross.y * a.x;
  const vec3<T> grad_b = vec3<T>(grad_bx, grad_by, grad_bz);

  return std::make_tuple(grad_a, grad_b);
}

// Backward of vector normalization out = a / |a|: given upstream
// grad_normz, returns dloss/da via the Jacobian (I - out out^T) / |a|.
template <typename T>
vec3<T> normalize_backward(const vec3<T>& a, const vec3<T>& grad_normz) {
  const float a_norm = norm(a) + vEpsilon;
  const vec3<T> out = a / a_norm;

  const float grad_ax = grad_normz.x * (1.0f - out.x * out.x) / a_norm +
      grad_normz.y * (-out.x * out.y) / a_norm +
      grad_normz.z * (-out.x * out.z) / a_norm;
  const float grad_ay = grad_normz.x * (-out.x * out.y) / a_norm +
      grad_normz.y * (1.0f - out.y * out.y) / a_norm +
      grad_normz.z * (-out.y * out.z) / a_norm;
  const float grad_az = grad_normz.x * (-out.x * out.z) / a_norm +
      grad_normz.y * (-out.y * out.z) / a_norm +
      grad_normz.z * (1.0f - out.z * out.z) / a_norm;
  return vec3<T>(grad_ax, grad_ay, grad_az);
}

// The backward pass for computing the squared distance of a point
// to the triangle (v0, v1, v2).
//
// Args:
//     p: xyz coordinates of a point
//     v0, v1, v2: xyz coordinates of the triangle vertices
//     grad_dist: Float of the gradient wrt dist
//     min_triangle_area: triangles less than this size are considered
//     points/lines, IsInsideTriangle returns False
//
// Returns:
//     tuple of gradients for the point and triangle:
//        (float3 grad_p, float3 grad_v0, float3 grad_v1, float3 grad_v2)
//

template <typename T>
static std::tuple<vec3<T>, vec3<T>, vec3<T>, vec3<T>>
PointTriangle3DistanceBackward(
    const vec3<T>& p,
    const vec3<T>& v0,
    const vec3<T>& v1,
    const vec3<T>& v2,
    const T& grad_dist,
    const double min_triangle_area) {
  const vec3<T> v2v0 = v2 - v0;
  const vec3<T> v1v0 = v1 - v0;
  const vec3<T> v0p = v0 - p;
  // Unnormalized triangle normal; its length doubles as the degeneracy
  // check below.
  vec3<T> raw_normal = cross(v2v0, v1v0);
  const T norm_normal = norm(raw_normal);
  vec3<T> normal = raw_normal / (norm_normal + vEpsilon);

  // p0 is the projection of p on the plane spanned by (v0, v1, v2)
  // i.e. p0 = p + t * normal, s.t. (p0 - v0) is orthogonal to normal
  const T t = dot(v0 - p, normal);
  const vec3<T> p0 = p + t * normal;
  const vec3<T> diff = t * normal;

  bool is_inside = IsInsideTriangle(p0, v0, v1, v2, min_triangle_area);

  vec3<T> grad_p(0.0f, 0.0f, 0.0f);
  vec3<T> grad_v0(0.0f, 0.0f, 0.0f);
  vec3<T> grad_v1(0.0f, 0.0f, 0.0f);
  vec3<T> grad_v2(0.0f, 0.0f, 0.0f);

  if ((is_inside) && (norm_normal > kEpsilon)) {
    // Forward dist = t^2; differentiate through t = dot(v0 - p, normal)
    // and the normalization of raw_normal.
    // derivative of dist wrt p
    grad_p = -2.0f * grad_dist * t * normal;
    // derivative of dist wrt normal
    const vec3<T> grad_normal = 2.0f * grad_dist * t * (v0p + diff);
    // derivative of dist wrt raw_normal
    const vec3<T> grad_raw_normal = normalize_backward(raw_normal, grad_normal);
    // derivative of dist wrt v2v0 and v1v0
    const auto grad_cross = cross_backward(v2v0, v1v0, grad_raw_normal);
    const vec3<T> grad_cross_v2v0 = std::get<0>(grad_cross);
    const vec3<T> grad_cross_v1v0 = std::get<1>(grad_cross);
    grad_v0 =
        grad_dist * 2.0f * t * normal - (grad_cross_v2v0 + grad_cross_v1v0);
    grad_v1 = grad_cross_v1v0;
    grad_v2 = grad_cross_v2v0;
  } else {
    // Projection outside the triangle (or degenerate triangle): the
    // forward pass used the closest edge, so only that edge gets gradient.
    const T e01 = PointLine3DistanceForward(p, v0, v1);
    const T e02 = PointLine3DistanceForward(p, v0, v2);
    const T e12 = PointLine3DistanceForward(p, v1, v2);

    if ((e01 <= e02) && (e01 <= e12)) {
      // e01 is smallest
      const auto grads = PointLine3DistanceBackward(p, v0, v1, grad_dist);
      grad_p = std::get<0>(grads);
      grad_v0 = std::get<1>(grads);
      grad_v1 = std::get<2>(grads);
    } else if ((e02 <= e01) && (e02 <= e12)) {
      // e02 is smallest
      const auto grads = PointLine3DistanceBackward(p, v0, v2, grad_dist);
      grad_p = std::get<0>(grads);
      grad_v0 = std::get<1>(grads);
      grad_v2 = std::get<2>(grads);
    } else if ((e12 <= e01) && (e12 <= e02)) {
      // e12 is smallest
      const auto grads = PointLine3DistanceBackward(p, v1, v2, grad_dist);
      grad_p = std::get<0>(grads);
      grad_v1 = std::get<1>(grads);
      grad_v2 = std::get<2>(grads);
    }
  }

  return std::make_tuple(grad_p, grad_v0, grad_v1, grad_v2);
}
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/index_utils.cuh b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/index_utils.cuh
new file mode 100644
index 0000000000000000000000000000000000000000..74bca270f344b0d0b0bc19c45fc7373eb5875a14
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/index_utils.cuh
@@ -0,0 +1,224 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

// This converts dynamic array lookups into static array lookups, for small
// arrays up to size 32.
//
// Suppose we have a small thread-local array:
//
// float vals[10];
//
// Ideally we should only index this array using static indices:
//
// for (int i = 0; i < 10; ++i) vals[i] = i * i;
//
// If we do so, then the CUDA compiler may be able to place the array into
// registers, which can have a big performance improvement. However if we
// access the array dynamically, the the compiler may force the array into
// local memory, which has the same latency as global memory.
//
// These functions convert dynamic array access into static array access
// using a brute-force lookup table. It can be used like this:
//
// float vals[10];
// int idx = 3;
// float val = 3.14f;
// RegisterIndexUtils<float, 10>::set(vals, idx, val);
// float val2 = RegisterIndexUtils<float, 10>::get(vals, idx);
//
// The implementation is based on fbcuda/RegisterUtils.cuh:
// https://github.com/facebook/fbcuda/blob/master/RegisterUtils.cuh
// To avoid depending on the entire library, we just reimplement these two
// functions. The fbcuda implementation is a bit more sophisticated, and uses
// the preprocessor to generate switch statements that go up to N for each
// value of N. We are lazy and just have a giant explicit switch statement.
//
// We might be able to use a template metaprogramming approach similar to
// DispatchKernel1D for this. However DispatchKernel1D is intended to be used
// for dispatching to the correct CUDA kernel on the host, while this is
// is intended to run on the device. I was concerned that a metaprogramming
// approach for this might lead to extra function calls at runtime if the
// compiler fails to optimize them away, which could be very slow on device.
// However I didn't actually benchmark or test this.
template <typename T, int N>
struct RegisterIndexUtils {
  // Read arr[idx] through a compile-time-indexed switch so arr can stay in
  // registers. Out-of-range idx returns a value-initialized T.
  __device__ __forceinline__ static T get(const T arr[N], int idx) {
    if (idx < 0 || idx >= N)
      return T();
    switch (idx) {
      case 0:
        return arr[0];
      case 1:
        return arr[1];
      case 2:
        return arr[2];
      case 3:
        return arr[3];
      case 4:
        return arr[4];
      case 5:
        return arr[5];
      case 6:
        return arr[6];
      case 7:
        return arr[7];
      case 8:
        return arr[8];
      case 9:
        return arr[9];
      case 10:
        return arr[10];
      case 11:
        return arr[11];
      case 12:
        return arr[12];
      case 13:
        return arr[13];
      case 14:
        return arr[14];
      case 15:
        return arr[15];
      case 16:
        return arr[16];
      case 17:
        return arr[17];
      case 18:
        return arr[18];
      case 19:
        return arr[19];
      case 20:
        return arr[20];
      case 21:
        return arr[21];
      case 22:
        return arr[22];
      case 23:
        return arr[23];
      case 24:
        return arr[24];
      case 25:
        return arr[25];
      case 26:
        return arr[26];
      case 27:
        return arr[27];
      case 28:
        return arr[28];
      case 29:
        return arr[29];
      case 30:
        return arr[30];
      case 31:
        return arr[31];
    };
    // Unreachable for N <= 32 given the range check above.
    return T();
  }

  // Write arr[idx] = val through the same static-index switch.
  // Out-of-range idx is a silent no-op.
  __device__ __forceinline__ static void set(T arr[N], int idx, T val) {
    if (idx < 0 || idx >= N)
      return;
    switch (idx) {
      case 0:
        arr[0] = val;
        break;
      case 1:
        arr[1] = val;
        break;
      case 2:
        arr[2] = val;
        break;
      case 3:
        arr[3] = val;
        break;
      case 4:
        arr[4] = val;
        break;
      case 5:
        arr[5] = val;
        break;
      case 6:
        arr[6] = val;
        break;
      case 7:
        arr[7] = val;
        break;
      case 8:
        arr[8] = val;
        break;
      case 9:
        arr[9] = val;
        break;
      case 10:
        arr[10] = val;
        break;
      case 11:
        arr[11] = val;
        break;
      case 12:
        arr[12] = val;
        break;
      case 13:
        arr[13] = val;
        break;
      case 14:
        arr[14] = val;
        break;
      case 15:
        arr[15] = val;
        break;
      case 16:
        arr[16] = val;
        break;
      case 17:
        arr[17] = val;
        break;
      case 18:
        arr[18] = val;
        break;
      case 19:
        arr[19] = val;
        break;
      case 20:
        arr[20] = val;
        break;
      case 21:
        arr[21] = val;
        break;
      case 22:
        arr[22] = val;
        break;
      case 23:
        arr[23] = val;
        break;
      case 24:
        arr[24] = val;
        break;
      case 25:
        arr[25] = val;
        break;
      case 26:
        arr[26] = val;
        break;
      case 27:
        arr[27] = val;
        break;
      case 28:
        arr[28] = val;
        break;
      case 29:
        arr[29] = val;
        break;
      case 30:
        arr[30] = val;
        break;
      case 31:
        arr[31] = val;
        break;
    }
  }
};
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/mink.cuh b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/mink.cuh
new file mode 100644
index 0000000000000000000000000000000000000000..c7858f58c8b92c12f5e889c10fe6e98a622d82b7
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/mink.cuh
@@ -0,0 +1,165 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#pragma once
#define MINK_H

#include "index_utils.cuh"

// A data structure to keep track of the smallest K keys seen so far as well
// as their associated values, intended to be used in device code.
// This data structure doesn't allocate any memory; keys and values are stored
// in arrays passed to the constructor.
//
// The implementation is generic; it can be used for any key type that supports
// the < operator, and can be used with any value type.
//
// Example usage:
//
// float keys[K];
// int values[K];
// MinK<float, int> mink(keys, values, K);
// for (...)
{ +// // Produce some key and value from somewhere +// mink.add(key, value); +// } +// mink.sort(); +// +// Now keys and values store the smallest K keys seen so far and the values +// associated to these keys: +// +// for (int k = 0; k < K; ++k) { +// float key_k = keys[k]; +// int value_k = values[k]; +// } +template +class MinK { + public: + // Constructor. + // + // Arguments: + // keys: Array in which to store keys + // values: Array in which to store values + // K: How many values to keep track of + __device__ MinK(key_t* keys, value_t* vals, int K) + : keys(keys), vals(vals), K(K), _size(0) {} + + // Try to add a new key and associated value to the data structure. If the key + // is one of the smallest K seen so far then it will be kept; otherwise it + // it will not be kept. + // + // This takes O(1) operations if the new key is not kept, or if the structure + // currently contains fewer than K elements. Otherwise this takes O(K) time. + // + // Arguments: + // key: The key to add + // val: The value associated to the key + __device__ __forceinline__ void add(const key_t& key, const value_t& val) { + if (_size < K) { + keys[_size] = key; + vals[_size] = val; + if (_size == 0 || key > max_key) { + max_key = key; + max_idx = _size; + } + _size++; + } else if (key < max_key) { + keys[max_idx] = key; + vals[max_idx] = val; + max_key = key; + for (int k = 0; k < K; ++k) { + key_t cur_key = keys[k]; + if (cur_key > max_key) { + max_key = cur_key; + max_idx = k; + } + } + } + } + + // Get the number of items currently stored in the structure. + // This takes O(1) time. + __device__ __forceinline__ int size() { + return _size; + } + + // Sort the items stored in the structure using bubble sort. + // This takes O(K^2) time. 
+ __device__ __forceinline__ void sort() { + for (int i = 0; i < _size - 1; ++i) { + for (int j = 0; j < _size - i - 1; ++j) { + if (keys[j + 1] < keys[j]) { + key_t key = keys[j]; + value_t val = vals[j]; + keys[j] = keys[j + 1]; + vals[j] = vals[j + 1]; + keys[j + 1] = key; + vals[j + 1] = val; + } + } + } + } + + private: + key_t* keys; + value_t* vals; + int K; + int _size; + key_t max_key; + int max_idx; +}; + +// This is a version of MinK that only touches the arrays using static indexing +// via RegisterIndexUtils. If the keys and values are stored in thread-local +// arrays, then this may allow the compiler to place them in registers for +// fast access. +// +// This has the same API as RegisterMinK, but doesn't support sorting. +// We found that sorting via RegisterIndexUtils gave very poor performance, +// and suspect it may have prevented the compiler from placing the arrays +// into registers. +template +class RegisterMinK { + public: + __device__ RegisterMinK(key_t* keys, value_t* vals) + : keys(keys), vals(vals), _size(0) {} + + __device__ __forceinline__ void add(const key_t& key, const value_t& val) { + if (_size < K) { + RegisterIndexUtils::set(keys, _size, key); + RegisterIndexUtils::set(vals, _size, val); + if (_size == 0 || key > max_key) { + max_key = key; + max_idx = _size; + } + _size++; + } else if (key < max_key) { + RegisterIndexUtils::set(keys, max_idx, key); + RegisterIndexUtils::set(vals, max_idx, val); + max_key = key; + for (int k = 0; k < K; ++k) { + key_t cur_key = RegisterIndexUtils::get(keys, k); + if (cur_key > max_key) { + max_key = cur_key; + max_idx = k; + } + } + } + } + + __device__ __forceinline__ int size() { + return _size; + } + + private: + key_t* keys; + value_t* vals; + int _size; + key_t max_key; + int max_idx; +}; diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/pytorch3d_cutils.h 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/pytorch3d_cutils.h new file mode 100644 index 0000000000000000000000000000000000000000..c1856ecaa66bb4c41cc51176bdbc7c8324e1e3c6 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/pytorch3d_cutils.h @@ -0,0 +1,21 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once +#include + +#define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor.") +#define CHECK_CONTIGUOUS(x) \ + TORCH_CHECK(x.is_contiguous(), #x " must be contiguous.") +#define CHECK_CONTIGUOUS_CUDA(x) \ + CHECK_CUDA(x); \ + CHECK_CONTIGUOUS(x) +#define CHECK_CPU(x) \ + TORCH_CHECK( \ + x.device().type() == torch::kCPU, \ + "Cannot use CPU implementation: " #x " not on CPU.") diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/vec2.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/vec2.h new file mode 100644 index 0000000000000000000000000000000000000000..f4550f918394c06ecbcca5db013e4f0f014ee914 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/vec2.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once +#include + +// A fixed-sized vector with basic arithmetic operators useful for +// representing 2D coordinates. +// TODO: switch to Eigen if more functionality is needed. 
+ +template < + typename T, + typename = std::enable_if_t< + std::is_same::value || std::is_same::value>> +struct vec2 { + T x, y; + typedef T scalar_t; + vec2(T x, T y) : x(x), y(y) {} +}; + +template +inline vec2 operator+(const vec2& a, const vec2& b) { + return vec2(a.x + b.x, a.y + b.y); +} + +template +inline vec2 operator-(const vec2& a, const vec2& b) { + return vec2(a.x - b.x, a.y - b.y); +} + +template +inline vec2 operator*(const T a, const vec2& b) { + return vec2(a * b.x, a * b.y); +} + +template +inline vec2 operator/(const vec2& a, const T b) { + if (b == 0.0) { + AT_ERROR( + "denominator in vec2 division is 0"); // prevent divide by 0 errors. + } + return vec2(a.x / b, a.y / b); +} + +template +inline T dot(const vec2& a, const vec2& b) { + return a.x * b.x + a.y * b.y; +} + +template +inline T norm(const vec2& a, const vec2& b) { + const vec2 ba = b - a; + return sqrt(dot(ba, ba)); +} + +template +std::ostream& operator<<(std::ostream& os, const vec2& v) { + os << "vec2(" << v.x << ", " << v.y << ")"; + return os; +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/vec3.h b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/vec3.h new file mode 100644 index 0000000000000000000000000000000000000000..fc37bf5c0dc0f041e58bd801dbf4b7d36eb1979b --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/vec3.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#pragma once + +// A fixed-sized vector with basic arithmetic operators useful for +// representing 3D coordinates. +// TODO: switch to Eigen if more functionality is needed. 
+ +template < + typename T, + typename = std::enable_if_t< + std::is_same::value || std::is_same::value>> +struct vec3 { + T x, y, z; + typedef T scalar_t; + vec3(T x, T y, T z) : x(x), y(y), z(z) {} +}; + +template +inline vec3 operator+(const vec3& a, const vec3& b) { + return vec3(a.x + b.x, a.y + b.y, a.z + b.z); +} + +template +inline vec3 operator-(const vec3& a, const vec3& b) { + return vec3(a.x - b.x, a.y - b.y, a.z - b.z); +} + +template +inline vec3 operator/(const vec3& a, const T b) { + if (b == 0.0) { + AT_ERROR( + "denominator in vec3 division is 0"); // prevent divide by 0 errors. + } + return vec3(a.x / b, a.y / b, a.z / b); +} + +template +inline vec3 operator*(const T a, const vec3& b) { + return vec3(a * b.x, a * b.y, a * b.z); +} + +template +inline vec3 operator*(const vec3& a, const vec3& b) { + return vec3(a.x * b.x, a.y * b.y, a.z * b.z); +} + +template +inline T dot(const vec3& a, const vec3& b) { + return a.x * b.x + a.y * b.y + a.z * b.z; +} + +template +inline vec3 cross(const vec3& a, const vec3& b) { + return vec3( + a.y * b.z - a.z * b.y, a.z * b.x - a.x * b.z, a.x * b.y - a.y * b.x); +} + +template +inline T norm(const vec3& a) { + return sqrt(dot(a, a)); +} + +template +std::ostream& operator<<(std::ostream& os, const vec3& v) { + os << "vec3(" << v.x << ", " << v.y << ", " << v.z << ")"; + return os; +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/warp_reduce.cuh b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/warp_reduce.cuh new file mode 100644 index 0000000000000000000000000000000000000000..172f67b26711c1095d92561de3110922cb68061c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/csrc/utils/warp_reduce.cuh @@ -0,0 +1,120 @@ +/* + * Copyright (c) Meta Platforms, Inc. and affiliates. + * All rights reserved. 
+ * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. + */ + +#include +#include +#include + +// Helper functions WarpReduceMin and WarpReduceMax used in .cu files +// Starting in Volta, instructions are no longer synchronous within a warp. +// We need to call __syncwarp() to sync the 32 threads in the warp +// instead of all the threads in the block. + +template +__device__ void +WarpReduceMin(scalar_t* min_dists, int64_t* min_idxs, const size_t tid) { + // s = 32 + if (min_dists[tid] > min_dists[tid + 32]) { + min_idxs[tid] = min_idxs[tid + 32]; + min_dists[tid] = min_dists[tid + 32]; + } +// AMD does not use explicit syncwarp and instead automatically inserts memory +// fences during compilation. +#if !defined(USE_ROCM) + __syncwarp(); +#endif + // s = 16 + if (min_dists[tid] > min_dists[tid + 16]) { + min_idxs[tid] = min_idxs[tid + 16]; + min_dists[tid] = min_dists[tid + 16]; + } +#if !defined(USE_ROCM) + __syncwarp(); +#endif + // s = 8 + if (min_dists[tid] > min_dists[tid + 8]) { + min_idxs[tid] = min_idxs[tid + 8]; + min_dists[tid] = min_dists[tid + 8]; + } +#if !defined(USE_ROCM) + __syncwarp(); +#endif + // s = 4 + if (min_dists[tid] > min_dists[tid + 4]) { + min_idxs[tid] = min_idxs[tid + 4]; + min_dists[tid] = min_dists[tid + 4]; + } +#if !defined(USE_ROCM) + __syncwarp(); +#endif + // s = 2 + if (min_dists[tid] > min_dists[tid + 2]) { + min_idxs[tid] = min_idxs[tid + 2]; + min_dists[tid] = min_dists[tid + 2]; + } +#if !defined(USE_ROCM) + __syncwarp(); +#endif + // s = 1 + if (min_dists[tid] > min_dists[tid + 1]) { + min_idxs[tid] = min_idxs[tid + 1]; + min_dists[tid] = min_dists[tid + 1]; + } +#if !defined(USE_ROCM) + __syncwarp(); +#endif +} + +template +__device__ void WarpReduceMax( + volatile scalar_t* dists, + volatile int64_t* dists_idx, + const size_t tid) { + if (dists[tid] < dists[tid + 32]) { + dists[tid] = dists[tid + 32]; + dists_idx[tid] = dists_idx[tid + 
32]; + } +#if !defined(USE_ROCM) + __syncwarp(); +#endif + if (dists[tid] < dists[tid + 16]) { + dists[tid] = dists[tid + 16]; + dists_idx[tid] = dists_idx[tid + 16]; + } +#if !defined(USE_ROCM) + __syncwarp(); +#endif + if (dists[tid] < dists[tid + 8]) { + dists[tid] = dists[tid + 8]; + dists_idx[tid] = dists_idx[tid + 8]; + } +#if !defined(USE_ROCM) + __syncwarp(); +#endif + if (dists[tid] < dists[tid + 4]) { + dists[tid] = dists[tid + 4]; + dists_idx[tid] = dists_idx[tid + 4]; + } +#if !defined(USE_ROCM) + __syncwarp(); +#endif + if (dists[tid] < dists[tid + 2]) { + dists[tid] = dists[tid + 2]; + dists_idx[tid] = dists_idx[tid + 2]; + } +#if !defined(USE_ROCM) + __syncwarp(); +#endif + if (dists[tid] < dists[tid + 1]) { + dists[tid] = dists[tid + 1]; + dists_idx[tid] = dists_idx[tid + 1]; + } +#if !defined(USE_ROCM) + __syncwarp(); +#endif +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/datasets/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..47f8852be662f6233e9c2dac4b9cdb91d82a0b7c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/datasets/__init__.py @@ -0,0 +1,14 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + +from .r2n2 import BlenderCamera, collate_batched_R2N2, R2N2, render_cubified_voxels +from .shapenet import ShapeNetCore +from .utils import collate_batched_meshes + + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/datasets/r2n2/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/datasets/r2n2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0b554d84b19b8dc58c49e438bc6a8950d27a9e42 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/datasets/r2n2/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from .r2n2 import R2N2 +from .utils import BlenderCamera, collate_batched_R2N2, render_cubified_voxels + + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/datasets/r2n2/r2n2.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/datasets/r2n2/r2n2.py new file mode 100644 index 0000000000000000000000000000000000000000..c4e182f97890204be8d83d5d127e105da26c3499 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/datasets/r2n2/r2n2.py @@ -0,0 +1,427 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + +import json +import warnings +from os import path +from pathlib import Path +from typing import Dict, List, Optional + +import numpy as np +import torch +from PIL import Image +from pytorch3d.common.datatypes import Device +from pytorch3d.datasets.shapenet_base import ShapeNetBase +from pytorch3d.renderer import HardPhongShader +from tabulate import tabulate + +from .utils import ( + align_bbox, + BlenderCamera, + compute_extrinsic_matrix, + read_binvox_coords, + voxelize, +) + + +SYNSET_DICT_DIR = Path(__file__).resolve().parent +MAX_CAMERA_DISTANCE = 1.75 # Constant from R2N2. +VOXEL_SIZE = 128 +# Intrinsic matrix extracted from Blender. Taken from meshrcnn codebase: +# https://github.com/facebookresearch/meshrcnn/blob/main/shapenet/utils/coords.py +BLENDER_INTRINSIC = torch.tensor( + [ + [2.1875, 0.0, 0.0, 0.0], + [0.0, 2.1875, 0.0, 0.0], + [0.0, 0.0, -1.002002, -0.2002002], + [0.0, 0.0, -1.0, 0.0], + ] +) + + +class R2N2(ShapeNetBase): # pragma: no cover + """ + This class loads the R2N2 dataset from a given directory into a Dataset object. + The R2N2 dataset contains 13 categories that are a subset of the ShapeNetCore v.1 + dataset. The R2N2 dataset also contains its own 24 renderings of each object and + voxelized models. Most of the models have all 24 views in the same split, but there + are eight of them that divide their views between train and test splits. + + """ + + def __init__( + self, + split: str, + shapenet_dir: str, + r2n2_dir: str, + splits_file: str, + return_all_views: bool = True, + return_voxels: bool = False, + views_rel_path: str = "ShapeNetRendering", + voxels_rel_path: str = "ShapeNetVoxels", + load_textures: bool = True, + texture_resolution: int = 4, + ) -> None: + """ + Store each object's synset id and models id the given directories. + + Args: + split (str): One of (train, val, test). + shapenet_dir (str): Path to ShapeNet core v1. + r2n2_dir (str): Path to the R2N2 dataset. 
+ splits_file (str): File containing the train/val/test splits. + return_all_views (bool): Indicator of whether or not to load all the views in + the split. If set to False, one of the views in the split will be randomly + selected and loaded. + return_voxels(bool): Indicator of whether or not to return voxels as a tensor + of shape (D, D, D) where D is the number of voxels along each dimension. + views_rel_path: path to rendered views within the r2n2_dir. If not specified, + the renderings are assumed to be at os.path.join(rn2n_dir, "ShapeNetRendering"). + voxels_rel_path: path to rendered views within the r2n2_dir. If not specified, + the renderings are assumed to be at os.path.join(rn2n_dir, "ShapeNetVoxels"). + load_textures: Boolean indicating whether textures should loaded for the model. + Textures will be of type TexturesAtlas i.e. a texture map per face. + texture_resolution: Int specifying the resolution of the texture map per face + created using the textures in the obj file. A + (texture_resolution, texture_resolution, 3) map is created per face. + + """ + super().__init__() + self.shapenet_dir = shapenet_dir + self.r2n2_dir = r2n2_dir + self.views_rel_path = views_rel_path + self.voxels_rel_path = voxels_rel_path + self.load_textures = load_textures + self.texture_resolution = texture_resolution + # Examine if split is valid. + if split not in ["train", "val", "test"]: + raise ValueError("split has to be one of (train, val, test).") + # Synset dictionary mapping synset offsets in R2N2 to corresponding labels. + with open( + path.join(SYNSET_DICT_DIR, "r2n2_synset_dict.json"), "r" + ) as read_dict: + self.synset_dict = json.load(read_dict) + # Inverse dictionary mapping synset labels to corresponding offsets. + self.synset_inv = {label: offset for offset, label in self.synset_dict.items()} + + # Store synset and model ids of objects mentioned in the splits_file. 
+ with open(splits_file) as splits: + split_dict = json.load(splits)[split] + + self.return_images = True + # Check if the folder containing R2N2 renderings is included in r2n2_dir. + if not path.isdir(path.join(r2n2_dir, views_rel_path)): + self.return_images = False + msg = ( + "%s not found in %s. R2N2 renderings will " + "be skipped when returning models." + ) % (views_rel_path, r2n2_dir) + warnings.warn(msg) + + self.return_voxels = return_voxels + # Check if the folder containing voxel coordinates is included in r2n2_dir. + if not path.isdir(path.join(r2n2_dir, voxels_rel_path)): + self.return_voxels = False + msg = ( + "%s not found in %s. Voxel coordinates will " + "be skipped when returning models." + ) % (voxels_rel_path, r2n2_dir) + warnings.warn(msg) + + synset_set = set() + # Store lists of views of each model in a list. + self.views_per_model_list = [] + # Store tuples of synset label and total number of views in each category in a list. + synset_num_instances = [] + for synset in split_dict.keys(): + # Examine if the given synset is present in the ShapeNetCore dataset + # and is also part of the standard R2N2 dataset. + if not ( + path.isdir(path.join(shapenet_dir, synset)) + and synset in self.synset_dict + ): + msg = ( + "Synset category %s from the splits file is either not " + "present in %s or not part of the standard R2N2 dataset." + ) % (synset, shapenet_dir) + warnings.warn(msg) + continue + + synset_set.add(synset) + self.synset_start_idxs[synset] = len(self.synset_ids) + # Start counting total number of views in the current category. + synset_view_count = 0 + for model in split_dict[synset]: + # Examine if the given model is present in the ShapeNetCore path. + shapenet_path = path.join(shapenet_dir, synset, model) + if not path.isdir(shapenet_path): + msg = "Model %s from category %s is not present in %s." 
% ( + model, + synset, + shapenet_dir, + ) + warnings.warn(msg) + continue + self.synset_ids.append(synset) + self.model_ids.append(model) + + model_views = split_dict[synset][model] + # Randomly select a view index if return_all_views set to False. + if not return_all_views: + rand_idx = torch.randint(len(model_views), (1,)) + model_views = [model_views[rand_idx]] + self.views_per_model_list.append(model_views) + synset_view_count += len(model_views) + synset_num_instances.append((self.synset_dict[synset], synset_view_count)) + model_count = len(self.synset_ids) - self.synset_start_idxs[synset] + self.synset_num_models[synset] = model_count + headers = ["category", "#instances"] + synset_num_instances.append(("total", sum(n for _, n in synset_num_instances))) + print( + tabulate(synset_num_instances, headers, numalign="left", stralign="center") + ) + + # Examine if all the synsets in the standard R2N2 mapping are present. + # Update self.synset_inv so that it only includes the loaded categories. + synset_not_present = [ + self.synset_inv.pop(self.synset_dict[synset]) + for synset in self.synset_dict + if synset not in synset_set + ] + if len(synset_not_present) > 0: + msg = ( + "The following categories are included in R2N2's" + "official mapping but not found in the dataset location %s: %s" + ) % (shapenet_dir, ", ".join(synset_not_present)) + warnings.warn(msg) + + def __getitem__(self, model_idx, view_idxs: Optional[List[int]] = None) -> Dict: + """ + Read a model by the given index. + + Args: + model_idx: The idx of the model to be retrieved in the dataset. + view_idx: List of indices of the view to be returned. Each index needs to be + contained in the loaded split (always between 0 and 23, inclusive). If + an invalid index is supplied, view_idx will be ignored and all the loaded + views will be returned. + + Returns: + dictionary with following keys: + - verts: FloatTensor of shape (V, 3). + - faces: faces.verts_idx, LongTensor of shape (F, 3). 
+ - synset_id (str): synset id. + - model_id (str): model id. + - label (str): synset label. + - images: FloatTensor of shape (V, H, W, C), where V is number of views + returned. Returns a batch of the renderings of the models from the R2N2 dataset. + - R: Rotation matrix of shape (V, 3, 3), where V is number of views returned. + - T: Translation matrix of shape (V, 3), where V is number of views returned. + - K: Intrinsic matrix of shape (V, 4, 4), where V is number of views returned. + - voxels: Voxels of shape (D, D, D), where D is the number of voxels along each + dimension. + """ + if isinstance(model_idx, tuple): + model_idx, view_idxs = model_idx + if view_idxs is not None: + if isinstance(view_idxs, int): + view_idxs = [view_idxs] + if not isinstance(view_idxs, list) and not torch.is_tensor(view_idxs): + raise TypeError( + "view_idxs is of type %s but it needs to be a list." + % type(view_idxs) + ) + + model_views = self.views_per_model_list[model_idx] + if view_idxs is not None and any( + idx not in self.views_per_model_list[model_idx] for idx in view_idxs + ): + msg = """At least one of the indices in view_idxs is not available. + Specified view of the model needs to be contained in the + loaded split. If return_all_views is set to False, only one + random view is loaded. Try accessing the specified view(s) + after loading the dataset with self.return_all_views set to True. + Now returning all view(s) in the loaded dataset.""" + warnings.warn(msg) + elif view_idxs is not None: + model_views = view_idxs + + model = self._get_item_ids(model_idx) + model_path = path.join( + self.shapenet_dir, model["synset_id"], model["model_id"], "model.obj" + ) + + verts, faces, textures = self._load_mesh(model_path) + model["verts"] = verts + model["faces"] = faces + model["textures"] = textures + model["label"] = self.synset_dict[model["synset_id"]] + + model["images"] = None + images, Rs, Ts, voxel_RTs = [], [], [], [] + # Retrieve R2N2's renderings if required. 
+ if self.return_images: + rendering_path = path.join( + self.r2n2_dir, + self.views_rel_path, + model["synset_id"], + model["model_id"], + "rendering", + ) + # Read metadata file to obtain params for calibration matrices. + with open(path.join(rendering_path, "rendering_metadata.txt"), "r") as f: + metadata_lines = f.readlines() + for i in model_views: + # Read image. + image_path = path.join(rendering_path, "%02d.png" % i) + raw_img = Image.open(image_path) + image = torch.from_numpy(np.array(raw_img) / 255.0)[..., :3] + images.append(image.to(dtype=torch.float32)) + + # Get camera calibration. + azim, elev, yaw, dist_ratio, fov = [ + float(v) for v in metadata_lines[i].strip().split(" ") + ] + dist = dist_ratio * MAX_CAMERA_DISTANCE + # Extrinsic matrix before transformation to PyTorch3D world space. + RT = compute_extrinsic_matrix(azim, elev, dist) + R, T = self._compute_camera_calibration(RT) + Rs.append(R) + Ts.append(T) + voxel_RTs.append(RT) + + # Intrinsic matrix extracted from the Blender with slight modification to work with + # PyTorch3D world space. Taken from meshrcnn codebase: + # https://github.com/facebookresearch/meshrcnn/blob/main/shapenet/utils/coords.py + K = torch.tensor( + [ + [2.1875, 0.0, 0.0, 0.0], + [0.0, 2.1875, 0.0, 0.0], + [0.0, 0.0, -1.002002, -0.2002002], + [0.0, 0.0, 1.0, 0.0], + ] + ) + model["images"] = torch.stack(images) + model["R"] = torch.stack(Rs) + model["T"] = torch.stack(Ts) + model["K"] = K.expand(len(model_views), 4, 4) + + voxels_list = [] + + # Read voxels if required. + voxel_path = path.join( + self.r2n2_dir, + self.voxels_rel_path, + model["synset_id"], + model["model_id"], + "model.binvox", + ) + if self.return_voxels: + if not path.isfile(voxel_path): + msg = "Voxel file not found for model %s from category %s." + raise FileNotFoundError(msg % (model["model_id"], model["synset_id"])) + + with open(voxel_path, "rb") as f: + # Read voxel coordinates as a tensor of shape (N, 3). 
+ voxel_coords = read_binvox_coords(f) + # Align voxels to the same coordinate system as mesh verts. + voxel_coords = align_bbox(voxel_coords, model["verts"]) + for RT in voxel_RTs: + # Compute projection matrix. + P = BLENDER_INTRINSIC.mm(RT) + # Convert voxel coordinates of shape (N, 3) to voxels of shape (D, D, D). + voxels = voxelize(voxel_coords, P, VOXEL_SIZE) + voxels_list.append(voxels) + model["voxels"] = torch.stack(voxels_list) + + return model + + def _compute_camera_calibration(self, RT): + """ + Helper function for calculating rotation and translation matrices from ShapeNet + to camera transformation and ShapeNet to PyTorch3D transformation. + + Args: + RT: Extrinsic matrix that performs ShapeNet world view to camera view + transformation. + + Returns: + R: Rotation matrix of shape (3, 3). + T: Translation matrix of shape (3). + """ + # Transform the mesh vertices from shapenet world to pytorch3d world. + shapenet_to_pytorch3d = torch.tensor( + [ + [-1.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0], + [0.0, 0.0, -1.0, 0.0], + [0.0, 0.0, 0.0, 1.0], + ], + dtype=torch.float32, + ) + RT = torch.transpose(RT, 0, 1).mm(shapenet_to_pytorch3d) # (4, 4) + # Extract rotation and translation matrices from RT. + R = RT[:3, :3] + T = RT[3, :3] + return R, T + + def render( + self, + model_ids: Optional[List[str]] = None, + categories: Optional[List[str]] = None, + sample_nums: Optional[List[int]] = None, + idxs: Optional[List[int]] = None, + view_idxs: Optional[List[int]] = None, + shader_type=HardPhongShader, + device: Device = "cpu", + **kwargs, + ) -> torch.Tensor: + """ + Render models with BlenderCamera by default to achieve the same orientations as the + R2N2 renderings. Also accepts other types of cameras and any of the args that the + render function in the ShapeNetBase class accepts. + + Args: + view_idxs: each model will be rendered with the orientation(s) of the specified + views. 
Only render by view_idxs if no camera or args for BlenderCamera is + supplied. + Accepts any of the args of the render function in ShapeNetBase: + model_ids: List[str] of model_ids of models intended to be rendered. + categories: List[str] of categories intended to be rendered. categories + and sample_nums must be specified at the same time. categories can be given + in the form of synset offsets or labels, or a combination of both. + sample_nums: List[int] of number of models to be randomly sampled from + each category. Could also contain one single integer, in which case it + will be broadcasted for every category. + idxs: List[int] of indices of models to be rendered in the dataset. + shader_type: Shader to use for rendering. Examples include HardPhongShader + (default), SoftPhongShader etc or any other type of valid Shader class. + device: Device (as str or torch.device) on which the tensors should be located. + **kwargs: Accepts any of the kwargs that the renderer supports and any of the + args that BlenderCamera supports. + + Returns: + Batch of rendered images of shape (N, H, W, 3). + """ + idxs = self._handle_render_inputs(model_ids, categories, sample_nums, idxs) + r = torch.cat([self[idxs[i], view_idxs]["R"] for i in range(len(idxs))]) + t = torch.cat([self[idxs[i], view_idxs]["T"] for i in range(len(idxs))]) + k = torch.cat([self[idxs[i], view_idxs]["K"] for i in range(len(idxs))]) + # Initialize default camera using R, T, K from kwargs or R, T, K of the specified views. 
+ blend_cameras = BlenderCamera( + R=kwargs.get("R", r), + T=kwargs.get("T", t), + K=kwargs.get("K", k), + device=device, + ) + cameras = kwargs.get("cameras", blend_cameras).to(device) + kwargs.pop("cameras", None) + # pass down all the same inputs + return super().render( + idxs=idxs, shader_type=shader_type, device=device, cameras=cameras, **kwargs + ) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/datasets/r2n2/r2n2_synset_dict.json b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/datasets/r2n2/r2n2_synset_dict.json new file mode 100644 index 0000000000000000000000000000000000000000..b8cbae58173e58ea0607e95161e65944979aff23 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/datasets/r2n2/r2n2_synset_dict.json @@ -0,0 +1,15 @@ +{ + "04256520": "sofa", + "02933112": "cabinet", + "02828884": "bench", + "03001627": "chair", + "03211117": "display", + "04090263": "rifle", + "03691459": "loudspeaker", + "03636649": "lamp", + "04401088": "telephone", + "02691156": "airplane", + "04379243": "table", + "02958343": "car", + "04530566": "watercraft" +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/datasets/r2n2/utils.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/datasets/r2n2/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0bb52ae15e4c687396e12ce1738fff921efbec65 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/datasets/r2n2/utils.py @@ -0,0 +1,504 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
# pyre-unsafe

import math
from typing import Dict, List

import numpy as np
import torch
from pytorch3d.common.datatypes import Device
from pytorch3d.datasets.utils import collate_batched_meshes
from pytorch3d.ops import cubify
from pytorch3d.renderer import (
    HardPhongShader,
    MeshRasterizer,
    MeshRenderer,
    PointLights,
    RasterizationSettings,
    TexturesVertex,
)
from pytorch3d.renderer.cameras import CamerasBase
from pytorch3d.transforms import Transform3d


# Empirical min and max over the dataset from meshrcnn.
# https://github.com/facebookresearch/meshrcnn/blob/main/shapenet/utils/coords.py#L9
SHAPENET_MIN_ZMIN = 0.67
SHAPENET_MAX_ZMAX = 0.92
# Threshold for cubify from meshrcnn:
# https://github.com/facebookresearch/meshrcnn/blob/main/configs/shapenet/voxmesh_R50.yaml#L11
CUBIFY_THRESH = 0.2

# Default values of rotation, translation and intrinsic matrices for BlenderCamera.
r = np.expand_dims(np.eye(3), axis=0)  # (1, 3, 3)
t = np.expand_dims(np.zeros(3), axis=0)  # (1, 3)
k = np.expand_dims(np.eye(4), axis=0)  # (1, 4, 4)


def collate_batched_R2N2(batch: List[Dict]):  # pragma: no cover
    """
    Merge a list of R2N2 item dictionaries into a single batched dictionary.

    This wraps ``collate_batched_meshes`` and can be used as the ``collate_fn``
    of a torch DataLoader so that it directly yields Meshes objects.
    TODO: Add support for textures.

    Args:
        batch: List of dictionaries containing information about objects
            in the dataset.

    Returns:
        collated_dict: Dictionary of collated lists. If batch contains both
        verts and faces, a collated mesh batch is also returned.
    """
    collated_dict = collate_batched_meshes(batch)

    def _stack_values(keys, message):
        # Stack each listed entry in place into a batched tensor. When models
        # contribute different numbers of views, torch.stack raises a
        # RuntimeError and the entries are left as lists.
        try:
            for key in keys:
                collated_dict[key] = torch.stack(collated_dict[key])
        except RuntimeError:
            print(message)

    # Views of each model stacked to (N, V, H, W, 3) when the view count V is
    # consistent across all models in the batch.
    if "images" in collated_dict:
        _stack_values(
            ["images"],
            "Models don't have the same number of views. Now returning "
            "lists of images instead of batches.",
        )

    # Camera calibration matrices stacked to (N, V, 3, 3), (N, V, 3) and
    # (N, V, 4, 4) respectively.
    if all(key in collated_dict for key in ["R", "T", "K"]):
        _stack_values(
            ["R", "T", "K"],
            "Models don't have the same number of views. Now returning "
            "lists of calibration matrices instead of a batched tensor.",
        )

    # Voxel grids stacked to (N, V, S, S, S), where S is the voxel resolution.
    if "voxels" in collated_dict:
        _stack_values(
            ["voxels"],
            "Models don't have the same number of views. Now returning "
            "lists of voxels instead of a batched tensor.",
        )
    return collated_dict


def compute_extrinsic_matrix(
    azimuth: float, elevation: float, distance: float
):  # pragma: no cover
    """
    Copied from meshrcnn codebase:
    https://github.com/facebookresearch/meshrcnn/blob/main/shapenet/utils/coords.py#L96

    Compute the 4x4 extrinsic matrix mapping homogeneous world coordinates to
    homogeneous camera coordinates, assuming the camera looks at the origin.
    Used in R2N2 Dataset when computing calibration matrices.

    Args:
        azimuth: Rotation about the z-axis, in degrees.
        elevation: Rotation above the xy-plane, in degrees.
        distance: Distance from the origin.

    Returns:
        FloatTensor of shape (4, 4).
    """
    azimuth = float(azimuth)
    elevation = float(elevation)
    distance = float(distance)

    azim_rad = -math.pi * azimuth / 180.0
    elev_rad = -math.pi * elevation / 180.0
    sin_a, cos_a = math.sin(azim_rad), math.cos(azim_rad)
    sin_e, cos_e = math.sin(elev_rad), math.cos(elev_rad)

    R_world2obj = torch.tensor(
        [
            [cos_a * cos_e, sin_a * cos_e, -sin_e],
            [-sin_a, cos_a, 0],
            [cos_a * sin_e, sin_a * sin_e, cos_e],
        ]
    )
    R_obj2cam = torch.tensor([[0.0, 1.0, 0.0], [0.0, 0.0, 1.0], [1.0, 0.0, 0.0]])
    R_world2cam = R_obj2cam.mm(R_world2obj)
    cam_location = torch.tensor([[distance, 0, 0]]).t()
    T_world2cam = -(R_obj2cam.mm(cam_location))
    RT = torch.cat([R_world2cam, T_world2cam], dim=1)
    RT = torch.cat([RT, torch.tensor([[0.0, 0, 0, 1]])])

    # Blender rotates a loaded .obj 90 degrees about the x axis; fold that
    # rotation into the extrinsics here so renders line up with the dataset.
    rot = torch.tensor([[1, 0, 0, 0], [0, 0, -1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
    RT = RT.mm(rot.to(RT))

    return RT


def read_binvox_coords(
    f,
    integer_division: bool = True,
    dtype: torch.dtype = torch.float32,
):  # pragma: no cover
    """
    Copied from meshrcnn codebase:
    https://github.com/facebookresearch/meshrcnn/blob/main/shapenet/utils/binvox_torch.py#L5

    Read a binvox file and return the indices of all nonzero voxels.

    This matches the behavior of binvox_rw.read_as_coord_array
    (https://github.com/dimatura/binvox-rw-py/blob/public/binvox_rw.py#L153)
    but uses torch rather than numpy and vectorizes the decoding.

    NOTE: binvox_rw.read_as_coord_array appears to have a bug where it uses
    floating-point instead of integer division when converting linear indices
    into 3D indices; pass integer_division=False to reproduce that behavior.

    Args:
        f (str): A file pointer to the binvox file to read
        integer_division (bool): If False, then match the buggy implementation
            from binvox_rw
        dtype: Datatype of the output tensor. Use float64 to match binvox_rw

    Returns:
        coords (tensor): A tensor of shape (N, 3) where N is the number of
        nonzero voxels, and coords[i] = (x, y, z) gives the index of the ith
        nonzero voxel. If the voxel grid has shape (V, V, V) then
        0 <= x, y, z < V.
    """
    size, translation, scale = _read_binvox_header(f)
    storage = torch.ByteStorage.from_buffer(f.read())
    data = torch.tensor([], dtype=torch.uint8)
    # pyre-fixme[28]: Unexpected keyword argument `source`.
    data.set_(source=storage)
    # The payload is run-length encoded as (value, count) byte pairs.
    vals, counts = data[::2], data[1::2]
    idxs = _compute_idxs(vals, counts)
    if not integer_division:
        # Cast before dividing to reproduce the binvox_rw float-division bug.
        idxs = idxs.to(dtype)
    x_idxs = idxs // (size * size)
    zy_idxs = idxs % (size * size)
    z_idxs = zy_idxs // size
    y_idxs = zy_idxs % size
    coords = torch.stack([x_idxs, y_idxs, z_idxs], dim=1)
    return coords.to(dtype)
def _compute_idxs(vals, counts):  # pragma: no cover
    """
    Copied from meshrcnn codebase:
    https://github.com/facebookresearch/meshrcnn/blob/main/shapenet/utils/binvox_torch.py#L58

    Vectorized decoding of run-length-encoded voxel occupancy into the linear
    indices of the occupied voxels.

    Args:
        vals: tensor of binary values indicating voxel presence in a dense format.
        counts: tensor of number of occurrence of each value in vals.

    Returns:
        idxs: A tensor of shape (N), where N is the number of nonzero voxels.
    """
    # Worked example:
    #   vals   = [0, 1, 0, 1, 1]
    #   counts = [2, 3, 3, 2, 1]
    # encodes the dense grid [0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1], whose nonzero
    # indices are [2, 3, 4, 8, 9, 10].

    # Cumulative run boundaries: run_ends = [2, 5, 8, 10, 11].
    run_ends = counts.cumsum(dim=0)

    # Keep only the occupied runs:
    #   run_ends = [5, 10, 11], run_lens = [3, 2, 1], run_starts = [2, 8, 10].
    occupied = vals == 1
    run_ends = run_ends[occupied]
    run_lens = counts[occupied].to(run_ends)
    run_starts = run_ends - run_lens

    # Build a delta array of ones, seed the first entry with the first start:
    #   deltas = [2, 1, 1, 1, 1, 1]
    deltas = torch.ones(run_lens.sum().item(), dtype=torch.int64)
    deltas[0] = run_starts[0]

    # At each run boundary add the gap between runs:
    #   jump_pos = [3, 5], gaps = [3, 0]  ->  deltas = [2, 1, 1, 4, 1, 1]
    jump_pos = run_lens.cumsum(dim=0)[:-1]
    deltas[jump_pos] += run_starts[1:] - run_ends[:-1]

    # A final cumsum yields the occupied indices: [2, 3, 4, 8, 9, 10].
    return deltas.cumsum(dim=0)


def _read_binvox_header(f):  # pragma: no cover
    """
    Copied from meshrcnn codebase:
    https://github.com/facebookresearch/meshrcnn/blob/main/shapenet/utils/binvox_torch.py#L99

    Read a binvox header and extract the voxel grid size plus the translation
    and scale back to original voxel coordinates.

    Args:
        f (str): A file pointer to the binvox file to read.

    Returns:
        size (int): size of voxel.
        translation (tuple(float)): translation to original voxel coordinates.
        scale (float): scale to original voxel coordinates.
    """
    # Line 1: magic string "#binvox 1".
    if f.readline().strip() != b"#binvox 1":
        raise ValueError("Invalid header (line 1)")

    # Line 2: "dim V V V" — all three dimensions must be equal.
    raw = f.readline().strip()
    if not raw.startswith(b"dim "):
        raise ValueError("Invalid header (line 2)")
    fields = raw.split(b" ")
    try:
        dims = [int(d) for d in fields[1:]]
    except ValueError:
        raise ValueError("Invalid header (line 2)") from None
    if len(dims) != 3 or dims[0] != dims[1] or dims[0] != dims[2]:
        raise ValueError("Invalid header (line 2)")
    size = dims[0]

    # Line 3: "translate x y z".
    raw = f.readline().strip()
    if not raw.startswith(b"translate "):
        raise ValueError("Invalid header (line 3)")
    fields = raw.split(b" ")
    if len(fields) != 4:
        raise ValueError("Invalid header (line 3)")
    try:
        translation = tuple(float(v) for v in fields[1:])
    except ValueError:
        raise ValueError("Invalid header (line 3)") from None

    # Line 4: "scale s".
    raw = f.readline().strip()
    if not raw.startswith(b"scale "):
        raise ValueError("Invalid header (line 4)")
    fields = raw.split(b" ")
    if not len(fields) == 2:
        raise ValueError("Invalid header (line 4)")
    scale = float(fields[1])

    # Line 5: "data" marks the start of the run-length payload.
    if not f.readline().strip() == b"data":
        raise ValueError("Invalid header (line 5)")

    return size, translation, scale


def align_bbox(src, tgt):  # pragma: no cover
    """
    Copied from meshrcnn codebase:
    https://github.com/facebookresearch/meshrcnn/blob/main/tools/preprocess_shapenet.py#L263

    Return a copy of src points in the coordinate system of tgt by applying a
    per-axis scale and shift so the min / max values align.

    Args:
        src, tgt: Torch Tensor of shape (N, 3)

    Returns:
        out: Torch Tensor of shape (N, 3)
    """
    if src.ndim != 2 or tgt.ndim != 2:
        raise ValueError("Both src and tgt need to have dimensions of 2.")
    if src.shape[-1] != 3 or tgt.shape[-1] != 3:
        raise ValueError(
            "Both src and tgt need to have sizes of 3 along the second dimension."
        )
    # Affine map per axis sending the source bounding box onto the target one.
    src_lo, src_hi = src.min(dim=0)[0], src.max(dim=0)[0]
    tgt_lo, tgt_hi = tgt.min(dim=0)[0], tgt.max(dim=0)[0]
    scale = (tgt_hi - tgt_lo) / (src_hi - src_lo)
    shift = tgt_lo - scale * src_lo
    return scale * src + shift
def voxelize(voxel_coords, P, V):  # pragma: no cover
    """
    Copied from meshrcnn codebase:
    https://github.com/facebookresearch/meshrcnn/blob/main/tools/preprocess_shapenet.py#L284
    but changing flip y to flip x.

    Create a (V, V, V) occupancy grid from voxel coordinates and a projection
    matrix.

    Args:
        voxel_coords: FloatTensor of shape (V, 3) giving voxel's coordinates aligned to
            the vertices.
        P: FloatTensor of shape (4, 4) giving the projection matrix.
        V: Voxel size of the output.

    Returns:
        voxels: Tensor of shape (D, D, D) giving the voxelized result.
    """
    device = voxel_coords.device
    voxel_coords = project_verts(voxel_coords, P)

    # Use empirical dataset-wide z bounds rather than the per-model zmin/zmax:
    # the same bounds must be used to invert the transform later (refinement /
    # evaluation), so they have to be consistent across all images.
    zmin = SHAPENET_MIN_ZMIN
    zmax = SHAPENET_MAX_ZMAX

    # Remap z so that [zmin, zmax] covers [-1, 1].
    m = 2.0 / (zmax - zmin)
    b = -2.0 * zmin / (zmax - zmin) - 1
    voxel_coords[:, 2].mul_(m).add_(b)
    voxel_coords[:, 0].mul_(-1)  # Flip x

    # Coordinates now live in [-1, 1]^3; map into [0, V-1]^3 index space and
    # keep only the entries that round to an in-bounds cell.
    voxel_coords = 0.5 * (V - 1) * (voxel_coords + 1.0)
    voxel_coords = voxel_coords.round().to(torch.int64)
    in_bounds = (0 <= voxel_coords) * (voxel_coords < V)
    in_bounds = in_bounds[:, 0] * in_bounds[:, 1] * in_bounds[:, 2]
    x, y, z = voxel_coords[in_bounds].unbind(dim=1)
    voxels = torch.zeros(V, V, V, dtype=torch.uint8, device=device)
    voxels[z, y, x] = 1

    return voxels


def project_verts(verts, P, eps: float = 1e-1):  # pragma: no cover
    """
    Copied from meshrcnn codebase:
    https://github.com/facebookresearch/meshrcnn/blob/main/shapenet/utils/coords.py#L159

    Project vertices using a 4x4 transformation matrix.

    Args:
        verts: FloatTensor of shape (N, V, 3) giving a batch of vertex positions or of
            shape (V, 3) giving a single set of vertex positions.
        P: FloatTensor of shape (N, 4, 4) giving projection matrices or of shape (4, 4)
            giving a single projection matrix.

    Returns:
        verts_out: FloatTensor of shape (N, V, 3) giving vertex positions (x, y, z)
        where verts_out[i] is the result of transforming verts[i] by P[i].
    """
    # Promote unbatched inputs to a batch of size 1; squeeze again on return.
    squeeze_out = False
    if verts.dim() == 2:
        assert P.dim() == 2
        squeeze_out = True
        verts, P = verts[None], P[None]

    N, V = verts.shape[0], verts.shape[1]
    dtype, device = verts.dtype, verts.device

    # Homogeneous coordinates: append a column of ones before applying P.
    # (A 4x3 submatrix multiply plus a 4x1 add would avoid this allocation;
    # unclear whether it would actually be faster.)
    ones = torch.ones(N, V, 1, dtype=dtype, device=device)
    verts_hom = torch.cat([verts, ones], dim=2)
    verts_cam_hom = torch.bmm(verts_hom, P.transpose(1, 2))

    # Perspective divide; clamp |w| away from zero (sign-preserving) to avoid
    # division blowups near the camera plane.
    w = verts_cam_hom[:, :, 3:]
    w_sign = w.sign()
    w_sign[w == 0] = 1
    w = w_sign * w.abs().clamp(min=eps)

    verts_proj = verts_cam_hom[:, :, :3] / w

    if squeeze_out:
        return verts_proj[0]
    return verts_proj
class BlenderCamera(CamerasBase):  # pragma: no cover
    """
    Camera for rendering objects with calibration matrices from the R2N2 dataset
    (which uses Blender for rendering the views for each model).
    """

    def __init__(self, R=r, T=t, K=k, device: Device = "cpu") -> None:
        """
        Args:
            R: Rotation matrix of shape (N, 3, 3).
            T: Translation matrix of shape (N, 3).
            K: Intrinsic matrix of shape (N, 4, 4).
            device: Device (as str or torch.device).
        """
        # CamerasBase converts all inputs to torch tensors and broadcasts them
        # to a common batch dimension where necessary.
        super().__init__(device=device, R=R, T=T, K=K)

    def get_projection_transform(self, **kwargs) -> Transform3d:
        # K already encodes Blender's projection; wrap its transpose since
        # Transform3d post-multiplies row vectors.
        proj = Transform3d(device=self.device)
        proj._matrix = self.K.transpose(1, 2).contiguous()
        return proj

    def is_perspective(self):
        return False

    def in_ndc(self):
        return True


def render_cubified_voxels(
    voxels: torch.Tensor, shader_type=HardPhongShader, device: Device = "cpu", **kwargs
):  # pragma: no cover
    """
    Use the Cubify operator to convert inputs voxels to a mesh and then render that mesh.

    Args:
        voxels: FloatTensor of shape (N, D, D, D) where N is the batch size and
            D is the number of voxels along each dimension.
        shader_type: Shader to use for rendering. Examples include HardPhongShader
            (default), SoftPhongShader etc or any other type of valid Shader class.
        device: Device (as str or torch.device) on which the tensors should be located.
        **kwargs: Accepts any of the kwargs that the renderer supports.

    Returns:
        Batch of rendered images of shape (N, H, W, 3).
    """
    meshes = cubify(voxels, CUBIFY_THRESH).to(device)
    # Cubified meshes carry no texture, so paint every vertex white.
    meshes.textures = TexturesVertex(
        verts_features=torch.ones_like(meshes.verts_padded(), device=device)
    )
    cameras = BlenderCamera(device=device)
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=cameras,
            raster_settings=kwargs.get("raster_settings", RasterizationSettings()),
        ),
        shader=shader_type(
            device=device,
            cameras=cameras,
            lights=kwargs.get("lights", PointLights()).to(device),
        ),
    )
    return renderer(meshes)
# pyre-unsafe

from .shapenet_core import ShapeNetCore


__all__ = [k for k in globals().keys() if not k.startswith("_")]


# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

import json
import os
import warnings
from os import path
from pathlib import Path
from typing import Dict

from pytorch3d.datasets.shapenet_base import ShapeNetBase


SYNSET_DICT_DIR = Path(__file__).resolve().parent


class ShapeNetCore(ShapeNetBase):  # pragma: no cover
    """
    Load ShapeNetCore from a given directory into a Dataset object.
    ShapeNetCore is a subset of the ShapeNet dataset and can be downloaded from
    https://www.shapenet.org/.
    """

    def __init__(
        self,
        data_dir,
        synsets=None,
        version: int = 1,
        load_textures: bool = True,
        texture_resolution: int = 4,
    ) -> None:
        """
        Store each object's synset id and model id from data_dir.

        Args:
            data_dir: Path to ShapeNetCore data.
            synsets: List of synset categories to load from ShapeNetCore in the form of
                synset offsets or labels. A combination of both is also accepted.
                When no category is specified, all categories in data_dir are loaded.
            version: (int) version of ShapeNetCore data in data_dir, 1 or 2.
                Default is set to be 1. Version 1 has 57 categories and version 2
                has 55 categories.
                Note: version 1 has two categories 02858304(boat) and 02992529(cellphone)
                that are hyponyms of categories 04530566(watercraft) and 04401088(telephone)
                respectively. You can combine the categories manually if needed.
                Version 2 doesn't have 02858304(boat) or 02834778(bicycle) compared to
                version 1.
            load_textures: Boolean indicating whether textures should loaded for the model.
                Textures will be of type TexturesAtlas i.e. a texture map per face.
            texture_resolution: Int specifying the resolution of the texture map per face
                created using the textures in the obj file. A
                (texture_resolution, texture_resolution, 3) map is created per face.
        """
        super().__init__()
        self.shapenet_dir = data_dir
        self.load_textures = load_textures
        self.texture_resolution = texture_resolution

        if version not in [1, 2]:
            raise ValueError("Version number must be either 1 or 2.")
        # Version 1 stores the obj at the model root; version 2 nests it.
        self.model_dir = "model.obj" if version == 1 else "models/model_normalized.obj"

        # Official synset-offset -> label mapping shipped with the package,
        # plus its inverse (label -> offset).
        dict_file = "shapenet_synset_dict_v%d.json" % version
        with open(path.join(SYNSET_DICT_DIR, dict_file), "r") as read_dict:
            self.synset_dict = json.load(read_dict)
        self.synset_inv = {label: offset for offset, label in self.synset_dict.items()}

        if synsets is not None:
            # Normalize the requested categories (offsets or labels) into a set
            # of offsets, keeping only those that exist on disk.
            synset_set = set()
            for entry in synsets:
                if (entry in self.synset_dict.keys()) and (
                    path.isdir(path.join(data_dir, entry))
                ):
                    synset_set.add(entry)
                elif (entry in self.synset_inv.keys()) and (
                    path.isdir(path.join(data_dir, self.synset_inv[entry]))
                ):
                    synset_set.add(self.synset_inv[entry])
                else:
                    msg = (
                        "Synset category %s either not part of ShapeNetCore dataset "
                        "or cannot be found in %s."
                    ) % (entry, data_dir)
                    warnings.warn(msg)
        else:
            # No categories requested: load every directory that appears in the
            # official mapping; ignore folders outside of it.
            synset_set = {
                entry
                for entry in os.listdir(data_dir)
                if path.isdir(path.join(data_dir, entry))
                and entry in self.synset_dict
            }

        # Prune categories from the inverse map that are in the official
        # mapping but were not loaded, and warn about them.
        synset_not_present = set(self.synset_dict.keys()).difference(synset_set)
        for offset in synset_not_present:
            self.synset_inv.pop(self.synset_dict[offset])

        if len(synset_not_present) > 0:
            msg = (
                "The following categories are included in ShapeNetCore ver.%d's "
                "official mapping but not found in the dataset location %s: %s"
                ""
            ) % (version, data_dir, ", ".join(synset_not_present))
            warnings.warn(msg)

        # Each grandchild directory of data_dir contains one object; the
        # directory name is the object's model_id.
        for offset in synset_set:
            self.synset_start_idxs[offset] = len(self.synset_ids)
            for model in os.listdir(path.join(data_dir, offset)):
                if not path.exists(path.join(data_dir, offset, model, self.model_dir)):
                    msg = (
                        "Object file not found in the model directory %s "
                        "under synset directory %s."
                    ) % (model, offset)
                    warnings.warn(msg)
                    continue
                self.synset_ids.append(offset)
                self.model_ids.append(model)
            model_count = len(self.synset_ids) - self.synset_start_idxs[offset]
            self.synset_num_models[offset] = model_count

    def __getitem__(self, idx: int) -> Dict:
        """
        Read a model by the given index.

        Args:
            idx: The idx of the model to be retrieved in the dataset.

        Returns:
            dictionary with following keys:
            - verts: FloatTensor of shape (V, 3).
            - faces: LongTensor of shape (F, 3) which indexes into the verts tensor.
            - synset_id (str): synset id
            - model_id (str): model id
            - label (str): synset label.
        """
        model = self._get_item_ids(idx)
        model_path = path.join(
            self.shapenet_dir, model["synset_id"], model["model_id"], self.model_dir
        )
        verts, faces, textures = self._load_mesh(model_path)
        model["verts"] = verts
        model["faces"] = faces
        model["textures"] = textures
        model["label"] = self.synset_dict[model["synset_id"]]
        return model
"03991062": "flowerpot", + "03593526": "jar", + "02876657": "bottle", + "02871439": "bookshelf", + "03642806": "laptop", + "03624134": "knife", + "04468005": "train", + "02747177": "trash bin", + "03790512": "motorbike", + "03948459": "pistol", + "03337140": "file cabinet", + "02818832": "bed", + "03928116": "piano", + "04330267": "stove", + "03797390": "mug", + "02880940": "bowl", + "04554684": "washer", + "04004475": "printer", + "03513137": "helmet", + "03761084": "microwaves", + "04225987": "skateboard", + "04460130": "tower", + "02942699": "camera", + "02801938": "basket", + "02946921": "can", + "03938244": "pillow", + "03710193": "mailbox", + "03207941": "dishwasher", + "04099429": "rocket", + "02773838": "bag", + "02843684": "birdhouse", + "03261776": "earphone", + "03759954": "microphone", + "04074963": "remote", + "03085013": "keyboard", + "02834778": "bicycle", + "02954340": "cap", + "02858304": "boat", + "02992529": "mobile phone" +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/datasets/shapenet/shapenet_synset_dict_v2.json b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/datasets/shapenet/shapenet_synset_dict_v2.json new file mode 100644 index 0000000000000000000000000000000000000000..f0107c93c3535e2454070be1dcb622ac66899c90 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/datasets/shapenet/shapenet_synset_dict_v2.json @@ -0,0 +1,57 @@ +{ + "02691156": "airplane", + "02747177": "trash bin", + "02773838": "bag", + "02801938": "basket", + "02808440": "bathtub", + "02818832": "bed", + "02828884": "bench", + "02843684": "birdhouse", + "02871439": "bookshelf", + "02876657": "bottle", + "02880940": "bowl", + "02924116": "bus", + "02933112": "cabinet", + "02942699": "camera", + "02946921": "can", + "02954340": "cap", + "02958343": "car", + "02992529": "cellphone", + "03001627": "chair", + "03046257": "clock", + 
"03085013": "keyboard", + "03207941": "dishwasher", + "03211117": "display", + "03261776": "earphone", + "03325088": "faucet", + "03337140": "file cabinet", + "03467517": "guitar", + "03513137": "helmet", + "03593526": "jar", + "03624134": "knife", + "03636649": "lamp", + "03642806": "laptop", + "03691459": "loudspeaker", + "03710193": "mailbox", + "03759954": "microphone", + "03761084": "microwaves", + "03790512": "motorbike", + "03797390": "mug", + "03928116": "piano", + "03938244": "pillow", + "03948459": "pistol", + "03991062": "flowerpot", + "04004475": "printer", + "04074963": "remote", + "04090263": "rifle", + "04099429": "rocket", + "04225987": "skateboard", + "04256520": "sofa", + "04330267": "stove", + "04379243": "table", + "04401088": "telephone", + "04460130": "tower", + "04468005": "train", + "04530566": "watercraft", + "04554684": "washer" +} diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/datasets/shapenet_base.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/datasets/shapenet_base.py new file mode 100644 index 0000000000000000000000000000000000000000..d90bea67956019adbb0ea29dd7524f19060f88f8 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/datasets/shapenet_base.py @@ -0,0 +1,291 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
class ShapeNetBase(torch.utils.data.Dataset):  # pragma: no cover
    """
    'ShapeNetBase' implements a base Dataset for ShapeNet and R2N2 with helper methods.
    It is not intended to be used on its own as a Dataset for a Dataloader. Both __init__
    and __getitem__ need to be implemented.
    """

    def __init__(self) -> None:
        """
        Set up lists of synset_ids and model_ids.
        """
        self.synset_ids = []
        self.model_ids = []
        # Inverse map: synset label -> synset offset (populated by subclasses).
        self.synset_inv = {}
        # Start index of each synset's models in the flat model_ids list.
        self.synset_start_idxs = {}
        # Number of models per synset.
        self.synset_num_models = {}
        self.shapenet_dir = ""
        self.model_dir = "model.obj"
        self.load_textures = True
        self.texture_resolution = 4

    def __len__(self) -> int:
        """
        Return number of total models in the loaded dataset.
        """
        return len(self.model_ids)

    def __getitem__(self, idx) -> Dict:
        """
        Read a model by the given index. Need to be implemented for every child class
        of ShapeNetBase.

        Args:
            idx: The idx of the model to be retrieved in the dataset.

        Returns:
            dictionary containing information about the model.
        """
        raise NotImplementedError(
            "__getitem__ should be implemented in the child class of ShapeNetBase"
        )

    def _get_item_ids(self, idx) -> Dict:
        """
        Read a model by the given index.

        Args:
            idx: The idx of the model to be retrieved in the dataset.

        Returns:
            dictionary with following keys:
            - synset_id (str): synset id
            - model_id (str): model id
        """
        model = {}
        model["synset_id"] = self.synset_ids[idx]
        model["model_id"] = self.model_ids[idx]
        return model

    def _load_mesh(self, model_path) -> Tuple:
        """
        Load an .obj file and return (verts, faces, textures).

        Args:
            model_path: path to the .obj file.

        Returns:
            Tuple of vertices, face vertex indices and a texture atlas
            (or None when load_textures is False).
        """
        verts, faces, aux = load_obj(
            model_path,
            create_texture_atlas=self.load_textures,
            load_textures=self.load_textures,
            texture_atlas_size=self.texture_resolution,
        )
        if self.load_textures:
            textures = aux.texture_atlas
            # Some meshes don't have textures. In this case
            # create a white texture map
            if textures is None:
                textures = verts.new_ones(
                    faces.verts_idx.shape[0],
                    self.texture_resolution,
                    self.texture_resolution,
                    3,
                )
        else:
            textures = None

        return verts, faces.verts_idx, textures

    def render(
        self,
        model_ids: Optional[List[str]] = None,
        categories: Optional[List[str]] = None,
        sample_nums: Optional[List[int]] = None,
        idxs: Optional[List[int]] = None,
        shader_type=HardPhongShader,
        device: Device = "cpu",
        **kwargs,
    ) -> torch.Tensor:
        """
        If a list of model_ids are supplied, render all the objects by the given model_ids.
        If no model_ids are supplied, but categories and sample_nums are specified, randomly
        select a number of objects (number specified in sample_nums) in the given categories
        and render these objects. If instead a list of idxs is specified, check if the idxs
        are all valid and render models by the given idxs. Otherwise, randomly select a number
        (first number in sample_nums, default is set to be 1) of models from the loaded dataset
        and render these models.

        Args:
            model_ids: List[str] of model_ids of models intended to be rendered.
            categories: List[str] of categories intended to be rendered. categories
                and sample_nums must be specified at the same time. categories can be given
                in the form of synset offsets or labels, or a combination of both.
            sample_nums: List[int] of number of models to be randomly sampled from
                each category. Could also contain one single integer, in which case it
                will be broadcasted for every category.
            idxs: List[int] of indices of models to be rendered in the dataset.
            shader_type: Select shading. Valid options include HardPhongShader (default),
                SoftPhongShader, HardGouraudShader, SoftGouraudShader, HardFlatShader,
                SoftSilhouetteShader.
            device: Device (as str or torch.device) on which the tensors should be located.
            **kwargs: Accepts any of the kwargs that the renderer supports.

        Returns:
            Batch of rendered images of shape (N, H, W, 3).
        """
        idxs = self._handle_render_inputs(model_ids, categories, sample_nums, idxs)
        # Use the getitem method which loads mesh + texture
        models = [self[idx] for idx in idxs]
        meshes = collate_batched_meshes(models)["mesh"]
        if meshes.textures is None:
            # Meshes without textures get a constant white vertex texture.
            meshes.textures = TexturesVertex(
                verts_features=torch.ones_like(meshes.verts_padded(), device=device)
            )

        meshes = meshes.to(device)
        cameras = kwargs.get("cameras", FoVPerspectiveCameras()).to(device)
        if len(cameras) != 1 and len(cameras) % len(meshes) != 0:
            raise ValueError("Mismatch between batch dims of cameras and meshes.")
        if len(cameras) > 1:
            # When rendering R2N2 models, if more than one views are provided, broadcast
            # the meshes so that each mesh can be rendered for each of the views.
            meshes = meshes.extend(len(cameras) // len(meshes))
        renderer = MeshRenderer(
            rasterizer=MeshRasterizer(
                cameras=cameras,
                raster_settings=kwargs.get("raster_settings", RasterizationSettings()),
            ),
            shader=shader_type(
                device=device,
                cameras=cameras,
                lights=kwargs.get("lights", PointLights()).to(device),
            ),
        )
        return renderer(meshes)

    def _handle_render_inputs(
        self,
        model_ids: Optional[List[str]] = None,
        categories: Optional[List[str]] = None,
        sample_nums: Optional[List[int]] = None,
        idxs: Optional[List[int]] = None,
    ) -> List[int]:
        """
        Helper function for converting user provided model_ids, categories and sample_nums
        to indices of models in the loaded dataset. If model idxs are provided, we check if
        the idxs are valid. If no models are specified, the first model in the loaded dataset
        is chosen. The function returns the indices of the selected models.

        Args:
            model_ids: List[str] of model_ids of models to be rendered.
            categories: List[str] of categories to be rendered.
            sample_nums: List[int] of number of models to be randomly sampled from
                each category.
            idxs: List[int] of indices of models to be rendered in the dataset.

        Returns:
            List of indices of models to be rendered.

        Raises:
            ValueError: if a model_id or category is not in the loaded dataset, or
                if categories and sample_nums have incompatible lengths.
            IndexError: if any supplied idx is out of bounds.
        """
        # Get corresponding indices if model_ids are supplied.
        if model_ids is not None and len(model_ids) > 0:
            idxs = []
            for model_id in model_ids:
                if model_id not in self.model_ids:
                    raise ValueError(
                        "model_id %s not found in the loaded dataset." % model_id
                    )
                idxs.append(self.model_ids.index(model_id))

        # Sample random models if categories and sample_nums are supplied and get
        # the corresponding indices.
        elif categories is not None and len(categories) > 0:
            sample_nums = [1] if sample_nums is None else sample_nums
            if len(categories) != len(sample_nums) and len(sample_nums) != 1:
                raise ValueError(
                    "categories and sample_nums needs to be of the same length or "
                    "sample_nums needs to be of length 1."
                )

            idxs = []
            for i in range(len(categories)):
                # Accept either a synset label or a synset offset.
                category = self.synset_inv.get(categories[i], categories[i])
                if category not in self.synset_inv.values():
                    raise ValueError(
                        "Category %s is not in the loaded dataset." % category
                    )
                # Broadcast if sample_nums has length of 1.
                sample_num = sample_nums[i] if len(sample_nums) > 1 else sample_nums[0]
                idxs.extend(
                    self._sample_idxs_from_category(
                        sample_num=sample_num, category=category
                    )
                )
        # Check if the indices are valid if idxs are supplied.
        elif idxs is not None and len(idxs) > 0:
            if any(idx < 0 or idx >= len(self.model_ids) for idx in idxs):
                raise IndexError(
                    # NOTE: a trailing space was missing here, producing
                    # "...need to bebetween 0 and N." in the raised message.
                    "One or more idx values are out of bounds. Indices need to be "
                    "between 0 and %s." % (len(self.model_ids) - 1)
                )
        # Check if sample_nums is specified, if so sample sample_nums[0] number
        # of indices from the entire loaded dataset. Otherwise randomly select one
        # index from the dataset.
        else:
            sample_nums = [1] if sample_nums is None else sample_nums
            if len(sample_nums) > 1:
                msg = (
                    "More than one sample sizes specified, now sampling "
                    "%d models from the dataset." % sample_nums[0]
                )
                warnings.warn(msg)
            idxs = self._sample_idxs_from_category(sample_nums[0])
        return idxs

    def _sample_idxs_from_category(
        self, sample_num: int = 1, category: Optional[str] = None
    ) -> List[int]:
        """
        Helper function for sampling a number of indices from the given category.

        Args:
            sample_num: number of indices to be sampled from the given category.
            category: category synset of the category to be sampled from. If not
                specified, sample from all models in the loaded dataset.

        Returns:
            List of sampled indices into the flat model list. Sampling is done
            with replacement (and a warning is emitted) when sample_num exceeds
            the number of available models.
        """
        start = self.synset_start_idxs[category] if category is not None else 0
        range_len = (
            self.synset_num_models[category] if category is not None else self.__len__()
        )
        replacement = sample_num > range_len
        sampled_idxs = (
            torch.multinomial(
                torch.ones(range_len, dtype=torch.float32),
                sample_num,
                replacement=replacement,
            )
            + start
        )
        if replacement:
            msg = (
                "Sample size %d is larger than the number of objects in %s, "
                "values sampled with replacement."
            ) % (
                sample_num,
                "category " + category if category is not None else "all categories",
            )
            warnings.warn(msg)
        # Convert to a plain Python list so the declared List[int] return type
        # holds (previously a Tensor was returned despite the annotation).
        return sampled_idxs.tolist()
+ """ + start = self.synset_start_idxs[category] if category is not None else 0 + range_len = ( + self.synset_num_models[category] if category is not None else self.__len__() + ) + replacement = sample_num > range_len + sampled_idxs = ( + torch.multinomial( + torch.ones((range_len), dtype=torch.float32), + sample_num, + replacement=replacement, + ) + + start + ) + if replacement: + msg = ( + "Sample size %d is larger than the number of objects in %s, " + "values sampled with replacement." + ) % ( + sample_num, + "category " + category if category is not None else "all categories", + ) + warnings.warn(msg) + # pyre-fixme[7]: Expected `List[int]` but got `Tensor`. + return sampled_idxs diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/datasets/utils.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/datasets/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9974f1eae702b940361d6d63215c1d0230faf4b6 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/datasets/utils.py @@ -0,0 +1,49 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from typing import Dict, List + +from pytorch3d.renderer.mesh import TexturesAtlas +from pytorch3d.structures import Meshes + + +def collate_batched_meshes(batch: List[Dict]): # pragma: no cover + """ + Take a list of objects in the form of dictionaries and merge them + into a single dictionary. This function can be used with a Dataset + object to create a torch.utils.data.Dataloader which directly + returns Meshes objects. + TODO: Add support for textures. + + Args: + batch: List of dictionaries containing information about objects + in the dataset. 
+ + Returns: + collated_dict: Dictionary of collated lists. If batch contains both + verts and faces, a collated mesh batch is also returned. + """ + if batch is None or len(batch) == 0: + return None + collated_dict = {} + for k in batch[0].keys(): + collated_dict[k] = [d[k] for d in batch] + + collated_dict["mesh"] = None + if {"verts", "faces"}.issubset(collated_dict.keys()): + textures = None + if "textures" in collated_dict: + textures = TexturesAtlas(atlas=collated_dict["textures"]) + + collated_dict["mesh"] = Meshes( + verts=collated_dict["verts"], + faces=collated_dict["faces"], + textures=textures, + ) + + return collated_dict diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6ac1a72bde66f104691245d2de4e83c6863718d5 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/eval_demo.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/eval_demo.py new file mode 100644 index 0000000000000000000000000000000000000000..9b0e14d7bdda2ac5f1dceb9cf36c4d50a1d66770 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/eval_demo.py @@ -0,0 +1,183 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
+# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + + +import dataclasses +import os +from enum import Enum +from typing import Any, cast, Dict, List, Optional, Tuple + +import lpips +import torch +from pytorch3d.implicitron.dataset.data_source import ImplicitronDataSource +from pytorch3d.implicitron.dataset.json_index_dataset import JsonIndexDataset +from pytorch3d.implicitron.dataset.json_index_dataset_map_provider import ( + CO3D_CATEGORIES, +) +from pytorch3d.implicitron.evaluation.evaluate_new_view_synthesis import ( + aggregate_nvs_results, + eval_batch, + pretty_print_nvs_metrics, + summarize_nvs_eval_results, +) +from pytorch3d.implicitron.models.model_dbir import ModelDBIR +from pytorch3d.implicitron.tools.utils import dataclass_to_cuda_ +from tqdm import tqdm + + +class Task(Enum): + SINGLE_SEQUENCE = "singlesequence" + MULTI_SEQUENCE = "multisequence" + + +def main() -> None: + """ + Evaluates new view synthesis metrics of a simple depth-based image rendering + (DBIR) model for multisequence/singlesequence tasks for several categories. + + The evaluation is conducted on the same data as in [1] and, hence, the results + are directly comparable to the numbers reported in [1]. + + References: + [1] J. Reizenstein, R. Shapovalov, P. Henzler, L. Sbordone, + P. Labatut, D. 
def evaluate_dbir_for_category(
    category: str,
    task: Task,
    bg_color: Tuple[float, float, float] = (0.0, 0.0, 0.0),
    single_sequence_id: Optional[int] = None,
    num_workers: int = 16,
):
    """
    Evaluates new view synthesis metrics of a simple depth-based image rendering
    (DBIR) model for a given task, category, and sequence (in case task=='singlesequence').

    Args:
        category: Object category.
        bg_color: Background color of the renders.
        task: Evaluation task. Either singlesequence or multisequence.
        single_sequence_id: The ID of the evaluation sequence for the singlesequence task.
        num_workers: The number of workers for the employed dataloaders.
            NOTE(review): currently unused in this function body — confirm whether
            it should be forwarded to the data loader provider.

    Returns:
        category_result: A dictionary of quantitative metrics.

    Raises:
        ValueError: if the test dataset/dataloader is missing or the dataset
            has no image size set.
        KeyError: if the CO3D_DATASET_ROOT environment variable is not set.
    """

    # -1 signals "no sequence restriction" to the dataset provider.
    single_sequence_id = single_sequence_id if single_sequence_id is not None else -1

    # Fixed seed so the evaluation batches are reproducible across runs.
    torch.manual_seed(42)

    dataset_map_provider_args = {
        "category": category,
        "dataset_root": os.environ["CO3D_DATASET_ROOT"],
        "assert_single_seq": task == Task.SINGLE_SEQUENCE,
        "task_str": task.value,
        "test_on_train": False,
        "test_restrict_sequence_id": single_sequence_id,
        # DBIR needs the ground-truth point clouds for re-projection.
        "dataset_JsonIndexDataset_args": {"load_point_clouds": True},
    }
    data_source = ImplicitronDataSource(
        dataset_map_provider_JsonIndexDatasetMapProvider_args=dataset_map_provider_args
    )

    datasets, dataloaders = data_source.get_datasets_and_dataloaders()

    test_dataset = datasets.test
    test_dataloader = dataloaders.test
    if test_dataset is None or test_dataloader is None:
        raise ValueError("must have a test dataset.")

    # Renders are produced at the dataset's native image width (square render).
    image_size = cast(JsonIndexDataset, test_dataset).image_width

    if image_size is None:
        raise ValueError("Image size should be set in the dataset")

    # init the simple DBIR model
    model = ModelDBIR(
        render_image_width=image_size,
        render_image_height=image_size,
        bg_color=bg_color,
        max_points=int(1e5),
    )
    model.cuda()

    # init the lpips model for eval
    lpips_model = lpips.LPIPS(net="vgg")
    lpips_model = lpips_model.cuda()

    per_batch_eval_results = []
    print("Evaluating DBIR model ...")
    for frame_data in tqdm(test_dataloader):
        frame_data = dataclass_to_cuda_(frame_data)
        preds = model(**dataclasses.asdict(frame_data))
        per_batch_eval_results.append(
            eval_batch(
                frame_data,
                preds["implicitron_render"],
                bg_color=bg_color,
                lpips_model=lpips_model,
            )
        )

    # The flat summary is discarded; only the structured per-subset results
    # are returned to the caller.
    category_result_flat, category_result = summarize_nvs_eval_results(
        per_batch_eval_results,
        is_multisequence=task != Task.SINGLE_SEQUENCE,
    )

    return category_result["results"]
+ """ + aggregate_task_result = aggregate_nvs_results(task_results[task]) + print("") + print(f"Aggregate results for task={task}:") + pretty_print_nvs_metrics(aggregate_task_result) + print("") + + +if __name__ == "__main__": + main() diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/evaluation/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/evaluation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6ac1a72bde66f104691245d2de4e83c6863718d5 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/evaluation/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/evaluation/evaluate_new_view_synthesis.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/evaluation/evaluate_new_view_synthesis.py new file mode 100644 index 0000000000000000000000000000000000000000..1d7ab689a68a7a8eca6409e05e021a9b5b4a287f --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/evaluation/evaluate_new_view_synthesis.py @@ -0,0 +1,597 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + + +import copy +import warnings +from collections import OrderedDict +from dataclasses import dataclass, field +from typing import Any, Dict, List, Optional, Sequence, Tuple, TYPE_CHECKING, Union + +import numpy as np +import torch +import torch.nn.functional as F +from pytorch3d.implicitron.dataset.frame_data import FrameData +from pytorch3d.implicitron.dataset.utils import is_train_frame +from pytorch3d.implicitron.models.base_model import ImplicitronRender +from pytorch3d.implicitron.tools import vis_utils +from pytorch3d.implicitron.tools.image_utils import mask_background +from pytorch3d.implicitron.tools.metric_utils import calc_psnr, eval_depth, iou, rgb_l1 +from pytorch3d.implicitron.tools.point_cloud_utils import get_rgbd_point_cloud +from pytorch3d.implicitron.tools.vis_utils import make_depth_image +from pytorch3d.renderer.cameras import PerspectiveCameras +from pytorch3d.vis.plotly_vis import plot_scene +from tabulate import tabulate + +if TYPE_CHECKING: + from visdom import Visdom + + +EVAL_N_SRC_VIEWS = [1, 3, 5, 7, 9] + + +@dataclass +class _Visualizer: + image_render: torch.Tensor + image_rgb_masked: torch.Tensor + depth_render: torch.Tensor + depth_map: Optional[torch.Tensor] + depth_mask: Optional[torch.Tensor] + + visdom_env: str = "eval_debug" + + _viz: Optional["Visdom"] = field(init=False) + + def __post_init__(self): + self._viz = vis_utils.get_visdom_connection() + + def show_rgb( + self, loss_value: float, metric_name: str, loss_mask_now: torch.Tensor + ): + if self._viz is None: + return + self._viz.images( + torch.cat( + ( + self.image_render, + self.image_rgb_masked, + loss_mask_now.repeat(1, 3, 1, 1), + ), + dim=3, + ), + env=self.visdom_env, + win=metric_name, + opts={"title": f"{metric_name}_{loss_value:1.2f}"}, + ) + + def show_depth( + self, depth_loss: float, name_postfix: str, loss_mask_now: torch.Tensor + ): + if self._viz is None: + return + viz = self._viz + viz.images( + torch.cat( + 
def eval_batch(
    frame_data: FrameData,
    implicitron_render: ImplicitronRender,
    bg_color: Union[torch.Tensor, Sequence, str, float] = "black",
    mask_thr: float = 0.5,
    lpips_model=None,
    visualize: bool = False,
    visualize_visdom_env: str = "eval_debug",
    break_after_visualising: bool = True,
) -> Dict[str, Any]:
    """
    Produce performance metrics for a single batch of new-view synthesis
    predictions.

    Given a set of known views (for which frame_data.frame_type.endswith('known')
    is True), a new-view synthesis method (NVS) is tasked to generate new views
    of the scene from the viewpoint of the target views (for which
    frame_data.frame_type.endswith('known') is False). The resulting
    synthesized new views, stored in `implicitron_render`, are compared to the
    target ground truth in `frame_data` in terms of geometry and appearance
    resulting in a dictionary of metrics returned by the `eval_batch` function.

    Args:
        frame_data: A FrameData object containing the input to the new view
            synthesis method.
        implicitron_render: The data describing the synthesized new views.
        bg_color: The background color of the generated new views and the
            ground truth.
        mask_thr: Foreground probabilities >= mask_thr count as foreground.
        lpips_model: A pre-trained model for evaluating the LPIPS metric.
        visualize: If True, visualizes the results to Visdom.
        visualize_visdom_env: Visdom environment used when `visualize` is True.
        break_after_visualising: If True (together with `visualize`), drops
            into the debugger after the depth visualisation.

    Returns:
        results: A dictionary holding evaluation metrics.

    Throws:
        ValueError if frame_data does not have frame_type, camera, or image_rgb
        ValueError if the batch has a mix of training and test samples
        ValueError if the batch frames are not [unseen, known, known, ...]
        ValueError if one of the required fields in implicitron_render is missing
    """
    frame_type = frame_data.frame_type
    if frame_type is None:
        raise ValueError("Frame type has not been set.")

    # we check that all those fields are not None but Pyre can't infer that properly
    # TODO: assign to local variables and simplify the code.
    if frame_data.image_rgb is None:
        raise ValueError("Image is not in the evaluation batch.")

    if frame_data.camera is None:
        raise ValueError("Camera is not in the evaluation batch.")

    # eval all results in the resolution of the frame_data image
    image_resol = tuple(frame_data.image_rgb.shape[2:])

    # Post-process the render:
    # 1) check implicitron_render for Nones,
    # 2) obtain copies to make sure we dont edit the original data,
    # 3) take only the 1st (target) image
    # 4) resize to match ground-truth resolution
    cloned_render: Dict[str, torch.Tensor] = {}
    for k in ["mask_render", "image_render", "depth_render"]:
        # NOTE(review): this local name shadows `dataclasses.field` imported at
        # module scope; harmless here but worth renaming.
        field = getattr(implicitron_render, k)
        if field is None:
            raise ValueError(f"A required predicted field {k} is missing")

        # Bilinear for images, nearest for masks/depth to avoid blending labels.
        imode = "bilinear" if k == "image_render" else "nearest"
        cloned_render[k] = (
            F.interpolate(field[:1], size=image_resol, mode=imode).detach().clone()
        )

    # Deep-copy so the truncations/multiplications below never touch the caller's data.
    frame_data = copy.deepcopy(frame_data)

    # mask the ground truth depth in case frame_data contains the depth mask
    if frame_data.depth_map is not None and frame_data.depth_mask is not None:
        frame_data.depth_map *= frame_data.depth_mask

    if not isinstance(frame_type, list):  # not batch FrameData
        frame_type = [frame_type]

    is_train = is_train_frame(frame_type)
    # Frame 0 is the evaluation target; frames 1: are the conditioning (known)
    # frames, which must all come from the same train/test split.
    if len(is_train) > 1 and (is_train[1] != is_train[1:]).any():
        raise ValueError(
            "All (conditioning) frames in the eval batch have to be either train/test."
        )

    # Keep only the target (first) frame of each ground-truth field.
    for k in [
        "depth_map",
        "image_rgb",
        "fg_probability",
        "mask_crop",
    ]:
        if not hasattr(frame_data, k) or getattr(frame_data, k) is None:
            continue
        setattr(frame_data, k, getattr(frame_data, k)[:1])

    if frame_data.depth_map is None or frame_data.depth_map.sum() <= 0:
        warnings.warn("Empty or missing depth map in evaluation!")

    if frame_data.mask_crop is None:
        warnings.warn("mask_crop is None, assuming the whole image is valid.")

    if frame_data.fg_probability is None:
        warnings.warn("fg_probability is None, assuming the whole image is fg.")

    # threshold the masks to make ground truth binary masks
    mask_fg = (
        frame_data.fg_probability >= mask_thr
        if frame_data.fg_probability is not None
        # pyre-ignore [16]
        else torch.ones_like(frame_data.image_rgb[:, :1, ...]).bool()
    )

    mask_crop = (
        frame_data.mask_crop
        if frame_data.mask_crop is not None
        else torch.ones_like(mask_fg)
    )

    # unmasked g.t. image
    image_rgb = frame_data.image_rgb

    # fg-masked g.t. image
    image_rgb_masked = mask_background(
        # pyre-fixme[6]: Expected `Tensor` for 1st param but got
        #  `Optional[torch.Tensor]`.
        frame_data.image_rgb,
        mask_fg,
        bg_color=bg_color,
    )

    # clamp predicted images
    image_render = cloned_render["image_render"].clamp(0.0, 1.0)

    if visualize:
        visualizer = _Visualizer(
            image_render=image_render,
            image_rgb_masked=image_rgb_masked,
            depth_render=cloned_render["depth_render"],
            depth_map=frame_data.depth_map,
            depth_mask=(
                frame_data.depth_mask[:1] if frame_data.depth_mask is not None else None
            ),
            visdom_env=visualize_visdom_env,
        )

    results: Dict[str, Any] = {}

    results["iou"] = iou(
        cloned_render["mask_render"],
        mask_fg,
        mask=mask_crop,
    )

    # RGB metrics against the fg-masked ground truth, once over the full crop
    # ("_masked") and once over the foreground only ("_fg").
    for loss_fg_mask, name_postfix in zip((mask_crop, mask_fg), ("_masked", "_fg")):
        loss_mask_now = mask_crop * loss_fg_mask

        for rgb_metric_name, rgb_metric_fun in zip(
            ("psnr", "rgb_l1"), (calc_psnr, rgb_l1)
        ):
            metric_name = rgb_metric_name + name_postfix
            results[metric_name] = rgb_metric_fun(
                image_render,
                image_rgb_masked,
                mask=loss_mask_now,
            )

            if visualize:
                visualizer.show_rgb(
                    results[metric_name].item(), metric_name, loss_mask_now
                )

        if name_postfix == "_fg" and frame_data.depth_map is not None:
            # only record depth metrics for the foreground
            _, abs_ = eval_depth(
                cloned_render["depth_render"],
                # pyre-fixme[6]: For 2nd param expected `Tensor` but got
                #  `Optional[Tensor]`.
                frame_data.depth_map,
                get_best_scale=True,
                mask=loss_mask_now,
                crop=5,
            )
            results["depth_abs" + name_postfix] = abs_.mean()

            if visualize:
                visualizer.show_depth(abs_.mean().item(), name_postfix, loss_mask_now)
                if break_after_visualising:
                    # Intentional debugging stop in visualisation mode only.
                    breakpoint()  # noqa: B601

    # add the rgb metrics between the render and the unmasked image
    for rgb_metric_name, rgb_metric_fun in zip(
        ("psnr_full_image", "rgb_l1_full_image"), (calc_psnr, rgb_l1)
    ):
        results[rgb_metric_name] = rgb_metric_fun(
            image_render,
            # pyre-fixme[6]: For 2nd argument expected `Tensor` but got
            #  `Optional[Tensor]`.
            image_rgb,
            mask=mask_crop,
        )

    if lpips_model is not None:
        # LPIPS expects inputs in [-1, 1].
        for gt_image_type in ("_full_image", "_masked"):
            im1, im2 = [
                2.0 * im.clamp(0.0, 1.0) - 1.0  # pyre-ignore[16]
                for im in (
                    image_rgb_masked if gt_image_type == "_masked" else image_rgb,
                    cloned_render["image_render"],
                )
            ]
            results["lpips" + gt_image_type] = lpips_model.forward(im1, im2).item()

    # convert all metrics to floats
    results = {k: float(v) for k, v in results.items()}

    results["meta"] = {
        # store the size of the batch (corresponds to n_src_views+1)
        "batch_size": len(frame_type),
        # store the type of the target frame
        # pyre-fixme[16]: `None` has no attribute `__getitem__`.
        "frame_type": str(frame_data.frame_type[0]),
    }

    return results
+ image_rgb, + mask=mask_crop, + ) + + if lpips_model is not None: + for gt_image_type in ("_full_image", "_masked"): + im1, im2 = [ + 2.0 * im.clamp(0.0, 1.0) - 1.0 # pyre-ignore[16] + for im in ( + image_rgb_masked if gt_image_type == "_masked" else image_rgb, + cloned_render["image_render"], + ) + ] + results["lpips" + gt_image_type] = lpips_model.forward(im1, im2).item() + + # convert all metrics to floats + results = {k: float(v) for k, v in results.items()} + + results["meta"] = { + # store the size of the batch (corresponds to n_src_views+1) + "batch_size": len(frame_type), + # store the type of the target frame + # pyre-fixme[16]: `None` has no attribute `__getitem__`. + "frame_type": str(frame_data.frame_type[0]), + } + + return results + + +def average_per_batch_results( + results_per_batch: List[Dict[str, Any]], + idx: Optional[torch.Tensor] = None, +) -> dict: + """ + Average a list of per-batch metrics `results_per_batch`. + Optionally, if `idx` is given, only a subset of the per-batch + metrics, indexed by `idx`, is averaged. + """ + result_keys = list(results_per_batch[0].keys()) + result_keys.remove("meta") + if idx is not None: + results_per_batch = [results_per_batch[i] for i in idx] + if len(results_per_batch) == 0: + return {k: float("NaN") for k in result_keys} + return { + k: float(np.array([r[k] for r in results_per_batch]).mean()) + for k in result_keys + } + + +def _reduce_camera_iou_overlap(ious: torch.Tensor, topk: int = 2) -> torch.Tensor: + """ + Calculate the final camera difficulty by computing the average of the + ious of the two most similar cameras. + + Returns: + single-element Tensor + """ + return ious.topk(k=min(topk, len(ious) - 1)).values.mean() + + +def _get_camera_difficulty_bin_edges(camera_difficulty_bin_breaks: Tuple[float, float]): + """ + Get the edges of camera difficulty bins. 
+ """ + _eps = 1e-5 + lower, upper = camera_difficulty_bin_breaks + diff_bin_edges = torch.tensor([0.0 - _eps, lower, upper, 1.0 + _eps]).float() + diff_bin_names = ["hard", "medium", "easy"] + return diff_bin_edges, diff_bin_names + + +def summarize_nvs_eval_results( + per_batch_eval_results: List[Dict[str, Any]], + is_multisequence: bool, +) -> Tuple[Dict[str, Any], Dict[str, Any]]: + """ + Compile the per-batch evaluation results `per_batch_eval_results` into + a set of aggregate metrics. The produced metrics depend on is_multisequence. + + Args: + per_batch_eval_results: Metrics of each per-batch evaluation. + is_multisequence: Whether to evaluate as a multisequence task + camera_difficulty_bin_breaks: edge hard-medium and medium-easy + + + Returns: + nvs_results_flat: A flattened dict of all aggregate metrics. + aux_out: A dictionary holding a set of auxiliary results. + """ + n_batches = len(per_batch_eval_results) + eval_sets: List[Optional[str]] = [] + eval_sets = [None] + if is_multisequence: + eval_sets = ["train", "test"] + batch_sizes = torch.tensor( + [r["meta"]["batch_size"] for r in per_batch_eval_results] + ).long() + + is_train = is_train_frame([r["meta"]["frame_type"] for r in per_batch_eval_results]) + + # init the result database dict + results = [] + + # add per set averages + for SET in eval_sets: + if SET is None: + ok_set = torch.ones(n_batches, dtype=torch.bool) + set_name = "test" + else: + ok_set = is_train == int(SET == "train") + set_name = SET + + # average over all results + bin_results = average_per_batch_results( + per_batch_eval_results, idx=torch.where(ok_set)[0] + ) + results.append( + { + "subset": set_name, + "subsubset": "diff=all", + "metrics": bin_results, + } + ) + + if is_multisequence: + # split based on n_src_views + n_src_views = batch_sizes - 1 + for n_src in EVAL_N_SRC_VIEWS: + ok_src = ok_set & (n_src_views == n_src) + n_src_results = average_per_batch_results( + per_batch_eval_results, + idx=torch.where(ok_src)[0], 
+ ) + results.append( + { + "subset": set_name, + "subsubset": f"n_src={int(n_src)}", + "metrics": n_src_results, + } + ) + + aux_out = {"results": results} + return flatten_nvs_results(results), aux_out + + +def _get_flat_nvs_metric_key(result, metric_name) -> str: + metric_key_postfix = f"|subset={result['subset']}|{result['subsubset']}" + metric_key = f"{metric_name}{metric_key_postfix}" + return metric_key + + +def flatten_nvs_results(results) -> Dict[str, Any]: + """ + Takes input `results` list of dicts of the form:: + + [ + { + 'subset':'train/test/...', + 'subsubset': 'src=1/src=2/...', + 'metrics': nvs_eval_metrics} + }, + ... + ] + + And converts to a flat dict as follows:: + + { + 'subset=train/test/...|subsubset=src=1/src=2/...': nvs_eval_metrics, + ... + } + """ + results_flat = {} + for result in results: + for metric_name, metric_val in result["metrics"].items(): + metric_key = _get_flat_nvs_metric_key(result, metric_name) + assert metric_key not in results_flat + results_flat[metric_key] = metric_val + return results_flat + + +def pretty_print_nvs_metrics(results) -> None: + subsets, subsubsets = [ + _ordered_set([r[k] for r in results]) for k in ("subset", "subsubset") + ] + metrics = _ordered_set([metric for r in results for metric in r["metrics"]]) + + for subset in subsets: + tab = {} + for metric in metrics: + tab[metric] = [] + header = ["metric"] + for subsubset in subsubsets: + metric_vals = [ + r["metrics"][metric] + for r in results + if r["subsubset"] == subsubset and r["subset"] == subset + ] + if len(metric_vals) > 0: + tab[metric].extend(metric_vals) + header.extend(subsubsets) + + if any(len(v) > 0 for v in tab.values()): + print(f"===== NVS results; subset={subset} =====") + print( + tabulate( + [[metric, *v] for metric, v in tab.items()], + # pyre-fixme[61]: `header` is undefined, or not always defined. 
+ headers=header, + ) + ) + + +def _ordered_set(list_): + return list(OrderedDict((i, 0) for i in list_).keys()) + + +def aggregate_nvs_results(task_results): + """ + Aggregate nvs results. + For singlescene, this averages over all categories and scenes, + for multiscene, the average is over all per-category results. + """ + task_results_cat = [r_ for r in task_results for r_ in r] + subsets, subsubsets = [ + _ordered_set([r[k] for r in task_results_cat]) for k in ("subset", "subsubset") + ] + metrics = _ordered_set( + [metric for r in task_results_cat for metric in r["metrics"]] + ) + average_results = [] + for subset in subsets: + for subsubset in subsubsets: + metrics_lists = [ + r["metrics"] + for r in task_results_cat + if r["subsubset"] == subsubset and r["subset"] == subset + ] + avg_metrics = {} + for metric in metrics: + avg_metrics[metric] = float( + np.nanmean( + np.array([metric_list[metric] for metric_list in metrics_lists]) + ) + ) + average_results.append( + { + "subset": subset, + "subsubset": subsubset, + "metrics": avg_metrics, + } + ) + return average_results diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/evaluation/evaluator.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/evaluation/evaluator.py new file mode 100644 index 0000000000000000000000000000000000000000..84306997cc414d8316cd62125570ff182ae3cbb0 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/evaluation/evaluator.py @@ -0,0 +1,160 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + +import copy +import json +import logging +import os +import warnings +from typing import Any, Dict, List, Optional, Tuple + +import torch + +import tqdm +from pytorch3d.implicitron.evaluation import evaluate_new_view_synthesis as evaluate +from pytorch3d.implicitron.models.base_model import EvaluationMode, ImplicitronModelBase +from pytorch3d.implicitron.tools.config import ( + registry, + ReplaceableBase, + run_auto_creation, +) +from torch.utils.data import DataLoader + +logger = logging.getLogger(__name__) + + +class EvaluatorBase(ReplaceableBase): + """ + Evaluate a trained model on given data. Returns a dict of loss/objective + names and their values. + """ + + is_multisequence: bool = False + + def run( + self, model: ImplicitronModelBase, dataloader: DataLoader, **kwargs + ) -> Dict[str, Any]: + """ + Evaluate the results of Implicitron training. + """ + raise NotImplementedError() + + +@registry.register +class ImplicitronEvaluator(EvaluatorBase): + """ + Evaluate the results of Implicitron training. + """ + + # UNUSED; preserved for compatibility purposes + camera_difficulty_bin_breaks: Tuple[float, ...] = 0.97, 0.98 + + def __post_init__(self): + run_auto_creation(self) + + # pyre-fixme[14]: `run` overrides method defined in `EvaluatorBase` inconsistently. + def run( + self, + model: ImplicitronModelBase, + dataloader: DataLoader, + device: torch.device, + dump_to_json: bool = False, + exp_dir: Optional[str] = None, + epoch: Optional[int] = None, + **kwargs, + ) -> Dict[str, Any]: + """ + Evaluate the results of Implicitron training. Optionally, dump results to + exp_dir/results_test.json. + + Args: + model: A (trained) model to evaluate. + dataloader: A test dataloader. + device: A torch device. + dump_to_json: If True, will dump the results to a json file. + exp_dir: Root expeirment directory. + epoch: Evaluation epoch (to be stored in the results dict). + + Returns: + A dictionary of results. 
+ """ + try: + import lpips + + lpips_model = lpips.LPIPS(net="vgg") + lpips_model = lpips_model.to(device) + except ImportError: + warnings.warn( + "lpips library NOT FOUND. lpips losses will not be calculated" + ) + lpips_model = None + + model.eval() + + per_batch_eval_results = [] + logger.info("Evaluating model ...") + for frame_data in tqdm.tqdm(dataloader): + frame_data = frame_data.to(device) + + # mask out the unknown images so that the model does not see them + frame_data_for_eval = _get_eval_frame_data(frame_data) + + with torch.no_grad(): + preds = model( + **{ + **frame_data_for_eval, + "evaluation_mode": EvaluationMode.EVALUATION, + } + ) + implicitron_render = copy.deepcopy(preds["implicitron_render"]) + per_batch_eval_results.append( + evaluate.eval_batch( + frame_data, + implicitron_render, + bg_color="black", + lpips_model=lpips_model, + ) + ) + + _, category_result = evaluate.summarize_nvs_eval_results( + per_batch_eval_results, + self.is_multisequence, + ) + + results = category_result["results"] + evaluate.pretty_print_nvs_metrics(results) + if dump_to_json: + _dump_to_json(epoch, exp_dir, results) + + return category_result["results"] + + +def _dump_to_json( + epoch: Optional[int], exp_dir: Optional[str], results: List[Dict[str, Any]] +) -> None: + if epoch is not None: + for r in results: + r["eval_epoch"] = int(epoch) + logger.info("Evaluation results") + + if exp_dir is None: + raise ValueError("Cannot save results to json without a specified save path.") + with open(os.path.join(exp_dir, "results_test.json"), "w") as f: + json.dump(results, f) + + +def _get_eval_frame_data(frame_data: Any) -> Any: + """ + Masks the target image data to make sure we cannot use it at model evaluation + time. Assumes the first batch element is target, the rest are source. 
+ """ + frame_data_for_eval = copy.deepcopy(frame_data) + for k in ("image_rgb", "depth_map", "fg_probability", "mask_crop"): + value = getattr(frame_data_for_eval, k) + value[0].zero_() + return frame_data_for_eval diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7dc294bfb87b1948f305f0291726535266035861 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +# Allows to register the models +# see: pytorch3d.implicitron.tools.config.registry:register +from pytorch3d.implicitron.models.generic_model import GenericModel +from pytorch3d.implicitron.models.overfit_model import OverfitModel diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/base_model.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/base_model.py new file mode 100644 index 0000000000000000000000000000000000000000..ff645fd709ed20f61e0b68064b47662acb0f839c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/base_model.py @@ -0,0 +1,93 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + +from dataclasses import dataclass, field +from typing import Any, Dict, List, Optional + +import torch + +from pytorch3d.implicitron.models.renderer.base import EvaluationMode +from pytorch3d.implicitron.tools.config import ReplaceableBase +from pytorch3d.renderer.cameras import CamerasBase + + +@dataclass +class ImplicitronRender: + """ + Holds the tensors that describe a result of rendering. + """ + + depth_render: Optional[torch.Tensor] = None + image_render: Optional[torch.Tensor] = None + mask_render: Optional[torch.Tensor] = None + camera_distance: Optional[torch.Tensor] = None + + def clone(self) -> "ImplicitronRender": + def safe_clone(t: Optional[torch.Tensor]) -> Optional[torch.Tensor]: + return t.detach().clone() if t is not None else None + + return ImplicitronRender( + depth_render=safe_clone(self.depth_render), + image_render=safe_clone(self.image_render), + mask_render=safe_clone(self.mask_render), + camera_distance=safe_clone(self.camera_distance), + ) + + +class ImplicitronModelBase(ReplaceableBase, torch.nn.Module): + """ + Replaceable abstract base for all image generation / rendering models. + `forward()` method produces a render with a depth map. Derives from Module + so we can rely on basic functionality provided to torch for model + optimization. + """ + + # The keys from `preds` (output of ImplicitronModelBase.forward) to be logged in + # the training loop. 
+ log_vars: List[str] = field(default_factory=lambda: ["objective"]) + + def forward( + self, + *, # force keyword-only arguments + image_rgb: Optional[torch.Tensor], + camera: CamerasBase, + fg_probability: Optional[torch.Tensor], + mask_crop: Optional[torch.Tensor], + depth_map: Optional[torch.Tensor], + sequence_name: Optional[List[str]], + evaluation_mode: EvaluationMode = EvaluationMode.EVALUATION, + **kwargs, + ) -> Dict[str, Any]: + """ + Args: + image_rgb: A tensor of shape `(B, 3, H, W)` containing a batch of rgb images; + the first `min(B, n_train_target_views)` images are considered targets and + are used to supervise the renders; the rest corresponding to the source + viewpoints from which features will be extracted. + camera: An instance of CamerasBase containing a batch of `B` cameras corresponding + to the viewpoints of target images, from which the rays will be sampled, + and source images, which will be used for intersecting with target rays. + fg_probability: A tensor of shape `(B, 1, H, W)` containing a batch of + foreground masks. + mask_crop: A binary tensor of shape `(B, 1, H, W)` deonting valid + regions in the input images (i.e. regions that do not correspond + to, e.g., zero-padding). When the `RaySampler`'s sampling mode is set to + "mask_sample", rays will be sampled in the non zero regions. + depth_map: A tensor of shape `(B, 1, H, W)` containing a batch of depth maps. + sequence_name: A list of `B` strings corresponding to the sequence names + from which images `image_rgb` were extracted. They are used to match + target frames with relevant source frames. + evaluation_mode: one of EvaluationMode.TRAINING or + EvaluationMode.EVALUATION which determines the settings used for + rendering. + + Returns: + preds: A dictionary containing all outputs of the forward pass. All models should + output an instance of `ImplicitronRender` in `preds["implicitron_render"]`. 
+ """ + raise NotImplementedError() diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/feature_extractor/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/feature_extractor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6c47703b66349df4c048e5040b931ff9b67a925b --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/feature_extractor/__init__.py @@ -0,0 +1,9 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from .feature_extractor import FeatureExtractorBase diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/feature_extractor/feature_extractor.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/feature_extractor/feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..fd71b1e20d718408be6d553d3d1d38c1347a990a --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/feature_extractor/feature_extractor.py @@ -0,0 +1,43 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from typing import Any, Dict, Optional + +import torch +from pytorch3d.implicitron.tools.config import ReplaceableBase + + +class FeatureExtractorBase(ReplaceableBase, torch.nn.Module): + """ + Base class for an extractor of a set of features from images. 
+ """ + + def get_feat_dims(self) -> int: + """ + Returns: + total number of feature dimensions of the output. + (i.e. sum_i(dim_i)) + """ + raise NotImplementedError + + def forward( + self, + imgs: Optional[torch.Tensor], + masks: Optional[torch.Tensor] = None, + **kwargs, + ) -> Dict[Any, torch.Tensor]: + """ + Args: + imgs: A batch of input images of shape `(B, 3, H, W)`. + masks: A batch of input masks of shape `(B, 3, H, W)`. + + Returns: + out_feats: A dict `{f_i: t_i}` keyed by predicted feature names `f_i` + and their corresponding tensors `t_i` of shape `(B, dim_i, H_i, W_i)`. + """ + raise NotImplementedError diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/feature_extractor/resnet_feature_extractor.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/feature_extractor/resnet_feature_extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..d90b82347565afcefd546332dd748effe76520dc --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/feature_extractor/resnet_feature_extractor.py @@ -0,0 +1,225 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +import logging +import math +from typing import Any, Dict, Optional, Tuple + +import torch +import torch.nn.functional as Fu +import torchvision +from pytorch3d.implicitron.tools.config import registry + +from . 
import FeatureExtractorBase + + +logger = logging.getLogger(__name__) + +MASK_FEATURE_NAME = "mask" +IMAGE_FEATURE_NAME = "image" + +_FEAT_DIMS = { + "resnet18": (64, 128, 256, 512), + "resnet34": (64, 128, 256, 512), + "resnet50": (256, 512, 1024, 2048), + "resnet101": (256, 512, 1024, 2048), + "resnet152": (256, 512, 1024, 2048), +} + +_RESNET_MEAN = [0.485, 0.456, 0.406] +_RESNET_STD = [0.229, 0.224, 0.225] + + +@registry.register +class ResNetFeatureExtractor(FeatureExtractorBase): + """ + Implements an image feature extractor. Depending on the settings allows + to extract: + - deep features: A CNN ResNet backbone from torchvision (with/without + pretrained weights) which extracts deep features. + - masks: Segmentation masks. + - images: Raw input RGB images. + + Settings: + name: name of the resnet backbone (from torchvision) + pretrained: If true, will load the pretrained weights + stages: List of stages from which to extract features. + Features from each stage are returned as key value + pairs in the forward function + normalize_image: If set will normalize the RGB values of + the image based on the Resnet mean/std + image_rescale: If not 1.0, this rescale factor will be + used to resize the image + first_max_pool: If set, a max pool layer is added after the first + convolutional layer + proj_dim: The number of output channels for the convolutional layers + l2_norm: If set, l2 normalization is applied to the extracted features + add_masks: If set, the masks will be saved in the output dictionary + add_images: If set, the images will be saved in the output dictionary + global_average_pool: If set, global average pooling step is performed + feature_rescale: If not 1.0, this rescale factor will be used to + rescale the output features + """ + + name: str = "resnet34" + pretrained: bool = True + stages: Tuple[int, ...] 
= (1, 2, 3, 4) + normalize_image: bool = True + image_rescale: float = 128 / 800.0 + first_max_pool: bool = True + proj_dim: int = 32 + l2_norm: bool = True + add_masks: bool = True + add_images: bool = True + global_average_pool: bool = False # this can simulate global/non-spacial features + feature_rescale: float = 1.0 + + def __post_init__(self): + if self.normalize_image: + # register buffers needed to normalize the image + for k, v in (("_resnet_mean", _RESNET_MEAN), ("_resnet_std", _RESNET_STD)): + self.register_buffer( + k, + torch.FloatTensor(v).view(1, 3, 1, 1), + persistent=False, + ) + + self._feat_dim = {} + + if len(self.stages) == 0: + # do not extract any resnet features + pass + else: + net = getattr(torchvision.models, self.name)(pretrained=self.pretrained) + if self.first_max_pool: + self.stem = torch.nn.Sequential( + net.conv1, net.bn1, net.relu, net.maxpool + ) + else: + self.stem = torch.nn.Sequential(net.conv1, net.bn1, net.relu) + self.max_stage = max(self.stages) + self.layers = torch.nn.ModuleList() + self.proj_layers = torch.nn.ModuleList() + for stage in range(self.max_stage): + stage_name = f"layer{stage + 1}" + feature_name = self._get_resnet_stage_feature_name(stage) + if (stage + 1) in self.stages: + if ( + self.proj_dim > 0 + and _FEAT_DIMS[self.name][stage] > self.proj_dim + ): + proj = torch.nn.Conv2d( + _FEAT_DIMS[self.name][stage], + self.proj_dim, + 1, + 1, + bias=True, + ) + self._feat_dim[feature_name] = self.proj_dim + else: + proj = torch.nn.Identity() + self._feat_dim[feature_name] = _FEAT_DIMS[self.name][stage] + else: + proj = torch.nn.Identity() + self.proj_layers.append(proj) + self.layers.append(getattr(net, stage_name)) + + if self.add_masks: + self._feat_dim[MASK_FEATURE_NAME] = 1 + + if self.add_images: + self._feat_dim[IMAGE_FEATURE_NAME] = 3 + + logger.info(f"Feat extractor total dim = {self.get_feat_dims()}") + self.stages = set(self.stages) # convert to set for faster "in" + + def 
_get_resnet_stage_feature_name(self, stage) -> str: + return f"res_layer_{stage + 1}" + + def _resnet_normalize_image(self, img: torch.Tensor) -> torch.Tensor: + # pyre-fixme[58]: `-` is not supported for operand types `Tensor` and + # `Union[Tensor, Module]`. + # pyre-fixme[58]: `/` is not supported for operand types `Tensor` and + # `Union[Tensor, Module]`. + return (img - self._resnet_mean) / self._resnet_std + + def get_feat_dims(self) -> int: + # pyre-fixme[29]: `Union[(self: TensorBase) -> Tensor, Tensor, Module]` is + # not a function. + return sum(self._feat_dim.values()) + + def forward( + self, + imgs: Optional[torch.Tensor], + masks: Optional[torch.Tensor] = None, + **kwargs, + ) -> Dict[Any, torch.Tensor]: + """ + Args: + imgs: A batch of input images of shape `(B, 3, H, W)`. + masks: A batch of input masks of shape `(B, 3, H, W)`. + + Returns: + out_feats: A dict `{f_i: t_i}` keyed by predicted feature names `f_i` + and their corresponding tensors `t_i` of shape `(B, dim_i, H_i, W_i)`. + """ + + out_feats = {} + + imgs_input = imgs + if self.image_rescale != 1.0 and imgs_input is not None: + imgs_resized = Fu.interpolate( + imgs_input, + scale_factor=self.image_rescale, + mode="bilinear", + ) + else: + imgs_resized = imgs_input + + if len(self.stages) > 0: + assert imgs_resized is not None + + if self.normalize_image: + imgs_normed = self._resnet_normalize_image(imgs_resized) + else: + imgs_normed = imgs_resized + # is not a function. + # pyre-fixme[29]: `Union[Tensor, Module]` is not a function. + feats = self.stem(imgs_normed) + # pyre-fixme[6]: For 1st argument expected `Iterable[_T1]` but got + # `Union[Tensor, Module]`. + # pyre-fixme[6]: For 2nd argument expected `Iterable[_T2]` but got + # `Union[Tensor, Module]`. 
+ for stage, (layer, proj) in enumerate(zip(self.layers, self.proj_layers)): + feats = layer(feats) + # just a sanity check below + assert feats.shape[1] == _FEAT_DIMS[self.name][stage] + if (stage + 1) in self.stages: + f = proj(feats) + if self.global_average_pool: + f = f.mean(dims=(2, 3)) + if self.l2_norm: + normfac = 1.0 / math.sqrt(len(self.stages)) + f = Fu.normalize(f, dim=1) * normfac + feature_name = self._get_resnet_stage_feature_name(stage) + out_feats[feature_name] = f + + if self.add_masks: + assert masks is not None + out_feats[MASK_FEATURE_NAME] = masks + + if self.add_images: + assert imgs_resized is not None + out_feats[IMAGE_FEATURE_NAME] = imgs_resized + + if self.feature_rescale != 1.0: + out_feats = {k: self.feature_rescale * f for k, f in out_feats.items()} + + # pyre-fixme[7]: Incompatible return type, expected `Dict[typing.Any, Tensor]` + # but got `Dict[typing.Any, float]` + return out_feats diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/generic_model.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/generic_model.py new file mode 100644 index 0000000000000000000000000000000000000000..3edd06c1a0d0577cf20aaaedb1a72c0ccbbe46cc --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/generic_model.py @@ -0,0 +1,784 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + + +# Note: The #noqa comments below are for unused imports of pluggable implementations +# which are part of implicitron. They ensure that the registry is prepopulated. 
+ +import logging +from dataclasses import field +from typing import Any, Dict, List, Optional, Tuple, TYPE_CHECKING, Union + +import torch +from omegaconf import DictConfig + +from pytorch3d.implicitron.models.base_model import ( + ImplicitronModelBase, + ImplicitronRender, +) +from pytorch3d.implicitron.models.feature_extractor import FeatureExtractorBase +from pytorch3d.implicitron.models.global_encoder.global_encoder import GlobalEncoderBase +from pytorch3d.implicitron.models.implicit_function.base import ImplicitFunctionBase +from pytorch3d.implicitron.models.metrics import ( + RegularizationMetricsBase, + ViewMetricsBase, +) + +from pytorch3d.implicitron.models.renderer.base import ( + BaseRenderer, + EvaluationMode, + ImplicitFunctionWrapper, + ImplicitronRayBundle, + RendererOutput, + RenderSamplingMode, +) +from pytorch3d.implicitron.models.renderer.ray_sampler import RaySamplerBase + +from pytorch3d.implicitron.models.utils import ( + apply_chunked, + chunk_generator, + log_loss_weights, + preprocess_input, + weighted_sum_losses, +) +from pytorch3d.implicitron.models.view_pooler.view_pooler import ViewPooler +from pytorch3d.implicitron.tools import vis_utils +from pytorch3d.implicitron.tools.config import ( + expand_args_fields, + registry, + run_auto_creation, +) + +from pytorch3d.implicitron.tools.rasterize_mc import rasterize_sparse_ray_bundle +from pytorch3d.renderer import utils as rend_utils +from pytorch3d.renderer.cameras import CamerasBase + + +if TYPE_CHECKING: + from visdom import Visdom +logger = logging.getLogger(__name__) + + +@registry.register +class GenericModel(ImplicitronModelBase): + """ + GenericModel is a wrapper for the neural implicit + rendering and reconstruction pipeline which consists + of the following sequence of 7 steps (steps 2–4 are normally + skipped in overfitting scenario, since conditioning on source views + does not add much information; otherwise they should be present altogether): + + + (1) Ray Sampling + 
------------------ + Rays are sampled from an image grid based on the target view(s). + │_____________ + │ │ + │ ▼ + │ (2) Feature Extraction (optional) + │ ----------------------- + │ A feature extractor (e.g. a convolutional + │ neural net) is used to extract image features + │ from the source view(s). + │ │ + │ ▼ + │ (3) View Sampling (optional) + │ ------------------ + │ Image features are sampled at the 2D projections + │ of a set of 3D points along each of the sampled + │ target rays from (1). + │ │ + │ ▼ + │ (4) Feature Aggregation (optional) + │ ------------------ + │ Aggregate features and masks sampled from + │ image view(s) in (3). + │ │ + │____________▼ + │ + ▼ + (5) Implicit Function Evaluation + ------------------ + Evaluate the implicit function(s) at the sampled ray points + (optionally pass in the aggregated image features from (4)). + (also optionally pass in a global encoding from global_encoder). + │ + ▼ + (6) Rendering + ------------------ + Render the image into the target cameras by raymarching along + the sampled rays and aggregating the colors and densities + output by the implicit function in (5). + │ + ▼ + (7) Loss Computation + ------------------ + Compute losses based on the predicted target image(s). + + + The `forward` function of GenericModel executes + this sequence of steps. Currently, steps 1, 3, 4, 5, 6 + can be customized by intializing a subclass of the appropriate + baseclass and adding the newly created module to the registry. + Please see https://github.com/facebookresearch/pytorch3d/blob/main/projects/implicitron_trainer/README.md#custom-plugins + for more details on how to create and register a custom component. + + In the config .yaml files for experiments, the parameters below are + contained in the + `model_factory_ImplicitronModelFactory_args.model_GenericModel_args` + node. 
As GenericModel derives from ReplaceableBase, the input arguments are + parsed by the run_auto_creation function to initialize the + necessary member modules. Please see implicitron_trainer/README.md + for more details on this process. + + Args: + mask_images: Whether or not to mask the RGB image background given the + foreground mask (the `fg_probability` argument of `GenericModel.forward`) + mask_depths: Whether or not to mask the depth image background given the + foreground mask (the `fg_probability` argument of `GenericModel.forward`) + render_image_width: Width of the output image to render + render_image_height: Height of the output image to render + mask_threshold: If greater than 0.0, the foreground mask is + thresholded by this value before being applied to the RGB/Depth images + output_rasterized_mc: If True, visualize the Monte-Carlo pixel renders by + splatting onto an image grid. Default: False. + bg_color: RGB values for setting the background color of input image + if mask_images=True. Defaults to (0.0, 0.0, 0.0). Each renderer has its own + way to determine the background color of its output, unrelated to this. + num_passes: The specified implicit_function is initialized num_passes + times and run sequentially. + chunk_size_grid: The total number of points which can be rendered + per chunk. This is used to compute the number of rays used + per chunk when the chunked version of the renderer is used (in order + to fit rendering on all rays in memory) + render_features_dimensions: The number of output features to render. + Defaults to 3, corresponding to RGB images. + n_train_target_views: The number of cameras to render into at training + time; first `n_train_target_views` in the batch are considered targets, + the rest are sources. + sampling_mode_training: The sampling method to use during training. Must be + a value from the RenderSamplingMode Enum. + sampling_mode_evaluation: Same as above but for evaluation. 
+ global_encoder_class_type: The name of the class to use for global_encoder, + which must be available in the registry. Or `None` to disable global encoder. + global_encoder: An instance of `GlobalEncoder`. This is used to generate an encoding + of the image (referred to as the global_code) that can be used to model aspects of + the scene such as multiple objects or morphing objects. It is up to the implicit + function definition how to use it, but the most typical way is to broadcast and + concatenate to the other inputs for the implicit function. + raysampler_class_type: The name of the raysampler class which is available + in the global registry. + raysampler: An instance of RaySampler which is used to emit + rays from the target view(s). + renderer_class_type: The name of the renderer class which is available in the global + registry. + renderer: A renderer class which inherits from BaseRenderer. This is used to + generate the images from the target view(s). + image_feature_extractor_class_type: If a str, constructs and enables + the `image_feature_extractor` object of this type. Or None if not needed. + image_feature_extractor: A module for extrating features from an input image. + view_pooler_enabled: If `True`, constructs and enables the `view_pooler` object. + This means features are sampled from the source image(s) + at the projected 2d locations of the sampled 3d ray points from the target + view(s), i.e. this activates step (3) above. + view_pooler: An instance of ViewPooler which is used for sampling of + image-based features at the 2D projections of a set + of 3D points and aggregating the sampled features. + implicit_function_class_type: The type of implicit function to use which + is available in the global registry. + implicit_function: An instance of ImplicitFunctionBase. The actual implicit functions + are initialised to be in self._implicit_functions. 
+ view_metrics: An instance of ViewMetricsBase used to compute loss terms which + are independent of the model's parameters. + view_metrics_class_type: The type of view metrics to use, must be available in + the global registry. + regularization_metrics: An instance of RegularizationMetricsBase used to compute + regularization terms which can depend on the model's parameters. + regularization_metrics_class_type: The type of regularization metrics to use, + must be available in the global registry. + loss_weights: A dictionary with a {loss_name: weight} mapping; see documentation + for `ViewMetrics` class for available loss functions. + log_vars: A list of variable names which should be logged. + The names should correspond to a subset of the keys of the + dict `preds` output by the `forward` function. + """ # noqa: B950 + + mask_images: bool = True + mask_depths: bool = True + render_image_width: int = 400 + render_image_height: int = 400 + mask_threshold: float = 0.5 + output_rasterized_mc: bool = False + bg_color: Tuple[float, float, float] = (0.0, 0.0, 0.0) + num_passes: int = 1 + chunk_size_grid: int = 4096 + render_features_dimensions: int = 3 + tqdm_trigger_threshold: int = 16 + + n_train_target_views: int = 1 + sampling_mode_training: str = "mask_sample" + sampling_mode_evaluation: str = "full_grid" + + # ---- global encoder settings + global_encoder_class_type: Optional[str] = None + # pyre-fixme[13]: Attribute `global_encoder` is never initialized. + global_encoder: Optional[GlobalEncoderBase] + + # ---- raysampler + raysampler_class_type: str = "AdaptiveRaySampler" + # pyre-fixme[13]: Attribute `raysampler` is never initialized. + raysampler: RaySamplerBase + + # ---- renderer configs + renderer_class_type: str = "MultiPassEmissionAbsorptionRenderer" + # pyre-fixme[13]: Attribute `renderer` is never initialized. 
+ renderer: BaseRenderer + + # ---- image feature extractor settings + # (This is only created if view_pooler is enabled) + # pyre-fixme[13]: Attribute `image_feature_extractor` is never initialized. + image_feature_extractor: Optional[FeatureExtractorBase] + image_feature_extractor_class_type: Optional[str] = None + # ---- view pooler settings + view_pooler_enabled: bool = False + # pyre-fixme[13]: Attribute `view_pooler` is never initialized. + view_pooler: Optional[ViewPooler] + + # ---- implicit function settings + implicit_function_class_type: str = "NeuralRadianceFieldImplicitFunction" + # This is just a model, never constructed. + # The actual implicit functions live in self._implicit_functions + # pyre-fixme[13]: Attribute `implicit_function` is never initialized. + implicit_function: ImplicitFunctionBase + + # ----- metrics + # pyre-fixme[13]: Attribute `view_metrics` is never initialized. + view_metrics: ViewMetricsBase + view_metrics_class_type: str = "ViewMetrics" + + # pyre-fixme[13]: Attribute `regularization_metrics` is never initialized. 
+ regularization_metrics: RegularizationMetricsBase + regularization_metrics_class_type: str = "RegularizationMetrics" + + # ---- loss weights + loss_weights: Dict[str, float] = field( + default_factory=lambda: { + "loss_rgb_mse": 1.0, + "loss_prev_stage_rgb_mse": 1.0, + "loss_mask_bce": 0.0, + "loss_prev_stage_mask_bce": 0.0, + } + ) + + # ---- variables to be logged (logger automatically ignores if not computed) + log_vars: List[str] = field( + default_factory=lambda: [ + "loss_rgb_psnr_fg", + "loss_rgb_psnr", + "loss_rgb_mse", + "loss_rgb_huber", + "loss_depth_abs", + "loss_depth_abs_fg", + "loss_mask_neg_iou", + "loss_mask_bce", + "loss_mask_beta_prior", + "loss_eikonal", + "loss_density_tv", + "loss_depth_neg_penalty", + "loss_autodecoder_norm", + # metrics that are only logged in 2+stage renderes + "loss_prev_stage_rgb_mse", + "loss_prev_stage_rgb_psnr_fg", + "loss_prev_stage_rgb_psnr", + "loss_prev_stage_mask_bce", + # basic metrics + "objective", + "epoch", + "sec/it", + ] + ) + + @classmethod + def pre_expand(cls) -> None: + # use try/finally to bypass cinder's lazy imports + try: + from pytorch3d.implicitron.models.feature_extractor.resnet_feature_extractor import ( # noqa: F401, B950 + ResNetFeatureExtractor, + ) + from pytorch3d.implicitron.models.implicit_function.idr_feature_field import ( # noqa: F401, B950 + IdrFeatureField, + ) + from pytorch3d.implicitron.models.implicit_function.neural_radiance_field import ( # noqa: F401, B950 + NeRFormerImplicitFunction, + ) + from pytorch3d.implicitron.models.implicit_function.scene_representation_networks import ( # noqa: F401, B950 + SRNHyperNetImplicitFunction, + ) + from pytorch3d.implicitron.models.implicit_function.voxel_grid_implicit_function import ( # noqa: F401, B950 + VoxelGridImplicitFunction, + ) + from pytorch3d.implicitron.models.renderer.lstm_renderer import ( # noqa: F401 + LSTMRenderer, + ) + from pytorch3d.implicitron.models.renderer.multipass_ea import ( # noqa + 
MultiPassEmissionAbsorptionRenderer, + ) + from pytorch3d.implicitron.models.renderer.sdf_renderer import ( # noqa: F401 + SignedDistanceFunctionRenderer, + ) + finally: + pass + + def __post_init__(self): + if self.view_pooler_enabled: + if self.image_feature_extractor_class_type is None: + raise ValueError( + "image_feature_extractor must be present for view pooling." + ) + run_auto_creation(self) + + self._implicit_functions = self._construct_implicit_functions() + + log_loss_weights(self.loss_weights, logger) + + def forward( + self, + *, # force keyword-only arguments + image_rgb: Optional[torch.Tensor], + camera: CamerasBase, + fg_probability: Optional[torch.Tensor] = None, + mask_crop: Optional[torch.Tensor] = None, + depth_map: Optional[torch.Tensor] = None, + sequence_name: Optional[List[str]] = None, + frame_timestamp: Optional[torch.Tensor] = None, + evaluation_mode: EvaluationMode = EvaluationMode.EVALUATION, + **kwargs, + ) -> Dict[str, Any]: + """ + Args: + image_rgb: A tensor of shape `(B, 3, H, W)` containing a batch of rgb images; + the first `min(B, n_train_target_views)` images are considered targets and + are used to supervise the renders; the rest corresponding to the source + viewpoints from which features will be extracted. + camera: An instance of CamerasBase containing a batch of `B` cameras corresponding + to the viewpoints of target images, from which the rays will be sampled, + and source images, which will be used for intersecting with target rays. + fg_probability: A tensor of shape `(B, 1, H, W)` containing a batch of + foreground masks. + mask_crop: A binary tensor of shape `(B, 1, H, W)` denoting valid + regions in the input images (i.e. regions that do not correspond + to, e.g., zero-padding). When the `RaySampler`'s sampling mode is set to + "mask_sample", rays will be sampled in the non zero regions. + depth_map: A tensor of shape `(B, 1, H, W)` containing a batch of depth maps. 
+ sequence_name: A list of `B` strings corresponding to the sequence names + from which images `image_rgb` were extracted. They are used to match + target frames with relevant source frames. + frame_timestamp: Optionally a tensor of shape `(B,)` containing a batch + of frame timestamps. + evaluation_mode: one of EvaluationMode.TRAINING or + EvaluationMode.EVALUATION which determines the settings used for + rendering. + + Returns: + preds: A dictionary containing all outputs of the forward pass including the + rendered images, depths, masks, losses and other metrics. + """ + image_rgb, fg_probability, depth_map = preprocess_input( + image_rgb, + fg_probability, + depth_map, + self.mask_images, + self.mask_depths, + self.mask_threshold, + self.bg_color, + ) + + # Obtain the batch size from the camera as this is the only required input. + batch_size = camera.R.shape[0] + + # Determine the number of target views, i.e. cameras we render into. + n_targets = ( + 1 + if evaluation_mode == EvaluationMode.EVALUATION + else ( + batch_size + if self.n_train_target_views <= 0 + else min(self.n_train_target_views, batch_size) + ) + ) + + # A helper function for selecting n_target first elements from the input + # where the latter can be None. + def safe_slice_targets( + tensor: Optional[Union[torch.Tensor, List[str]]], + ) -> Optional[Union[torch.Tensor, List[str]]]: + return None if tensor is None else tensor[:n_targets] + + # Select the target cameras. + target_cameras = camera[list(range(n_targets))] + + # Determine the used ray sampling mode. + sampling_mode = RenderSamplingMode( + self.sampling_mode_training + if evaluation_mode == EvaluationMode.TRAINING + else self.sampling_mode_evaluation + ) + + # (1) Sample rendering rays with the ray sampler. 
+ # pyre-ignore[29] + ray_bundle: ImplicitronRayBundle = self.raysampler( + target_cameras, + evaluation_mode, + mask=( + mask_crop[:n_targets] + if mask_crop is not None + and sampling_mode == RenderSamplingMode.MASK_SAMPLE + else None + ), + ) + + # custom_args hold additional arguments to the implicit function. + custom_args = {} + + if self.image_feature_extractor is not None: + # (2) Extract features for the image + img_feats = self.image_feature_extractor(image_rgb, fg_probability) + else: + img_feats = None + + if self.view_pooler_enabled: + if sequence_name is None: + raise ValueError("sequence_name must be provided for view pooling") + assert img_feats is not None + + # (3-4) Sample features and masks at the ray points. + # Aggregate features from multiple views. + def curried_viewpooler(pts): + return self.view_pooler( + pts=pts, + seq_id_pts=sequence_name[:n_targets], + camera=camera, + seq_id_camera=sequence_name, + feats=img_feats, + masks=mask_crop, + ) + + custom_args["fun_viewpool"] = curried_viewpooler + + global_code = None + if self.global_encoder is not None: + global_code = self.global_encoder( # pyre-fixme[29] + sequence_name=safe_slice_targets(sequence_name), + frame_timestamp=safe_slice_targets(frame_timestamp), + ) + custom_args["global_code"] = global_code + + # pyre-fixme[29]: `Union[(self: Tensor) -> Any, Tensor, Module]` is not a + # function. 
+ for func in self._implicit_functions: + func.bind_args(**custom_args) + + inputs_to_be_chunked = {} + if fg_probability is not None and self.renderer.requires_object_mask(): + sampled_fb_prob = rend_utils.ndc_grid_sample( + fg_probability[:n_targets], ray_bundle.xys, mode="nearest" + ) + inputs_to_be_chunked["object_mask"] = sampled_fb_prob > 0.5 + + # (5)-(6) Implicit function evaluation and Rendering + rendered = self._render( + ray_bundle=ray_bundle, + sampling_mode=sampling_mode, + evaluation_mode=evaluation_mode, + implicit_functions=self._implicit_functions, + inputs_to_be_chunked=inputs_to_be_chunked, + ) + + # Unbind the custom arguments to prevent pytorch from storing + # large buffers of intermediate results due to points in the + # bound arguments. + # pyre-fixme[29]: `Union[(self: Tensor) -> Any, Tensor, Module]` is not a + # function. + for func in self._implicit_functions: + func.unbind_args() + + # A dict to store losses as well as rendering results. + preds: Dict[str, Any] = {} + + preds.update( + self.view_metrics( + results=preds, + raymarched=rendered, + ray_bundle=ray_bundle, + image_rgb=safe_slice_targets(image_rgb), + depth_map=safe_slice_targets(depth_map), + fg_probability=safe_slice_targets(fg_probability), + mask_crop=safe_slice_targets(mask_crop), + ) + ) + + preds.update( + self.regularization_metrics( + results=preds, + model=self, + ) + ) + + if sampling_mode == RenderSamplingMode.MASK_SAMPLE: + if self.output_rasterized_mc: + # Visualize the monte-carlo pixel renders by splatting onto + # an image grid. 
+ ( + preds["images_render"], + preds["depths_render"], + preds["masks_render"], + ) = rasterize_sparse_ray_bundle( + ray_bundle, + rendered.features, + (self.render_image_height, self.render_image_width), + rendered.depths, + masks=rendered.masks, + ) + elif sampling_mode == RenderSamplingMode.FULL_GRID: + preds["images_render"] = rendered.features.permute(0, 3, 1, 2) + preds["depths_render"] = rendered.depths.permute(0, 3, 1, 2) + preds["masks_render"] = rendered.masks.permute(0, 3, 1, 2) + + preds["implicitron_render"] = ImplicitronRender( + image_render=preds["images_render"], + depth_render=preds["depths_render"], + mask_render=preds["masks_render"], + ) + else: + raise AssertionError("Unreachable state") + + # (7) Compute losses + objective = self._get_objective(preds) + if objective is not None: + preds["objective"] = objective + + return preds + + def _get_objective(self, preds: Dict[str, torch.Tensor]) -> Optional[torch.Tensor]: + """ + A helper function to compute the overall loss as the dot product + of individual loss functions with the corresponding weights. + """ + return weighted_sum_losses(preds, self.loss_weights) + + def visualize( + self, + viz: Optional["Visdom"], + visdom_env_imgs: str, + preds: Dict[str, Any], + prefix: str, + ) -> None: + """ + Helper function to visualize the predictions generated + in the forward pass. + + Args: + viz: Visdom connection object + visdom_env_imgs: name of visdom environment for the images. + preds: predictions dict like returned by forward() + prefix: prepended to the names of images + """ + if viz is None or not viz.check_connection(): + logger.info("no visdom server! 
-> skipping batch vis") + return + + idx_image = 0 + title = f"{prefix}_im{idx_image}" + + vis_utils.visualize_basics(viz, preds, visdom_env_imgs, title=title) + + def _render( + self, + *, + ray_bundle: ImplicitronRayBundle, + inputs_to_be_chunked: Dict[str, torch.Tensor], + sampling_mode: RenderSamplingMode, + **kwargs, + ) -> RendererOutput: + """ + Args: + ray_bundle: A `ImplicitronRayBundle` object containing the parametrizations of the + sampled rendering rays. + inputs_to_be_chunked: A collection of tensor of shape `(B, _, H, W)`. E.g. + SignedDistanceFunctionRenderer requires "object_mask", shape + (B, 1, H, W), the silhouette of the object in the image. When + chunking, they are passed to the renderer as shape + `(B, _, chunksize)`. + sampling_mode: The sampling method to use. Must be a value from the + RenderSamplingMode Enum. + + Returns: + An instance of RendererOutput + """ + if sampling_mode == RenderSamplingMode.FULL_GRID and self.chunk_size_grid > 0: + return apply_chunked( + self.renderer, + chunk_generator( + self.chunk_size_grid, + ray_bundle, + inputs_to_be_chunked, + self.tqdm_trigger_threshold, + **kwargs, + ), + lambda batch: torch.cat(batch, dim=1).reshape( + *ray_bundle.lengths.shape[:-1], -1 + ), + ) + else: + # pyre-fixme[29]: `BaseRenderer` is not a function. + return self.renderer( + ray_bundle=ray_bundle, + **inputs_to_be_chunked, + **kwargs, + ) + + def _get_viewpooled_feature_dim(self) -> int: + if self.view_pooler is None: + return 0 + assert self.image_feature_extractor is not None + return self.view_pooler.get_aggregated_feature_dim( + self.image_feature_extractor.get_feat_dims() + ) + + @classmethod + def raysampler_tweak_args(cls, type, args: DictConfig) -> None: + """ + We don't expose certain fields of the raysampler because we want to set + them from our own members. 
+ """ + del args["sampling_mode_training"] + del args["sampling_mode_evaluation"] + del args["image_width"] + del args["image_height"] + + def create_raysampler(self): + extra_args = { + "sampling_mode_training": self.sampling_mode_training, + "sampling_mode_evaluation": self.sampling_mode_evaluation, + "image_width": self.render_image_width, + "image_height": self.render_image_height, + } + raysampler_args = getattr( + self, "raysampler_" + self.raysampler_class_type + "_args" + ) + self.raysampler = registry.get(RaySamplerBase, self.raysampler_class_type)( + **raysampler_args, **extra_args + ) + + @classmethod + def renderer_tweak_args(cls, type, args: DictConfig) -> None: + """ + We don't expose certain fields of the renderer because we want to set + them based on other inputs. + """ + args.pop("render_features_dimensions", None) + args.pop("object_bounding_sphere", None) + + def create_renderer(self): + extra_args = {} + + if self.renderer_class_type == "SignedDistanceFunctionRenderer": + extra_args["render_features_dimensions"] = self.render_features_dimensions + if not hasattr(self.raysampler, "scene_extent"): + raise ValueError( + "SignedDistanceFunctionRenderer requires" + + " a raysampler that defines the 'scene_extent' field" + + " (this field is supported by, e.g., the adaptive raysampler - " + + " self.raysampler_class_type='AdaptiveRaySampler')." + ) + extra_args["object_bounding_sphere"] = self.raysampler.scene_extent + + renderer_args = getattr(self, "renderer_" + self.renderer_class_type + "_args") + self.renderer = registry.get(BaseRenderer, self.renderer_class_type)( + **renderer_args, **extra_args + ) + + def create_implicit_function(self) -> None: + """ + No-op called by run_auto_creation so that self.implicit_function + does not get created. __post_init__ creates the implicit function(s) + in wrappers explicitly in self._implicit_functions. 
+ """ + pass + + @classmethod + def implicit_function_tweak_args(cls, type, args: DictConfig) -> None: + """ + We don't expose certain implicit_function fields because we want to set + them based on other inputs. + """ + args.pop("feature_vector_size", None) + args.pop("encoding_dim", None) + args.pop("latent_dim", None) + args.pop("latent_dim_hypernet", None) + args.pop("color_dim", None) + + def _construct_implicit_functions(self): + """ + After run_auto_creation has been called, the arguments + for each of the possible implicit function methods are + available. `GenericModel` arguments are first validated + based on the custom requirements for each specific + implicit function method. Then the required implicit + function(s) are initialized. + """ + extra_args = {} + global_encoder_dim = ( + 0 if self.global_encoder is None else self.global_encoder.get_encoding_dim() + ) + viewpooled_feature_dim = self._get_viewpooled_feature_dim() + + if self.implicit_function_class_type in ( + "NeuralRadianceFieldImplicitFunction", + "NeRFormerImplicitFunction", + ): + extra_args["latent_dim"] = viewpooled_feature_dim + global_encoder_dim + extra_args["color_dim"] = self.render_features_dimensions + + if self.implicit_function_class_type == "IdrFeatureField": + extra_args["feature_vector_size"] = self.render_features_dimensions + extra_args["encoding_dim"] = global_encoder_dim + + if self.implicit_function_class_type == "SRNImplicitFunction": + extra_args["latent_dim"] = viewpooled_feature_dim + global_encoder_dim + + # srn_hypernet preprocessing + if self.implicit_function_class_type == "SRNHyperNetImplicitFunction": + extra_args["latent_dim"] = viewpooled_feature_dim + extra_args["latent_dim_hypernet"] = global_encoder_dim + + # check that for srn, srn_hypernet, idr we have self.num_passes=1 + implicit_function_type = registry.get( + ImplicitFunctionBase, self.implicit_function_class_type + ) + expand_args_fields(implicit_function_type) + if self.num_passes != 1 and not 
implicit_function_type.allows_multiple_passes(): + raise ValueError( + self.implicit_function_class_type + + f"requires num_passes=1 not {self.num_passes}" + ) + + if implicit_function_type.requires_pooling_without_aggregation(): + if self.view_pooler_enabled and self.view_pooler.has_aggregation(): + raise ValueError( + "The chosen implicit function requires view pooling without aggregation." + ) + config_name = f"implicit_function_{self.implicit_function_class_type}_args" + config = getattr(self, config_name, None) + if config is None: + raise ValueError(f"{config_name} not present") + implicit_functions_list = [ + ImplicitFunctionWrapper(implicit_function_type(**config, **extra_args)) + for _ in range(self.num_passes) + ] + return torch.nn.ModuleList(implicit_functions_list) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/global_encoder/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/global_encoder/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6ac1a72bde66f104691245d2de4e83c6863718d5 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/global_encoder/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/global_encoder/autodecoder.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/global_encoder/autodecoder.py new file mode 100644 index 0000000000000000000000000000000000000000..f4b48513ed9e2dcfab1cfc75f1900df2cd7d84f0 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/global_encoder/autodecoder.py @@ -0,0 +1,166 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +import warnings +from collections import defaultdict +from typing import Dict, List, Optional, Union + +import torch +from pytorch3d.implicitron.tools.config import Configurable + + +class Autodecoder(Configurable, torch.nn.Module): + """ + Autodecoder which maps a list of integer or string keys to optimizable embeddings. + + Settings: + encoding_dim: Embedding dimension for the decoder. + n_instances: The maximum number of instances stored by the autodecoder. + init_scale: Scale factor for the initial autodecoder weights. + ignore_input: If `True`, optimizes a single code for any input. 
+ """ + + encoding_dim: int = 0 + n_instances: int = 1 + init_scale: float = 1.0 + ignore_input: bool = False + + def __post_init__(self): + if self.n_instances <= 0: + raise ValueError(f"Invalid n_instances {self.n_instances}") + + self._autodecoder_codes = torch.nn.Embedding( + self.n_instances, + self.encoding_dim, + scale_grad_by_freq=True, + ) + with torch.no_grad(): + # weight has been initialised from Normal(0, 1) + self._autodecoder_codes.weight *= self.init_scale + + self._key_map = self._build_key_map() + # Make sure to register hooks for correct handling of saving/loading + # the module's _key_map. + self._register_load_state_dict_pre_hook(self._load_key_map_hook) + self._register_state_dict_hook(_save_key_map_hook) + + def _build_key_map( + self, key_map_dict: Optional[Dict[str, int]] = None + ) -> Dict[str, int]: + """ + Args: + key_map_dict: A dictionary used to initialize the key_map. + + Returns: + key_map: a dictionary of key: id pairs. + """ + # increments the counter when asked for a new value + key_map = defaultdict(iter(range(self.n_instances)).__next__) + if key_map_dict is not None: + # Assign all keys from the loaded key_map_dict to self._key_map. + # Since this is done in the original order, it should generate + # the same set of key:id pairs. We check this with an assert to be sure. + for x, x_id in key_map_dict.items(): + x_id_ = key_map[x] + assert x_id == x_id_ + return key_map + + def calculate_squared_encoding_norm(self) -> Optional[torch.Tensor]: + # pyre-fixme[16]: Item `Tensor` of `Tensor | Module` has no attribute `weight`. + return (self._autodecoder_codes.weight**2).mean() + + def get_encoding_dim(self) -> int: + return self.encoding_dim + + def forward(self, x: Union[torch.LongTensor, List[str]]) -> Optional[torch.Tensor]: + """ + Args: + x: A batch of `N` identifiers. Either a long tensor of size + `(N,)` keys in [0, n_instances), or a list of `N` string keys that + are hashed to codes (without collisions). 
+ + Returns: + codes: A tensor of shape `(N, self.encoding_dim)` containing the + key-specific autodecoder codes. + """ + if self.ignore_input: + x = ["singleton"] + + if isinstance(x[0], str): + try: + # pyre-fixme[9]: x has type `Union[List[str], LongTensor]`; used as + # `Tensor`. + x = torch.tensor( + # pyre-fixme[29]: `Union[(self: TensorBase, indices: Union[None, ... + [self._key_map[elem] for elem in x], + dtype=torch.long, + device=next(self.parameters()).device, + ) + except StopIteration: + raise ValueError("Not enough n_instances in the autodecoder") from None + + # pyre-fixme[29]: `Union[Tensor, Module]` is not a function. + return self._autodecoder_codes(x) + + def _load_key_map_hook( + self, + state_dict, + prefix, + local_metadata, + strict, + missing_keys, + unexpected_keys, + error_msgs, + ): + """ + Args: + state_dict (dict): a dict containing parameters and + persistent buffers. + prefix (str): the prefix for parameters and buffers used in this + module + local_metadata (dict): a dict containing the metadata for this module. + strict (bool): whether to strictly enforce that the keys in + :attr:`state_dict` with :attr:`prefix` match the names of + parameters and buffers in this module + missing_keys (list of str): if ``strict=True``, add missing keys to + this list + unexpected_keys (list of str): if ``strict=True``, add unexpected + keys to this list + error_msgs (list of str): error messages should be added to this + list, and will be reported together in + :meth:`~torch.nn.Module.load_state_dict` + + Returns: + Constructed key_map if it exists in the state_dict + else raises a warning only. 
+ """ + key_map_key = prefix + "_key_map" + if key_map_key in state_dict: + key_map_dict = state_dict.pop(key_map_key) + self._key_map = self._build_key_map(key_map_dict=key_map_dict) + else: + warnings.warn("No key map in Autodecoder state dict!") + + +def _save_key_map_hook( + self, + state_dict, + prefix, + local_metadata, +) -> None: + """ + Args: + state_dict (dict): a dict containing parameters and + persistent buffers. + prefix (str): the prefix for parameters and buffers used in this + module + local_metadata (dict): a dict containing the metadata for this module. + """ + key_map_key = prefix + "_key_map" + key_map_dict = dict(self._key_map.items()) + state_dict[key_map_key] = key_map_dict diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/global_encoder/global_encoder.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/global_encoder/global_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..c31dbfc1dce277752a8391aa06aff956b03d8f7b --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/global_encoder/global_encoder.py @@ -0,0 +1,129 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from typing import List, Optional, Union + +import torch +from pytorch3d.implicitron.tools.config import ( + registry, + ReplaceableBase, + run_auto_creation, +) +from pytorch3d.renderer.implicit import HarmonicEmbedding + +from .autodecoder import Autodecoder + + +class GlobalEncoderBase(ReplaceableBase): + """ + A base class for implementing encoders of global frame-specific quantities. + + The latter includes e.g. 
the harmonic encoding of a frame timestamp + (`HarmonicTimeEncoder`), or an autodecoder encoding of the frame's sequence + (`SequenceAutodecoder`). + """ + + def get_encoding_dim(self): + """ + Returns the dimensionality of the returned encoding. + """ + raise NotImplementedError() + + def calculate_squared_encoding_norm(self) -> Optional[torch.Tensor]: + """ + Calculates the squared norm of the encoding to report as the + `autodecoder_norm` loss of the model, as a zero dimensional tensor. + """ + raise NotImplementedError() + + def forward( + self, + *, + frame_timestamp: Optional[torch.Tensor] = None, + sequence_name: Optional[Union[torch.LongTensor, List[str]]] = None, + **kwargs, + ) -> torch.Tensor: + """ + Given a set of inputs to encode, generates a tensor containing the encoding. + + Returns: + encoding: The tensor containing the global encoding. + """ + raise NotImplementedError() + + +# TODO: probabilistic embeddings? +@registry.register +class SequenceAutodecoder(GlobalEncoderBase, torch.nn.Module): + """ + A global encoder implementation which provides an autodecoder encoding + of the frame's sequence identifier. + """ + + # pyre-fixme[13]: Attribute `autodecoder` is never initialized. 
+ autodecoder: Autodecoder + + def __post_init__(self): + run_auto_creation(self) + + def get_encoding_dim(self): + return self.autodecoder.get_encoding_dim() + + def forward( + self, + *, + frame_timestamp: Optional[torch.Tensor] = None, + sequence_name: Optional[Union[torch.LongTensor, List[str]]] = None, + **kwargs, + ) -> torch.Tensor: + if sequence_name is None: + raise ValueError("sequence_name must be provided.") + # run dtype checks and pass sequence_name to self.autodecoder + return self.autodecoder(sequence_name) + + def calculate_squared_encoding_norm(self) -> Optional[torch.Tensor]: + return self.autodecoder.calculate_squared_encoding_norm() + + +@registry.register +class HarmonicTimeEncoder(GlobalEncoderBase, torch.nn.Module): + """ + A global encoder implementation which provides harmonic embeddings + of each frame's timestamp. + """ + + n_harmonic_functions: int = 10 + append_input: bool = True + time_divisor: float = 1.0 + + def __post_init__(self): + self._harmonic_embedding = HarmonicEmbedding( + n_harmonic_functions=self.n_harmonic_functions, + append_input=self.append_input, + ) + + def get_encoding_dim(self): + return self._harmonic_embedding.get_output_dim(1) + + def forward( + self, + *, + frame_timestamp: Optional[torch.Tensor] = None, + sequence_name: Optional[Union[torch.LongTensor, List[str]]] = None, + **kwargs, + ) -> torch.Tensor: + if frame_timestamp is None: + raise ValueError("frame_timestamp must be provided.") + if frame_timestamp.shape[-1] != 1: + raise ValueError("Frame timestamp's last dimensions should be one.") + time = frame_timestamp / self.time_divisor + # pyre-fixme[29]: `Union[Tensor, Module]` is not a function. 
+ return self._harmonic_embedding(time) + + def calculate_squared_encoding_norm(self) -> Optional[torch.Tensor]: + return None diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/metrics.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/metrics.py new file mode 100644 index 0000000000000000000000000000000000000000..9555b62ff9e8b431baea39002d8505d0a0fd6069 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/metrics.py @@ -0,0 +1,424 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +import warnings +from typing import Any, Dict, Optional + +import torch +from pytorch3d.implicitron.models.renderer.ray_sampler import ImplicitronRayBundle +from pytorch3d.implicitron.tools import metric_utils as utils +from pytorch3d.implicitron.tools.config import registry, ReplaceableBase +from pytorch3d.ops import padded_to_packed +from pytorch3d.renderer import utils as rend_utils + +from .renderer.base import RendererOutput + + +class RegularizationMetricsBase(ReplaceableBase, torch.nn.Module): + """ + Replaceable abstract base for regularization metrics. + `forward()` method produces regularization metrics and (unlike ViewMetrics) can + depend on the model's parameters. + """ + + def forward( + self, model: Any, keys_prefix: str = "loss_", **kwargs + ) -> Dict[str, Any]: + """ + Calculates various regularization terms useful for supervising differentiable + rendering pipelines. + + Args: + model: A model instance. Useful, for example, to implement + weights-based regularization. + keys_prefix: A common prefix for all keys in the output dictionary + containing all regularization metrics. 
+ + Returns: + A dictionary with the resulting regularization metrics. The items + will have form `{metric_name_i: metric_value_i}` keyed by the + names of the output metrics `metric_name_i` with their corresponding + values `metric_value_i` represented as 0-dimensional float tensors. + """ + raise NotImplementedError + + +class ViewMetricsBase(ReplaceableBase, torch.nn.Module): + """ + Replaceable abstract base for model metrics. + `forward()` method produces losses and other metrics. + """ + + def forward( + self, + raymarched: RendererOutput, + ray_bundle: ImplicitronRayBundle, + image_rgb: Optional[torch.Tensor] = None, + depth_map: Optional[torch.Tensor] = None, + fg_probability: Optional[torch.Tensor] = None, + mask_crop: Optional[torch.Tensor] = None, + keys_prefix: str = "loss_", + **kwargs, + ) -> Dict[str, Any]: + """ + Calculates various metrics and loss functions useful for supervising + differentiable rendering pipelines. Any additional parameters can be passed + in the `raymarched.aux` dictionary. + + Args: + results: A dictionary with the resulting view metrics. The items + will have form `{metric_name_i: metric_value_i}` keyed by the + names of the output metrics `metric_name_i` with their corresponding + values `metric_value_i` represented as 0-dimensional float tensors. + raymarched: Output of the renderer. + ray_bundle: ImplicitronRayBundle object which was used to produce the raymarched + object + image_rgb: A tensor of shape `(B, H, W, 3)` containing ground truth rgb + values. + depth_map: A tensor of shape `(B, Hd, Wd, 1)` containing ground truth depth + values. + fg_probability: A tensor of shape `(B, Hm, Wm, 1)` containing ground truth + foreground masks. + keys_prefix: A common prefix for all keys in the output dictionary + containing all view metrics. + + Returns: + A dictionary with the resulting view metrics. 
The items + will have form `{metric_name_i: metric_value_i}` keyed by the + names of the output metrics `metric_name_i` with their corresponding + values `metric_value_i` represented as 0-dimensional float tensors. + """ + raise NotImplementedError() + + +@registry.register +class RegularizationMetrics(RegularizationMetricsBase): + def forward( + self, model: Any, keys_prefix: str = "loss_", **kwargs + ) -> Dict[str, Any]: + """ + Calculates the AD penalty, or returns an empty dict if the model's autoencoder + is inactive. + + Args: + model: A model instance. + keys_prefix: A common prefix for all keys in the output dictionary + containing all regularization metrics. + + Returns: + A dictionary with the resulting regularization metrics. The items + will have form `{metric_name_i: metric_value_i}` keyed by the + names of the output metrics `metric_name_i` with their corresponding + values `metric_value_i` represented as 0-dimensional float tensors. + + The calculated metric is: + autoencoder_norm: Autoencoder weight norm regularization term. + """ + metrics = {} + if getattr(model, "sequence_autodecoder", None) is not None: + ad_penalty = model.sequence_autodecoder.calculate_squared_encoding_norm() + if ad_penalty is not None: + metrics["autodecoder_norm"] = ad_penalty + + if keys_prefix is not None: + metrics = {(keys_prefix + k): v for k, v in metrics.items()} + + return metrics + + +@registry.register +class ViewMetrics(ViewMetricsBase): + def forward( + self, + raymarched: RendererOutput, + ray_bundle: ImplicitronRayBundle, + image_rgb: Optional[torch.Tensor] = None, + depth_map: Optional[torch.Tensor] = None, + fg_probability: Optional[torch.Tensor] = None, + mask_crop: Optional[torch.Tensor] = None, + keys_prefix: str = "loss_", + **kwargs, + ) -> Dict[str, Any]: + """ + Calculates various differentiable metrics useful for supervising + differentiable rendering pipelines. + + Args: + results: A dict to store the results in. 
+ raymarched.features: Predicted rgb or feature values. + raymarched.depths: A tensor of shape `(B, ..., 1)` containing + predicted depth values. + raymarched.masks: A tensor of shape `(B, ..., 1)` containing + predicted foreground masks. + raymarched.aux["grad_theta"]: A tensor of shape `(B, ..., 3)` containing an + evaluation of a gradient of a signed distance function w.r.t. + input 3D coordinates used to compute the eikonal loss. + raymarched.aux["density_grid"]: A tensor of shape `(B, Hg, Wg, Dg, 1)` + containing a `Hg x Wg x Dg` voxel grid of density values. + ray_bundle: ImplicitronRayBundle object which was used to produce the raymarched + object + image_rgb: A tensor of shape `(B, H, W, 3)` containing ground truth rgb + values. + depth_map: A tensor of shape `(B, Hd, Wd, 1)` containing ground truth depth + values. + fg_probability: A tensor of shape `(B, Hm, Wm, 1)` containing ground truth + foreground masks. + keys_prefix: A common prefix for all keys in the output dictionary + containing all view metrics. + + Returns: + A dictionary `{metric_name_i: metric_value_i}` keyed by the + names of the output metrics `metric_name_i` with their corresponding + values `metric_value_i` represented as 0-dimensional float tensors. + + The calculated metrics are: + rgb_huber: A robust huber loss between `image_pred` and `image`. + rgb_mse: Mean squared error between `image_pred` and `image`. + rgb_psnr: Peak signal-to-noise ratio between `image_pred` and `image`. + rgb_psnr_fg: Peak signal-to-noise ratio between the foreground + region of `image_pred` and `image` as defined by `mask`. + rgb_mse_fg: Mean squared error between the foreground + region of `image_pred` and `image` as defined by `mask`. + mask_neg_iou: (1 - intersection-over-union) between `mask_pred` + and `mask`. + mask_bce: Binary cross entropy between `mask_pred` and `mask`. 
+ mask_beta_prior: A loss enforcing strictly binary values + of `mask_pred`: `log(mask_pred) + log(1-mask_pred)` + depth_abs: Mean per-pixel L1 distance between + `depth_pred` and `depth`. + depth_abs_fg: Mean per-pixel L1 distance between the foreground + region of `depth_pred` and `depth` as defined by `mask`. + eikonal: Eikonal regularizer `(||grad_theta|| - 1)**2`. + density_tv: The Total Variation regularizer of density + values in `density_grid` (sum of L1 distances of values + of all 4-neighbouring cells). + depth_neg_penalty: `min(depth_pred, 0)**2` penalizing negative + predicted depth values. + """ + metrics = self._calculate_stage( + raymarched, + ray_bundle, + image_rgb, + depth_map, + fg_probability, + mask_crop, + keys_prefix, + ) + + if raymarched.prev_stage: + metrics.update( + self( + raymarched.prev_stage, + ray_bundle, + image_rgb, + depth_map, + fg_probability, + mask_crop, + keys_prefix=(keys_prefix + "prev_stage_"), + ) + ) + + return metrics + + def _calculate_stage( + self, + raymarched: RendererOutput, + ray_bundle: ImplicitronRayBundle, + image_rgb: Optional[torch.Tensor] = None, + depth_map: Optional[torch.Tensor] = None, + fg_probability: Optional[torch.Tensor] = None, + mask_crop: Optional[torch.Tensor] = None, + keys_prefix: str = "loss_", + **kwargs, + ) -> Dict[str, Any]: + """ + Calculate metrics for the current stage. + """ + # TODO: extract functions + + # reshape from B x ... x DIM to B x DIM x -1 x 1 + image_rgb_pred, fg_probability_pred, depth_map_pred = [ + _reshape_nongrid_var(x) + for x in [raymarched.features, raymarched.masks, raymarched.depths] + ] + xys = ray_bundle.xys + + # If ray_bundle is packed than we can sample images in padded state to lower + # memory requirements. Instead of having one image for every element in + # ray_bundle we can than have one image per unique sampled camera. 
+        if ray_bundle.is_packed():
+            xys, first_idxs, num_inputs = ray_bundle.get_padded_xys()
+
+        # reshape the sampling grid as well
+        # TODO: we can get rid of the singular dimension here and in _reshape_nongrid_var
+        # now that we use rend_utils.ndc_grid_sample
+        xys = xys.reshape(xys.shape[0], -1, 1, 2)
+
+        # closure with the given xys
+        def sample_full(tensor, mode):
+            if tensor is None:
+                return tensor
+            return rend_utils.ndc_grid_sample(tensor, xys, mode=mode)
+
+        def sample_packed(tensor, mode):
+            if tensor is None:
+                return tensor
+            if ray_bundle.is_packed():
+                # select images that correspond to sampled cameras if raybundle is packed
+                tensor = tensor[ray_bundle.camera_ids]
+            result = rend_utils.ndc_grid_sample(tensor, xys, mode=mode)
+            return padded_to_packed(result, first_idxs, num_inputs, max_size_dim=2)[
+                :, :, None
+            ]  # the result is [n_rays_total_training, 3, 1, 1]
+
+        sample = sample_packed if ray_bundle.is_packed() else sample_full
+
+        # eval all results in this size
+        image_rgb = sample(image_rgb, mode="bilinear")
+        depth_map = sample(depth_map, mode="nearest")
+        fg_probability = sample(fg_probability, mode="nearest")
+        mask_crop = sample(mask_crop, mode="nearest")
+        if mask_crop is None and image_rgb_pred is not None:
+            mask_crop = torch.ones_like(image_rgb_pred[:, :1])
+        if mask_crop is None and depth_map_pred is not None:
+            mask_crop = torch.ones_like(depth_map_pred[:, :1])
+
+        metrics = {}
+        if image_rgb is not None and image_rgb_pred is not None:
+            metrics.update(
+                _rgb_metrics(
+                    image_rgb,
+                    image_rgb_pred,
+                    masks=fg_probability,
+                    masks_crop=mask_crop,
+                )
+            )
+
+        if fg_probability_pred is not None:
+            metrics["mask_beta_prior"] = utils.beta_prior(fg_probability_pred)
+        if fg_probability is not None and fg_probability_pred is not None:
+            metrics["mask_neg_iou"] = utils.neg_iou_loss(
+                fg_probability_pred, fg_probability,
mask=mask_crop + ) + if torch.is_autocast_enabled(): + # To avoid issues with mixed precision + metrics["mask_bce"] = utils.calc_bce( + fg_probability_pred.logit(), + fg_probability, + mask=mask_crop, + pred_logits=True, + ) + else: + metrics["mask_bce"] = utils.calc_bce( + fg_probability_pred, + fg_probability, + mask=mask_crop, + pred_logits=False, + ) + + if depth_map is not None and depth_map_pred is not None: + assert mask_crop is not None + _, abs_ = utils.eval_depth( + depth_map_pred, depth_map, get_best_scale=True, mask=mask_crop, crop=0 + ) + metrics["depth_abs"] = abs_.mean() + + if fg_probability is not None: + mask = fg_probability * mask_crop + _, abs_ = utils.eval_depth( + depth_map_pred, + depth_map, + get_best_scale=True, + mask=mask, + crop=0, + ) + metrics["depth_abs_fg"] = abs_.mean() + + # regularizers + grad_theta = raymarched.aux.get("grad_theta") + if grad_theta is not None: + metrics["eikonal"] = _get_eikonal_loss(grad_theta) + + density_grid = raymarched.aux.get("density_grid") + if density_grid is not None: + metrics["density_tv"] = _get_grid_tv_loss(density_grid) + + if depth_map_pred is not None: + metrics["depth_neg_penalty"] = _get_depth_neg_penalty_loss(depth_map_pred) + + if keys_prefix is not None: + metrics = {(keys_prefix + k): v for k, v in metrics.items()} + + return metrics + + +def _rgb_metrics( + images, + images_pred, + masks=None, + masks_crop=None, + huber_scaling: float = 0.03, +): + assert masks_crop is not None + if images.shape[1] != images_pred.shape[1]: + raise ValueError( + f"Network output's RGB images had {images_pred.shape[1]} " + f"channels. {images.shape[1]} expected." 
+ ) + rgb_abs = ((images_pred - images).abs()).mean(dim=1, keepdim=True) + rgb_squared = ((images_pred - images) ** 2).mean(dim=1, keepdim=True) + rgb_loss = utils.huber(rgb_squared, scaling=huber_scaling) + crop_mass = masks_crop.sum().clamp(1.0) + results = { + "rgb_huber": (rgb_loss * masks_crop).sum() / crop_mass, + "rgb_l1": (rgb_abs * masks_crop).sum() / crop_mass, + "rgb_mse": (rgb_squared * masks_crop).sum() / crop_mass, + "rgb_psnr": utils.calc_psnr(images_pred, images, mask=masks_crop), + } + if masks is not None: + masks = masks_crop * masks + results["rgb_psnr_fg"] = utils.calc_psnr(images_pred, images, mask=masks) + results["rgb_mse_fg"] = (rgb_squared * masks).sum() / masks.sum().clamp(1.0) + return results + + +def _get_eikonal_loss(grad_theta): + return ((grad_theta.norm(2, dim=1) - 1) ** 2).mean() + + +def _get_grid_tv_loss(grid, log_domain: bool = True, eps: float = 1e-5): + if log_domain: + if (grid <= -eps).any(): + warnings.warn("Grid has negative values; this will produce NaN loss") + grid = torch.log(grid + eps) + + # this is an isotropic version, note that it ignores last rows/cols + return torch.mean( + utils.safe_sqrt( + (grid[..., :-1, :-1, 1:] - grid[..., :-1, :-1, :-1]) ** 2 + + (grid[..., :-1, 1:, :-1] - grid[..., :-1, :-1, :-1]) ** 2 + + (grid[..., 1:, :-1, :-1] - grid[..., :-1, :-1, :-1]) ** 2, + eps=1e-5, + ) + ) + + +def _get_depth_neg_penalty_loss(depth): + neg_penalty = depth.clamp(min=None, max=0.0) ** 2 + return torch.mean(neg_penalty) + + +def _reshape_nongrid_var(x): + if x is None: + return None + + ba, *_, dim = x.shape + return x.reshape(ba, -1, 1, dim).permute(0, 3, 1, 2).contiguous() diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/model_dbir.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/model_dbir.py new file mode 100644 index 
0000000000000000000000000000000000000000..496f9ed15d10877d34b4489a032487bf28eba6e4 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/model_dbir.py @@ -0,0 +1,153 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + + +from typing import Any, Dict, List, Optional, Tuple + +import torch +from pytorch3d.implicitron.dataset.utils import is_known_frame +from pytorch3d.implicitron.tools.config import registry +from pytorch3d.implicitron.tools.point_cloud_utils import ( + get_rgbd_point_cloud, + render_point_cloud_pytorch3d, +) +from pytorch3d.renderer.cameras import CamerasBase +from pytorch3d.structures import Pointclouds + +from .base_model import ImplicitronModelBase, ImplicitronRender +from .renderer.base import EvaluationMode + + +@registry.register +class ModelDBIR(ImplicitronModelBase): + """ + A simple depth-based image rendering model. + + Args: + render_image_width: The width of the rendered rectangular images. + render_image_height: The height of the rendered rectangular images. + bg_color: The color of the background. + max_points: Maximum number of points in the point cloud + formed by unprojecting all source view depths. + If more points are present, they are randomly subsampled + to this number of points without replacement. + """ + + render_image_width: int = 256 + render_image_height: int = 256 + bg_color: Tuple[float, float, float] = (0.0, 0.0, 0.0) + max_points: int = -1 + + # pyre-fixme[14]: `forward` overrides method defined in `ImplicitronModelBase` + # inconsistently. 
+ def forward( + self, + *, # force keyword-only arguments + image_rgb: Optional[torch.Tensor], + camera: CamerasBase, + fg_probability: Optional[torch.Tensor], + mask_crop: Optional[torch.Tensor], + depth_map: Optional[torch.Tensor], + sequence_name: Optional[List[str]], + evaluation_mode: EvaluationMode = EvaluationMode.EVALUATION, + frame_type: List[str], + **kwargs, + ) -> Dict[str, Any]: # TODO: return a namedtuple or dataclass + """ + Given a set of input source cameras images and depth maps, unprojects + all RGBD maps to a colored point cloud and renders into the target views. + + Args: + camera: A batch of `N` PyTorch3D cameras. + image_rgb: A batch of `N` images of shape `(N, 3, H, W)`. + depth_map: A batch of `N` depth maps of shape `(N, 1, H, W)`. + fg_probability: A batch of `N` foreground probability maps + of shape `(N, 1, H, W)`. + frame_type: A list of `N` strings containing frame type indicators + which specify target and source views. + + Returns: + preds: A dict with the following fields: + implicitron_render: The rendered colors, depth and mask + of the target views. + point_cloud: The point cloud of the scene. It's renders are + stored in `implicitron_render`. + """ + + if image_rgb is None: + raise ValueError("ModelDBIR needs image input") + + if fg_probability is None: + raise ValueError("ModelDBIR needs foreground mask input") + + if depth_map is None: + raise ValueError("ModelDBIR needs depth map input") + + is_known = is_known_frame(frame_type) + is_known_idx = torch.where(is_known)[0] + + mask_fg = (fg_probability > 0.5).type_as(image_rgb) + + point_cloud = get_rgbd_point_cloud( + # pyre-fixme[6]: For 1st param expected `Union[List[int], int, + # LongTensor]` but got `Tensor`. 
+ camera[is_known_idx], + image_rgb[is_known_idx], + depth_map[is_known_idx], + mask_fg[is_known_idx], + ) + + pcl_size = point_cloud.num_points_per_cloud().item() + if (self.max_points > 0) and (pcl_size > self.max_points): + # pyre-fixme[6]: For 1st param expected `int` but got `Union[bool, + # float, int]`. + prm = torch.randperm(pcl_size)[: self.max_points] + point_cloud = Pointclouds( + point_cloud.points_padded()[:, prm, :], + # pyre-fixme[16]: Optional type has no attribute `__getitem__`. + features=point_cloud.features_padded()[:, prm, :], + ) + + is_target_idx = torch.where(~is_known)[0] + + depth_render, image_render, mask_render = [], [], [] + + # render into target frames in a for loop to save memory + for tgt_idx in is_target_idx: + _image_render, _mask_render, _depth_render = render_point_cloud_pytorch3d( + camera[int(tgt_idx)], + point_cloud, + render_size=(self.render_image_height, self.render_image_width), + point_radius=1e-2, + topk=10, + bg_color=self.bg_color, + ) + _image_render = _image_render.clamp(0.0, 1.0) + # the mask is the set of pixels with opacity bigger than eps + _mask_render = (_mask_render > 1e-4).float() + + depth_render.append(_depth_render) + image_render.append(_image_render) + mask_render.append(_mask_render) + + implicitron_render = ImplicitronRender( + **{ + k: torch.cat(v, dim=0) + for k, v in zip( + ["depth_render", "image_render", "mask_render"], + [depth_render, image_render, mask_render], + ) + } + ) + + preds = { + "implicitron_render": implicitron_render, + "point_cloud": point_cloud, + } + + return preds diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/overfit_model.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/overfit_model.py new file mode 100644 index 0000000000000000000000000000000000000000..23a45665d58051f646adf2ae8f5ed80afbab6ff8 --- /dev/null +++ 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/overfit_model.py @@ -0,0 +1,678 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + + +# Note: The #noqa comments below are for unused imports of pluggable implementations +# which are part of implicitron. They ensure that the registry is prepopulated. + +import functools +import logging +from dataclasses import field +from typing import Any, Callable, Dict, List, Optional, Tuple, TYPE_CHECKING, Union + +import torch +from omegaconf import DictConfig + +from pytorch3d.implicitron.models.base_model import ( + ImplicitronModelBase, + ImplicitronRender, +) +from pytorch3d.implicitron.models.global_encoder.global_encoder import GlobalEncoderBase +from pytorch3d.implicitron.models.implicit_function.base import ImplicitFunctionBase +from pytorch3d.implicitron.models.metrics import ( + RegularizationMetricsBase, + ViewMetricsBase, +) + +from pytorch3d.implicitron.models.renderer.base import ( + BaseRenderer, + EvaluationMode, + ImplicitronRayBundle, + RendererOutput, + RenderSamplingMode, +) +from pytorch3d.implicitron.models.renderer.ray_sampler import RaySamplerBase +from pytorch3d.implicitron.models.utils import ( + apply_chunked, + chunk_generator, + log_loss_weights, + preprocess_input, + weighted_sum_losses, +) +from pytorch3d.implicitron.tools import vis_utils +from pytorch3d.implicitron.tools.config import ( + expand_args_fields, + registry, + run_auto_creation, +) + +from pytorch3d.implicitron.tools.rasterize_mc import rasterize_sparse_ray_bundle +from pytorch3d.renderer import utils as rend_utils +from pytorch3d.renderer.cameras import CamerasBase + + +if TYPE_CHECKING: + from visdom import Visdom +logger = logging.getLogger(__name__) + +IMPLICIT_FUNCTION_ARGS_TO_REMOVE: 
List[str] = [ + "feature_vector_size", + "encoding_dim", + "latent_dim", + "color_dim", +] + + +@registry.register +class OverfitModel(ImplicitronModelBase): + """ + OverfitModel is a wrapper for the neural implicit + rendering and reconstruction pipeline which consists + of the following sequence of 4 steps: + + + (1) Ray Sampling + ------------------ + Rays are sampled from an image grid based on the target view(s). + │ + ▼ + (2) Implicit Function Evaluation + ------------------ + Evaluate the implicit function(s) at the sampled ray points + (also optionally pass in a global encoding from global_encoder). + │ + ▼ + (3) Rendering + ------------------ + Render the image into the target cameras by raymarching along + the sampled rays and aggregating the colors and densities + output by the implicit function in (2). + │ + ▼ + (4) Loss Computation + ------------------ + Compute losses based on the predicted target image(s). + + + The `forward` function of OverfitModel executes + this sequence of steps. Currently, steps 1, 2, 3 + can be customized by intializing a subclass of the appropriate + base class and adding the newly created module to the registry. + Please see https://github.com/facebookresearch/pytorch3d/blob/main/projects/implicitron_trainer/README.md#custom-plugins + for more details on how to create and register a custom component. + + In the config .yaml files for experiments, the parameters below are + contained in the + `model_factory_ImplicitronModelFactory_args.model_OverfitModel_args` + node. As OverfitModel derives from ReplaceableBase, the input arguments are + parsed by the run_auto_creation function to initialize the + necessary member modules. Please see implicitron_trainer/README.md + for more details on this process. 
+ + Args: + mask_images: Whether or not to mask the RGB image background given the + foreground mask (the `fg_probability` argument of `GenericModel.forward`) + mask_depths: Whether or not to mask the depth image background given the + foreground mask (the `fg_probability` argument of `GenericModel.forward`) + render_image_width: Width of the output image to render + render_image_height: Height of the output image to render + mask_threshold: If greater than 0.0, the foreground mask is + thresholded by this value before being applied to the RGB/Depth images + output_rasterized_mc: If True, visualize the Monte-Carlo pixel renders by + splatting onto an image grid. Default: False. + bg_color: RGB values for setting the background color of input image + if mask_images=True. Defaults to (0.0, 0.0, 0.0). Each renderer has its own + way to determine the background color of its output, unrelated to this. + chunk_size_grid: The total number of points which can be rendered + per chunk. This is used to compute the number of rays used + per chunk when the chunked version of the renderer is used (in order + to fit rendering on all rays in memory) + render_features_dimensions: The number of output features to render. + Defaults to 3, corresponding to RGB images. + sampling_mode_training: The sampling method to use during training. Must be + a value from the RenderSamplingMode Enum. + sampling_mode_evaluation: Same as above but for evaluation. + global_encoder_class_type: The name of the class to use for global_encoder, + which must be available in the registry. Or `None` to disable global encoder. + global_encoder: An instance of `GlobalEncoder`. This is used to generate an encoding + of the image (referred to as the global_code) that can be used to model aspects of + the scene such as multiple objects or morphing objects. 
It is up to the implicit + function definition how to use it, but the most typical way is to broadcast and + concatenate to the other inputs for the implicit function. + raysampler_class_type: The name of the raysampler class which is available + in the global registry. + raysampler: An instance of RaySampler which is used to emit + rays from the target view(s). + renderer_class_type: The name of the renderer class which is available in the global + registry. + renderer: A renderer class which inherits from BaseRenderer. This is used to + generate the images from the target view(s). + share_implicit_function_across_passes: If set to True + coarse_implicit_function is automatically set as implicit_function + (coarse_implicit_function=implicit_funciton). The + implicit_functions are then run sequentially during the rendering. + implicit_function_class_type: The type of implicit function to use which + is available in the global registry. + implicit_function: An instance of ImplicitFunctionBase. + coarse_implicit_function_class_type: The type of implicit function to use which + is available in the global registry. + coarse_implicit_function: An instance of ImplicitFunctionBase. + If set and `share_implicit_function_across_passes` is set to False, + coarse_implicit_function is instantiated on itself. It + is then used as the second pass during the rendering. + If set to None, we only do a single pass with implicit_function. + view_metrics: An instance of ViewMetricsBase used to compute loss terms which + are independent of the model's parameters. + view_metrics_class_type: The type of view metrics to use, must be available in + the global registry. + regularization_metrics: An instance of RegularizationMetricsBase used to compute + regularization terms which can depend on the model's parameters. + regularization_metrics_class_type: The type of regularization metrics to use, + must be available in the global registry. 
+ loss_weights: A dictionary with a {loss_name: weight} mapping; see documentation + for `ViewMetrics` class for available loss functions. + log_vars: A list of variable names which should be logged. + The names should correspond to a subset of the keys of the + dict `preds` output by the `forward` function. + """ # noqa: B950 + + mask_images: bool = True + mask_depths: bool = True + render_image_width: int = 400 + render_image_height: int = 400 + mask_threshold: float = 0.5 + output_rasterized_mc: bool = False + bg_color: Tuple[float, float, float] = (0.0, 0.0, 0.0) + chunk_size_grid: int = 4096 + render_features_dimensions: int = 3 + tqdm_trigger_threshold: int = 16 + + n_train_target_views: int = 1 + sampling_mode_training: str = "mask_sample" + sampling_mode_evaluation: str = "full_grid" + + # ---- global encoder settings + global_encoder_class_type: Optional[str] = None + # pyre-fixme[13]: Attribute `global_encoder` is never initialized. + global_encoder: Optional[GlobalEncoderBase] + + # ---- raysampler + raysampler_class_type: str = "AdaptiveRaySampler" + # pyre-fixme[13]: Attribute `raysampler` is never initialized. + raysampler: RaySamplerBase + + # ---- renderer configs + renderer_class_type: str = "MultiPassEmissionAbsorptionRenderer" + # pyre-fixme[13]: Attribute `renderer` is never initialized. + renderer: BaseRenderer + + # ---- implicit function settings + share_implicit_function_across_passes: bool = False + implicit_function_class_type: str = "NeuralRadianceFieldImplicitFunction" + # pyre-fixme[13]: Attribute `implicit_function` is never initialized. + implicit_function: ImplicitFunctionBase + coarse_implicit_function_class_type: Optional[str] = None + # pyre-fixme[13]: Attribute `coarse_implicit_function` is never initialized. + coarse_implicit_function: Optional[ImplicitFunctionBase] + + # ----- metrics + # pyre-fixme[13]: Attribute `view_metrics` is never initialized. 
+ view_metrics: ViewMetricsBase + view_metrics_class_type: str = "ViewMetrics" + + # pyre-fixme[13]: Attribute `regularization_metrics` is never initialized. + regularization_metrics: RegularizationMetricsBase + regularization_metrics_class_type: str = "RegularizationMetrics" + + # ---- loss weights + loss_weights: Dict[str, float] = field( + default_factory=lambda: { + "loss_rgb_mse": 1.0, + "loss_prev_stage_rgb_mse": 1.0, + "loss_mask_bce": 0.0, + "loss_prev_stage_mask_bce": 0.0, + } + ) + + # ---- variables to be logged (logger automatically ignores if not computed) + log_vars: List[str] = field( + default_factory=lambda: [ + "loss_rgb_psnr_fg", + "loss_rgb_psnr", + "loss_rgb_mse", + "loss_rgb_huber", + "loss_depth_abs", + "loss_depth_abs_fg", + "loss_mask_neg_iou", + "loss_mask_bce", + "loss_mask_beta_prior", + "loss_eikonal", + "loss_density_tv", + "loss_depth_neg_penalty", + "loss_autodecoder_norm", + # metrics that are only logged in 2+stage renderes + "loss_prev_stage_rgb_mse", + "loss_prev_stage_rgb_psnr_fg", + "loss_prev_stage_rgb_psnr", + "loss_prev_stage_mask_bce", + # basic metrics + "objective", + "epoch", + "sec/it", + ] + ) + + @classmethod + def pre_expand(cls) -> None: + # use try/finally to bypass cinder's lazy imports + try: + from pytorch3d.implicitron.models.implicit_function.idr_feature_field import ( # noqa: F401, B950 + IdrFeatureField, + ) + from pytorch3d.implicitron.models.implicit_function.neural_radiance_field import ( # noqa: F401, B950 + NeuralRadianceFieldImplicitFunction, + ) + from pytorch3d.implicitron.models.implicit_function.scene_representation_networks import ( # noqa: F401, B950 + SRNImplicitFunction, + ) + from pytorch3d.implicitron.models.renderer.lstm_renderer import ( # noqa: F401 + LSTMRenderer, + ) + from pytorch3d.implicitron.models.renderer.multipass_ea import ( # noqa: F401 + MultiPassEmissionAbsorptionRenderer, + ) + from pytorch3d.implicitron.models.renderer.sdf_renderer import ( # noqa: F401 + 
SignedDistanceFunctionRenderer, + ) + finally: + pass + + def __post_init__(self): + # The attribute will be filled by run_auto_creation + run_auto_creation(self) + log_loss_weights(self.loss_weights, logger) + # We need to set it here since run_auto_creation + # will create coarse_implicit_function before implicit_function + if self.share_implicit_function_across_passes: + self.coarse_implicit_function = self.implicit_function + + def forward( + self, + *, # force keyword-only arguments + image_rgb: Optional[torch.Tensor], + camera: CamerasBase, + fg_probability: Optional[torch.Tensor] = None, + mask_crop: Optional[torch.Tensor] = None, + depth_map: Optional[torch.Tensor] = None, + sequence_name: Optional[List[str]] = None, + frame_timestamp: Optional[torch.Tensor] = None, + evaluation_mode: EvaluationMode = EvaluationMode.EVALUATION, + **kwargs, + ) -> Dict[str, Any]: + """ + Args: + image_rgb: A tensor of shape `(B, 3, H, W)` containing a batch of rgb images; + the first `min(B, n_train_target_views)` images are considered targets and + are used to supervise the renders; the rest corresponding to the source + viewpoints from which features will be extracted. + camera: An instance of CamerasBase containing a batch of `B` cameras corresponding + to the viewpoints of target images, from which the rays will be sampled, + and source images, which will be used for intersecting with target rays. + fg_probability: A tensor of shape `(B, 1, H, W)` containing a batch of + foreground masks. + mask_crop: A binary tensor of shape `(B, 1, H, W)` deonting valid + regions in the input images (i.e. regions that do not correspond + to, e.g., zero-padding). When the `RaySampler`'s sampling mode is set to + "mask_sample", rays will be sampled in the non zero regions. + depth_map: A tensor of shape `(B, 1, H, W)` containing a batch of depth maps. + sequence_name: A list of `B` strings corresponding to the sequence names + from which images `image_rgb` were extracted. 
They are used to match + target frames with relevant source frames. + frame_timestamp: Optionally a tensor of shape `(B,)` containing a batch + of frame timestamps. + evaluation_mode: one of EvaluationMode.TRAINING or + EvaluationMode.EVALUATION which determines the settings used for + rendering. + + Returns: + preds: A dictionary containing all outputs of the forward pass including the + rendered images, depths, masks, losses and other metrics. + """ + image_rgb, fg_probability, depth_map = preprocess_input( + image_rgb, + fg_probability, + depth_map, + self.mask_images, + self.mask_depths, + self.mask_threshold, + self.bg_color, + ) + + # Determine the used ray sampling mode. + sampling_mode = RenderSamplingMode( + self.sampling_mode_training + if evaluation_mode == EvaluationMode.TRAINING + else self.sampling_mode_evaluation + ) + + # (1) Sample rendering rays with the ray sampler. + # pyre-ignore[29] + ray_bundle: ImplicitronRayBundle = self.raysampler( + camera, + evaluation_mode, + mask=( + mask_crop + if mask_crop is not None + and sampling_mode == RenderSamplingMode.MASK_SAMPLE + else None + ), + ) + + inputs_to_be_chunked = {} + if fg_probability is not None and self.renderer.requires_object_mask(): + sampled_fb_prob = rend_utils.ndc_grid_sample( + fg_probability, ray_bundle.xys, mode="nearest" + ) + inputs_to_be_chunked["object_mask"] = sampled_fb_prob > 0.5 + + # (2)-(3) Implicit function evaluation and Rendering + implicit_functions: List[Union[Callable, ImplicitFunctionBase]] = [ + self.implicit_function + ] + if self.coarse_implicit_function is not None: + implicit_functions = [self.coarse_implicit_function, self.implicit_function] + + if self.global_encoder is not None: + global_code = self.global_encoder( # pyre-fixme[29] + sequence_name=sequence_name, + frame_timestamp=frame_timestamp, + ) + implicit_functions = [ + ( + functools.partial(implicit_function, global_code=global_code) + if isinstance(implicit_function, Callable) + else 
functools.partial( + implicit_function.forward, global_code=global_code + ) + ) + for implicit_function in implicit_functions + ] + rendered = self._render( + ray_bundle=ray_bundle, + sampling_mode=sampling_mode, + evaluation_mode=evaluation_mode, + implicit_functions=implicit_functions, + inputs_to_be_chunked=inputs_to_be_chunked, + ) + + # A dict to store losses as well as rendering results. + preds: Dict[str, Any] = self.view_metrics( + results={}, + raymarched=rendered, + ray_bundle=ray_bundle, + image_rgb=image_rgb, + depth_map=depth_map, + fg_probability=fg_probability, + mask_crop=mask_crop, + ) + + preds.update( + self.regularization_metrics( + results=preds, + model=self, + ) + ) + + if sampling_mode == RenderSamplingMode.MASK_SAMPLE: + if self.output_rasterized_mc: + # Visualize the monte-carlo pixel renders by splatting onto + # an image grid. + ( + preds["images_render"], + preds["depths_render"], + preds["masks_render"], + ) = rasterize_sparse_ray_bundle( + ray_bundle, + rendered.features, + (self.render_image_height, self.render_image_width), + rendered.depths, + masks=rendered.masks, + ) + elif sampling_mode == RenderSamplingMode.FULL_GRID: + preds["images_render"] = rendered.features.permute(0, 3, 1, 2) + preds["depths_render"] = rendered.depths.permute(0, 3, 1, 2) + preds["masks_render"] = rendered.masks.permute(0, 3, 1, 2) + + preds["implicitron_render"] = ImplicitronRender( + image_render=preds["images_render"], + depth_render=preds["depths_render"], + mask_render=preds["masks_render"], + ) + else: + raise AssertionError("Unreachable state") + + # (4) Compute losses + # finally get the optimization objective using self.loss_weights + objective = self._get_objective(preds) + if objective is not None: + preds["objective"] = objective + + return preds + + def _get_objective(self, preds: Dict[str, torch.Tensor]) -> Optional[torch.Tensor]: + """ + A helper function to compute the overall loss as the dot product + of individual loss functions with the 
corresponding weights. + """ + return weighted_sum_losses(preds, self.loss_weights) + + def visualize( + self, + viz: Optional["Visdom"], + visdom_env_imgs: str, + preds: Dict[str, Any], + prefix: str, + ) -> None: + """ + Helper function to visualize the predictions generated + in the forward pass. + + Args: + viz: Visdom connection object + visdom_env_imgs: name of visdom environment for the images. + preds: predictions dict like returned by forward() + prefix: prepended to the names of images + """ + if viz is None or not viz.check_connection(): + logger.info("no visdom server! -> skipping batch vis") + return + + idx_image = 0 + title = f"{prefix}_im{idx_image}" + + vis_utils.visualize_basics(viz, preds, visdom_env_imgs, title=title) + + def _render( + self, + *, + ray_bundle: ImplicitronRayBundle, + inputs_to_be_chunked: Dict[str, torch.Tensor], + sampling_mode: RenderSamplingMode, + **kwargs, + ) -> RendererOutput: + """ + Args: + ray_bundle: A `ImplicitronRayBundle` object containing the parametrizations of the + sampled rendering rays. + inputs_to_be_chunked: A collection of tensor of shape `(B, _, H, W)`. E.g. + SignedDistanceFunctionRenderer requires "object_mask", shape + (B, 1, H, W), the silhouette of the object in the image. When + chunking, they are passed to the renderer as shape + `(B, _, chunksize)`. + sampling_mode: The sampling method to use. Must be a value from the + RenderSamplingMode Enum. + + Returns: + An instance of RendererOutput + """ + if sampling_mode == RenderSamplingMode.FULL_GRID and self.chunk_size_grid > 0: + return apply_chunked( + self.renderer, + chunk_generator( + self.chunk_size_grid, + ray_bundle, + inputs_to_be_chunked, + self.tqdm_trigger_threshold, + **kwargs, + ), + lambda batch: torch.cat(batch, dim=1).reshape( + *ray_bundle.lengths.shape[:-1], -1 + ), + ) + else: + # pyre-fixme[29]: `BaseRenderer` is not a function. 
+ return self.renderer( + ray_bundle=ray_bundle, + **inputs_to_be_chunked, + **kwargs, + ) + + @classmethod + def raysampler_tweak_args(cls, type, args: DictConfig) -> None: + """ + We don't expose certain fields of the raysampler because we want to set + them from our own members. + """ + del args["sampling_mode_training"] + del args["sampling_mode_evaluation"] + del args["image_width"] + del args["image_height"] + + def create_raysampler(self): + extra_args = { + "sampling_mode_training": self.sampling_mode_training, + "sampling_mode_evaluation": self.sampling_mode_evaluation, + "image_width": self.render_image_width, + "image_height": self.render_image_height, + } + raysampler_args = getattr( + self, "raysampler_" + self.raysampler_class_type + "_args" + ) + self.raysampler = registry.get(RaySamplerBase, self.raysampler_class_type)( + **raysampler_args, **extra_args + ) + + @classmethod + def renderer_tweak_args(cls, type, args: DictConfig) -> None: + """ + We don't expose certain fields of the renderer because we want to set + them based on other inputs. + """ + args.pop("render_features_dimensions", None) + args.pop("object_bounding_sphere", None) + + def create_renderer(self): + extra_args = {} + + if self.renderer_class_type == "SignedDistanceFunctionRenderer": + extra_args["render_features_dimensions"] = self.render_features_dimensions + if not hasattr(self.raysampler, "scene_extent"): + raise ValueError( + "SignedDistanceFunctionRenderer requires" + + " a raysampler that defines the 'scene_extent' field" + + " (this field is supported by, e.g., the adaptive raysampler - " + + " self.raysampler_class_type='AdaptiveRaySampler')." 
+ ) + extra_args["object_bounding_sphere"] = self.raysampler.scene_extent + + renderer_args = getattr(self, "renderer_" + self.renderer_class_type + "_args") + self.renderer = registry.get(BaseRenderer, self.renderer_class_type)( + **renderer_args, **extra_args + ) + + @classmethod + def implicit_function_tweak_args(cls, type, args: DictConfig) -> None: + """ + We don't expose certain implicit_function fields because we want to set + them based on other inputs. + """ + for arg in IMPLICIT_FUNCTION_ARGS_TO_REMOVE: + args.pop(arg, None) + + @classmethod + def coarse_implicit_function_tweak_args(cls, type, args: DictConfig) -> None: + """ + We don't expose certain implicit_function fields because we want to set + them based on other inputs. + """ + for arg in IMPLICIT_FUNCTION_ARGS_TO_REMOVE: + args.pop(arg, None) + + def _create_extra_args_for_implicit_function(self) -> Dict[str, Any]: + extra_args = {} + global_encoder_dim = ( + 0 if self.global_encoder is None else self.global_encoder.get_encoding_dim() + ) + if self.implicit_function_class_type in ( + "NeuralRadianceFieldImplicitFunction", + "NeRFormerImplicitFunction", + ): + extra_args["latent_dim"] = global_encoder_dim + extra_args["color_dim"] = self.render_features_dimensions + + if self.implicit_function_class_type == "IdrFeatureField": + extra_args["feature_work_size"] = global_encoder_dim + extra_args["feature_vector_size"] = self.render_features_dimensions + + if self.implicit_function_class_type == "SRNImplicitFunction": + extra_args["latent_dim"] = global_encoder_dim + return extra_args + + def create_implicit_function(self) -> None: + implicit_function_type = registry.get( + ImplicitFunctionBase, self.implicit_function_class_type + ) + expand_args_fields(implicit_function_type) + + config_name = f"implicit_function_{self.implicit_function_class_type}_args" + config = getattr(self, config_name, None) + if config is None: + raise ValueError(f"{config_name} not present") + + extra_args = 
self._create_extra_args_for_implicit_function() + self.implicit_function = implicit_function_type(**config, **extra_args) + + def create_coarse_implicit_function(self) -> None: + # If coarse_implicit_function_class_type has been defined + # then we init a module based on its arguments + if ( + self.coarse_implicit_function_class_type is not None + and not self.share_implicit_function_across_passes + ): + config_name = "coarse_implicit_function_{0}_args".format( + self.coarse_implicit_function_class_type + ) + config = getattr(self, config_name, {}) + + implicit_function_type = registry.get( + ImplicitFunctionBase, + # pyre-ignore: config is None allow to check if this is None. + self.coarse_implicit_function_class_type, + ) + expand_args_fields(implicit_function_type) + + extra_args = self._create_extra_args_for_implicit_function() + self.coarse_implicit_function = implicit_function_type( + **config, **extra_args + ) + elif self.share_implicit_function_across_passes: + # Since coarse_implicit_function is initialised before + # implicit_function we handle this case in the post_init. + pass + else: + self.coarse_implicit_function = None diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/renderer/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/renderer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6ac1a72bde66f104691245d2de4e83c6863718d5 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/renderer/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

from typing import List

import torch
from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle
from pytorch3d.implicitron.tools.config import registry, run_auto_creation

from .base import BaseRenderer, EvaluationMode, ImplicitFunctionWrapper, RendererOutput
from .ray_point_refiner import RayPointRefiner
from .raymarcher import RaymarcherBase


@registry.register
class MultiPassEmissionAbsorptionRenderer(BaseRenderer, torch.nn.Module):
    """
    Implements the multi-pass rendering function, in particular,
    with emission-absorption ray marching used in NeRF [1]. First, it evaluates
    opacity-based ray-point weights and then optionally (in case more implicit
    functions are given) resamples points using importance sampling and evaluates
    new weights.

    During each ray marching pass, features, depth map, and masks
    are integrated: Let o_i be the opacity estimated by the implicit function,
    and d_i be the offset between points `i` and `i+1` along the respective ray.
    Ray marching is performed using the following equations::

        ray_opacity_n = cap_fn(sum_i=1^n cap_fn(d_i * o_i)),
        weight_n = weight_fn(cap_fn(d_i * o_i), 1 - ray_opacity_{n-1}),

    and the final rendered quantities are computed by a dot-product of ray values
    with the weights, e.g. `features = sum_n(weight_n * ray_features_n)`.

    By default, for the EA raymarcher from [1] (
    activated with `self.raymarcher_class_type="EmissionAbsorptionRaymarcher"`
    )::

        cap_fn(x) = 1 - exp(-x),
        weight_fn(x) = w * x.

    Note that the latter can be altered by changing `self.raymarcher_class_type`,
    e.g. to "CumsumRaymarcher" which implements the cumulative-sum raymarcher
    from NeuralVolumes [2].

    Settings:
        n_pts_per_ray_fine_training: The number of points sampled per ray for the
            fine rendering pass during training.
        n_pts_per_ray_fine_evaluation: The number of points sampled per ray for the
            fine rendering pass during evaluation.
        stratified_sampling_coarse_training: Enable/disable stratified sampling in the
            refiner during training. Only matters if there are multiple implicit
            functions (i.e. in GenericModel if num_passes>1).
        stratified_sampling_coarse_evaluation: Enable/disable stratified sampling in
            the refiner during evaluation. Only matters if there are multiple implicit
            functions (i.e. in GenericModel if num_passes>1).
        append_coarse_samples_to_fine: Add the fine ray points to the coarse points
            after sampling.
        density_noise_std_train: Standard deviation of the noise added to the
            opacity field.
        return_weights: Enables returning the rendering weights of the EA raymarcher.
            Setting to `True` can lead to a prohibitively large memory consumption.
        blurpool_weights: Use blurpool defined in [3], on the input weights of
            each implicit_function except the first (implicit_functions[0]).
        sample_pdf_eps: Padding applied to the weights (alpha in equation 18 of [3]).
        raymarcher_class_type: The type of self.raymarcher corresponding to
            a child of `RaymarcherBase` in the registry.
        raymarcher: The raymarcher object used to convert per-point features
            and opacities to a feature render.

    References:
        [1] Mildenhall, Ben, et al. "Nerf: Representing Scenes as Neural Radiance
            Fields for View Synthesis." ECCV 2020.
        [2] Lombardi, Stephen, et al. "Neural Volumes: Learning Dynamic Renderable
            Volumes from Images." SIGGRAPH 2019.
        [3] Jonathan T. Barron, et al. "Mip-NeRF: A Multiscale Representation
            for Anti-Aliasing Neural Radiance Fields." ICCV 2021.

    """

    raymarcher_class_type: str = "EmissionAbsorptionRaymarcher"
    # pyre-fixme[13]: Attribute `raymarcher` is never initialized.
    raymarcher: RaymarcherBase

    n_pts_per_ray_fine_training: int = 64
    n_pts_per_ray_fine_evaluation: int = 64
    stratified_sampling_coarse_training: bool = True
    stratified_sampling_coarse_evaluation: bool = False
    append_coarse_samples_to_fine: bool = True
    density_noise_std_train: float = 0.0
    return_weights: bool = False
    blurpool_weights: bool = False
    sample_pdf_eps: float = 1e-5

    def __post_init__(self):
        # One importance-sampling refiner per evaluation mode; the modes differ
        # only in the number of fine points and whether sampling is stratified.
        self._refiners = {
            EvaluationMode.TRAINING: RayPointRefiner(
                n_pts_per_ray=self.n_pts_per_ray_fine_training,
                random_sampling=self.stratified_sampling_coarse_training,
                add_input_samples=self.append_coarse_samples_to_fine,
                blurpool_weights=self.blurpool_weights,
                sample_pdf_eps=self.sample_pdf_eps,
            ),
            EvaluationMode.EVALUATION: RayPointRefiner(
                n_pts_per_ray=self.n_pts_per_ray_fine_evaluation,
                random_sampling=self.stratified_sampling_coarse_evaluation,
                add_input_samples=self.append_coarse_samples_to_fine,
                blurpool_weights=self.blurpool_weights,
                sample_pdf_eps=self.sample_pdf_eps,
            ),
        }
        # Instantiates `self.raymarcher` from `raymarcher_class_type` via the
        # implicitron Configurable machinery.
        run_auto_creation(self)

    def forward(
        self,
        ray_bundle: ImplicitronRayBundle,
        implicit_functions: List[ImplicitFunctionWrapper],
        evaluation_mode: EvaluationMode = EvaluationMode.EVALUATION,
        **kwargs,
    ) -> RendererOutput:
        """
        Args:
            ray_bundle: A `ImplicitronRayBundle` object containing the parametrizations of the
                sampled rendering rays.
            implicit_functions: List of ImplicitFunctionWrappers which
                define the implicit functions to be used sequentially in
                the raymarching step. The output of raymarching with
                implicit_functions[n-1] is refined, and then used as
                input for raymarching with implicit_functions[n].
            evaluation_mode: one of EvaluationMode.TRAINING or
                EvaluationMode.EVALUATION which determines the settings used for
                rendering

        Returns:
            instance of RendererOutput

        Raises:
            ValueError: if `implicit_functions` is empty.
        """
        if not implicit_functions:
            raise ValueError("EA renderer expects implicit functions")

        # Kick off the (possibly multi-pass) raymarching recursion with no
        # previous-stage output.
        return self._run_raymarcher(
            ray_bundle,
            implicit_functions,
            None,
            evaluation_mode,
        )

    def _run_raymarcher(
        self, ray_bundle, implicit_functions, prev_stage, evaluation_mode
    ):
        # Run one raymarching pass with implicit_functions[0], then recurse on
        # the remaining functions using importance-resampled ray points.
        # `prev_stage` links each pass's output to the previous pass's result.

        # Density noise is a training-only regularizer; disabled at evaluation.
        density_noise_std = (
            self.density_noise_std_train
            if evaluation_mode == EvaluationMode.TRAINING
            else 0.0
        )

        # When the bundle carries explicit bins, per-interval deltas come from
        # adjacent bin differences; otherwise the raymarcher derives spacing
        # from `ray_lengths` itself.
        ray_deltas = (
            None if ray_bundle.bins is None else torch.diff(ray_bundle.bins, dim=-1)
        )
        output = self.raymarcher(
            *implicit_functions[0](ray_bundle=ray_bundle),
            ray_lengths=ray_bundle.lengths,
            ray_deltas=ray_deltas,
            density_noise_std=density_noise_std,
        )
        output.prev_stage = prev_stage

        # Capture the weights before optionally dropping them from the output:
        # the refiner below still needs them for importance sampling.
        weights = output.weights
        if not self.return_weights:
            output.weights = None

        # we may need to make a recursive call
        if len(implicit_functions) > 1:
            fine_ray_bundle = self._refiners[evaluation_mode](ray_bundle, weights)
            output = self._run_raymarcher(
                fine_ray_bundle,
                implicit_functions[1:],
                output,
                evaluation_mode,
            )

        return output
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

import copy

import torch
from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle
from pytorch3d.implicitron.tools.config import Configurable, expand_args_fields

from pytorch3d.renderer.implicit.sample_pdf import sample_pdf


@expand_args_fields
class RayPointRefiner(Configurable, torch.nn.Module):
    """
    Implements the importance sampling of points along rays.
    The input is a `RayBundle` object with a `ray_weights` tensor
    which specifies the probabilities of sampling a point along each ray.

    This raysampler is used for the fine rendering pass of NeRF.
    As such, the forward pass accepts the RayBundle output by the
    raysampling of the coarse rendering pass. Hence, it does not
    take cameras as input.

    Args:
        n_pts_per_ray: The number of points to sample along each ray.
        random_sampling: If `False`, returns equispaced percentiles of the
            distribution defined by the input weights, otherwise performs
            sampling from that distribution.
        add_input_samples: Concatenates and returns the sampled values
            together with the input samples.
        blurpool_weights: Use blurpool defined in [1], on the input weights.
        sample_pdf_eps: A constant preventing division by zero in case empty bins
            are present.

    References:
        [1] Jonathan T. Barron, et al. "Mip-NeRF: A Multiscale Representation
            for Anti-Aliasing Neural Radiance Fields." ICCV 2021.
    """

    # pyre-fixme[13]: Attribute `n_pts_per_ray` is never initialized.
    n_pts_per_ray: int
    # pyre-fixme[13]: Attribute `random_sampling` is never initialized.
    random_sampling: bool
    add_input_samples: bool = True
    blurpool_weights: bool = False
    sample_pdf_eps: float = 1e-5

    def forward(
        self,
        input_ray_bundle: ImplicitronRayBundle,
        ray_weights: torch.Tensor,
        blurpool_weights: bool = False,
        sample_pdf_padding: float = 1e-5,
        **kwargs,
    ) -> ImplicitronRayBundle:
        """
        Args:
            input_ray_bundle: An instance of `ImplicitronRayBundle` specifying the
                source rays for sampling of the probability distribution.
            ray_weights: A tensor of shape
                `(..., input_ray_bundle.lengths.shape[-1])` with non-negative
                elements defining the probability distribution to sample
                ray points from.
            blurpool_weights: Use blurpool defined in [1], on the input weights.
            sample_pdf_padding: A constant preventing division by zero in case empty bins
                are present.

        Returns:
            ray_bundle: A new `ImplicitronRayBundle` instance containing the input ray
                points together with `n_pts_per_ray` additionally sampled
                points per ray. For each ray, the lengths are sorted.

        References:
            [1] Jonathan T. Barron, et al. "Mip-NeRF: A Multiscale Representation
                for Anti-Aliasing Neural Radiance Fields." ICCV 2021.

        """
        # NOTE(review): the `blurpool_weights` and `sample_pdf_padding`
        # parameters are accepted but never read below — the body uses
        # `self.blurpool_weights` and `self.sample_pdf_eps` instead.
        # Confirm whether the parameters should be honored or removed.

        # Sampling must not backpropagate through the coarse-pass weights.
        with torch.no_grad():
            if self.blurpool_weights:
                ray_weights = apply_blurpool_on_weights(ray_weights)

            n_pts_per_ray = self.n_pts_per_ray
            # Flatten the batch/spatial dimensions for sample_pdf.
            ray_weights = ray_weights.view(-1, ray_weights.shape[-1])
            if input_ray_bundle.bins is None:
                # No explicit bins: treat midpoints between consecutive
                # lengths as bin edges and drop the two boundary weights.
                z_vals: torch.Tensor = input_ray_bundle.lengths
                ray_weights = ray_weights[..., 1:-1]
                bins = torch.lerp(z_vals[..., 1:], z_vals[..., :-1], 0.5)
            else:
                # With explicit bins there is one more edge than intervals,
                # hence one extra sample is drawn.
                z_vals = input_ray_bundle.bins
                n_pts_per_ray += 1
                bins = z_vals
            z_samples = sample_pdf(
                bins.view(-1, bins.shape[-1]),
                ray_weights,
                n_pts_per_ray,
                det=not self.random_sampling,
                eps=self.sample_pdf_eps,
            ).view(*z_vals.shape[:-1], n_pts_per_ray)

        if self.add_input_samples:
            z_vals = torch.cat((z_vals, z_samples), dim=-1)
        else:
            z_vals = z_samples
        # Resort by depth.
        z_vals, _ = torch.sort(z_vals, dim=-1)
        # Shallow-copy the bundle so the caller's coarse bundle is untouched,
        # then overwrite whichever depth representation it carried.
        ray_bundle = copy.copy(input_ray_bundle)
        if input_ray_bundle.bins is None:
            ray_bundle.lengths = z_vals
        else:
            ray_bundle.bins = z_vals

        return ray_bundle


def apply_blurpool_on_weights(weights) -> torch.Tensor:
    """
    Filter weights with a 2-tap max filter followed by a 2-tap blur filter,
    which produces a wide and smooth upper envelope on the weights.

    Args:
        weights: Tensor of shape `(..., dim)`

    Returns:
        blured_weights: Tensor of shape `(..., dim)`
    """
    # Replicate the edge values so the filters preserve the last dimension.
    weights_pad = torch.concatenate(
        [
            weights[..., :1],
            weights,
            weights[..., -1:],
        ],
        dim=-1,
    )

    # 2-tap max filter over the padded weights (stride 1 keeps the length).
    weights_max = torch.nn.functional.max_pool1d(
        weights_pad.flatten(end_dim=-2), 2, stride=1
    )
    # 2-tap blur: average of adjacent maxima, reshaped back to the input shape.
    return torch.lerp(weights_max[..., :-1], weights_max[..., 1:], 0.5).reshape_as(
        weights
    )
+ evaluation_mode: one of `EvaluationMode.TRAINING` or + `EvaluationMode.EVALUATION` which determines the sampling mode + that is used. + mask: Active for the `RenderSamplingMode.MASK_SAMPLE` sampling mode. + Defines a non-negative mask of shape + `(batch_size, image_height, image_width)` where each per-pixel + value is proportional to the probability of sampling the + corresponding pixel's ray. + + Returns: + ray_bundle: A `ImplicitronRayBundle` object containing the parametrizations of the + sampled rendering rays. + """ + raise NotImplementedError() + + +class AbstractMaskRaySampler(RaySamplerBase, torch.nn.Module): + """ + Samples a fixed number of points along rays which are in turn sampled for + each camera in a batch. + + This class utilizes `NDCMultinomialRaysampler` which allows to either + randomly sample rays from an input foreground saliency mask + (`RenderSamplingMode.MASK_SAMPLE`), or on a rectangular image grid + (`RenderSamplingMode.FULL_GRID`). The sampling mode can be set separately + for training and evaluation by setting `self.sampling_mode_training` + and `self.sampling_mode_training` accordingly. + + The class allows to adjust the sampling points along rays by overwriting the + `AbstractMaskRaySampler._get_min_max_depth_bounds` function which returns + the near/far planes (`min_depth`/`max_depth`) `NDCMultinomialRaysampler`. + + Settings: + image_width: The horizontal size of the image grid. + image_height: The vertical size of the image grid. + sampling_mode_training: The ray sampling mode for training. This should be a str + option from the RenderSamplingMode Enum + sampling_mode_evaluation: Same as above but for evaluation. + n_pts_per_ray_training: The number of points sampled along each ray during training. + n_pts_per_ray_evaluation: The number of points sampled along each ray during evaluation. + n_rays_per_image_sampled_from_mask: The amount of rays to be sampled from the image + grid. 
Given a batch of image grids, this many is sampled from each. + `n_rays_per_image_sampled_from_mask` and `n_rays_total_training` cannot both be + defined. + n_rays_total_training: (optional) How many rays in total to sample from the entire + batch of provided image grid. The result is as if `n_rays_total_training` + cameras/image grids were sampled with replacement from the cameras / image grids + provided and for every camera one ray was sampled. + `n_rays_per_image_sampled_from_mask` and `n_rays_total_training` cannot both be + defined, to use you have to set `n_rays_per_image` to None. + Used only for EvaluationMode.TRAINING. + stratified_point_sampling_training: if set, performs stratified random sampling + along the ray; otherwise takes ray points at deterministic offsets. + stratified_point_sampling_evaluation: Same as above but for evaluation. + cast_ray_bundle_as_cone: If True, the sampling will generate the bins and radii + attribute of ImplicitronRayBundle. The `bins` contain the z-coordinate + (=depth) of each ray in world units and are of shape + `(batch_size, n_rays_per_image, n_pts_per_ray_training/evaluation + 1)` + while `lengths` is equal to the midpoint of the bins: + (0.5 * (bins[..., 1:] + bins[..., :-1]). + If False, `bins` is None, `radii` is None and `lengths` contains + the z-coordinate (=depth) of each ray in world units and are of shape + `(batch_size, n_rays_per_image, n_pts_per_ray_training/evaluation)` + + Raises: + TypeError: if cast_ray_bundle_as_cone is set to True and n_rays_total_training + is not None will result in an error. HeterogeneousRayBundle is + not supported for conical frustum computation yet. 
+ """ + + image_width: int = 400 + image_height: int = 400 + sampling_mode_training: str = "mask_sample" + sampling_mode_evaluation: str = "full_grid" + n_pts_per_ray_training: int = 64 + n_pts_per_ray_evaluation: int = 64 + n_rays_per_image_sampled_from_mask: Optional[int] = 1024 + n_rays_total_training: Optional[int] = None + # stratified sampling vs taking points at deterministic offsets + stratified_point_sampling_training: bool = True + stratified_point_sampling_evaluation: bool = False + cast_ray_bundle_as_cone: bool = False + + def __post_init__(self): + if (self.n_rays_per_image_sampled_from_mask is not None) and ( + self.n_rays_total_training is not None + ): + raise ValueError( + "Cannot both define n_rays_total_training and " + "n_rays_per_image_sampled_from_mask." + ) + + self._sampling_mode = { + EvaluationMode.TRAINING: RenderSamplingMode(self.sampling_mode_training), + EvaluationMode.EVALUATION: RenderSamplingMode( + self.sampling_mode_evaluation + ), + } + + n_pts_per_ray_training = ( + self.n_pts_per_ray_training + 1 + if self.cast_ray_bundle_as_cone + else self.n_pts_per_ray_training + ) + n_pts_per_ray_evaluation = ( + self.n_pts_per_ray_evaluation + 1 + if self.cast_ray_bundle_as_cone + else self.n_pts_per_ray_evaluation + ) + self._training_raysampler = NDCMultinomialRaysampler( + image_width=self.image_width, + image_height=self.image_height, + n_pts_per_ray=n_pts_per_ray_training, + min_depth=0.0, + max_depth=0.0, + n_rays_per_image=( + self.n_rays_per_image_sampled_from_mask + if self._sampling_mode[EvaluationMode.TRAINING] + == RenderSamplingMode.MASK_SAMPLE + else None + ), + n_rays_total=self.n_rays_total_training, + unit_directions=True, + stratified_sampling=self.stratified_point_sampling_training, + ) + + self._evaluation_raysampler = NDCMultinomialRaysampler( + image_width=self.image_width, + image_height=self.image_height, + n_pts_per_ray=n_pts_per_ray_evaluation, + min_depth=0.0, + max_depth=0.0, + n_rays_per_image=( + 
self.n_rays_per_image_sampled_from_mask + if self._sampling_mode[EvaluationMode.EVALUATION] + == RenderSamplingMode.MASK_SAMPLE + else None + ), + unit_directions=True, + stratified_sampling=self.stratified_point_sampling_evaluation, + ) + + max_y, min_y = self._training_raysampler.max_y, self._training_raysampler.min_y + max_x, min_x = self._training_raysampler.max_x, self._training_raysampler.min_x + self.pixel_height: float = (max_y - min_y) / (self.image_height - 1) + self.pixel_width: float = (max_x - min_x) / (self.image_width - 1) + + def _get_min_max_depth_bounds(self, cameras: CamerasBase) -> Tuple[float, float]: + raise NotImplementedError() + + def forward( + self, + cameras: CamerasBase, + evaluation_mode: EvaluationMode, + mask: Optional[torch.Tensor] = None, + ) -> ImplicitronRayBundle: + """ + + Args: + cameras: A batch of `batch_size` cameras from which the rays are emitted. + evaluation_mode: one of `EvaluationMode.TRAINING` or + `EvaluationMode.EVALUATION` which determines the sampling mode + that is used. + mask: Active for the `RenderSamplingMode.MASK_SAMPLE` sampling mode. + Defines a non-negative mask of shape + `(batch_size, image_height, image_width)` where each per-pixel + value is proportional to the probability of sampling the + corresponding pixel's ray. + + Returns: + ray_bundle: A `ImplicitronRayBundle` object containing the parametrizations of the + sampled rendering rays. + """ + sample_mask = None + if ( + # pyre-fixme[29]: `Union[(self: TensorBase, indices: Union[None, slice[An... 
+ self._sampling_mode[evaluation_mode] == RenderSamplingMode.MASK_SAMPLE + and mask is not None + ): + sample_mask = torch.nn.functional.interpolate( + mask, + size=[self.image_height, self.image_width], + mode="nearest", + )[:, 0] + + min_depth, max_depth = self._get_min_max_depth_bounds(cameras) + + raysampler = { + EvaluationMode.TRAINING: self._training_raysampler, + EvaluationMode.EVALUATION: self._evaluation_raysampler, + }[evaluation_mode] + + # pyre-fixme[29]: `Union[Tensor, Module]` is not a function. + ray_bundle = raysampler( + cameras=cameras, + mask=sample_mask, + min_depth=min_depth, + max_depth=max_depth, + ) + if self.cast_ray_bundle_as_cone and isinstance( + ray_bundle, HeterogeneousRayBundle + ): + # If this error rises it means that raysampler has among + # its arguments `n_ray_totals`. If it is the case + # then you should update the radii computation and lengths + # computation to handle padding and unpadding. + raise TypeError( + "Heterogeneous ray bundle is not supported for conical frustum computation yet" + ) + elif self.cast_ray_bundle_as_cone: + # pyre-fixme[9]: pixel_hw has type `Tuple[float, float]`; used as + # `Tuple[Union[Tensor, Module], Union[Tensor, Module]]`. 
+ pixel_hw: Tuple[float, float] = (self.pixel_height, self.pixel_width) + pixel_radii_2d = compute_radii(cameras, ray_bundle.xys[..., :2], pixel_hw) + return ImplicitronRayBundle( + directions=ray_bundle.directions, + origins=ray_bundle.origins, + lengths=None, + xys=ray_bundle.xys, + bins=ray_bundle.lengths, + pixel_radii_2d=pixel_radii_2d, + ) + + return ImplicitronRayBundle( + directions=ray_bundle.directions, + origins=ray_bundle.origins, + lengths=ray_bundle.lengths, + xys=ray_bundle.xys, + camera_counts=getattr(ray_bundle, "camera_counts", None), + camera_ids=getattr(ray_bundle, "camera_ids", None), + ) + + +@registry.register +class AdaptiveRaySampler(AbstractMaskRaySampler): + """ + Adaptively samples points on each ray between near and far planes whose + depths are determined based on the distance from the camera center + to a predefined scene center. + + More specifically, + `min_depth = max( + (self.scene_center-camera_center).norm() - self.scene_extent, eps + )` and + `max_depth = (self.scene_center-camera_center).norm() + self.scene_extent`. + + This sampling is ideal for object-centric scenes whose contents are + centered around a known `self.scene_center` and fit into a bounding sphere + with a radius of `self.scene_extent`. + + Args: + scene_center: The xyz coordinates of the center of the scene used + along with `scene_extent` to compute the min and max depth planes + for sampling ray-points. + scene_extent: The radius of the scene bounding box centered at `scene_center`. + """ + + scene_extent: float = 8.0 + scene_center: Tuple[float, float, float] = (0.0, 0.0, 0.0) + + def __post_init__(self): + super().__post_init__() + if self.scene_extent <= 0.0: + raise ValueError("Adaptive raysampler requires self.scene_extent > 0.") + self._scene_center = torch.FloatTensor(self.scene_center) + + def _get_min_max_depth_bounds(self, cameras: CamerasBase) -> Tuple[float, float]: + """ + Returns the adaptively calculated near/far planes. 
+ """ + min_depth, max_depth = camera_utils.get_min_max_depth_bounds( + cameras, self._scene_center, self.scene_extent + ) + return float(min_depth[0]), float(max_depth[0]) + + +@registry.register +class NearFarRaySampler(AbstractMaskRaySampler): + """ + Samples a fixed number of points between fixed near and far z-planes. + Specifically, samples points along each ray with approximately uniform spacing + of z-coordinates between the minimum depth `self.min_depth` and the maximum depth + `self.max_depth`. This sampling is useful for rendering scenes where the camera is + in a constant distance from the focal point of the scene. + + Args: + min_depth: The minimum depth of a ray-point. + max_depth: The maximum depth of a ray-point. + """ + + min_depth: float = 0.1 + max_depth: float = 8.0 + + def _get_min_max_depth_bounds(self, cameras: CamerasBase) -> Tuple[float, float]: + """ + Returns the stored near/far planes. + """ + return self.min_depth, self.max_depth + + +def compute_radii( + cameras: CamerasBase, + xy_grid: torch.Tensor, + pixel_hw_ndc: Tuple[float, float], +) -> torch.Tensor: + """ + Compute radii of conical frustums in world coordinates. + + Args: + cameras: cameras object representing a batch of cameras. + xy_grid: torch.tensor grid of image xy coords. + pixel_hw_ndc: pixel height and width in NDC + + Returns: + radii: A tensor of shape `(..., 1)` radii of a cone. 
+ """ + batch_size = xy_grid.shape[0] + spatial_size = xy_grid.shape[1:-1] + n_rays_per_image = spatial_size.numel() + + xy = xy_grid.view(batch_size, n_rays_per_image, 2) + + # [batch_size, 3 * n_rays_per_image, 2] + xy = torch.cat( + [ + xy, + # Will allow to find the norm on the x axis + xy + torch.tensor([pixel_hw_ndc[1], 0], device=xy.device), + # Will allow to find the norm on the y axis + xy + torch.tensor([0, pixel_hw_ndc[0]], device=xy.device), + ], + dim=1, + ) + # [batch_size, 3 * n_rays_per_image, 3] + xyz = torch.cat( + ( + xy, + xy.new_ones(batch_size, 3 * n_rays_per_image, 1), + ), + dim=-1, + ) + + # unproject the points + unprojected_xyz = cameras.unproject_points(xyz, from_ndc=True) + + plane_world, plane_world_dx, plane_world_dy = torch.split( + unprojected_xyz, n_rays_per_image, dim=1 + ) + + # Distance from each unit-norm direction vector to its neighbors. + dx_norm = torch.linalg.norm(plane_world_dx - plane_world, dim=-1, keepdims=True) + dy_norm = torch.linalg.norm(plane_world_dy - plane_world, dim=-1, keepdims=True) + # Cut the distance in half to obtain the base radius: (dx_norm + dy_norm) * 0.5 + # Scale it by 2/12**0.5 to match the variance of the pixel’s footprint + radii = (dx_norm + dy_norm) / 12**0.5 + + return radii.view(batch_size, *spatial_size, 1) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/renderer/ray_tracing.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/renderer/ray_tracing.py new file mode 100644 index 0000000000000000000000000000000000000000..9ad46a83e9eee904e61dea6e00306083c0fd4327 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/renderer/ray_tracing.py @@ -0,0 +1,592 @@ +# @lint-ignore-every LICENSELINT +# Adapted from https://github.com/lioryariv/idr +# Copyright (c) 2020 Lior Yariv + +# pyre-unsafe + +from typing 
import Any, Callable, Tuple + +import torch +import torch.nn as nn +from pytorch3d.implicitron.tools.config import Configurable + + +class RayTracing(Configurable, nn.Module): + """ + Finds the intersection points of rays with the implicit surface defined + by a signed distance function (SDF). The algorithm follows the pipeline: + 1. Initialise start and end points on rays by the intersections with + the circumscribing sphere. + 2. Run sphere tracing from both ends. + 3. Divide the untraced segments of non-convergent rays into uniform + intervals and find the one with the sign transition. + 4. Run the secant method to estimate the point of the sign transition. + + Args: + object_bounding_sphere: The radius of the initial sphere circumscribing + the object. + sdf_threshold: Absolute SDF value small enough for the sphere tracer + to consider it a surface. + line_search_step: Length of the backward correction on sphere tracing + iterations. + line_step_iters: Number of backward correction iterations. + sphere_tracing_iters: Maximum number of sphere tracing iterations + (the actual number of iterations may be smaller if all ray + intersections are found). + n_steps: Number of intervals sampled for unconvergent rays. + n_secant_steps: Number of iterations in the secant algorithm. + """ + + object_bounding_sphere: float = 1.0 + sdf_threshold: float = 5.0e-5 + line_search_step: float = 0.5 + line_step_iters: int = 1 + sphere_tracing_iters: int = 10 + n_steps: int = 100 + n_secant_steps: int = 8 + + def forward( + self, + sdf: Callable[[torch.Tensor], torch.Tensor], + cam_loc: torch.Tensor, + object_mask: torch.BoolTensor, + ray_directions: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Args: + sdf: A callable that takes a (N, 3) tensor of points and returns + a tensor of (N,) SDF values. + cam_loc: A tensor of (B, N, 3) ray origins. 
+ object_mask: A (N, 3) tensor of indicators whether a sampled pixel + corresponds to the rendered object or background. + ray_directions: A tensor of (B, N, 3) ray directions. + + Returns: + curr_start_points: A tensor of (B*N, 3) found intersection points + with the implicit surface. + network_object_mask: A tensor of (B*N,) indicators denoting whether + intersections were found. + acc_start_dis: A tensor of (B*N,) distances from the ray origins + to intersrection points. + """ + batch_size, num_pixels, _ = ray_directions.shape + device = cam_loc.device + + sphere_intersections, mask_intersect = _get_sphere_intersection( + cam_loc, ray_directions, r=self.object_bounding_sphere + ) + + ( + curr_start_points, + unfinished_mask_start, + acc_start_dis, + acc_end_dis, + min_dis, + max_dis, + ) = self.sphere_tracing( + batch_size, + num_pixels, + sdf, + cam_loc, + ray_directions, + mask_intersect, + sphere_intersections, + ) + + network_object_mask = acc_start_dis < acc_end_dis + + # The non convergent rays should be handled by the sampler + sampler_mask = unfinished_mask_start + sampler_net_obj_mask = torch.zeros_like( + sampler_mask, dtype=torch.bool, device=device + ) + if sampler_mask.sum() > 0: + sampler_min_max = torch.zeros((batch_size, num_pixels, 2), device=device) + sampler_min_max.reshape(-1, 2)[sampler_mask, 0] = acc_start_dis[ + sampler_mask + ] + sampler_min_max.reshape(-1, 2)[sampler_mask, 1] = acc_end_dis[sampler_mask] + + sampler_pts, sampler_net_obj_mask, sampler_dists = self.ray_sampler( + sdf, cam_loc, object_mask, ray_directions, sampler_min_max, sampler_mask + ) + + curr_start_points[sampler_mask] = sampler_pts[sampler_mask] + acc_start_dis[sampler_mask] = sampler_dists[sampler_mask] + network_object_mask[sampler_mask] = sampler_net_obj_mask[sampler_mask] + + if not self.training: + return curr_start_points, network_object_mask, acc_start_dis + + # in case we are training, we are updating curr_start_points and acc_start_dis for + + ray_directions 
= ray_directions.reshape(-1, 3) + mask_intersect = mask_intersect.reshape(-1) + # pyre-fixme[9]: object_mask has type `BoolTensor`; used as `Tensor`. + object_mask = object_mask.reshape(-1) + + in_mask = ~network_object_mask & object_mask & ~sampler_mask + out_mask = ~object_mask & ~sampler_mask + + mask_left_out = (in_mask | out_mask) & ~mask_intersect + if ( + mask_left_out.sum() > 0 + ): # project the origin to the not intersect points on the sphere + cam_left_out = cam_loc.reshape(-1, 3)[mask_left_out] + rays_left_out = ray_directions[mask_left_out] + acc_start_dis[mask_left_out] = -torch.bmm( + rays_left_out.view(-1, 1, 3), cam_left_out.view(-1, 3, 1) + ).squeeze() + curr_start_points[mask_left_out] = ( + cam_left_out + acc_start_dis[mask_left_out].unsqueeze(1) * rays_left_out + ) + + mask = (in_mask | out_mask) & mask_intersect + + if mask.sum() > 0: + min_dis[network_object_mask & out_mask] = acc_start_dis[ + network_object_mask & out_mask + ] + + min_mask_points, min_mask_dist = self.minimal_sdf_points( + sdf, cam_loc, ray_directions, mask, min_dis, max_dis + ) + + curr_start_points[mask] = min_mask_points + acc_start_dis[mask] = min_mask_dist + + return curr_start_points, network_object_mask, acc_start_dis + + def sphere_tracing( + self, + batch_size: int, + num_pixels: int, + sdf: Callable[[torch.Tensor], torch.Tensor], + cam_loc: torch.Tensor, + ray_directions: torch.Tensor, + mask_intersect: torch.Tensor, + sphere_intersections: torch.Tensor, + ) -> Tuple[Any, Any, Any, Any, Any, Any]: + """ + Run sphere tracing algorithm for max iterations + from both sides of unit sphere intersection + + Args: + batch_size: + num_pixels: + sdf: + cam_loc: + ray_directions: + mask_intersect: + sphere_intersections: + + Returns: + curr_start_points: + unfinished_mask_start: + acc_start_dis: + acc_end_dis: + min_dis: + max_dis: + """ + + device = cam_loc.device + sphere_intersections_points = ( + cam_loc[..., None, :] + + sphere_intersections[..., None] * 
ray_directions[..., None, :] + ) + unfinished_mask_start = mask_intersect.reshape(-1).clone() + unfinished_mask_end = mask_intersect.reshape(-1).clone() + + # Initialize start current points + curr_start_points = torch.zeros(batch_size * num_pixels, 3, device=device) + curr_start_points[unfinished_mask_start] = sphere_intersections_points[ + :, :, 0, : + ].reshape(-1, 3)[unfinished_mask_start] + acc_start_dis = torch.zeros(batch_size * num_pixels, device=device) + acc_start_dis[unfinished_mask_start] = sphere_intersections.reshape(-1, 2)[ + unfinished_mask_start, 0 + ] + + # Initialize end current points + curr_end_points = torch.zeros(batch_size * num_pixels, 3, device=device) + curr_end_points[unfinished_mask_end] = sphere_intersections_points[ + :, :, 1, : + ].reshape(-1, 3)[unfinished_mask_end] + acc_end_dis = torch.zeros(batch_size * num_pixels, device=device) + acc_end_dis[unfinished_mask_end] = sphere_intersections.reshape(-1, 2)[ + unfinished_mask_end, 1 + ] + + # Initialise min and max depth + min_dis = acc_start_dis.clone() + max_dis = acc_end_dis.clone() + + # Iterate on the rays (from both sides) till finding a surface + iters = 0 + + # TODO: sdf should also pass info about batches + + next_sdf_start = torch.zeros_like(acc_start_dis) + next_sdf_start[unfinished_mask_start] = sdf( + curr_start_points[unfinished_mask_start] + ) + + next_sdf_end = torch.zeros_like(acc_end_dis) + next_sdf_end[unfinished_mask_end] = sdf(curr_end_points[unfinished_mask_end]) + + while True: + # Update sdf + curr_sdf_start = torch.zeros_like(acc_start_dis) + curr_sdf_start[unfinished_mask_start] = next_sdf_start[ + unfinished_mask_start + ] + curr_sdf_start[curr_sdf_start <= self.sdf_threshold] = 0 + + curr_sdf_end = torch.zeros_like(acc_end_dis) + curr_sdf_end[unfinished_mask_end] = next_sdf_end[unfinished_mask_end] + curr_sdf_end[curr_sdf_end <= self.sdf_threshold] = 0 + + # Update masks + unfinished_mask_start = unfinished_mask_start & ( + curr_sdf_start > 
self.sdf_threshold + ) + unfinished_mask_end = unfinished_mask_end & ( + curr_sdf_end > self.sdf_threshold + ) + + if ( + unfinished_mask_start.sum() == 0 and unfinished_mask_end.sum() == 0 + ) or iters == self.sphere_tracing_iters: + break + iters += 1 + + # Make step + # Update distance + acc_start_dis = acc_start_dis + curr_sdf_start + acc_end_dis = acc_end_dis - curr_sdf_end + + # Update points + curr_start_points = ( + cam_loc + + acc_start_dis.reshape(batch_size, num_pixels, 1) * ray_directions + ).reshape(-1, 3) + curr_end_points = ( + cam_loc + + acc_end_dis.reshape(batch_size, num_pixels, 1) * ray_directions + ).reshape(-1, 3) + + # Fix points which wrongly crossed the surface + next_sdf_start = torch.zeros_like(acc_start_dis) + next_sdf_start[unfinished_mask_start] = sdf( + curr_start_points[unfinished_mask_start] + ) + + next_sdf_end = torch.zeros_like(acc_end_dis) + next_sdf_end[unfinished_mask_end] = sdf( + curr_end_points[unfinished_mask_end] + ) + + not_projected_start = next_sdf_start < 0 + not_projected_end = next_sdf_end < 0 + not_proj_iters = 0 + while ( + not_projected_start.sum() > 0 or not_projected_end.sum() > 0 + ) and not_proj_iters < self.line_step_iters: + # Step backwards + acc_start_dis[not_projected_start] -= ( + (1 - self.line_search_step) / (2**not_proj_iters) + ) * curr_sdf_start[not_projected_start] + curr_start_points[not_projected_start] = ( + cam_loc + + acc_start_dis.reshape(batch_size, num_pixels, 1) * ray_directions + ).reshape(-1, 3)[not_projected_start] + + acc_end_dis[not_projected_end] += ( + (1 - self.line_search_step) / (2**not_proj_iters) + ) * curr_sdf_end[not_projected_end] + curr_end_points[not_projected_end] = ( + cam_loc + + acc_end_dis.reshape(batch_size, num_pixels, 1) * ray_directions + ).reshape(-1, 3)[not_projected_end] + + # Calc sdf + next_sdf_start[not_projected_start] = sdf( + curr_start_points[not_projected_start] + ) + next_sdf_end[not_projected_end] = sdf( + curr_end_points[not_projected_end] + ) + + # 
Update mask + not_projected_start = next_sdf_start < 0 + not_projected_end = next_sdf_end < 0 + not_proj_iters += 1 + + unfinished_mask_start = unfinished_mask_start & ( + acc_start_dis < acc_end_dis + ) + unfinished_mask_end = unfinished_mask_end & (acc_start_dis < acc_end_dis) + + return ( + curr_start_points, + unfinished_mask_start, + acc_start_dis, + acc_end_dis, + min_dis, + max_dis, + ) + + def ray_sampler( + self, + sdf: Callable[[torch.Tensor], torch.Tensor], + cam_loc: torch.Tensor, + object_mask: torch.Tensor, + ray_directions: torch.Tensor, + sampler_min_max: torch.Tensor, + sampler_mask: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Sample the ray in a given range and run secant on rays which have sign transition. + + Args: + sdf: + cam_loc: + object_mask: + ray_directions: + sampler_min_max: + sampler_mask: + + Returns: + + """ + + batch_size, num_pixels, _ = ray_directions.shape + device = cam_loc.device + n_total_pxl = batch_size * num_pixels + sampler_pts = torch.zeros(n_total_pxl, 3, device=device) + sampler_dists = torch.zeros(n_total_pxl, device=device) + + intervals_dist = torch.linspace(0, 1, steps=self.n_steps, device=device).view( + 1, 1, -1 + ) + + pts_intervals = sampler_min_max[:, :, 0].unsqueeze(-1) + intervals_dist * ( + sampler_min_max[:, :, 1] - sampler_min_max[:, :, 0] + ).unsqueeze(-1) + points = ( + cam_loc[..., None, :] + + pts_intervals[..., None] * ray_directions[..., None, :] + ) + + # Get the non convergent rays + mask_intersect_idx = torch.nonzero(sampler_mask).flatten() + points = points.reshape((-1, self.n_steps, 3))[sampler_mask, :, :] + pts_intervals = pts_intervals.reshape((-1, self.n_steps))[sampler_mask] + + sdf_val_all = [] + for pnts in torch.split(points.reshape(-1, 3), 100000, dim=0): + sdf_val_all.append(sdf(pnts)) + sdf_val = torch.cat(sdf_val_all).reshape(-1, self.n_steps) + + tmp = torch.sign(sdf_val) * torch.arange( + self.n_steps, 0, -1, device=device, dtype=torch.float32 + 
).reshape(1, self.n_steps) + # Force argmin to return the first min value + sampler_pts_ind = torch.argmin(tmp, -1) + sampler_pts[mask_intersect_idx] = points[ + torch.arange(points.shape[0]), sampler_pts_ind, : + ] + sampler_dists[mask_intersect_idx] = pts_intervals[ + torch.arange(pts_intervals.shape[0]), sampler_pts_ind + ] + + true_surface_pts = object_mask.reshape(-1)[sampler_mask] + net_surface_pts = sdf_val[torch.arange(sdf_val.shape[0]), sampler_pts_ind] < 0 + + # take points with minimal SDF value for P_out pixels + p_out_mask = ~(true_surface_pts & net_surface_pts) + n_p_out = p_out_mask.sum() + if n_p_out > 0: + out_pts_idx = torch.argmin(sdf_val[p_out_mask, :], -1) + sampler_pts[mask_intersect_idx[p_out_mask]] = points[p_out_mask, :, :][ + # pyre-fixme[6]: For 1st param expected `Union[bool, float, int]` + # but got `Tensor`. + torch.arange(n_p_out), + out_pts_idx, + :, + ] + sampler_dists[mask_intersect_idx[p_out_mask]] = pts_intervals[ + p_out_mask, + :, + # pyre-fixme[6]: For 1st param expected `Union[bool, float, int]` but + # got `Tensor`. + ][torch.arange(n_p_out), out_pts_idx] + + # Get Network object mask + sampler_net_obj_mask = sampler_mask.clone() + sampler_net_obj_mask[mask_intersect_idx[~net_surface_pts]] = False + + # Run Secant method + secant_pts = ( + net_surface_pts & true_surface_pts if self.training else net_surface_pts + ) + n_secant_pts = secant_pts.sum() + if n_secant_pts > 0: + # Get secant z predictions + z_high = pts_intervals[ + torch.arange(pts_intervals.shape[0]), sampler_pts_ind + ][secant_pts] + sdf_high = sdf_val[torch.arange(sdf_val.shape[0]), sampler_pts_ind][ + secant_pts + ] + z_low = pts_intervals[secant_pts][ + # pyre-fixme[6]: For 1st param expected `Union[bool, float, int]` + # but got `Tensor`. + torch.arange(n_secant_pts), + sampler_pts_ind[secant_pts] - 1, + ] + sdf_low = sdf_val[secant_pts][ + # pyre-fixme[6]: For 1st param expected `Union[bool, float, int]` + # but got `Tensor`. 
+ torch.arange(n_secant_pts), + sampler_pts_ind[secant_pts] - 1, + ] + cam_loc_secant = cam_loc.reshape(-1, 3)[mask_intersect_idx[secant_pts]] + ray_directions_secant = ray_directions.reshape((-1, 3))[ + mask_intersect_idx[secant_pts] + ] + z_pred_secant = self.secant( + sdf_low, + sdf_high, + z_low, + z_high, + cam_loc_secant, + ray_directions_secant, + # pyre-fixme[6]: For 7th param expected `Module` but got `(Tensor) + # -> Tensor`. + sdf, + ) + + # Get points + sampler_pts[mask_intersect_idx[secant_pts]] = ( + cam_loc_secant + z_pred_secant.unsqueeze(-1) * ray_directions_secant + ) + sampler_dists[mask_intersect_idx[secant_pts]] = z_pred_secant + + return sampler_pts, sampler_net_obj_mask, sampler_dists + + def secant( + self, + sdf_low: torch.Tensor, + sdf_high: torch.Tensor, + z_low: torch.Tensor, + z_high: torch.Tensor, + cam_loc: torch.Tensor, + ray_directions: torch.Tensor, + sdf: nn.Module, + ) -> torch.Tensor: + """ + Runs the secant method for interval [z_low, z_high] for n_secant_steps + """ + + z_pred = -sdf_low * (z_high - z_low) / (sdf_high - sdf_low) + z_low + for _ in range(self.n_secant_steps): + p_mid = cam_loc + z_pred.unsqueeze(-1) * ray_directions + sdf_mid = sdf(p_mid) + ind_low = sdf_mid > 0 + if ind_low.sum() > 0: + z_low[ind_low] = z_pred[ind_low] + sdf_low[ind_low] = sdf_mid[ind_low] + ind_high = sdf_mid < 0 + if ind_high.sum() > 0: + z_high[ind_high] = z_pred[ind_high] + sdf_high[ind_high] = sdf_mid[ind_high] + + z_pred = -sdf_low * (z_high - z_low) / (sdf_high - sdf_low) + z_low + + return z_pred + + def minimal_sdf_points( + self, + sdf: Callable[[torch.Tensor], torch.Tensor], + cam_loc: torch.Tensor, + ray_directions: torch.Tensor, + mask: torch.Tensor, + min_dis: torch.Tensor, + max_dis: torch.Tensor, + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Find points with minimal SDF value on rays for P_out pixels + """ + + n_mask_points = mask.sum() + + n = self.n_steps + steps = torch.empty(n, device=cam_loc.device).uniform_(0.0, 1.0) 
+ mask_max_dis = max_dis[mask].unsqueeze(-1) + mask_min_dis = min_dis[mask].unsqueeze(-1) + steps = ( + # pyre-fixme[6]: For 1st param expected `int` but got `Tensor`. + steps.unsqueeze(0).repeat(n_mask_points, 1) * (mask_max_dis - mask_min_dis) + + mask_min_dis + ) + + mask_points = cam_loc.reshape(-1, 3)[mask] + mask_rays = ray_directions[mask, :] + + mask_points_all = mask_points.unsqueeze(1).repeat(1, n, 1) + steps.unsqueeze( + -1 + ) * mask_rays.unsqueeze(1).repeat(1, n, 1) + points = mask_points_all.reshape(-1, 3) + + mask_sdf_all = [] + for pnts in torch.split(points, 100000, dim=0): + mask_sdf_all.append(sdf(pnts)) + + mask_sdf_all = torch.cat(mask_sdf_all).reshape(-1, n) + min_vals, min_idx = mask_sdf_all.min(-1) + min_mask_points = mask_points_all.reshape(-1, n, 3)[ + # pyre-fixme[6]: For 2nd param expected `Union[bool, float, int]` but + # got `Tensor`. + torch.arange(0, n_mask_points), + min_idx, + ] + # pyre-fixme[6]: For 2nd param expected `Union[bool, float, int]` but got + # `Tensor`. + min_mask_dist = steps.reshape(-1, n)[torch.arange(0, n_mask_points), min_idx] + + return min_mask_points, min_mask_dist + + +# TODO: support variable origins +def _get_sphere_intersection( + cam_loc: torch.Tensor, ray_directions: torch.Tensor, r: float = 1.0 +) -> Tuple[torch.Tensor, torch.Tensor]: + # Input: n_images x 3 ; n_images x n_rays x 3 + # Output: n_images * n_rays x 2 (close and far) ; n_images * n_rays + + n_imgs, n_pix, _ = ray_directions.shape + device = cam_loc.device + + # cam_loc = cam_loc.unsqueeze(-1) + # ray_cam_dot = torch.bmm(ray_directions, cam_loc).squeeze() + ray_cam_dot = (ray_directions * cam_loc).sum(-1) # n_images x n_rays + # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. 
+ under_sqrt = ray_cam_dot**2 - (cam_loc.norm(2, dim=-1) ** 2 - r**2) + + under_sqrt = under_sqrt.reshape(-1) + mask_intersect = under_sqrt > 0 + + sphere_intersections = torch.zeros(n_imgs * n_pix, 2, device=device) + sphere_intersections[mask_intersect] = torch.sqrt( + under_sqrt[mask_intersect] + ).unsqueeze(-1) * torch.tensor([-1.0, 1.0], device=device) + sphere_intersections[mask_intersect] -= ray_cam_dot.reshape(-1)[ + mask_intersect + ].unsqueeze(-1) + + sphere_intersections = sphere_intersections.reshape(n_imgs, n_pix, 2) + sphere_intersections = sphere_intersections.clamp_min(0.0) + mask_intersect = mask_intersect.reshape(n_imgs, n_pix) + + return sphere_intersections, mask_intersect diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/renderer/raymarcher.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/renderer/raymarcher.py new file mode 100644 index 0000000000000000000000000000000000000000..2bca6e77c771321e6439835fe472fcc9afac13e5 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/renderer/raymarcher.py @@ -0,0 +1,245 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from typing import Any, Callable, Dict, Optional, Tuple + +import torch +from pytorch3d.implicitron.models.renderer.base import RendererOutput +from pytorch3d.implicitron.tools.config import registry, ReplaceableBase +from pytorch3d.renderer.implicit.raymarching import _check_raymarcher_inputs + + +_TTensor = torch.Tensor + + +class RaymarcherBase(ReplaceableBase): + """ + Defines a base class for raymarchers. 
Specifically, a raymarcher is responsible + for taking a set of features and density descriptors along rendering rays + and marching along them in order to generate a feature render. + """ + + def forward( + self, + rays_densities: torch.Tensor, + rays_features: torch.Tensor, + aux: Dict[str, Any], + ) -> RendererOutput: + """ + Args: + rays_densities: Per-ray density values represented with a tensor + of shape `(..., n_points_per_ray, 1)`. + rays_features: Per-ray feature values represented with a tensor + of shape `(..., n_points_per_ray, feature_dim)`. + aux: a dictionary with extra information. + """ + raise NotImplementedError() + + +class AccumulativeRaymarcherBase(RaymarcherBase, torch.nn.Module): + """ + This generalizes the `pytorch3d.renderer.EmissionAbsorptionRaymarcher` + and NeuralVolumes' cumsum ray marcher. It additionally returns + the rendering weights that can be used in the NVS pipeline to carry out + the importance ray-sampling in the refining pass. + Different from `pytorch3d.renderer.EmissionAbsorptionRaymarcher`, it takes raw + (non-exponentiated) densities. + + Args: + surface_thickness: The thickness of the raymarched surface. + bg_color: The background color. A tuple of either 1 element or of D elements, + where D matches the feature dimensionality; it is broadcast when necessary. + replicate_last_interval: If True, the ray length assigned to the last interval + for the opacity delta calculation is copied from the penultimate interval. + background_opacity: The length over which the last raw opacity value + (i.e. before exponentiation) is considered to apply, for the delta + calculation. Ignored if replicate_last_interval=True. + density_relu: If `True`, passes the input density through ReLU before + raymarching. + blend_output: If `True`, alpha-blends the output renders with the + background color using the rendered opacity mask. + + capping_function: The capping function of the raymarcher. 
+ Options: + - "exponential" (`cap_fn(x) = 1 - exp(-x)`) + - "cap1" (`cap_fn(x) = min(x, 1)`) + Set to "exponential" for the standard Emission Absorption raymarching. + weight_function: The weighting function of the raymarcher. + Options: + - "product" (`weight_fn(w, x) = w * x`) + - "minimum" (`weight_fn(w, x) = min(w, x)`) + Set to "product" for the standard Emission Absorption raymarching. + """ + + surface_thickness: int = 1 + bg_color: Tuple[float, ...] = (0.0,) + replicate_last_interval: bool = False + background_opacity: float = 0.0 + density_relu: bool = True + blend_output: bool = False + + @property + def capping_function_type(self) -> str: + raise NotImplementedError() + + @property + def weight_function_type(self) -> str: + raise NotImplementedError() + + def __post_init__(self): + """ + Args: + surface_thickness: Denotes the overlap between the absorption + function and the density function. + """ + bg_color = torch.tensor(self.bg_color) + if bg_color.ndim != 1: + raise ValueError(f"bg_color (shape {bg_color.shape}) should be a 1D tensor") + + self.register_buffer("_bg_color", bg_color, persistent=False) + + self._capping_function: Callable[[_TTensor], _TTensor] = { + "exponential": lambda x: 1.0 - torch.exp(-x), + "cap1": lambda x: x.clamp(max=1.0), + }[self.capping_function_type] + + self._weight_function: Callable[[_TTensor, _TTensor], _TTensor] = { + "product": lambda curr, acc: curr * acc, + "minimum": lambda curr, acc: torch.minimum(curr, acc), + }[self.weight_function_type] + + # pyre-fixme[14]: `forward` overrides method defined in `RaymarcherBase` + # inconsistently. + def forward( + self, + rays_densities: torch.Tensor, + rays_features: torch.Tensor, + aux: Dict[str, Any], + ray_lengths: torch.Tensor, + ray_deltas: Optional[torch.Tensor] = None, + density_noise_std: float = 0.0, + **kwargs, + ) -> RendererOutput: + """ + Args: + rays_densities: Per-ray density values represented with a tensor + of shape `(..., n_points_per_ray, 1)`. 
+ rays_features: Per-ray feature values represented with a tensor + of shape `(..., n_points_per_ray, feature_dim)`. + aux: a dictionary with extra information. + ray_lengths: Per-ray depth values represented with a tensor + of shape `(..., n_points_per_ray, feature_dim)`. + ray_deltas: Optional differences between consecutive elements along the ray bundle + represented with a tensor of shape `(..., n_points_per_ray)`. If None, + these differences are computed from ray_lengths. + density_noise_std: the magnitude of the noise added to densities. + + Returns: + features: A tensor of shape `(..., feature_dim)` containing + the rendered features for each ray. + depth: A tensor of shape `(..., 1)` containing estimated depth. + opacities: A tensor of shape `(..., 1)` containing rendered opacities. + weights: A tensor of shape `(..., n_points_per_ray)` containing + the ray-specific non-negative opacity weights. In general, they + don't sum to 1 but do not overcome it, i.e. + `(weights.sum(dim=-1) <= 1.0).all()` holds. + """ + _check_raymarcher_inputs( + rays_densities, + rays_features, + ray_lengths, + z_can_be_none=True, + features_can_be_none=False, + density_1d=True, + ) + + if ray_deltas is None: + ray_lengths_diffs = torch.diff(ray_lengths, dim=-1) + if self.replicate_last_interval: + last_interval = ray_lengths_diffs[..., -1:] + else: + last_interval = torch.full_like( + ray_lengths[..., :1], self.background_opacity + ) + deltas = torch.cat((ray_lengths_diffs, last_interval), dim=-1) + else: + deltas = ray_deltas + + rays_densities = rays_densities[..., 0] + + if density_noise_std > 0.0: + noise: _TTensor = torch.randn_like(rays_densities).mul(density_noise_std) + rays_densities = rays_densities + noise + if self.density_relu: + rays_densities = torch.relu(rays_densities) + + weighted_densities = deltas * rays_densities + # pyre-fixme[29]: `Union[Tensor, Module]` is not a function. 
+ capped_densities = self._capping_function(weighted_densities) + + # pyre-fixme[29]: `Union[Tensor, Module]` is not a function. + rays_opacities = self._capping_function( + torch.cumsum(weighted_densities, dim=-1) + ) + opacities = rays_opacities[..., -1:] + absorption_shifted = (-rays_opacities + 1.0).roll( + self.surface_thickness, dims=-1 + ) + absorption_shifted[..., : self.surface_thickness] = 1.0 + + # pyre-fixme[29]: `Union[Tensor, Module]` is not a function. + weights = self._weight_function(capped_densities, absorption_shifted) + features = (weights[..., None] * rays_features).sum(dim=-2) + depth = (weights * ray_lengths)[..., None].sum(dim=-2) + + alpha = opacities if self.blend_output else 1 + if self._bg_color.shape[-1] not in [1, features.shape[-1]]: + raise ValueError("Wrong number of background color channels.") + # pyre-fixme[58]: `*` is not supported for operand types `int` and + # `Union[Tensor, Module]`. + features = alpha * features + (1 - opacities) * self._bg_color + + return RendererOutput( + features=features, + depths=depth, + masks=opacities, + weights=weights, + aux=aux, + ) + + +@registry.register +class EmissionAbsorptionRaymarcher(AccumulativeRaymarcherBase): + """ + Implements the EmissionAbsorption raymarcher. + """ + + background_opacity: float = 1e10 + + @property + def capping_function_type(self) -> str: + return "exponential" + + @property + def weight_function_type(self) -> str: + return "product" + + +@registry.register +class CumsumRaymarcher(AccumulativeRaymarcherBase): + """ + Implements the NeuralVolumes' cumulative-sum raymarcher. 
+ """ + + @property + def capping_function_type(self) -> str: + return "cap1" + + @property + def weight_function_type(self) -> str: + return "minimum" diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/renderer/rgb_net.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/renderer/rgb_net.py new file mode 100644 index 0000000000000000000000000000000000000000..de25389680878ac00d9970d6f6dfb6eed1874ca8 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/renderer/rgb_net.py @@ -0,0 +1,140 @@ +# @lint-ignore-every LICENSELINT +# Adapted from RenderingNetwork from IDR +# https://github.com/lioryariv/idr/ +# Copyright (c) 2020 Lior Yariv + +# pyre-unsafe + +import logging +from typing import List, Tuple + +import torch +from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle +from pytorch3d.implicitron.tools.config import enable_get_default_args +from pytorch3d.renderer.implicit import HarmonicEmbedding + +from torch import nn + + +logger = logging.getLogger(__name__) + + +class RayNormalColoringNetwork(torch.nn.Module): + """ + Members: + d_in and feature_vector_size: Sum of these is the input + dimension. These must add up to the sum of + - 3 [for the points] + - 3 unless mode=no_normal [for the normals] + - 3 unless mode=no_view_dir [for view directions] + - the feature size, [number of channels in feature_vectors] + + d_out: dimension of output. + mode: One of "idr", "no_view_dir" or "no_normal" to allow omitting + part of the network input. + dims: list of hidden layer sizes. + weight_norm: whether to apply weight normalization to each layer. + n_harmonic_functions_dir: + If >0, use a harmonic embedding with this number of + harmonic functions for the view direction. Otherwise view directions + are fed without embedding, unless mode is `no_view_dir`. 
+ pooled_feature_dim: If a pooling function is in use (provided as + pooling_fn to forward()) this must be its number of features. + Otherwise this must be set to 0. (If used from GenericModel, + this will be set automatically.) + """ + + def __init__( + self, + feature_vector_size: int = 3, + mode: str = "idr", + d_in: int = 9, + d_out: int = 3, + dims: Tuple[int, ...] = (512, 512, 512, 512), + weight_norm: bool = True, + n_harmonic_functions_dir: int = 0, + pooled_feature_dim: int = 0, + ) -> None: + super().__init__() + + self.mode = mode + self.output_dimensions = d_out + dims_full: List[int] = [d_in + feature_vector_size] + list(dims) + [d_out] + + self.embedview_fn = None + if n_harmonic_functions_dir > 0: + self.embedview_fn = HarmonicEmbedding( + n_harmonic_functions_dir, append_input=True + ) + dims_full[0] += self.embedview_fn.get_output_dim() - 3 + + if pooled_feature_dim > 0: + logger.info("Pooled features in rendering network.") + dims_full[0] += pooled_feature_dim + + self.num_layers = len(dims_full) + + layers = [] + for layer_idx in range(self.num_layers - 1): + out_dim = dims_full[layer_idx + 1] + lin = nn.Linear(dims_full[layer_idx], out_dim) + + if weight_norm: + lin = nn.utils.weight_norm(lin) + + layers.append(lin) + self.linear_layers = torch.nn.ModuleList(layers) + + self.relu = nn.ReLU() + self.tanh = nn.Tanh() + + def forward( + self, + feature_vectors: torch.Tensor, + points, + normals, + ray_bundle: ImplicitronRayBundle, + masks=None, + pooling_fn=None, + ): + if masks is not None and not masks.any(): + return torch.zeros_like(normals) + + view_dirs = ray_bundle.directions + if masks is not None: + # in case of IDR, other outputs are passed here after applying the mask + view_dirs = view_dirs.reshape(view_dirs.shape[0], -1, 3)[ + :, masks.reshape(-1) + ] + + if self.embedview_fn is not None: + view_dirs = self.embedview_fn(view_dirs) + + if self.mode == "idr": + rendering_input = torch.cat( + [points, view_dirs, normals, feature_vectors], 
dim=-1 + ) + elif self.mode == "no_view_dir": + rendering_input = torch.cat([points, normals, feature_vectors], dim=-1) + elif self.mode == "no_normal": + rendering_input = torch.cat([points, view_dirs, feature_vectors], dim=-1) + else: + raise ValueError(f"Unsupported rendering mode: {self.mode}") + + if pooling_fn is not None: + featspool = pooling_fn(points[None])[0] + rendering_input = torch.cat((rendering_input, featspool), dim=-1) + + x = rendering_input + + for layer_idx in range(self.num_layers - 1): + x = self.linear_layers[layer_idx](x) + + if layer_idx < self.num_layers - 2: + x = self.relu(x) + + x = self.tanh(x) + return x + + +enable_get_default_args(RayNormalColoringNetwork) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/renderer/sdf_renderer.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/renderer/sdf_renderer.py new file mode 100644 index 0000000000000000000000000000000000000000..c769b5f500144ccac0c1fe31e2daee1e94b898ad --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/renderer/sdf_renderer.py @@ -0,0 +1,280 @@ +# @lint-ignore-every LICENSELINT +# Adapted from https://github.com/lioryariv/idr/blob/main/code/model/ +# implicit_differentiable_renderer.py +# Copyright (c) 2020 Lior Yariv + +# pyre-unsafe +import functools +from typing import List, Optional, Tuple + +import torch +from omegaconf import DictConfig +from pytorch3d.common.compat import prod +from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle +from pytorch3d.implicitron.tools.config import ( + get_default_args_field, + registry, + run_auto_creation, +) +from pytorch3d.implicitron.tools.utils import evaluating + +from .base import BaseRenderer, EvaluationMode, ImplicitFunctionWrapper, RendererOutput +from .ray_tracing import RayTracing +from .rgb_net import 
RayNormalColoringNetwork + + +@registry.register +class SignedDistanceFunctionRenderer(BaseRenderer, torch.nn.Module): + render_features_dimensions: int = 3 + object_bounding_sphere: float = 1.0 + # pyre-fixme[13]: Attribute `ray_tracer` is never initialized. + ray_tracer: RayTracing + ray_normal_coloring_network_args: DictConfig = get_default_args_field( + RayNormalColoringNetwork + ) + bg_color: Tuple[float, ...] = (0.0,) + soft_mask_alpha: float = 50.0 + + def __post_init__( + self, + ): + render_features_dimensions = self.render_features_dimensions + if len(self.bg_color) not in [1, render_features_dimensions]: + raise ValueError( + f"Background color should have {render_features_dimensions} entries." + ) + + run_auto_creation(self) + + self.ray_normal_coloring_network_args["feature_vector_size"] = ( + render_features_dimensions + ) + self._rgb_network = RayNormalColoringNetwork( + **self.ray_normal_coloring_network_args + ) + + self.register_buffer("_bg_color", torch.tensor(self.bg_color), persistent=False) + + @classmethod + def ray_tracer_tweak_args(cls, type, args: DictConfig) -> None: + del args["object_bounding_sphere"] + + def create_ray_tracer(self) -> None: + self.ray_tracer = RayTracing( + # pyre-fixme[32]: Keyword argument must be a mapping with string keys. + **self.ray_tracer_args, + object_bounding_sphere=self.object_bounding_sphere, + ) + + def requires_object_mask(self) -> bool: + return True + + def forward( + self, + ray_bundle: ImplicitronRayBundle, + implicit_functions: List[ImplicitFunctionWrapper], + evaluation_mode: EvaluationMode = EvaluationMode.EVALUATION, + object_mask: Optional[torch.Tensor] = None, + **kwargs, + ) -> RendererOutput: + """ + Args: + ray_bundle: A `ImplicitronRayBundle` object containing the parametrizations of the + sampled rendering rays. + implicit_functions: single element list of ImplicitFunctionWrappers which + defines the implicit function to be used. 
+ evaluation_mode: one of EvaluationMode.TRAINING or + EvaluationMode.EVALUATION which determines the settings used for + rendering. + kwargs: + object_mask: BoolTensor, denoting the silhouette of the object. + This is a required keyword argument for SignedDistanceFunctionRenderer + + Returns: + instance of RendererOutput + """ + if len(implicit_functions) != 1: + raise ValueError( + "SignedDistanceFunctionRenderer supports only single pass." + ) + + if object_mask is None: + raise ValueError("Expected object_mask to be provided in the kwargs") + object_mask = object_mask.bool() + + implicit_function = implicit_functions[0] + implicit_function_gradient = functools.partial(_gradient, implicit_function) + + # object_mask: silhouette of the object + batch_size, *spatial_size, _ = ray_bundle.lengths.shape + num_pixels = prod(spatial_size) + + cam_loc = ray_bundle.origins.reshape(batch_size, -1, 3) + ray_dirs = ray_bundle.directions.reshape(batch_size, -1, 3) + object_mask = object_mask.reshape(batch_size, -1) + + with torch.no_grad(), evaluating(implicit_function): + points, network_object_mask, dists = self.ray_tracer( + sdf=lambda x: implicit_function(rays_points_world=x)[ + :, 0 + ], # TODO: get rid of this wrapper + cam_loc=cam_loc, + object_mask=object_mask, + ray_directions=ray_dirs, + ) + + # TODO: below, cam_loc might as well be different + depth = dists.reshape(batch_size, num_pixels, 1) + points = (cam_loc + depth * ray_dirs).reshape(-1, 3) + + sdf_output = implicit_function(rays_points_world=points)[:, 0:1] + # NOTE most of the intermediate variables are flattened for + # no apparent reason (here and in the ray tracer) + ray_dirs = ray_dirs.reshape(-1, 3) + object_mask = object_mask.reshape(-1) + + # TODO: move it to loss computation + if evaluation_mode == EvaluationMode.TRAINING: + surface_mask = network_object_mask & object_mask + surface_points = points[surface_mask] + surface_dists = dists[surface_mask].unsqueeze(-1) + surface_ray_dirs = 
ray_dirs[surface_mask] + surface_cam_loc = cam_loc.reshape(-1, 3)[surface_mask] + surface_output = sdf_output[surface_mask] + N = surface_points.shape[0] + + # Sample points for the eikonal loss + eik_bounding_box: float = self.object_bounding_sphere + n_eik_points = batch_size * num_pixels // 2 + eikonal_points = torch.empty( + n_eik_points, + 3, + # but got `Union[device, Tensor, Module]`. + # pyre-fixme[6]: For 3rd argument expected `Union[None, int, str, + # device]` but got `Union[device, Tensor, Module]`. + device=self._bg_color.device, + ).uniform_(-eik_bounding_box, eik_bounding_box) + eikonal_pixel_points = points.clone() + eikonal_pixel_points = eikonal_pixel_points.detach() + eikonal_points = torch.cat([eikonal_points, eikonal_pixel_points], 0) + + points_all = torch.cat([surface_points, eikonal_points], dim=0) + + output = implicit_function(rays_points_world=surface_points) + surface_sdf_values = output[ + :N, 0:1 + ].detach() # how is it different from sdf_output? + + g = implicit_function_gradient(points_all) + surface_points_grad = g[:N, 0, :].clone().detach() + grad_theta = g[N:, 0, :] + + differentiable_surface_points = _sample_network( + surface_output, + surface_sdf_values, + surface_points_grad, + surface_dists, + surface_cam_loc, + surface_ray_dirs, + ) + + else: + surface_mask = network_object_mask + differentiable_surface_points = points[surface_mask] + grad_theta = None + + empty_render = differentiable_surface_points.shape[0] == 0 + features = implicit_function(rays_points_world=differentiable_surface_points)[ + None, :, 1: + ] + normals_full = features.new_zeros( + batch_size, *spatial_size, 3, requires_grad=empty_render + ) + render_full = ( + features.new_ones( + batch_size, + *spatial_size, + self.render_features_dimensions, + requires_grad=empty_render, + ) + * self._bg_color + ) + mask_full = features.new_ones( + batch_size, *spatial_size, 1, requires_grad=empty_render + ) + if not empty_render: + normals = 
implicit_function_gradient(differentiable_surface_points)[ + None, :, 0, : + ] + normals_full.view(-1, 3)[surface_mask] = normals + render_full.view(-1, self.render_features_dimensions)[surface_mask] = ( + # pyre-fixme[29]: `Union[Tensor, Module]` is not a function. + self._rgb_network( + features, + differentiable_surface_points[None], + normals, + ray_bundle, + surface_mask[None, :, None], + pooling_fn=None, # TODO + ) + ) + mask_full.view(-1, 1)[~surface_mask] = torch.sigmoid( + # pyre-fixme[6]: For 1st param expected `Tensor` but got `float`. + -self.soft_mask_alpha * sdf_output[~surface_mask] + ) + + # scatter points with surface_mask + points_full = ray_bundle.origins.detach().clone() + points_full.view(-1, 3)[surface_mask] = differentiable_surface_points + + # TODO: it is sparse here but otherwise dense + return RendererOutput( + features=render_full, + normals=normals_full, + depths=depth.reshape(batch_size, *spatial_size, 1), + masks=mask_full, # this is a differentiable approximation, see (7) in the paper + points=points_full, + aux={"grad_theta": grad_theta}, # TODO: will be moved to eikonal loss + # TODO: do we need sdf_output, grad_theta? 
Only for loss probably + ) + + +def _sample_network( + surface_output, + surface_sdf_values, + surface_points_grad, + surface_dists, + surface_cam_loc, + surface_ray_dirs, + eps: float = 1e-4, +): + # t -> t(theta) + surface_ray_dirs_0 = surface_ray_dirs.detach() + surface_points_dot = torch.bmm( + surface_points_grad.view(-1, 1, 3), surface_ray_dirs_0.view(-1, 3, 1) + ).squeeze(-1) + dot_sign = (surface_points_dot >= 0).to(surface_points_dot) * 2 - 1 + surface_dists_theta = surface_dists - (surface_output - surface_sdf_values) / ( + surface_points_dot.abs().clip(eps) * dot_sign + ) + + # t(theta) -> x(theta,c,v) + surface_points_theta_c_v = surface_cam_loc + surface_dists_theta * surface_ray_dirs + + return surface_points_theta_c_v + + +@torch.enable_grad() +def _gradient(module, rays_points_world): + rays_points_world.requires_grad_(True) + y = module.forward(rays_points_world=rays_points_world)[:, :1] + d_output = torch.ones_like(y, requires_grad=False, device=y.device) + gradients = torch.autograd.grad( + outputs=y, + inputs=rays_points_world, + grad_outputs=d_output, + create_graph=True, + retain_graph=True, + only_inputs=True, + )[0] + return gradients.unsqueeze(1) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/utils.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..70b2fc06a690a5be7ea1e08385b39c6a1fbacdbc --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/utils.py @@ -0,0 +1,212 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + + +# Note: The #noqa comments below are for unused imports of pluggable implementations +# which are part of implicitron. They ensure that the registry is prepopulated. + +import warnings +from logging import Logger +from typing import Any, Dict, Optional, Tuple + +import torch +import tqdm +from pytorch3d.common.compat import prod + +from pytorch3d.implicitron.models.renderer.base import ImplicitronRayBundle + +from pytorch3d.implicitron.tools import image_utils + +from pytorch3d.implicitron.tools.utils import cat_dataclass + + +def preprocess_input( + image_rgb: Optional[torch.Tensor], + fg_probability: Optional[torch.Tensor], + depth_map: Optional[torch.Tensor], + mask_images: bool, + mask_depths: bool, + mask_threshold: float, + bg_color: Tuple[float, float, float], +) -> Tuple[Optional[torch.Tensor], Optional[torch.Tensor], Optional[torch.Tensor]]: + """ + Helper function to preprocess the input images and optional depth maps + to apply masking if required. + + Args: + image_rgb: A tensor of shape `(B, 3, H, W)` containing a batch of rgb images + corresponding to the source viewpoints from which features will be extracted + fg_probability: A tensor of shape `(B, 1, H, W)` containing a batch + of foreground masks with values in [0, 1]. + depth_map: A tensor of shape `(B, 1, H, W)` containing a batch of depth maps. + mask_images: Whether or not to mask the RGB image background given the + foreground mask (the `fg_probability` argument of `GenericModel.forward`) + mask_depths: Whether or not to mask the depth image background given the + foreground mask (the `fg_probability` argument of `GenericModel.forward`) + mask_threshold: If greater than 0.0, the foreground mask is + thresholded by this value before being applied to the RGB/Depth images + bg_color: RGB values for setting the background color of input image + if mask_images=True. Defaults to (0.0, 0.0, 0.0). 
Each renderer has its own + way to determine the background color of its output, unrelated to this. + + Returns: + Modified image_rgb, fg_mask, depth_map + """ + if image_rgb is not None and image_rgb.ndim == 3: + # The FrameData object is used for both frames and batches of frames, + # and a user might get this error if those were confused. + # Perhaps a user has a FrameData `fd` representing a single frame and + # wrote something like `model(**fd)` instead of + # `model(**fd.collate([fd]))`. + raise ValueError( + "Model received unbatched inputs. " + + "Perhaps they came from a FrameData which had not been collated." + ) + + fg_mask = fg_probability + if fg_mask is not None and mask_threshold > 0.0: + # threshold masks + warnings.warn("Thresholding masks!") + fg_mask = (fg_mask >= mask_threshold).type_as(fg_mask) + + if mask_images and fg_mask is not None and image_rgb is not None: + # mask the image + warnings.warn("Masking images!") + image_rgb = image_utils.mask_background( + image_rgb, fg_mask, dim_color=1, bg_color=torch.tensor(bg_color) + ) + + if mask_depths and fg_mask is not None and depth_map is not None: + # mask the depths + assert ( + mask_threshold > 0.0 + ), "Depths should be masked only with thresholded masks" + warnings.warn("Masking depths!") + depth_map = depth_map * fg_mask + + return image_rgb, fg_mask, depth_map + + +def log_loss_weights(loss_weights: Dict[str, float], logger: Logger) -> None: + """ + Print a table of the loss weights. + """ + loss_weights_message = ( + "-------\nloss_weights:\n" + + "\n".join(f"{k:40s}: {w:1.2e}" for k, w in loss_weights.items()) + + "-------" + ) + logger.info(loss_weights_message) + + +def weighted_sum_losses( + preds: Dict[str, torch.Tensor], loss_weights: Dict[str, float] +) -> Optional[torch.Tensor]: + """ + A helper function to compute the overall loss as the dot product + of individual loss functions with the corresponding weights. 
+ """ + losses_weighted = [ + preds[k] * float(w) + for k, w in loss_weights.items() + if (k in preds and w != 0.0) + ] + if len(losses_weighted) == 0: + warnings.warn("No main objective found.") + return None + loss = sum(losses_weighted) + assert torch.is_tensor(loss) + return loss + + +def apply_chunked(func, chunk_generator, tensor_collator): + """ + Helper function to apply a function on a sequence of + chunked inputs yielded by a generator and collate + the result. + """ + processed_chunks = [ + func(*chunk_args, **chunk_kwargs) + for chunk_args, chunk_kwargs in chunk_generator + ] + + return cat_dataclass(processed_chunks, tensor_collator) + + +def chunk_generator( + chunk_size: int, + ray_bundle: ImplicitronRayBundle, + chunked_inputs: Dict[str, torch.Tensor], + tqdm_trigger_threshold: int, + *args, + **kwargs, +): + """ + Helper function which yields chunks of rays from the + input ray_bundle, to be used when the number of rays is + large and will not fit in memory for rendering. + """ + ( + batch_size, + *spatial_dim, + n_pts_per_ray, + ) = ray_bundle.lengths.shape # B x ... 
x n_pts_per_ray + if n_pts_per_ray > 0 and chunk_size % n_pts_per_ray != 0: + raise ValueError( + f"chunk_size_grid ({chunk_size}) should be divisible " + f"by n_pts_per_ray ({n_pts_per_ray})" + ) + + n_rays = prod(spatial_dim) + # special handling for raytracing-based methods + n_chunks = -(-n_rays * max(n_pts_per_ray, 1) // chunk_size) + chunk_size_in_rays = -(-n_rays // n_chunks) + + iter = range(0, n_rays, chunk_size_in_rays) + if len(iter) >= tqdm_trigger_threshold: + iter = tqdm.tqdm(iter) + + def _safe_slice( + tensor: Optional[torch.Tensor], start_idx: int, end_idx: int + ) -> Any: + return tensor[start_idx:end_idx] if tensor is not None else None + + for start_idx in iter: + end_idx = min(start_idx + chunk_size_in_rays, n_rays) + bins = ( + None + if ray_bundle.bins is None + else ray_bundle.bins.reshape(batch_size, n_rays, n_pts_per_ray + 1)[ + :, start_idx:end_idx + ] + ) + pixel_radii_2d = ( + None + if ray_bundle.pixel_radii_2d is None + else ray_bundle.pixel_radii_2d.reshape(batch_size, -1, 1)[ + :, start_idx:end_idx + ] + ) + ray_bundle_chunk = ImplicitronRayBundle( + origins=ray_bundle.origins.reshape(batch_size, -1, 3)[:, start_idx:end_idx], + directions=ray_bundle.directions.reshape(batch_size, -1, 3)[ + :, start_idx:end_idx + ], + lengths=ray_bundle.lengths.reshape(batch_size, n_rays, n_pts_per_ray)[ + :, start_idx:end_idx + ], + xys=ray_bundle.xys.reshape(batch_size, -1, 2)[:, start_idx:end_idx], + bins=bins, + pixel_radii_2d=pixel_radii_2d, + camera_ids=_safe_slice(ray_bundle.camera_ids, start_idx, end_idx), + camera_counts=_safe_slice(ray_bundle.camera_counts, start_idx, end_idx), + ) + extra_args = kwargs.copy() + for k, v in chunked_inputs.items(): + extra_args[k] = v.flatten(2)[:, :, start_idx:end_idx] + yield [ray_bundle_chunk, *args], extra_args diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/visualization/__init__.py 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/visualization/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6267ddbf92b36f9ebed9b194fd0eab924ad35556 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/visualization/__init__.py @@ -0,0 +1,8 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/visualization/render_flyaround.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/visualization/render_flyaround.py new file mode 100644 index 0000000000000000000000000000000000000000..2d352de7305ecd009ede6e5536310d6a134f31b8 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/models/visualization/render_flyaround.py @@ -0,0 +1,393 @@ +#!/usr/bin/env python3 +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + + +import logging +import math +import os +import random +from typing import ( + Any, + Dict, + Iterable, + List, + Optional, + Sequence, + Tuple, + TYPE_CHECKING, + Union, +) + +import numpy as np +import torch +import torch.nn.functional as Fu +from pytorch3d.implicitron.dataset.dataset_base import DatasetBase, FrameData +from pytorch3d.implicitron.dataset.utils import is_train_frame +from pytorch3d.implicitron.models.base_model import EvaluationMode +from pytorch3d.implicitron.tools.eval_video_trajectory import ( + generate_eval_video_cameras, +) +from pytorch3d.implicitron.tools.video_writer import VideoWriter +from pytorch3d.implicitron.tools.vis_utils import ( + get_visdom_connection, + make_depth_image, +) +from tqdm import tqdm + +if TYPE_CHECKING: + from visdom import Visdom + +logger = logging.getLogger(__name__) + + +def render_flyaround( + dataset: DatasetBase, + sequence_name: str, + model: torch.nn.Module, + output_video_path: str, + n_flyaround_poses: int = 40, + fps: int = 20, + trajectory_type: str = "circular_lsq_fit", + max_angle: float = 2 * math.pi, + trajectory_scale: float = 1.1, + scene_center: Tuple[float, float, float] = (0.0, 0.0, 0.0), + up: Tuple[float, float, float] = (0.0, -1.0, 0.0), + traj_offset: float = 0.0, + n_source_views: int = 9, + visdom_show_preds: bool = False, + visdom_environment: str = "render_flyaround", + visdom_server: str = "http://127.0.0.1", + visdom_port: int = 8097, + num_workers: int = 10, + device: Union[str, torch.device] = "cuda", + seed: Optional[int] = None, + video_resize: Optional[Tuple[int, int]] = None, + output_video_frames_dir: Optional[str] = None, + visualize_preds_keys: Sequence[str] = ( + "images_render", + "masks_render", + "depths_render", + "_all_source_images", + ), +) -> None: + """ + Uses `model` to generate a video consisting of renders of a scene imaged from + a camera flying around the scene. 
The scene is specified with the `dataset` object and + `sequence_name` which denotes the name of the scene whose frames are in `dataset`. + + Args: + dataset: The dataset object containing frames from a sequence in `sequence_name`. + sequence_name: Name of a sequence from `dataset`. + model: The model whose predictions are going to be visualized. + output_video_path: The path to the video output by this script. + n_flyaround_poses: The number of camera poses of the flyaround trajectory. + fps: Framerate of the output video. + trajectory_type: The type of the camera trajectory. Can be one of: + circular_lsq_fit: Camera centers follow a trajectory obtained + by fitting a 3D circle to train_cameras centers. + All cameras are looking towards scene_center. + figure_eight: Figure-of-8 trajectory around the center of the + central camera of the training dataset. + trefoil_knot: Same as 'figure_eight', but the trajectory has a shape + of a trefoil knot (https://en.wikipedia.org/wiki/Trefoil_knot). + figure_eight_knot: Same as 'figure_eight', but the trajectory has a shape + of a figure-eight knot + (https://en.wikipedia.org/wiki/Figure-eight_knot_(mathematics)). + trajectory_type: The type of the camera trajectory. Can be one of: + circular_lsq_fit: Camera centers follow a trajectory obtained + by fitting a 3D circle to train_cameras centers. + All cameras are looking towards scene_center. + figure_eight: Figure-of-8 trajectory around the center of the + central camera of the training dataset. + trefoil_knot: Same as 'figure_eight', but the trajectory has a shape + of a trefoil knot (https://en.wikipedia.org/wiki/Trefoil_knot). + figure_eight_knot: Same as 'figure_eight', but the trajectory has a shape + of a figure-eight knot + (https://en.wikipedia.org/wiki/Figure-eight_knot_(mathematics)). + max_angle: Defines the total length of the generated camera trajectory. 
+ All possible trajectories (set with the `trajectory_type` argument) are + periodic with the period of `time==2pi`. + E.g. setting `trajectory_type=circular_lsq_fit` and `time=4pi` will generate + a trajectory of camera poses rotating the total of 720 deg around the object. + trajectory_scale: The extent of the trajectory. + scene_center: The center of the scene in world coordinates which all + the cameras from the generated trajectory look at. + up: The "up" vector of the scene (=the normal of the scene floor). + Active for the `trajectory_type="circular"`. + traj_offset: 3D offset vector added to each point of the trajectory. + n_source_views: The number of source views sampled from the known views of the + training sequence added to each evaluation batch. + visdom_show_preds: If `True`, exports the visualizations to visdom. + visdom_environment: The name of the visdom environment. + visdom_server: The address of the visdom server. + visdom_port: The visdom port. + num_workers: The number of workers used to load the training data. + seed: The random seed used for reproducible sampling of the source views. + video_resize: Optionally, defines the size of the output video. + output_video_frames_dir: If specified, the frames of the output video are going + to be permanently stored in this directory. + visualize_preds_keys: The names of the model predictions to visualize. 
+ """ + + if seed is None: + seed = hash(sequence_name) + + if visdom_show_preds: + viz = get_visdom_connection(server=visdom_server, port=visdom_port) + else: + viz = None + + logger.info(f"Loading all data of sequence '{sequence_name}'.") + seq_idx = list(dataset.sequence_indices_in_order(sequence_name)) + train_data = _load_whole_dataset(dataset, seq_idx, num_workers=num_workers) + assert all(train_data.sequence_name[0] == sn for sn in train_data.sequence_name) + # pyre-ignore[6] + sequence_set_name = "train" if is_train_frame(train_data.frame_type)[0] else "test" + logger.info(f"Sequence set = {sequence_set_name}.") + train_cameras = train_data.camera + time = torch.linspace(0, max_angle, n_flyaround_poses + 1)[:n_flyaround_poses] + test_cameras = generate_eval_video_cameras( + train_cameras, + time=time, + n_eval_cams=n_flyaround_poses, + trajectory_type=trajectory_type, + trajectory_scale=trajectory_scale, + scene_center=scene_center, + up=up, + focal_length=None, + principal_point=torch.zeros(n_flyaround_poses, 2), + traj_offset_canonical=(0.0, 0.0, traj_offset), + ) + + # sample the source views reproducibly + with torch.random.fork_rng(): + torch.manual_seed(seed) + source_views_i = torch.randperm(len(seq_idx))[:n_source_views] + + # add the first dummy view that will get replaced with the target camera + source_views_i = Fu.pad(source_views_i, [1, 0]) + source_views = [seq_idx[i] for i in source_views_i.tolist()] + batch = _load_whole_dataset(dataset, source_views, num_workers=num_workers) + assert all(batch.sequence_name[0] == sn for sn in batch.sequence_name) + + preds_total = [] + for n in tqdm(range(n_flyaround_poses), total=n_flyaround_poses): + # set the first batch camera to the target camera + for k in ("R", "T", "focal_length", "principal_point"): + getattr(batch.camera, k)[0] = getattr(test_cameras[n], k) + + # Move to cuda + net_input = batch.to(device) + with torch.no_grad(): + preds = model(**{**net_input, "evaluation_mode": 
EvaluationMode.EVALUATION}) + + # make sure we dont overwrite something + assert all(k not in preds for k in net_input.keys()) + preds.update(net_input) # merge everything into one big dict + + # Render the predictions to images + rendered_pred = _images_from_preds(preds, extract_keys=visualize_preds_keys) + preds_total.append(rendered_pred) + + # show the preds every 5% of the export iterations + if visdom_show_preds and ( + n % max(n_flyaround_poses // 20, 1) == 0 or n == n_flyaround_poses - 1 + ): + assert viz is not None + _show_predictions( + preds_total, + sequence_name=batch.sequence_name[0], + viz=viz, + viz_env=visdom_environment, + predicted_keys=visualize_preds_keys, + ) + + logger.info(f"Exporting videos for sequence {sequence_name} ...") + _generate_prediction_videos( + preds_total, + sequence_name=batch.sequence_name[0], + viz=viz, + viz_env=visdom_environment, + fps=fps, + video_path=output_video_path, + resize=video_resize, + video_frames_dir=output_video_frames_dir, + predicted_keys=visualize_preds_keys, + ) + + +def _load_whole_dataset( + dataset: torch.utils.data.Dataset, idx: Sequence[int], num_workers: int = 10 +) -> FrameData: + load_all_dataloader = torch.utils.data.DataLoader( + torch.utils.data.Subset(dataset, idx), + batch_size=len(idx), + num_workers=num_workers, + shuffle=False, + collate_fn=FrameData.collate, + ) + return next(iter(load_all_dataloader)) + + +def _images_from_preds( + preds: Dict[str, Any], + extract_keys: Iterable[str] = ( + "image_rgb", + "images_render", + "fg_probability", + "masks_render", + "depths_render", + "depth_map", + "_all_source_images", + ), +) -> Dict[str, torch.Tensor]: + imout = {} + for k in extract_keys: + if k == "_all_source_images" and "image_rgb" in preds: + src_ims = preds["image_rgb"][1:].cpu().detach().clone() + v = _stack_images(src_ims, None)[None] + else: + if k not in preds or preds[k] is None: + print(f"cant show {k}") + continue + v = preds[k].cpu().detach().clone() + if 
k.startswith("depth"): + mask_resize = Fu.interpolate( + preds["masks_render"], + size=preds[k].shape[2:], + mode="nearest", + ) + v = make_depth_image(preds[k], mask_resize) + if v.shape[1] == 1: + v = v.repeat(1, 3, 1, 1) + imout[k] = v.detach().cpu() + + return imout + + +def _stack_images(ims: torch.Tensor, size: Optional[Tuple[int, int]]) -> torch.Tensor: + ba = ims.shape[0] + H = int(np.ceil(np.sqrt(ba))) + W = H + n_add = H * W - ba + if n_add > 0: + ims = torch.cat((ims, torch.zeros_like(ims[:1]).repeat(n_add, 1, 1, 1))) + + ims = ims.view(H, W, *ims.shape[1:]) + cated = torch.cat([torch.cat(list(row), dim=2) for row in ims], dim=1) + if size is not None: + cated = Fu.interpolate(cated[None], size=size, mode="bilinear")[0] + return cated.clamp(0.0, 1.0) + + +def _show_predictions( + preds: List[Dict[str, Any]], + sequence_name: str, + viz: "Visdom", + viz_env: str = "visualizer", + predicted_keys: Sequence[str] = ( + "images_render", + "masks_render", + "depths_render", + "_all_source_images", + ), + n_samples=10, + one_image_width=200, +) -> None: + """Given a list of predictions visualize them into a single image using visdom.""" + assert isinstance(preds, list) + + pred_all = [] + # Randomly choose a subset of the rendered images, sort by order in the sequence + n_samples = min(n_samples, len(preds)) + pred_idx = sorted(random.sample(list(range(len(preds))), n_samples)) + for predi in pred_idx: + # Make the concatenation for the same camera vertically + pred_all.append( + torch.cat( + [ + torch.nn.functional.interpolate( + preds[predi][k].cpu(), + scale_factor=one_image_width / preds[predi][k].shape[3], + mode="bilinear", + ).clamp(0.0, 1.0) + for k in predicted_keys + ], + dim=2, + ) + ) + # Concatenate the images horizontally + pred_all_cat = torch.cat(pred_all, dim=3)[0] + viz.image( + pred_all_cat, + win="show_predictions", + env=viz_env, + opts={"title": f"pred_{sequence_name}"}, + ) + + +def _generate_prediction_videos( + preds: List[Dict[str, 
Any]], + sequence_name: str, + viz: Optional["Visdom"] = None, + viz_env: str = "visualizer", + predicted_keys: Sequence[str] = ( + "images_render", + "masks_render", + "depths_render", + "_all_source_images", + ), + fps: int = 20, + video_path: str = "/tmp/video", + video_frames_dir: Optional[str] = None, + resize: Optional[Tuple[int, int]] = None, +) -> None: + """Given a list of predictions create and visualize rotating videos of the + objects using visdom. + """ + + # make sure the target video directory exists + os.makedirs(os.path.dirname(video_path), exist_ok=True) + + # init a video writer for each predicted key + vws = {} + for k in predicted_keys: + if k not in preds[0]: + logger.warning(f"Cannot generate video for prediction key '{k}'") + continue + cache_dir = ( + None + if video_frames_dir is None + else os.path.join(video_frames_dir, f"{sequence_name}_{k}") + ) + vws[k] = VideoWriter( + fps=fps, + out_path=f"{video_path}_{sequence_name}_{k}.mp4", + cache_dir=cache_dir, + ) + + for rendered_pred in tqdm(preds): + for k in vws: + vws[k].write_frame( + rendered_pred[k][0].clip(0.0, 1.0).detach().cpu().numpy(), + resize=resize, + ) + + for k in predicted_keys: + if k not in vws: + continue + vws[k].get_video() + logger.info(f"Generated {vws[k].out_path}.") + if viz is not None: + viz.video( + videofile=vws[k].out_path, + env=viz_env, + win=k, # we reuse the same window otherwise visdom dies + opts={"title": sequence_name + " " + k}, + ) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/third_party/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/third_party/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6ac1a72bde66f104691245d2de4e83c6863718d5 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/third_party/__init__.py @@ -0,0 +1,7 @@ +# 
Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/third_party/hyperlayers.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/third_party/hyperlayers.py new file mode 100644 index 0000000000000000000000000000000000000000..8ad9ebeb73e938b019b358b265b9013fac9434d5 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/third_party/hyperlayers.py @@ -0,0 +1,255 @@ +# a copy-paste from https://github.com/vsitzmann/scene-representation-networks/blob/master/hyperlayers.py +# fmt: off +# flake8: noqa + +# pyre-unsafe +'''Pytorch implementations of hyper-network modules. +''' +import functools + +import torch +import torch.nn as nn + +from . 
import pytorch_prototyping


def partialclass(cls, *args, **kwds):
    """Return a subclass of `cls` whose __init__ is pre-bound with *args/**kwds."""
    class NewCls(cls):
        __init__ = functools.partialmethod(cls.__init__, *args, **kwds)

    return NewCls


class LookupLayer(nn.Module):
    """Per-object dense layer: looks up a BatchLinear for `obj_idx` and follows it
    with LayerNorm (no affine params) + ReLU."""

    def __init__(self, in_ch, out_ch, num_objects):
        super().__init__()

        self.out_ch = out_ch
        self.lookup_lin = LookupLinear(in_ch, out_ch, num_objects=num_objects)
        self.norm_nl = nn.Sequential(
            nn.LayerNorm([self.out_ch], elementwise_affine=False), nn.ReLU(inplace=True)
        )

    def forward(self, obj_idx):
        # Returns a module (not a tensor): the looked-up linear layer plus norm/nl.
        net = nn.Sequential(self.lookup_lin(obj_idx), self.norm_nl)
        return net


class LookupFC(nn.Module):
    """Builds, per object index, a fully connected network from looked-up layers."""

    def __init__(
        self,
        hidden_ch,
        num_hidden_layers,
        num_objects,
        in_ch,
        out_ch,
        outermost_linear=False,
    ):
        super().__init__()
        self.layers = nn.ModuleList()
        self.layers.append(
            LookupLayer(in_ch=in_ch, out_ch=hidden_ch, num_objects=num_objects)
        )

        for i in range(num_hidden_layers):
            self.layers.append(
                LookupLayer(in_ch=hidden_ch, out_ch=hidden_ch, num_objects=num_objects)
            )

        if outermost_linear:
            # Final layer without norm/nonlinearity.
            self.layers.append(
                LookupLinear(in_ch=hidden_ch, out_ch=out_ch, num_objects=num_objects)
            )
        else:
            self.layers.append(
                LookupLayer(in_ch=hidden_ch, out_ch=out_ch, num_objects=num_objects)
            )

    def forward(self, obj_idx):
        """:return: nn.Sequential of the per-object layers for `obj_idx`."""
        net = []
        for i in range(len(self.layers)):
            net.append(self.layers[i](obj_idx))

        return nn.Sequential(*net)


class LookupLinear(nn.Module):
    """Stores per-object linear-layer parameters in an nn.Embedding; forward
    looks them up and wraps them in a BatchLinear."""

    def __init__(self, in_ch, out_ch, num_objects):
        super().__init__()
        self.in_ch = in_ch
        self.out_ch = out_ch

        # One embedding row per object: flattened weight matrix followed by bias.
        self.hypo_params = nn.Embedding(num_objects, in_ch * out_ch + out_ch)

        for i in range(num_objects):
            nn.init.kaiming_normal_(
                self.hypo_params.weight.data[i, : self.in_ch * self.out_ch].view(
                    self.out_ch, self.in_ch
                ),
                a=0.0,
                nonlinearity="relu",
                mode="fan_in",
            )
            # Biases start at zero.
            self.hypo_params.weight.data[i, self.in_ch * self.out_ch :].fill_(0.0)

    def forward(self, obj_idx):
        hypo_params = self.hypo_params(obj_idx)

        # Indices explicit to catch errors in shape of output layer
        weights = hypo_params[..., : self.in_ch * self.out_ch]
        biases = hypo_params[
            ..., self.in_ch * self.out_ch : (self.in_ch * self.out_ch) + self.out_ch
        ]

        biases = biases.view(*(biases.size()[:-1]), 1, self.out_ch)
        weights = weights.view(*(weights.size()[:-1]), self.out_ch, self.in_ch)

        return BatchLinear(weights=weights, biases=biases)


class HyperLayer(nn.Module):
    """A hypernetwork that predicts a single Dense Layer, including LayerNorm and a ReLU."""

    def __init__(
        self, in_ch, out_ch, hyper_in_ch, hyper_num_hidden_layers, hyper_hidden_ch
    ):
        super().__init__()

        self.hyper_linear = HyperLinear(
            in_ch=in_ch,
            out_ch=out_ch,
            hyper_in_ch=hyper_in_ch,
            hyper_num_hidden_layers=hyper_num_hidden_layers,
            hyper_hidden_ch=hyper_hidden_ch,
        )
        self.norm_nl = nn.Sequential(
            nn.LayerNorm([out_ch], elementwise_affine=False), nn.ReLU(inplace=True)
        )

    def forward(self, hyper_input):
        """
        :param hyper_input: input to hypernetwork.
        :return: nn.Module; predicted fully connected network.
        """
        return nn.Sequential(self.hyper_linear(hyper_input), self.norm_nl)


class HyperFC(nn.Module):
    """Builds a hypernetwork that predicts a fully connected neural network."""

    def __init__(
        self,
        hyper_in_ch,
        hyper_num_hidden_layers,
        hyper_hidden_ch,
        hidden_ch,
        num_hidden_layers,
        in_ch,
        out_ch,
        outermost_linear=False,
    ):
        super().__init__()

        # Pre-bind the hypernetwork hyperparameters so layer construction below
        # only needs in_ch/out_ch.
        PreconfHyperLinear = partialclass(
            HyperLinear,
            hyper_in_ch=hyper_in_ch,
            hyper_num_hidden_layers=hyper_num_hidden_layers,
            hyper_hidden_ch=hyper_hidden_ch,
        )
        PreconfHyperLayer = partialclass(
            HyperLayer,
            hyper_in_ch=hyper_in_ch,
            hyper_num_hidden_layers=hyper_num_hidden_layers,
            hyper_hidden_ch=hyper_hidden_ch,
        )

        self.layers = nn.ModuleList()
        self.layers.append(PreconfHyperLayer(in_ch=in_ch, out_ch=hidden_ch))

        for i in range(num_hidden_layers):
            self.layers.append(PreconfHyperLayer(in_ch=hidden_ch, out_ch=hidden_ch))

        if outermost_linear:
            self.layers.append(PreconfHyperLinear(in_ch=hidden_ch, out_ch=out_ch))
        else:
            self.layers.append(PreconfHyperLayer(in_ch=hidden_ch, out_ch=out_ch))

    def forward(self, hyper_input):
        """
        :param hyper_input: Input to hypernetwork.
        :return: nn.Module; Predicted fully connected neural network.
        """
        net = []
        for i in range(len(self.layers)):
            net.append(self.layers[i](hyper_input))

        return nn.Sequential(*net)


class BatchLinear(nn.Module):
    def __init__(self, weights, biases):
        """Implements a batch linear layer.

        NOTE: `weights`/`biases` are stored as plain tensor attributes (not
        nn.Parameters) — they are produced per-forward by a hypernetwork or
        lookup, so gradients flow to their producer rather than to this module.

        :param weights: Shape: (batch, out_ch, in_ch)
        :param biases: Shape: (batch, 1, out_ch)
        """
        super().__init__()

        self.weights = weights
        self.biases = biases

    def __repr__(self):
        return "BatchLinear(in_ch=%d, out_ch=%d)" % (
            self.weights.shape[-1],
            self.weights.shape[-2],
        )

    def forward(self, input):
        # Batched x @ W^T + b: transpose the last two dims of `weights`.
        output = input.matmul(
            self.weights.permute(
                *[i for i in range(len(self.weights.shape) - 2)], -1, -2
            )
        )
        output += self.biases
        return output


def last_hyper_layer_init(m) -> None:
    """Kaiming-init then down-scale (x0.1) the final hypernetwork Linear layer."""
    if type(m) == nn.Linear:
        nn.init.kaiming_normal_(m.weight, a=0.0, nonlinearity="relu", mode="fan_in")
        m.weight.data *= 1e-1


class HyperLinear(nn.Module):
    """A hypernetwork that predicts a single linear layer (weights & biases)."""

    def __init__(
        self, in_ch, out_ch, hyper_in_ch, hyper_num_hidden_layers, hyper_hidden_ch
    ):

        super().__init__()
        self.in_ch = in_ch
        self.out_ch = out_ch

        # FCBlock output holds the flattened weight matrix followed by the bias.
        self.hypo_params = pytorch_prototyping.FCBlock(
            in_features=hyper_in_ch,
            hidden_ch=hyper_hidden_ch,
            num_hidden_layers=hyper_num_hidden_layers,
            out_features=(in_ch * out_ch) + out_ch,
            outermost_linear=True,
        )
        self.hypo_params[-1].apply(last_hyper_layer_init)

    def forward(self, hyper_input):
        hypo_params = self.hypo_params(hyper_input)

        # Indices explicit to catch errors in shape of output layer
        weights = hypo_params[..., : self.in_ch * self.out_ch]
        biases = hypo_params[
            ..., self.in_ch * self.out_ch : (self.in_ch * self.out_ch) + self.out_ch
        ]

        biases = biases.view(*(biases.size()[:-1]), 1, self.out_ch)
        weights = weights.view(*(weights.size()[:-1]), self.out_ch, self.in_ch)

        return BatchLinear(weights=weights, biases=biases)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/third_party/pytorch_prototyping.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/third_party/pytorch_prototyping.py new file
mode 100644 index 0000000000000000000000000000000000000000..6e2366d545fae2306428c6c578fa29b7ec4537cb --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/third_party/pytorch_prototyping.py @@ -0,0 +1,773 @@
# a copy-paste from https://raw.githubusercontent.com/vsitzmann/pytorch_prototyping/10f49b1e7df38a58fd78451eac91d7ac1a21df64/pytorch_prototyping.py
# fmt: off
# flake8: noqa

# pyre-unsafe
'''A number of custom pytorch modules with sane defaults that I find useful for model prototyping.
'''
import torch
import torch.nn as nn
import torchvision.utils
from torch.nn import functional as F


class FCLayer(nn.Module):
    """Fully connected layer: Linear -> LayerNorm -> ReLU."""

    def __init__(self, in_features, out_features):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(in_features, out_features),
            nn.LayerNorm([out_features]),
            nn.ReLU(inplace=True),
        )

    def forward(self, input):
        return self.net(input)


# From https://gist.github.com/wassname/ecd2dac6fc8f9918149853d17e3abf02
class LayerNormConv2d(nn.Module):
    """Layer norm for conv feature maps: normalizes each sample over all of
    its non-batch dimensions, then optionally applies a per-channel affine."""

    def __init__(self, num_features, eps=1e-5, affine=True):
        super().__init__()
        self.num_features = num_features
        self.affine = affine
        self.eps = eps

        if self.affine:
            self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())
            self.beta = nn.Parameter(torch.zeros(num_features))

    def forward(self, x):
        # Per-sample statistics over all non-batch dims.
        shape = [-1] + [1] * (x.dim() - 1)
        mean = x.view(x.size(0), -1).mean(1).view(*shape)
        std = x.view(x.size(0), -1).std(1).view(*shape)

        y = (x - mean) / (std + self.eps)
        if self.affine:
            # Broadcast gamma/beta over the channel dimension.
            shape = [1, -1] + [1] * (x.dim() - 2)
            y = self.gamma.view(*shape) * y + self.beta.view(*shape)
        return y


class FCBlock(nn.Module):
    """Stack of FCLayers; optionally a bare Linear as the outermost layer."""

    def __init__(
        self,
        hidden_ch,
        num_hidden_layers,
        in_features,
        out_features,
        outermost_linear=False,
    ):
        super().__init__()

        self.net = []
        self.net.append(FCLayer(in_features=in_features, out_features=hidden_ch))

        for i in range(num_hidden_layers):
            self.net.append(FCLayer(in_features=hidden_ch, out_features=hidden_ch))

        if outermost_linear:
            self.net.append(nn.Linear(in_features=hidden_ch, out_features=out_features))
        else:
            self.net.append(FCLayer(in_features=hidden_ch, out_features=out_features))

        self.net = nn.Sequential(*self.net)
        self.net.apply(self.init_weights)

    def __getitem__(self, item):
        # Allows indexing into the block, e.g. block[-1] for the last layer.
        return self.net[item]

    def init_weights(self, m):
        if type(m) == nn.Linear:
            nn.init.kaiming_normal_(m.weight, a=0.0, nonlinearity="relu", mode="fan_in")

    def forward(self, input):
        return self.net(input)


class DownBlock3D(nn.Module):
    """A 3D convolutional downsampling block."""

    def __init__(self, in_channels, out_channels, norm=nn.BatchNorm3d):
        super().__init__()

        self.net = [
            nn.ReplicationPad3d(1),
            nn.Conv3d(
                in_channels,
                out_channels,
                kernel_size=4,
                padding=0,
                stride=2,
                bias=False if norm is not None else True,
            ),
        ]

        if norm is not None:
            self.net += [norm(out_channels, affine=True)]

        self.net += [nn.LeakyReLU(0.2, True)]
        self.net = nn.Sequential(*self.net)

    def forward(self, x):
        return self.net(x)


class UpBlock3D(nn.Module):
    """A 3D convolutional upsampling block."""

    def __init__(self, in_channels, out_channels, norm=nn.BatchNorm3d):
        super().__init__()

        self.net = [
            nn.ConvTranspose3d(
                in_channels,
                out_channels,
                kernel_size=4,
                stride=2,
                padding=1,
                bias=False if norm is not None else True,
            ),
        ]

        if norm is not None:
            self.net += [norm(out_channels, affine=True)]

        self.net += [nn.ReLU(True)]
        self.net = nn.Sequential(*self.net)

    def forward(self, x, skipped=None):
        # Optional skip connection concatenated along the channel dim.
        if skipped is not None:
            input = torch.cat([skipped, x], dim=1)
        else:
            input = x
        return self.net(input)


class Conv3dSame(torch.nn.Module):
    """3D convolution that pads to keep spatial dimensions equal.
    Cannot deal with stride. Only quadratic kernels (=scalar kernel_size).
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        bias=True,
        padding_layer=nn.ReplicationPad3d,
    ):
        """
        :param in_channels: Number of input channels
        :param out_channels: Number of output channels
        :param kernel_size: Scalar. Spatial dimensions of kernel (only quadratic kernels supported).
        :param bias: Whether or not to use bias.
        :param padding_layer: Which padding to use. Default is replication padding.
        """
        super().__init__()
        # Asymmetric padding for even kernels keeps output size equal to input.
        ka = kernel_size // 2
        kb = ka - 1 if kernel_size % 2 == 0 else ka
        self.net = nn.Sequential(
            padding_layer((ka, kb, ka, kb, ka, kb)),
            nn.Conv3d(in_channels, out_channels, kernel_size, bias=bias, stride=1),
        )

    def forward(self, x):
        return self.net(x)


class Conv2dSame(torch.nn.Module):
    """2D convolution that pads to keep spatial dimensions equal.
    Cannot deal with stride. Only quadratic kernels (=scalar kernel_size).
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        bias=True,
        padding_layer=nn.ReflectionPad2d,
    ):
        """
        :param in_channels: Number of input channels
        :param out_channels: Number of output channels
        :param kernel_size: Scalar. Spatial dimensions of kernel (only quadratic kernels supported).
        :param bias: Whether or not to use bias.
        :param padding_layer: Which padding to use. Default is reflection padding.
        """
        super().__init__()
        ka = kernel_size // 2
        kb = ka - 1 if kernel_size % 2 == 0 else ka
        self.net = nn.Sequential(
            padding_layer((ka, kb, ka, kb)),
            nn.Conv2d(in_channels, out_channels, kernel_size, bias=bias, stride=1),
        )

        # Expose the underlying conv parameters directly on the module.
        self.weight = self.net[1].weight
        self.bias = self.net[1].bias

    def forward(self, x):
        return self.net(x)


class UpBlock(nn.Module):
    """A 2d-conv upsampling block with a variety of options for upsampling, and following best practices / with
    reasonable defaults. (LeakyReLU, kernel size multiple of stride)
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        post_conv=True,
        use_dropout=False,
        dropout_prob=0.1,
        norm=nn.BatchNorm2d,
        upsampling_mode="transpose",
    ):
        """
        :param in_channels: Number of input channels
        :param out_channels: Number of output channels
        :param post_conv: Whether to have another convolutional layer after the upsampling layer.
        :param use_dropout: bool. Whether to use dropout or not.
        :param dropout_prob: Float. The dropout probability (if use_dropout is True)
        :param norm: Which norm to use. If None, no norm is used. Default is Batchnorm with affinity.
        :param upsampling_mode: Which upsampling mode:
                transpose: Upsampling with stride-2, kernel size 4 transpose convolutions.
                bilinear: Feature map is upsampled with bilinear upsampling, then a conv layer.
                nearest: Feature map is upsampled with nearest neighbor upsampling, then a conv layer.
                shuffle: Feature map is upsampled with pixel shuffling, then a conv layer.
        """
        super().__init__()

        net = list()

        if upsampling_mode == "transpose":
            net += [
                nn.ConvTranspose2d(
                    in_channels,
                    out_channels,
                    kernel_size=4,
                    stride=2,
                    padding=1,
                    bias=True if norm is None else False,
                )
            ]
        elif upsampling_mode == "bilinear":
            net += [nn.UpsamplingBilinear2d(scale_factor=2)]
            net += [
                Conv2dSame(
                    in_channels,
                    out_channels,
                    kernel_size=3,
                    bias=True if norm is None else False,
                )
            ]
        elif upsampling_mode == "nearest":
            net += [nn.UpsamplingNearest2d(scale_factor=2)]
            net += [
                Conv2dSame(
                    in_channels,
                    out_channels,
                    kernel_size=3,
                    bias=True if norm is None else False,
                )
            ]
        elif upsampling_mode == "shuffle":
            # PixelShuffle divides the channel count by 4 while doubling H and W.
            net += [nn.PixelShuffle(upscale_factor=2)]
            net += [
                Conv2dSame(
                    in_channels // 4,
                    out_channels,
                    kernel_size=3,
                    bias=True if norm is None else False,
                )
            ]
        else:
            raise ValueError("Unknown upsampling mode!")

        if norm is not None:
            net += [norm(out_channels, affine=True)]

        net += [nn.ReLU(True)]

        if use_dropout:
            net += [nn.Dropout2d(dropout_prob, False)]

        if post_conv:
            net += [
                Conv2dSame(
                    out_channels,
                    out_channels,
                    kernel_size=3,
                    bias=True if norm is None else False,
                )
            ]

            if norm is not None:
                net += [norm(out_channels, affine=True)]

            net += [nn.ReLU(True)]

            if use_dropout:
                net += [nn.Dropout2d(0.1, False)]

        self.net = nn.Sequential(*net)

    def forward(self, x, skipped=None):
        # Optional skip connection concatenated along the channel dim.
        if skipped is not None:
            input = torch.cat([skipped, x], dim=1)
        else:
            input = x
        return self.net(input)


class DownBlock(nn.Module):
    """A 2D-conv downsampling block following best practices / with reasonable defaults
    (LeakyReLU, kernel size multiple of stride)
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        prep_conv=True,
        middle_channels=None,
        use_dropout=False,
        dropout_prob=0.1,
        norm=nn.BatchNorm2d,
    ):
        """
        :param in_channels: Number of input channels
        :param out_channels: Number of output channels
        :param prep_conv: Whether to have another convolutional layer before the downsampling layer.
        :param middle_channels: If prep_conv is true, this sets the number of channels between the prep and downsampling
                                convs.
        :param use_dropout: bool. Whether to use dropout or not.
        :param dropout_prob: Float. The dropout probability (if use_dropout is True)
        :param norm: Which norm to use. If None, no norm is used. Default is Batchnorm with affinity.
        """
        super().__init__()

        if middle_channels is None:
            middle_channels = in_channels

        net = list()

        if prep_conv:
            # Optional stride-1 "prep" conv before the strided downsampling conv.
            net += [
                nn.ReflectionPad2d(1),
                nn.Conv2d(
                    in_channels,
                    middle_channels,
                    kernel_size=3,
                    padding=0,
                    stride=1,
                    bias=True if norm is None else False,
                ),
            ]

            if norm is not None:
                net += [norm(middle_channels, affine=True)]

            net += [nn.LeakyReLU(0.2, True)]

            if use_dropout:
                net += [nn.Dropout2d(dropout_prob, False)]

        # Strided conv performs the actual 2x downsampling.
        net += [
            nn.ReflectionPad2d(1),
            nn.Conv2d(
                middle_channels,
                out_channels,
                kernel_size=4,
                padding=0,
                stride=2,
                bias=True if norm is None else False,
            ),
        ]

        if norm is not None:
            net += [norm(out_channels, affine=True)]

        net += [nn.LeakyReLU(0.2, True)]

        if use_dropout:
            net += [nn.Dropout2d(dropout_prob, False)]

        self.net = nn.Sequential(*net)

    def forward(self, x):
        return self.net(x)


class Unet3d(nn.Module):
    """A 3d-Unet implementation with sane defaults."""

    def __init__(
        self,
        in_channels,
        out_channels,
        nf0,
        num_down,
        max_channels,
        norm=nn.BatchNorm3d,
        outermost_linear=False,
    ):
        """
        :param in_channels: Number of input channels
        :param out_channels: Number of output channels
        :param nf0: Number of features at highest level of U-Net
        :param num_down: Number of downsampling stages.
        :param max_channels: Maximum number of channels (channels multiply by 2 with every downsampling stage)
        :param norm: Which norm to use. If None, no norm is used. Default is Batchnorm with affinity.
        :param outermost_linear: Whether the output layer should be a linear layer or a nonlinear one.
        """
        super().__init__()

        assert num_down > 0, "Need at least one downsampling layer in UNet3d."

        # Define the in block
        self.in_layer = [Conv3dSame(in_channels, nf0, kernel_size=3, bias=False)]

        if norm is not None:
            self.in_layer += [norm(nf0, affine=True)]

        self.in_layer += [nn.LeakyReLU(0.2, True)]
        self.in_layer = nn.Sequential(*self.in_layer)

        # Define the center UNet block. The feature map has height and width 1 --> no batchnorm.
        self.unet_block = UnetSkipConnectionBlock3d(
            int(min(2 ** (num_down - 1) * nf0, max_channels)),
            int(min(2 ** (num_down - 1) * nf0, max_channels)),
            norm=None,
        )
        # Wrap outwards from the innermost block, doubling channels per level.
        for i in list(range(0, num_down - 1))[::-1]:
            self.unet_block = UnetSkipConnectionBlock3d(
                int(min(2 ** i * nf0, max_channels)),
                int(min(2 ** (i + 1) * nf0, max_channels)),
                submodule=self.unet_block,
                norm=norm,
            )

        # Define the out layer. Each unet block concatenates its inputs with its outputs - so the output layer
        # automatically receives the output of the in_layer and the output of the last unet layer.
        self.out_layer = [
            Conv3dSame(2 * nf0, out_channels, kernel_size=3, bias=outermost_linear)
        ]

        if not outermost_linear:
            if norm is not None:
                self.out_layer += [norm(out_channels, affine=True)]
            self.out_layer += [nn.ReLU(True)]
        self.out_layer = nn.Sequential(*self.out_layer)

    def forward(self, x):
        in_layer = self.in_layer(x)
        unet = self.unet_block(in_layer)
        out_layer = self.out_layer(unet)
        return out_layer


class UnetSkipConnectionBlock3d(nn.Module):
    """Helper class for building a 3D unet."""

    def __init__(self, outer_nc, inner_nc, norm=nn.BatchNorm3d, submodule=None):
        super().__init__()

        if submodule is None:
            # Innermost block: down + up only.
            model = [
                DownBlock3D(outer_nc, inner_nc, norm=norm),
                UpBlock3D(inner_nc, outer_nc, norm=norm),
            ]
        else:
            # The submodule concatenates its input with its output, hence 2 * inner_nc.
            model = [
                DownBlock3D(outer_nc, inner_nc, norm=norm),
                submodule,
                UpBlock3D(2 * inner_nc, outer_nc, norm=norm),
            ]

        self.model = nn.Sequential(*model)

    def forward(self, x):
        forward_passed = self.model(x)
        # Skip connection: concatenate input with output along channels.
        return torch.cat([x, forward_passed], 1)


class UnetSkipConnectionBlock(nn.Module):
    """Helper class for building a 2D unet."""

    def __init__(
        self,
        outer_nc,
        inner_nc,
        upsampling_mode,
        norm=nn.BatchNorm2d,
        submodule=None,
        use_dropout=False,
        dropout_prob=0.1,
    ):
        super().__init__()

        if submodule is None:
            # Innermost block: down + up only.
            model = [
                DownBlock(
                    outer_nc,
                    inner_nc,
                    use_dropout=use_dropout,
                    dropout_prob=dropout_prob,
                    norm=norm,
                ),
                UpBlock(
                    inner_nc,
                    outer_nc,
                    use_dropout=use_dropout,
                    dropout_prob=dropout_prob,
                    norm=norm,
                    upsampling_mode=upsampling_mode,
                ),
            ]
        else:
            # The submodule concatenates its input with its output, hence 2 * inner_nc.
            model = [
                DownBlock(
                    outer_nc,
                    inner_nc,
                    use_dropout=use_dropout,
                    dropout_prob=dropout_prob,
                    norm=norm,
                ),
                submodule,
                UpBlock(
                    2 * inner_nc,
                    outer_nc,
                    use_dropout=use_dropout,
                    dropout_prob=dropout_prob,
                    norm=norm,
                    upsampling_mode=upsampling_mode,
                ),
            ]

        self.model = nn.Sequential(*model)

    def forward(self, x):
        forward_passed = self.model(x)
        # Skip connection: concatenate input with output along channels.
        return torch.cat([x, forward_passed], 1)


class Unet(nn.Module):
    """A 2d-Unet implementation with sane defaults."""

    def __init__(
        self,
        in_channels,
        out_channels,
        nf0,
        num_down,
        max_channels,
        use_dropout,
        upsampling_mode="transpose",
        dropout_prob=0.1,
        norm=nn.BatchNorm2d,
        outermost_linear=False,
    ):
        """
        :param in_channels: Number of input channels
        :param out_channels: Number of output channels
        :param nf0: Number of features at highest level of U-Net
        :param num_down: Number of downsampling stages.
        :param max_channels: Maximum number of channels (channels multiply by 2 with every downsampling stage)
        :param use_dropout: Whether to use dropout or not.
        :param dropout_prob: Dropout probability if use_dropout=True.
        :param upsampling_mode: Which type of upsampling should be used. See "UpBlock" for documentation.
        :param norm: Which norm to use. If None, no norm is used. Default is Batchnorm with affinity.
        :param outermost_linear: Whether the output layer should be a linear layer or a nonlinear one.
        """
        super().__init__()

        assert num_down > 0, "Need at least one downsampling layer in UNet."

        # Define the in block
        self.in_layer = [
            Conv2dSame(
                in_channels, nf0, kernel_size=3, bias=True if norm is None else False
            )
        ]
        if norm is not None:
            self.in_layer += [norm(nf0, affine=True)]
        self.in_layer += [nn.LeakyReLU(0.2, True)]

        if use_dropout:
            self.in_layer += [nn.Dropout2d(dropout_prob)]
        self.in_layer = nn.Sequential(*self.in_layer)

        # Define the center UNet block
        self.unet_block = UnetSkipConnectionBlock(
            min(2 ** (num_down - 1) * nf0, max_channels),
            min(2 ** (num_down - 1) * nf0, max_channels),
            use_dropout=use_dropout,
            dropout_prob=dropout_prob,
            norm=None,  # Innermost has no norm (spatial dimension 1)
            upsampling_mode=upsampling_mode,
        )

        for i in list(range(0, num_down - 1))[::-1]:
            self.unet_block = UnetSkipConnectionBlock(
                min(2 ** i * nf0, max_channels),
                min(2 ** (i + 1) * nf0, max_channels),
                use_dropout=use_dropout,
                dropout_prob=dropout_prob,
                submodule=self.unet_block,
                norm=norm,
                upsampling_mode=upsampling_mode,
            )

        # Define the out layer. Each unet block concatenates its inputs with its outputs - so the output layer
        # automatically receives the output of the in_layer and the output of the last unet layer.
        self.out_layer = [
            Conv2dSame(
                2 * nf0,
                out_channels,
                kernel_size=3,
                bias=outermost_linear or (norm is None),
            )
        ]

        if not outermost_linear:
            if norm is not None:
                self.out_layer += [norm(out_channels, affine=True)]
            self.out_layer += [nn.ReLU(True)]

            if use_dropout:
                self.out_layer += [nn.Dropout2d(dropout_prob)]
        self.out_layer = nn.Sequential(*self.out_layer)

        # Expose the output conv's weight directly on the module.
        self.out_layer_weight = self.out_layer[0].weight

    def forward(self, x):
        in_layer = self.in_layer(x)
        unet = self.unet_block(in_layer)
        out_layer = self.out_layer(unet)
        return out_layer


class Identity(nn.Module):
    """Helper module to allow Downsampling and Upsampling nets to default to identity if they receive an empty list."""

    def __init__(self):
        super().__init__()

    def forward(self, input):
        return input


class DownsamplingNet(nn.Module):
    """A subnetwork that downsamples a 2D feature map with strided convolutions."""

    def __init__(
        self,
        per_layer_out_ch,
        in_channels,
        use_dropout,
        dropout_prob=0.1,
        last_layer_one=False,
        norm=nn.BatchNorm2d,
    ):
        """
        :param per_layer_out_ch: python list of integers. Defines the number of output channels per layer. Length of
                                 list defines number of downsampling steps (each step downsamples by factor of 2.)
        :param in_channels: Number of input channels.
        :param use_dropout: Whether or not to use dropout.
        :param dropout_prob: Dropout probability.
        :param last_layer_one: Whether the output of the last layer will have a spatial size of 1. In that case,
                               the last layer will not have batchnorm, else, it will.
        :param norm: Which norm to use. Defaults to BatchNorm.
        """
        super().__init__()

        if not len(per_layer_out_ch):
            # No downsampling requested: act as identity.
            self.downs = Identity()
        else:
            self.downs = list()
            self.downs.append(
                DownBlock(
                    in_channels,
                    per_layer_out_ch[0],
                    use_dropout=use_dropout,
                    dropout_prob=dropout_prob,
                    middle_channels=per_layer_out_ch[0],
                    norm=norm,
                )
            )
            for i in range(0, len(per_layer_out_ch) - 1):
                # Drop the norm for the last layer if its output is 1x1.
                if last_layer_one and (i == len(per_layer_out_ch) - 2):
                    norm = None
                self.downs.append(
                    DownBlock(
                        per_layer_out_ch[i],
                        per_layer_out_ch[i + 1],
                        dropout_prob=dropout_prob,
                        use_dropout=use_dropout,
                        norm=norm,
                    )
                )
            self.downs = nn.Sequential(*self.downs)

    def forward(self, input):
        return self.downs(input)


class UpsamplingNet(nn.Module):
    """A subnetwork that upsamples a 2D feature map with a variety of upsampling options."""

    def __init__(
        self,
        per_layer_out_ch,
        in_channels,
        upsampling_mode,
        use_dropout,
        dropout_prob=0.1,
        first_layer_one=False,
        norm=nn.BatchNorm2d,
    ):
        """
        :param per_layer_out_ch: python list of integers. Defines the number of output channels per layer. Length of
                                 list defines number of upsampling steps (each step upsamples by factor of 2.)
        :param in_channels: Number of input channels.
        :param upsampling_mode: Mode of upsampling. For documentation, see class "UpBlock"
        :param use_dropout: Whether or not to use dropout.
        :param dropout_prob: Dropout probability.
        :param first_layer_one: Whether the input to the last layer will have a spatial size of 1. In that case,
                                the first layer will not have a norm, else, it will.
        :param norm: Which norm to use. Defaults to BatchNorm.
        """
        super().__init__()

        if not len(per_layer_out_ch):
            # No upsampling requested: act as identity.
            self.ups = Identity()
        else:
            self.ups = list()
            self.ups.append(
                UpBlock(
                    in_channels,
                    per_layer_out_ch[0],
                    use_dropout=use_dropout,
                    dropout_prob=dropout_prob,
                    norm=None if first_layer_one else norm,
                    upsampling_mode=upsampling_mode,
                )
            )
            for i in range(0, len(per_layer_out_ch) - 1):
                self.ups.append(
                    UpBlock(
                        per_layer_out_ch[i],
                        per_layer_out_ch[i + 1],
                        use_dropout=use_dropout,
                        dropout_prob=dropout_prob,
                        norm=norm,
                        upsampling_mode=upsampling_mode,
                    )
                )
            self.ups = nn.Sequential(*self.ups)

    def forward(self, input):
        return self.ups(input)
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/tools/camera_utils.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/tools/camera_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0c76373dda4a47fc3ffe445a4b0b2136954dc140 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/tools/camera_utils.py @@ -0,0 +1,144 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe


# TODO: all this potentially goes to PyTorch3D

import math
from typing import Tuple

import pytorch3d as pt3d
import torch
from pytorch3d.renderer.cameras import CamerasBase


def jitter_extrinsics(
    R: torch.Tensor,
    T: torch.Tensor,
    max_angle: float = (math.pi * 2.0),
    translation_std: float = 1.0,
    scale_std: float = 0.3,
):
    """
    Jitter the extrinsic camera parameters `R` and `T` with a random similarity
    transformation.
    The transformation rotates by a random angle between [0, max_angle];
    scales by a random factor exp(N(0, scale_std)), where N(0, scale_std) is
    a random sample from a normal distribution with zero mean and standard
    deviation scale_std; and translates by a 3D offset sampled from
    N(0, translation_std).
    """
    assert all(x >= 0.0 for x in (max_angle, translation_std, scale_std))
    N = R.shape[0]
    # Draw one random rotation and shrink/grow its angle by max_angle via the
    # so3 log/exp maps; the same jitter is applied to the whole batch.
    R_jit = pt3d.transforms.random_rotations(1, device=R.device)
    R_jit = pt3d.transforms.so3_exponential_map(
        pt3d.transforms.so3_log_map(R_jit) * max_angle
    )
    T_jit = torch.randn_like(R_jit[:1, :, 0]) * translation_std
    # Assemble a 4x4 transform; note row-vector convention: translation sits
    # in the last row (rigid_transform[:, 3, :3]).
    rigid_transform = pt3d.ops.eyes(dim=4, N=N, device=R.device)
    rigid_transform[:, :3, :3] = R_jit.expand(N, 3, 3)
    rigid_transform[:, 3, :3] = T_jit.expand(N, 3)
    scale_jit = torch.exp(torch.randn_like(T_jit[:, 0]) * scale_std).expand(N)
    return apply_camera_alignment(R, T, rigid_transform, scale_jit)


def apply_camera_alignment(
    R: torch.Tensor,
    T: torch.Tensor,
    rigid_transform: torch.Tensor,
    scale: torch.Tensor,
):
    """
    Args:
        R: Camera rotation matrix of shape (N, 3, 3).
        T: Camera translation of shape (N, 3).
        rigid_transform: A tensor of shape (N, 4, 4) representing a batch of
            N 4x4 tensors that map the scene pointcloud from misaligned coords
            to the aligned space.
        scale: A list of N scaling factors. A tensor of shape (N,)

    Returns:
        R_aligned: The aligned rotations R.
        T_aligned: The aligned translations T.
    """
    # Row-vector convention: rotation in the top-left 3x3, translation in row 3.
    R_rigid = rigid_transform[:, :3, :3]
    T_rigid = rigid_transform[:, 3:, :3]
    R_aligned = R_rigid.permute(0, 2, 1).bmm(R)
    T_aligned = scale[:, None] * (T - (T_rigid @ R_aligned)[:, 0])
    return R_aligned, T_aligned


def get_min_max_depth_bounds(cameras, scene_center, scene_extent):
    """
    Estimate near/far depth plane as:
    near = dist(cam_center, self.scene_center) - self.scene_extent
    far = dist(cam_center, self.scene_center) + self.scene_extent
    """
    cam_center = cameras.get_camera_center()
    center_dist = (
        ((cam_center - scene_center.to(cameras.R)[None]) ** 2)
        .sum(dim=-1)
        .clamp(0.001)
        .sqrt()
    )
    # Keep the near plane positive even for cameras inside the scene sphere.
    center_dist = center_dist.clamp(scene_extent + 1e-3)
    min_depth = center_dist - scene_extent
    max_depth = center_dist + scene_extent
    return min_depth, max_depth


def volumetric_camera_overlaps(
    cameras: CamerasBase,
    scene_extent: float = 8.0,
    scene_center: Tuple[float, float, float] = (0.0, 0.0, 0.0),
    resol: int = 16,
    weigh_by_ray_angle: bool = True,
):
    """
    Compute the overlaps between viewing frustrums of all pairs of cameras
    in `cameras`.

    Returns a (N, N) IoU-like matrix over a voxel grid of side `resol`
    spanning a cube of half-size `scene_extent` centered at `scene_center`.
    """
    device = cameras.device
    ba = cameras.R.shape[0]
    n_vox = int(resol**3)
    grid = pt3d.structures.Volumes(
        densities=torch.zeros([1, 1, resol, resol, resol], device=device),
        volume_translation=-torch.FloatTensor(scene_center)[None].to(device),
        voxel_size=2.0 * scene_extent / resol,
    ).get_coord_grid(world_coordinates=True)

    grid = grid.view(1, n_vox, 3).expand(ba, n_vox, 3)
    gridp = cameras.transform_points(grid, eps=1e-2)
    # A voxel is "seen" by a camera if it projects inside NDC [-1, 1]^2 with
    # positive depth.
    proj_in_camera = (
        torch.prod((gridp[..., :2].abs() <= 1.0), dim=-1)
        * (gridp[..., 2] > 0.0).float()
    )  # ba x n_vox

    if weigh_by_ray_angle:
        rays = torch.nn.functional.normalize(
            grid - cameras.get_camera_center()[:, None], dim=-1
        )
        rays_masked = rays * proj_in_camera[..., None]

        # - slow and readable:
        # inter = torch.zeros(ba, ba)
        # for i1 in range(ba):
        #     for i2 in range(ba):
        #         inter[i1, i2] = (
        #             1 + (rays_masked[i1] * rays_masked[i2]
        #         ).sum(dim=-1)).sum()

        # - fast:
        rays_masked = rays_masked.view(ba, n_vox * 3)
        inter = n_vox + (rays_masked @ rays_masked.t())

    else:
        inter = proj_in_camera @ proj_in_camera.t()

    mass = torch.diag(inter)
    # Clamp the denominator to avoid division by ~0 for empty frustums.
    iou = inter / (mass[:, None] + mass[None, :] - inter).clamp(0.1)

    return iou
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/tools/circle_fitting.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/tools/circle_fitting.py new file mode 100644 index 0000000000000000000000000000000000000000..7f6cd1a05ee2f0a691fa1540f5dc454282e7662c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/tools/circle_fitting.py @@ -0,0 +1,240 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
+ +# pyre-unsafe + +import warnings +from dataclasses import dataclass +from math import pi +from typing import Optional + +import torch + + +def get_rotation_to_best_fit_xy( + points: torch.Tensor, centroid: Optional[torch.Tensor] = None +) -> torch.Tensor: + """ + Returns a rotation R such that `points @ R` has a best fit plane + parallel to the xy plane + + Args: + points: (*, N, 3) tensor of points in 3D + centroid: (*, 1, 3), (3,) or scalar: their centroid + + Returns: + (*, 3, 3) tensor rotation matrix + """ + if centroid is None: + centroid = points.mean(dim=-2, keepdim=True) + + points_centered = points - centroid + _, evec = torch.linalg.eigh(points_centered.transpose(-1, -2) @ points_centered) + # in general, evec can form either right- or left-handed basis, + # but we need the former to have a proper rotation (not reflection) + return torch.cat( + (evec[..., 1:], torch.cross(evec[..., 1], evec[..., 2])[..., None]), dim=-1 + ) + + +def _signed_area(path: torch.Tensor) -> torch.Tensor: + """ + Calculates the signed area / Lévy area of a 2D path. If the path is closed, + i.e. ends where it starts, this is the integral of the winding number over + the whole plane. If not, consider a closed path made by adding a straight + line from the end to the start; the signed area is the integral of the + winding number (also over the plane) with respect to that closed path. + + If this number is positive, it indicates in some sense that the path + turns anticlockwise more than clockwise, and vice versa. + + Args: + path: N x 2 tensor of points. + + Returns: + signed area, shape () + """ + # This calculation is a sum of areas of triangles of the form + # (path[0], path[i], path[i+1]), where each triangle is half a + # parallelogram. + x, y = (path[1:] - path[:1]).unbind(1) + return (y[1:] * x[:-1] - x[1:] * y[:-1]).sum() * 0.5 + + +@dataclass(frozen=True) +class Circle2D: + """ + Contains details of a circle in a plane. 
+ Members + center: tensor shape (2,) + radius: tensor shape () + generated_points: points around the circle, shape (n_points, 2) + """ + + center: torch.Tensor + radius: torch.Tensor + generated_points: torch.Tensor + + +def fit_circle_in_2d( + points2d, *, n_points: int = 0, angles: Optional[torch.Tensor] = None +) -> Circle2D: + """ + Simple best fitting of a circle to 2D points. In particular, the circle which + minimizes the sum of the squares of the squared-distances to the circle. + + Finds (a,b) and r to minimize the sum of squares (over the x,y pairs) of + r**2 - [(x-a)**2+(y-b)**2] + i.e. + (2*a)*x + (2*b)*y + (r**2 - a**2 - b**2)*1 - (x**2 + y**2) + + In addition, generates points along the circle. If angles is None (default) + then n_points around the circle equally spaced are given. These begin at the + point closest to the first input point. They continue in the direction which + seems to match the movement of points in points2d, as judged by its + signed area. If `angles` are provided, then n_points is ignored, and points + along the circle at the given angles are returned, with the starting point + and direction as before. + + (Note that `generated_points` is affected by the order of the points in + points2d, but the other outputs are not.) + + Args: + points2d: N x 2 tensor of 2D points + n_points: number of points to generate on the circle, if angles not given + angles: optional angles in radians of points to generate. 
+ + Returns: + Circle2D object + """ + design = torch.cat([points2d, torch.ones_like(points2d[:, :1])], dim=1) + rhs = (points2d**2).sum(1) + n_provided = points2d.shape[0] + if n_provided < 3: + raise ValueError(f"{n_provided} points are not enough to determine a circle") + solution = torch.linalg.lstsq(design, rhs[:, None]).solution + center = solution[:2, 0] / 2 + radius = torch.sqrt(solution[2, 0] + (center**2).sum()) + if n_points > 0: + if angles is not None: + warnings.warn("n_points ignored because angles provided") + else: + angles = torch.linspace(0, 2 * pi, n_points, device=points2d.device) + + if angles is not None: + initial_direction_xy = (points2d[0] - center).unbind() + initial_angle = torch.atan2(initial_direction_xy[1], initial_direction_xy[0]) + with torch.no_grad(): + anticlockwise = _signed_area(points2d) > 0 + if anticlockwise: + use_angles = initial_angle + angles + else: + use_angles = initial_angle - angles + generated_points = center[None] + radius * torch.stack( + [torch.cos(use_angles), torch.sin(use_angles)], dim=-1 + ) + else: + generated_points = points2d.new_zeros(0, 2) + return Circle2D(center=center, radius=radius, generated_points=generated_points) + + +@dataclass(frozen=True) +class Circle3D: + """ + Contains details of a circle in 3D. + Members + center: tensor shape (3,) + radius: tensor shape () + normal: tensor shape (3,) + generated_points: points around the circle, shape (n_points, 3) + """ + + center: torch.Tensor + radius: torch.Tensor + normal: torch.Tensor + generated_points: torch.Tensor + + +def fit_circle_in_3d( + points, + *, + n_points: int = 0, + angles: Optional[torch.Tensor] = None, + offset: Optional[torch.Tensor] = None, + up: Optional[torch.Tensor] = None, +) -> Circle3D: + """ + Simple best fit circle to 3D points. Uses circle_2d in the + least-squares best fit plane. + + In addition, generates points along the circle. If angles is None (default) + then n_points around the circle equally spaced are given. 
These begin at the
+ point closest to the first input point. They continue in the direction which
+ seems to match the movement of points. If angles is provided, then n_points
+ is ignored, and points along the circle at the given angles are returned,
+ with the starting point and direction as before.
+
+ Further, an offset can be given to add to the generated points; this is
+ interpreted in a rotated coordinate system where (0, 0, 1) is normal to the
+ circle, specifically the normal which is approximately in the direction of a
+ given `up` vector. The remaining rotation is disambiguated in an unspecified
+ but deterministic way.
+
+ (Note that `generated_points` is affected by the order of the points in
+ points, but the other outputs are not.)
+
+ Args:
+ points: N x 3 tensor of 3D points
+ n_points: number of points to generate on the circle
+ angles: optional angles in radians of points to generate.
+ offset: optional tensor (3,), a displacement expressed in a "canonical"
+ coordinate system to add to the generated points.
+ up: optional tensor (3,), a vector which helps define the
+ "canonical" coordinate system for interpreting `offset`.
+ Required if offset is used.
+
+
+ Returns:
+ Circle3D object
+ """
+ centroid = points.mean(0)
+ r = get_rotation_to_best_fit_xy(points, centroid)
+ normal = r[:, 2]
+ rotated_points = (points - centroid) @ r
+ result_2d = fit_circle_in_2d(
+ rotated_points[:, :2], n_points=n_points, angles=angles
+ )
+ center_3d = result_2d.center @ r[:, :2].t() + centroid
+ n_generated_points = result_2d.generated_points.shape[0]
+ if n_generated_points > 0:
+ generated_points_in_plane = torch.cat(
+ [
+ result_2d.generated_points,
+ torch.zeros_like(result_2d.generated_points[:, :1]),
+ ],
+ dim=1,
+ )
+ if offset is not None:
+ if up is None:
+ raise ValueError("Missing `up` input for interpreting offset")
+ with torch.no_grad():
+ swap = torch.dot(up, normal) < 0
+ if swap:
+ # We need some rotation which takes +z to -z.
Here's one. + generated_points_in_plane += offset * offset.new_tensor([1, -1, -1]) + else: + generated_points_in_plane += offset + + generated_points = generated_points_in_plane @ r.t() + centroid + else: + generated_points = points.new_zeros(0, 3) + + return Circle3D( + radius=result_2d.radius, + center=center_3d, + normal=normal, + generated_points=generated_points, + ) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/tools/config.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/tools/config.py new file mode 100644 index 0000000000000000000000000000000000000000..4479fe65562fce952d39f15d520285650977299c --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/tools/config.py @@ -0,0 +1,1210 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +import dataclasses +import inspect +import itertools +import sys +import warnings +from collections import Counter, defaultdict +from enum import Enum +from functools import partial +from typing import ( + Any, + Callable, + Dict, + get_args, + get_origin, + List, + Optional, + Tuple, + Type, + TypeVar, + Union, +) + +from omegaconf import DictConfig, OmegaConf, open_dict + + +""" +This functionality allows a configurable system to be determined in a dataclass-type +way. It is a generalization of omegaconf's "structured", in the dataclass case. +Core functionality: + +- Configurable -- A base class used to label a class as being one which uses this + system. Uses class members and __post_init__ like a dataclass. + +- expand_args_fields -- Expands a class like `dataclasses.dataclass`. Runs automatically. 
+ +- get_default_args -- gets an omegaconf.DictConfig for initializing a given class. + +- run_auto_creation -- Initialises nested members. To be called in __post_init__. + + +In addition, a Configurable may contain members whose type is decided at runtime. + +- ReplaceableBase -- As a base instead of Configurable, labels a class to say that + any child class can be used instead. + +- registry -- A global store of named child classes of ReplaceableBase classes. + Used as `@registry.register` decorator on class definition. + + +Additional utility functions: + +- remove_unused_components -- used for simplifying a DictConfig instance. +- get_default_args_field -- default for DictConfig member of another configurable. +- enable_get_default_args -- Allows get_default_args on a function or plain class. + + +1. The simplest usage of this functionality is as follows. First a schema is defined +in dataclass style. + + class A(Configurable): + n: int = 9 + + class B(Configurable): + a: A + + def __post_init__(self): + run_auto_creation(self) + +Then it can be used like + + b_args = get_default_args(B) + b = B(**b_args) + +In this case, get_default_args(B) returns an omegaconf.DictConfig with the right +members {"a_args": {"n": 9}}. It also modifies the definitions of the classes to +something like the following. (The modification itself is done by the function +`expand_args_fields`, which is called inside `get_default_args`.) + + @dataclasses.dataclass + class A: + n: int = 9 + + @dataclasses.dataclass + class B: + a_args: DictConfig = dataclasses.field(default_factory=lambda: DictConfig({"n": 9})) + + def __post_init__(self): + self.a = A(**self.a_args) + +2. Pluggability. Instead of a dataclass-style member being given a concrete class, +it can be given a base class and the implementation will be looked up by name in the +global `registry` in this module. E.g. 
+ + class A(ReplaceableBase): + k: int = 1 + + @registry.register + class A1(A): + m: int = 3 + + @registry.register + class A2(A): + n: str = "2" + + class B(Configurable): + a: A + a_class_type: str = "A2" + b: Optional[A] + b_class_type: Optional[str] = "A2" + + def __post_init__(self): + run_auto_creation(self) + +will expand to + + @dataclasses.dataclass + class A: + k: int = 1 + + @dataclasses.dataclass + class A1(A): + m: int = 3 + + @dataclasses.dataclass + class A2(A): + n: str = "2" + + @dataclasses.dataclass + class B: + a_class_type: str = "A2" + a_A1_args: DictConfig = dataclasses.field( + default_factory=lambda: DictConfig({"k": 1, "m": 3} + ) + a_A2_args: DictConfig = dataclasses.field( + default_factory=lambda: DictConfig({"k": 1, "n": 2} + ) + b_class_type: Optional[str] = "A2" + b_A1_args: DictConfig = dataclasses.field( + default_factory=lambda: DictConfig({"k": 1, "m": 3} + ) + b_A2_args: DictConfig = dataclasses.field( + default_factory=lambda: DictConfig({"k": 1, "n": 2} + ) + + def __post_init__(self): + if self.a_class_type == "A1": + self.a = A1(**self.a_A1_args) + elif self.a_class_type == "A2": + self.a = A2(**self.a_A2_args) + else: + raise ValueError(...) + + if self.b_class_type is None: + self.b = None + elif self.b_class_type == "A1": + self.b = A1(**self.b_A1_args) + elif self.b_class_type == "A2": + self.b = A2(**self.b_A2_args) + else: + raise ValueError(...) + +3. Aside from these classes, the members of these classes should be things +which DictConfig is happy with: e.g. (bool, int, str, None, float) and what +can be built from them with `DictConfig`s and lists of them. + +In addition, you can call `get_default_args` on a function or class to get +the `DictConfig` of its defaulted arguments, assuming those are all things +which `DictConfig` is happy with, so long as you add a call to +`enable_get_default_args` after its definition. 
If you want to use such a +thing as the default for a member of another configured class, +`get_default_args_field` is a helper. +""" + + +TYPE_SUFFIX: str = "_class_type" +ARGS_SUFFIX: str = "_args" +ENABLED_SUFFIX: str = "_enabled" +CREATE_PREFIX: str = "create_" +IMPL_SUFFIX: str = "_impl" +TWEAK_SUFFIX: str = "_tweak_args" +_DATACLASS_INIT: str = "__dataclass_own_init__" +PRE_EXPAND_NAME: str = "pre_expand" + + +class ReplaceableBase: + """ + Base class for a class (a "replaceable") which is a base class for + dataclass-style implementations. The implementations can be stored + in the registry. They get expanded into dataclasses with expand_args_fields. + This expansion is delayed. + """ + + def __new__(cls, *args, **kwargs): + """ + These classes should be expanded only when needed (because processing + fixes the list of replaceable subclasses of members of the class). It + is safer if users expand the classes explicitly. But if the class gets + instantiated when it hasn't been processed, we expand it here. + """ + obj = super().__new__(cls) + if cls is not ReplaceableBase and not _is_actually_dataclass(cls): + expand_args_fields(cls) + return obj + + +class Configurable: + """ + Base class for dataclass-style classes which are not replaceable. These get + expanded into a dataclass with expand_args_fields. + This expansion is delayed. + """ + + def __new__(cls, *args, **kwargs): + """ + These classes should be expanded only when needed (because processing + fixes the list of replaceable subclasses of members of the class). It + is safer if users expand the classes explicitly. But if the class gets + instantiated when it hasn't been processed, we expand it here. 
+ """
+ obj = super().__new__(cls)
+ if cls is not Configurable and not _is_actually_dataclass(cls):
+ expand_args_fields(cls)
+ return obj
+
+
+_X = TypeVar("_X", bound=ReplaceableBase)
+_Y = TypeVar("_Y", bound=Union[ReplaceableBase, Configurable])
+
+
+class _Registry:
+ """
+ Register from names to classes. In particular, we say that direct subclasses of
+ ReplaceableBase are "base classes" and we register subclasses of each base class
+ in a separate namespace.
+ """
+
+ def __init__(self) -> None:
+ self._mapping: Dict[Type[ReplaceableBase], Dict[str, Type[ReplaceableBase]]] = (
+ defaultdict(dict)
+ )
+
+ def register(self, some_class: Type[_X]) -> Type[_X]:
+ """
+ A class decorator, to register a class in self.
+ """
+ name = some_class.__name__
+ self._register(some_class, name=name)
+ return some_class
+
+ def _register(
+ self,
+ some_class: Type[ReplaceableBase],
+ *,
+ base_class: Optional[Type[ReplaceableBase]] = None,
+ name: str,
+ ) -> None:
+ """
+ Register a new member.
+
+ Args:
+ some_class: the new member
+ base_class: (optional) what the new member is a type for
+ name: name for the new member
+ """
+ if base_class is None:
+ base_class = self._base_class_from_class(some_class)
+ if base_class is None:
+ raise ValueError(
+ f"Cannot register {some_class}. Cannot tell what it is."
+ )
+ self._mapping[base_class][name] = some_class
+
+ def get(self, base_class_wanted: Type[_X], name: str) -> Type[_X]:
+ """
+ Retrieve a class from the registry by name
+
+ Args:
+ base_class_wanted: parent type of type we are looking for.
+ It determines the namespace.
+ This will typically be a direct subclass of ReplaceableBase.
+ name: what to look for
+
+ Returns:
+ class type
+ """
+ if self._is_base_class(base_class_wanted):
+ base_class = base_class_wanted
+ else:
+ base_class = self._base_class_from_class(base_class_wanted)
+ if base_class is None:
+ raise ValueError(
+ f"Cannot look up {base_class_wanted}. Cannot tell what it is."
+ ) + if not isinstance(name, str): + raise ValueError( + f"Cannot look up a {type(name)} in the registry. Got {name}." + ) + result = self._mapping[base_class].get(name) + if result is None: + raise ValueError(f"{name} has not been registered.") + if not issubclass(result, base_class_wanted): + raise ValueError( + f"{name} resolves to {result} which does not subclass {base_class_wanted}" + ) + # pyre-ignore[7] + return result + + def get_all( + self, base_class_wanted: Type[ReplaceableBase] + ) -> List[Type[ReplaceableBase]]: + """ + Retrieve all registered implementations from the registry + + Args: + base_class_wanted: parent type of type we are looking for. + It determines the namespace. + This will typically be a direct subclass of ReplaceableBase. + Returns: + list of class types in alphabetical order of registered name. + """ + if self._is_base_class(base_class_wanted): + source = self._mapping[base_class_wanted] + return [source[key] for key in sorted(source)] + + base_class = self._base_class_from_class(base_class_wanted) + if base_class is None: + raise ValueError( + f"Cannot look up {base_class_wanted}. Cannot tell what it is." + ) + source = self._mapping[base_class] + return [ + source[key] + for key in sorted(source) + if issubclass(source[key], base_class_wanted) + and source[key] is not base_class_wanted + ] + + @staticmethod + def _is_base_class(some_class: Type[ReplaceableBase]) -> bool: + """ + Return whether the given type is a direct subclass of ReplaceableBase + and so gets used as a namespace. 
+ """ + return ReplaceableBase in some_class.__bases__ + + @staticmethod + def _base_class_from_class( + some_class: Type[ReplaceableBase], + ) -> Optional[Type[ReplaceableBase]]: + """ + Find the parent class of some_class which inherits ReplaceableBase, or None + """ + for base in some_class.mro()[-3::-1]: + if base is not ReplaceableBase and issubclass(base, ReplaceableBase): + return base + return None + + +# Global instance of the registry +registry = _Registry() + + +class _ProcessType(Enum): + """ + Type of member which gets rewritten by expand_args_fields. + """ + + CONFIGURABLE = 1 + REPLACEABLE = 2 + OPTIONAL_CONFIGURABLE = 3 + OPTIONAL_REPLACEABLE = 4 + + +def _default_create( + name: str, type_: Type, process_type: _ProcessType +) -> Callable[[Any], None]: + """ + Return the default creation function for a member. This is a function which + could be called in __post_init__ to initialise the member, and will be called + from run_auto_creation. + + Args: + name: name of the member + type_: type of the member (with any Optional removed) + process_type: Shows whether member's declared type inherits ReplaceableBase, + in which case the actual type to be created is decided at + runtime. + + Returns: + Function taking one argument, the object whose member should be + initialized, i.e. self. 
+ """ + impl_name = f"{CREATE_PREFIX}{name}{IMPL_SUFFIX}" + + def inner(self): + expand_args_fields(type_) + impl = getattr(self, impl_name) + args = getattr(self, name + ARGS_SUFFIX) + impl(True, args) + + def inner_optional(self): + expand_args_fields(type_) + impl = getattr(self, impl_name) + enabled = getattr(self, name + ENABLED_SUFFIX) + args = getattr(self, name + ARGS_SUFFIX) + impl(enabled, args) + + def inner_pluggable(self): + type_name = getattr(self, name + TYPE_SUFFIX) + impl = getattr(self, impl_name) + if type_name is None: + args = None + else: + args = getattr(self, f"{name}_{type_name}{ARGS_SUFFIX}", None) + impl(type_name, args) + + if process_type == _ProcessType.OPTIONAL_CONFIGURABLE: + return inner_optional + return inner if process_type == _ProcessType.CONFIGURABLE else inner_pluggable + + +def _default_create_impl( + name: str, type_: Type, process_type: _ProcessType +) -> Callable[[Any, Any, DictConfig], None]: + """ + Return the default internal function for initialising a member. This is a function + which could be called in the create_ function to initialise the member. + + Args: + name: name of the member + type_: type of the member (with any Optional removed) + process_type: Shows whether member's declared type inherits ReplaceableBase, + in which case the actual type to be created is decided at + runtime. + + Returns: + Function taking + - self, the object whose member should be initialized. + - option for what to do. This is + - for pluggables, the type to initialise or None to do nothing + - for non pluggables, a bool indicating whether to initialise. + - the args for initializing the member. 
+ """
+
+ def create_configurable(self, enabled, args):
+ if enabled:
+ expand_args_fields(type_)
+ setattr(self, name, type_(**args))
+ else:
+ setattr(self, name, None)
+
+ def create_pluggable(self, type_name, args):
+ if type_name is None:
+ setattr(self, name, None)
+ return
+
+ if not isinstance(type_name, str):
+ raise ValueError(
+ f"A {type(type_name)} was received as the type of {name}."
+ + f" Perhaps this is from {name}{TYPE_SUFFIX}?"
+ )
+ chosen_class = registry.get(type_, type_name)
+ if self._known_implementations.get(type_name, chosen_class) is not chosen_class:
+ # If this warning is raised, it means that a new definition of
+ # the chosen class has been registered since our class was processed
+ # (i.e. expanded). A DictConfig which comes from our get_default_args
+ # (which might have triggered the processing) will contain the old default
+ # values for the members of the chosen class. Changes to those defaults which
+ # were made in the redefinition will not be reflected here.
+ warnings.warn(f"New implementation of {type_name} is being chosen.")
+ expand_args_fields(chosen_class)
+ setattr(self, name, chosen_class(**args))
+
+ if process_type in (_ProcessType.CONFIGURABLE, _ProcessType.OPTIONAL_CONFIGURABLE):
+ return create_configurable
+ return create_pluggable
+
+
+def run_auto_creation(self: Any) -> None:
+ """
+ Run all the functions named in self._creation_functions.
+ """
+ for create_function in self._creation_functions:
+ getattr(self, create_function)()
+
+
+def _is_configurable_class(C) -> bool:
+ return isinstance(C, type) and issubclass(C, (Configurable, ReplaceableBase))
+
+
+def get_default_args(C, *, _do_not_process: Tuple[type, ...] = ()) -> DictConfig:
+ """
+ Get the DictConfig corresponding to the defaults in a dataclass or
+ configurable. Normal use is to provide a dataclass as C.
+ If enable_get_default_args has been called on a function or plain class,
+ then that function or class can be provided as C.
+ + If C is a subclass of Configurable or ReplaceableBase, we make sure + it has been processed with expand_args_fields. + + Args: + C: the class or function to be processed + _do_not_process: (internal use) When this function is called from + expand_args_fields, we specify any class currently being + processed, to make sure we don't try to process a class + while it is already being processed. + + Returns: + new DictConfig object, which is typed. + """ + if C is None: + return DictConfig({}) + + if _is_configurable_class(C): + if C in _do_not_process: + raise ValueError( + f"Internal recursion error. Need processed {C}," + f" but cannot get it. _do_not_process={_do_not_process}" + ) + # This is safe to run multiple times. It will return + # straight away if C has already been processed. + expand_args_fields(C, _do_not_process=_do_not_process) + + if dataclasses.is_dataclass(C): + # Note that if get_default_args_field is used somewhere in C, + # this call is recursive. No special care is needed, + # because in practice get_default_args_field is used for + # separate types than the outer type. + + try: + out: DictConfig = OmegaConf.structured(C) + except Exception: + print(f"### OmegaConf.structured({C}) failed ###") + # We don't use `raise From` here, because that gets the original + # exception hidden by the OC_CAUSE logic in the case where we are + # called by hydra. + raise + exclude = getattr(C, "_processed_members", ()) + with open_dict(out): + for field in exclude: + out.pop(field, None) + return out + + if _is_configurable_class(C): + raise ValueError(f"Failed to process {C}") + + if not inspect.isfunction(C) and not inspect.isclass(C): + raise ValueError(f"Unexpected {C}") + + dataclass_name = _dataclass_name_for_function(C) + dataclass = getattr(sys.modules[C.__module__], dataclass_name, None) + if dataclass is None: + raise ValueError( + f"Cannot get args for {C}. Was enable_get_default_args forgotten?" 
+ ) + + try: + out: DictConfig = OmegaConf.structured(dataclass) + except Exception: + print(f"### OmegaConf.structured failed for {C.__name__} ###") + raise + return out + + +def _dataclass_name_for_function(C: Any) -> str: + """ + Returns the name of the dataclass which enable_get_default_args(C) + creates. + """ + name = f"_{C.__name__}_default_args_" + return name + + +def _field_annotations_for_default_args( + C: Any, +) -> List[Tuple[str, Any, dataclasses.Field]]: + """ + If C is a function or a plain class with an __init__ function, + return the fields which `enable_get_default_args(C)` will need + to make a dataclass with. + + Args: + C: a function, or a class with an __init__ function. Must + have types for all its defaulted args. + + Returns: + a list of fields for a dataclass. + """ + + field_annotations = [] + for pname, defval in _params_iter(C): + default = defval.default + if default == inspect.Parameter.empty: + # we do not have a default value for the parameter + continue + + if defval.annotation == inspect._empty: + raise ValueError( + "All arguments of the input to enable_get_default_args have to" + f" be typed. Argument '{pname}' does not have a type annotation." 
+ ) + + _, annotation = _resolve_optional(defval.annotation) + + if isinstance(default, set): # force OmegaConf to convert it to ListConfig + default = tuple(default) + + if isinstance(default, (list, dict)): + # OmegaConf will convert to [Dict|List]Config, so it is safe to reuse the value + field_ = dataclasses.field(default_factory=lambda default=default: default) + elif not _is_immutable_type(annotation, default): + continue + else: + # we can use a simple default argument for dataclass.field + field_ = dataclasses.field(default=default) + field_annotations.append((pname, defval.annotation, field_)) + + return field_annotations + + +def enable_get_default_args(C: Any, *, overwrite: bool = True) -> None: + """ + If C is a function or a plain class with an __init__ function, + and you want get_default_args(C) to work, then add + `enable_get_default_args(C)` straight after the definition of C. + This makes a dataclass corresponding to the default arguments of C + and stores it in the same module as C. + + Args: + C: a function, or a class with an __init__ function. Must + have types for all its defaulted args. + overwrite: whether to allow calling this a second time on + the same function. 
+ """ + if not inspect.isfunction(C) and not inspect.isclass(C): + raise ValueError(f"Unexpected {C}") + + field_annotations = _field_annotations_for_default_args(C) + + name = _dataclass_name_for_function(C) + module = sys.modules[C.__module__] + if hasattr(module, name): + if overwrite: + warnings.warn(f"Overwriting {name} in {C.__module__}.") + else: + raise ValueError(f"Cannot overwrite {name} in {C.__module__}.") + dc = dataclasses.make_dataclass(name, field_annotations) + dc.__module__ = C.__module__ + setattr(module, name, dc) + + +def _params_iter(C): + """Returns dict of keyword args of a class or function C.""" + if inspect.isclass(C): + return itertools.islice( # exclude `self` + inspect.signature(C.__init__).parameters.items(), 1, None + ) + + return inspect.signature(C).parameters.items() + + +def _is_immutable_type(type_: Type, val: Any) -> bool: + if val is None: + return True + + PRIMITIVE_TYPES = (int, float, bool, str, bytes, tuple) + # sometimes type can be too relaxed (e.g. Any), so we also check values + if isinstance(val, PRIMITIVE_TYPES): + return True + + return type_ in PRIMITIVE_TYPES or ( + inspect.isclass(type_) and issubclass(type_, Enum) + ) + + +# copied from OmegaConf +def _resolve_optional(type_: Any) -> Tuple[bool, Any]: + """Check whether `type_` is equivalent to `typing.Optional[T]` for some T.""" + if get_origin(type_) is Union: + args = get_args(type_) + if len(args) == 2 and args[1] == type(None): # noqa E721 + return True, args[0] + if type_ is Any: + return True, Any + + return False, type_ + + +def _is_actually_dataclass(some_class) -> bool: + # Return whether the class some_class has been processed with + # the dataclass annotation. This is more specific than + # dataclasses.is_dataclass which returns True on anything + # deriving from a dataclass. + + # Checking for __init__ would also work for our purpose. 
def expand_args_fields(
    some_class: Type[_Y], *, _do_not_process: Tuple[type, ...] = ()
) -> Type[_Y]:
    """
    This expands a class which inherits Configurable or ReplaceableBase classes,
    including dataclass processing. some_class is modified in place by this function.
    If expand_args_fields(some_class) has already been called, subsequent calls do
    nothing and return some_class unmodified.
    For classes of type ReplaceableBase, you can add some_class to the registry before
    or after calling this function. But potential inner classes need to be registered
    before this function is run on the outer class.

    The transformations this function makes, before the concluding
    dataclasses.dataclass, are as follows. If X is a base class with registered
    subclasses Y and Z, replace a class member

        x: X

    and optionally

        x_class_type: str = "Y"
        def create_x(self):...

    with

        x_Y_args: dict = dataclasses.field(default_factory=lambda: get_default_args(Y))
        x_Z_args: dict = dataclasses.field(default_factory=lambda: get_default_args(Z))
        def create_x(self):
            args = self.getattr(f"x_{self.x_class_type}_args")
            self.create_x_impl(self.x_class_type, args)
        def create_x_impl(self, x_type, args):
            x_type = registry.get(X, x_type)
            expand_args_fields(x_type)
            self.x = x_type(**args)
        x_class_type: str = "UNDEFAULTED"

    without adding the optional attributes if they are already there.

    Similarly, replace

        x: Optional[X]

    and optionally

        x_class_type: Optional[str] = "Y"
        def create_x(self):...

    with

        x_Y_args: dict = dataclasses.field(default_factory=lambda: get_default_args(Y))
        x_Z_args: dict = dataclasses.field(default_factory=lambda: get_default_args(Z))
        def create_x(self):
            if self.x_class_type is None:
                args = None
            else:
                args = self.getattr(f"x_{self.x_class_type}_args", None)
            self.create_x_impl(self.x_class_type, args)
        def create_x_impl(self, x_class_type, args):
            if x_class_type is None:
                self.x = None
                return

            x_type = registry.get(X, x_class_type)
            expand_args_fields(x_type)
            assert args is not None
            self.x = x_type(**args)
        x_class_type: Optional[str] = "UNDEFAULTED"

    without adding the optional attributes if they are already there.

    Similarly, if X is a subclass of Configurable,

        x: X

    and optionally

        def create_x(self):...

    will be replaced with

        x_args: dict = dataclasses.field(default_factory=lambda: get_default_args(X))
        def create_x(self):
            self.create_x_impl(True, self.x_args)

        def create_x_impl(self, enabled, args):
            if enabled:
                expand_args_fields(X)
                self.x = X(**args)
            else:
                self.x = None

    Similarly, replace,

        x: Optional[X]
        x_enabled: bool = ...

    and optionally

        def create_x(self):...

    with

        x_args: dict = dataclasses.field(default_factory=lambda: get_default_args(X))
        x_enabled: bool = ...
        def create_x(self):
            self.create_x_impl(self.x_enabled, self.x_args)

        def create_x_impl(self, enabled, args):
            if enabled:
                expand_args_fields(X)
                self.x = X(**args)
            else:
                self.x = None


    Also adds the following class members, unannotated so that dataclass
    ignores them.

        - _creation_functions: Tuple[str, ...] of all the create_ functions,
          including those from base classes (not the create_x_impl ones).
        - _known_implementations: Dict[str, Type] containing the classes which
          have been found from the registry.
          (used only to raise a warning if it one has been overwritten)
        - _processed_members: a Dict[str, Any] of all the members which have been
          transformed, with values giving the types they were declared to have.
          (E.g. {"x": X} or {"x": Optional[X]} in the cases above.)

    In addition, if the class has a member function

        @classmethod
        def x_tweak_args(cls, member_type: Type, args: DictConfig) -> None

    then the default_factory of x_args will also have a call to x_tweak_args(X, x_args)
    and the default_factory of x_Y_args will also have a call to x_tweak_args(Y,
    x_Y_args).

    In addition, if the class inherits torch.nn.Module, the generated __init__ will
    call torch.nn.Module's __init__ before doing anything else.

    Before any transformation of the class, if the class has a classmethod called
    `pre_expand`, it will be called with no arguments.

    Note that although the *_args members are intended to have type DictConfig, they
    are actually internally annotated as dicts. OmegaConf is happy to see a DictConfig
    in place of a dict, but not vice-versa. Allowing dict lets a class user specify
    x_args as an explicit dict without getting an incomprehensible error.

    Args:
        some_class: the class to be processed
        _do_not_process: Internal use for get_default_args: Because get_default_args
            calls and is called by this function, we let it specify any class currently
            being processed, to make sure we don't try to process a class while
            it is already being processed.

    Returns:
        some_class itself, which has been modified in place. This
        allows this function to be used as a class decorator.
    """
    # Idempotence: a class which is already a dataclass has been expanded.
    if _is_actually_dataclass(some_class):
        return some_class

    # Give the class a chance to mutate itself before any transformation.
    if hasattr(some_class, PRE_EXPAND_NAME):
        getattr(some_class, PRE_EXPAND_NAME)()

    # The functions this class's run_auto_creation will run.
    creation_functions: List[str] = []
    # The classes which this type knows about from the registry
    # We could use a weakref.WeakValueDictionary here which would mean
    # that we don't warn if the class we should have expected is elsewhere
    # unused.
    known_implementations: Dict[str, Type] = {}
    # Names of members which have been processed.
    processed_members: Dict[str, Any] = {}

    # For all bases except ReplaceableBase and Configurable and object,
    # we need to process them before our own processing. This is
    # because dataclasses expect to inherit dataclasses and not unprocessed
    # dataclasses.
    # NOTE(review): mro()[-3:0:-1] walks the proper bases from most ancestral
    # to most derived, excluding some_class itself and the last two MRO
    # entries — presumably the Configurable/ReplaceableBase root and object;
    # confirm against the class hierarchy.
    for base in some_class.mro()[-3:0:-1]:
        if base is ReplaceableBase:
            continue
        if base is Configurable:
            continue
        if not issubclass(base, (Configurable, ReplaceableBase)):
            continue
        expand_args_fields(base, _do_not_process=_do_not_process)
        # Inherit bookkeeping from the (now processed) base class.
        if "_creation_functions" in base.__dict__:
            creation_functions.extend(base._creation_functions)
        if "_known_implementations" in base.__dict__:
            known_implementations.update(base._known_implementations)
        if "_processed_members" in base.__dict__:
            processed_members.update(base._processed_members)

    # Collect the annotated members of this class which need expansion.
    to_process: List[Tuple[str, Type, _ProcessType]] = []
    if "__annotations__" in some_class.__dict__:
        for name, type_ in some_class.__annotations__.items():
            underlying_and_process_type = _get_type_to_process(type_)
            if underlying_and_process_type is None:
                continue
            underlying_type, process_type = underlying_and_process_type
            to_process.append((name, underlying_type, process_type))

    # Perform the member-by-member transformation described in the docstring.
    for name, underlying_type, process_type in to_process:
        processed_members[name] = some_class.__annotations__[name]
        _process_member(
            name=name,
            type_=underlying_type,
            process_type=process_type,
            some_class=some_class,
            creation_functions=creation_functions,
            _do_not_process=_do_not_process,
            known_implementations=known_implementations,
        )

    # A create_ function defined more than once (here and in a base) is suspicious.
    for key, count in Counter(creation_functions).items():
        if count > 1:
            warnings.warn(f"Clash with {key} in a base class.")
    some_class._creation_functions = tuple(creation_functions)
    some_class._processed_members = processed_members
    some_class._known_implementations = known_implementations

    # Conclude with standard dataclass processing; eq=False keeps identity-based
    # equality/hash on the class.
    dataclasses.dataclass(eq=False)(some_class)
    _fixup_class_init(some_class)
    return some_class
def _get_type_to_process(type_) -> Optional[Tuple[Type, _ProcessType]]:
    """
    Decide whether a member annotated as `type_` must be expanded by
    expand_args_fields. Returns the underlying type together with the
    expansion flavor, or None when the annotation is left untouched.
    """
    if get_origin(type_) == Union:
        # Only Optional[X], i.e. a two-member Union containing None, is relevant.
        union_args = get_args(type_)
        if len(union_args) == 2 and any(a is type(None) for a in union_args):  # noqa: E721
            inner = union_args[0] if union_args[1] is type(None) else union_args[1]  # noqa: E721
            if isinstance(inner, type):
                if (
                    issubclass(inner, ReplaceableBase)
                    and ReplaceableBase in inner.__bases__
                ):
                    return inner, _ProcessType.OPTIONAL_REPLACEABLE
                if issubclass(inner, Configurable):
                    return inner, _ProcessType.OPTIONAL_CONFIGURABLE
        # Any other Union shape is not expandable.
        return None

    if not isinstance(type_, type):
        # e.g. Tuple, ClassVar, or other typing constructs.
        return None

    if issubclass(type_, ReplaceableBase) and ReplaceableBase in type_.__bases__:
        return type_, _ProcessType.REPLACEABLE

    if issubclass(type_, Configurable):
        return type_, _ProcessType.CONFIGURABLE

    return None
+ continue + known_implementations[derived_type.__name__] = derived_type + args_name = f"{name}_{derived_type.__name__}{ARGS_SUFFIX}" + if args_name in some_class.__annotations__: + raise ValueError( + f"Cannot generate {args_name} because it is already present." + ) + some_class.__annotations__[args_name] = dict + if hook is not None: + hook_closed = partial(hook, derived_type) + else: + hook_closed = None + setattr( + some_class, + args_name, + _get_default_args_field_from_registry( + base_class_wanted=type_, + name=derived_type.__name__, + _do_not_process=_do_not_process + (some_class,), + _hook=hook_closed, + ), + ) + else: + args_name = name + ARGS_SUFFIX + if args_name in some_class.__annotations__: + raise ValueError( + f"Cannot generate {args_name} because it is already present." + ) + if issubclass(type_, some_class) or type_ in _do_not_process: + raise ValueError(f"Cannot process {type_} inside {some_class}") + + some_class.__annotations__[args_name] = dict + if hook is not None: + hook_closed = partial(hook, type_) + else: + hook_closed = None + setattr( + some_class, + args_name, + get_default_args_field( + type_, + _do_not_process=_do_not_process + (some_class,), + _hook=hook_closed, + ), + ) + if process_type == _ProcessType.OPTIONAL_CONFIGURABLE: + enabled_name = name + ENABLED_SUFFIX + if enabled_name not in some_class.__annotations__: + raise ValueError( + f"{name} is an Optional[{type_.__name__}] member " + f"but there is no corresponding member {enabled_name}." 
+ ) + + creation_function_name = f"{CREATE_PREFIX}{name}" + if not hasattr(some_class, creation_function_name): + setattr( + some_class, + creation_function_name, + _default_create(name, type_, process_type), + ) + creation_functions.append(creation_function_name) + + creation_function_impl_name = f"{CREATE_PREFIX}{name}{IMPL_SUFFIX}" + if not hasattr(some_class, creation_function_impl_name): + setattr( + some_class, + creation_function_impl_name, + _default_create_impl(name, type_, process_type), + ) + + +def remove_unused_components(dict_: DictConfig) -> None: + """ + Assuming dict_ represents the state of a configurable, + modify it to remove all the portions corresponding to + pluggable parts which are not in use. + For example, if renderer_class_type is SignedDistanceFunctionRenderer, + the renderer_MultiPassEmissionAbsorptionRenderer_args will be + removed. Also, if chocolate_enabled is False, then chocolate_args will + be removed. + + Args: + dict_: (MODIFIED IN PLACE) a DictConfig instance + """ + keys = [key for key in dict_ if isinstance(key, str)] + suffix_length = len(TYPE_SUFFIX) + replaceables = [key[:-suffix_length] for key in keys if key.endswith(TYPE_SUFFIX)] + args_keys = [key for key in keys if key.endswith(ARGS_SUFFIX)] + for replaceable in replaceables: + selected_type = dict_[replaceable + TYPE_SUFFIX] + if selected_type is None: + expect = "" + else: + expect = replaceable + "_" + selected_type + ARGS_SUFFIX + with open_dict(dict_): + for key in args_keys: + if key.startswith(replaceable + "_") and key != expect: + del dict_[key] + + suffix_length = len(ENABLED_SUFFIX) + enableables = [key[:-suffix_length] for key in keys if key.endswith(ENABLED_SUFFIX)] + for enableable in enableables: + enabled = dict_[enableable + ENABLED_SUFFIX] + if not enabled: + with open_dict(dict_): + dict_.pop(enableable + ARGS_SUFFIX, None) + + for key in dict_: + if isinstance(dict_.get(key), DictConfig): + remove_unused_components(dict_[key]) diff --git 
def cleanup_eval_depth(
    point_cloud: Pointclouds,
    camera: CamerasBase,
    depth: torch.Tensor,
    mask: torch.Tensor,
    sigma: float = 0.01,
    image=None,
):
    """
    Compute a mask of depth-map pixels whose depth agrees with a point cloud.

    Each point of `point_cloud` is projected into `depth`; a point is accepted
    when the absolute difference between its view-space depth and the sampled
    depth-map value is at most `sigma` times the mask-weighted standard
    deviation of `depth`. Accepted points are scattered back onto the pixel
    grid to form the output mask.

    Args:
        point_cloud: A batch of point clouds; the padded representation is used.
        camera: Cameras used to project the points.
        depth: Depth maps of shape (ba, 1, H, W).
        mask: Per-pixel weights used for the mean/std statistics of `depth`;
            reshaped to (ba, H*W).
        sigma: Multiplier of the depth standard deviation that defines the
            acceptance threshold.
        image: Unused; kept for backward compatibility (it was consumed only
            by debugging code which has been removed).

    Returns:
        good_depth_mask: Float tensor of shape (ba, 1, H, W), 1.0 wherever at
        least one accepted point projects onto the pixel.
    """
    ba, _, H, W = depth.shape

    pcl = point_cloud.points_padded()
    n_pts = point_cloud.num_points_per_cloud()
    # Validity mask over the padded point dimension: 1 for real points, 0 for padding.
    pcl_mask = (
        torch.arange(pcl.shape[1], dtype=torch.int64, device=pcl.device)[None]
        < n_pts[:, None]
    ).type_as(pcl)

    # Project points to the image plane and get their view-space depth.
    pcl_proj = camera.transform_points(pcl, eps=1e-2)[..., :-1]
    pcl_depth = camera.get_world_to_view_transform().transform_points(pcl)[..., -1]

    # Pack the depth map together with flat pixel indices so both can be
    # sampled at the projected point locations in a single grid_sample call.
    depth_and_idx = torch.cat(
        (
            depth,
            torch.arange(H * W).view(1, 1, H, W).expand(ba, 1, H, W).type_as(depth),
        ),
        dim=1,
    )

    # NOTE(review): the negation of pcl_proj presumably converts PyTorch3D's
    # screen/NDC sign convention to grid_sample's — confirm against the camera
    # conventions in use.
    depth_and_idx_sampled = Fu.grid_sample(
        depth_and_idx, -pcl_proj[:, None], mode="nearest"
    )[:, :, 0].view(ba, 2, -1)

    depth_sampled, idx_sampled = depth_and_idx_sampled.split([1, 1], dim=1)
    df = (depth_sampled[:, 0] - pcl_depth).abs()

    # the threshold is a sigma-multiple of the standard deviation of the depth
    mu = wmean(depth.view(ba, -1, 1), mask.view(ba, -1)).view(ba, 1)
    std = (
        # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
        wmean((depth.view(ba, -1) - mu).view(ba, -1, 1) ** 2, mask.view(ba, -1))
        .clamp(1e-4)
        .sqrt()
        .view(ba, -1)
    )
    good_df_thr = std * sigma
    good_depth = (df <= good_df_thr).float() * pcl_mask

    # Scatter the per-point acceptance back onto the pixel raster; a pixel is
    # good if any accepted point lands on it.
    good_depth_raster = torch.zeros_like(depth).view(ba, -1)
    good_depth_raster.scatter_add_(1, torch.round(idx_sampled[:, 0]).long(), good_depth)

    good_depth_mask = (good_depth_raster.view(ba, 1, H, W) > 0).float()

    return good_depth_mask
def generate_eval_video_cameras(
    train_cameras,
    n_eval_cams: int = 100,
    trajectory_type: str = "figure_eight",
    trajectory_scale: float = 0.2,
    scene_center: Tuple[float, float, float] = (0.0, 0.0, 0.0),
    up: Tuple[float, float, float] = (0.0, 0.0, 1.0),
    focal_length: Optional[torch.Tensor] = None,
    principal_point: Optional[torch.Tensor] = None,
    time: Optional[torch.Tensor] = None,
    infer_up_as_plane_normal: bool = True,
    traj_offset: Optional[Tuple[float, float, float]] = None,
    traj_offset_canonical: Optional[Tuple[float, float, float]] = None,
    remove_outliers_rate: float = 0.0,
) -> PerspectiveCameras:
    """
    Generate a camera trajectory rendering a scene from multiple viewpoints.

    Args:
        train_cameras: The set of cameras from the training dataset object.
        n_eval_cams: Number of cameras in the trajectory.
        trajectory_type: The type of the camera trajectory. Can be one of:
            circular_lsq_fit: Camera centers follow a trajectory obtained
                by fitting a 3D circle to train_cameras centers.
                All cameras are looking towards scene_center.
            figure_eight: Figure-of-8 trajectory around the center of the
                central camera of the training dataset.
            trefoil_knot: Same as 'figure_eight', but the trajectory has a shape
                of a trefoil knot (https://en.wikipedia.org/wiki/Trefoil_knot).
            figure_eight_knot: Same as 'figure_eight', but the trajectory has a shape
                of a figure-eight knot
                (https://en.wikipedia.org/wiki/Figure-eight_knot_(mathematics)).
        trajectory_scale: The extent of the trajectory.
        scene_center: The center of the scene in world coordinates which all
            the cameras from the generated trajectory look at.
        up: The "up" vector of the scene (=the normal of the scene floor).
            Active for `trajectory_type="circular_lsq_fit"`.
        focal_length: The focal length of the output cameras. If `None`, an average
            focal length of the train_cameras is used.
        principal_point: The principal point of the output cameras. If `None`, an
            average principal point of all train_cameras is used.
        time: Defines the total length of the generated camera trajectory. All possible
            trajectories (set with the `trajectory_type` argument) are periodic with
            the period of `time=2pi`.
            E.g. setting `trajectory_type=circular_lsq_fit` and `time=4pi`, will
            generate a trajectory of camera poses rotating the total of 720 deg
            around the object.
        infer_up_as_plane_normal: Infer the camera `up` vector automatically as the
            normal of the plane fit to the optical centers of `train_cameras`.
        traj_offset: 3D offset vector added to each point of the trajectory.
        traj_offset_canonical: 3D offset vector expressed in the local coordinates of
            the estimated trajectory which is added to each point of the trajectory.
        remove_outliers_rate: the number between 0 and 1; if > 0,
            some outlier train_cameras will be removed from trajectory estimation;
            the filtering is based on camera center coordinates; top and
            bottom `remove_outliers_rate` cameras on each dimension are removed.

    Returns:
        Batch of camera instances which can be used as the test dataset

    Raises:
        ValueError: If `trajectory_type` is not one of the supported values.
    """
    if remove_outliers_rate > 0.0:
        train_cameras = _remove_outlier_cameras(train_cameras, remove_outliers_rate)

    if trajectory_type in ("figure_eight", "trefoil_knot", "figure_eight_knot"):
        cam_centers = train_cameras.get_camera_center()
        # get the nearest camera center to the mean of centers
        mean_camera_idx = (
            ((cam_centers - cam_centers.mean(dim=0)[None]) ** 2)
            .sum(dim=1)
            .min(dim=0)
            .indices
        )
        # generate the knot trajectory in canonical coords
        if time is None:
            time = torch.linspace(0, 2 * math.pi, n_eval_cams + 1)[:n_eval_cams]
        else:
            assert time.numel() == n_eval_cams
        if trajectory_type == "trefoil_knot":
            traj = _trefoil_knot(time)
        elif trajectory_type == "figure_eight_knot":
            traj = _figure_eight_knot(time)
        elif trajectory_type == "figure_eight":
            traj = _figure_eight(time)
        else:
            raise ValueError(f"bad trajectory type: {trajectory_type}")
        # place the whole knot below its highest point
        traj[:, 2] -= traj[:, 2].max()

        # transform the canonical knot to the coord frame of the mean camera
        mean_camera = PerspectiveCameras(
            **{
                k: getattr(train_cameras, k)[[int(mean_camera_idx)]]
                for k in ("focal_length", "principal_point", "R", "T")
            }
        )
        traj_trans = Scale(cam_centers.std(dim=0).mean() * trajectory_scale).compose(
            mean_camera.get_world_to_view_transform().inverse()
        )

        if traj_offset_canonical is not None:
            traj_trans = traj_trans.translate(
                torch.FloatTensor(traj_offset_canonical)[None].to(traj)
            )

        traj = traj_trans.transform_points(traj)

        plane_normal = _fit_plane(cam_centers)[:, 0]
        if infer_up_as_plane_normal:
            up = _disambiguate_normal(plane_normal, up)

    elif trajectory_type == "circular_lsq_fit":
        # fit a 3D circle to the camera centers; the fit also yields the
        # normal of the plane the centers (approximately) lie in
        cam_centers = train_cameras.get_camera_center()

        if time is not None:
            angle = time
        else:
            angle = torch.linspace(0, 2.0 * math.pi, n_eval_cams).to(cam_centers)

        fit = fit_circle_in_3d(
            cam_centers,
            angles=angle,
            offset=(
                angle.new_tensor(traj_offset_canonical)
                if traj_offset_canonical is not None
                else None
            ),
            up=angle.new_tensor(up),
        )
        traj = fit.generated_points

        # scale the trajectory about its centroid
        _t_mu = traj.mean(dim=0, keepdim=True)
        traj = (traj - _t_mu) * trajectory_scale + _t_mu

        plane_normal = fit.normal

        if infer_up_as_plane_normal:
            up = _disambiguate_normal(plane_normal, up)

    else:
        raise ValueError(f"Unknown trajectory_type {trajectory_type}.")

    if traj_offset is not None:
        traj = traj + torch.FloatTensor(traj_offset)[None].to(traj)

    # point all cameras towards the center of the scene
    R, T = look_at_view_transform(
        eye=traj,
        at=(scene_center,),  # (1, 3)
        up=(up,),  # (1, 3)
        device=traj.device,
    )

    # get the average focal length and principal point
    if focal_length is None:
        focal_length = train_cameras.focal_length.mean(dim=0).repeat(n_eval_cams, 1)
    if principal_point is None:
        principal_point = train_cameras.principal_point.mean(dim=0).repeat(
            n_eval_cams, 1
        )

    test_cameras = PerspectiveCameras(
        focal_length=focal_length,
        principal_point=principal_point,
        R=R,
        T=T,
        device=focal_length.device,
    )

    return test_cameras
+ clean_cameras = cameras[keep_indices] + logger.info( + "Filtered outlier cameras when estimating the trajectory: " + f"{len(cameras)} → {len(clean_cameras)}" + ) + # pyre-fixme[7]: Expected `PerspectiveCameras` but got `CamerasBase`. + return clean_cameras + + +def _disambiguate_normal(normal, up): + up_t = torch.tensor(up).to(normal) + flip = (up_t * normal).sum().sign() + up = normal * flip + up = up.tolist() + return up + + +def _fit_plane(x): + x = x - x.mean(dim=0)[None] + cov = (x.t() @ x) / x.shape[0] + _, e_vec = torch.linalg.eigh(cov) + return e_vec + + +def _visdom_plot_scene( + train_cameras, + test_cameras, +) -> None: + from pytorch3d.vis.plotly_vis import plot_scene + + p = plot_scene( + { + "scene": { + "train_cams": train_cameras, + "test_cams": test_cameras, + } + } + ) + from visdom import Visdom + + viz = Visdom() + viz.plotlyplot(p, env="cam_traj_dbg", win="cam_trajs") + + +def _figure_eight_knot(t: torch.Tensor, z_scale: float = 0.5): + x = (2 + (2 * t).cos()) * (3 * t).cos() + y = (2 + (2 * t).cos()) * (3 * t).sin() + z = (4 * t).sin() * z_scale + return torch.stack((x, y, z), dim=-1) + + +def _trefoil_knot(t: torch.Tensor, z_scale: float = 0.5): + x = t.sin() + 2 * (2 * t).sin() + y = t.cos() - 2 * (2 * t).cos() + z = -(3 * t).sin() * z_scale + return torch.stack((x, y, z), dim=-1) + + +def _figure_eight(t: torch.Tensor, z_scale: float = 0.5): + x = t.cos() + y = (2 * t).sin() / 2 + z = t.sin() * z_scale + return torch.stack((x, y, z), dim=-1) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/tools/image_utils.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/tools/image_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7e8b3b04dbb9c1c9fd59d88cbdef0cf39623bbf2 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/tools/image_utils.py @@ -0,0 
def mask_background(
    image_rgb: torch.Tensor,
    mask_fg: torch.Tensor,
    dim_color: int = 1,
    bg_color: Union[torch.Tensor, Sequence, str, float] = 0.0,
) -> torch.Tensor:
    """
    Mask the background input image tensor `image_rgb` with `bg_color`.
    The background regions are obtained from the binary foreground segmentation
    mask `mask_fg`.

    Args:
        image_rgb: A 4D image tensor whose color dimension (of size 3) is
            `dim_color`.
        mask_fg: Foreground mask, broadcastable against `image_rgb`; it is cast
            to `image_rgb`'s dtype, so soft masks blend linearly.
        dim_color: Index of the color-channel dimension of `image_rgb`.
        bg_color: The color to paint the background with: a 3-element tensor,
            a 3-element sequence, a single float (used for all 3 channels), or
            one of the strings "white" / "black".

    Returns:
        `image_rgb` with background pixels replaced by `bg_color`.

    Raises:
        ValueError: If `bg_color` is of an unsupported type or string value.
    """
    # Broadcasting shape which puts the 3 color channels on `dim_color`.
    tgt_view = [1, 1, 1, 1]
    tgt_view[dim_color] = 3
    # obtain the background color tensor
    if isinstance(bg_color, torch.Tensor):
        # fix: respect dim_color here as well; previously this branch
        # hard-coded view(1, 3, 1, 1), i.e. the channel dim was always 1
        bg_color_t = bg_color.view(*tgt_view).clone().to(image_rgb)
    elif isinstance(bg_color, (float, tuple, list)):
        if isinstance(bg_color, float):
            bg_color = [bg_color] * 3
        bg_color_t = torch.tensor(
            bg_color, device=image_rgb.device, dtype=image_rgb.dtype
        ).view(*tgt_view)
    elif isinstance(bg_color, str):
        if bg_color == "white":
            bg_color_t = image_rgb.new_ones(tgt_view)
        elif bg_color == "black":
            bg_color_t = image_rgb.new_zeros(tgt_view)
        else:
            raise ValueError(_invalid_color_error_msg(bg_color))
    else:
        raise ValueError(_invalid_color_error_msg(bg_color))
    # cast to the image_rgb's type
    mask_fg = mask_fg.type_as(image_rgb)
    # mask the bg
    image_masked = mask_fg * image_rgb + (1 - mask_fg) * bg_color_t
    return image_masked
def eval_depth(
    pred: torch.Tensor,
    gt: torch.Tensor,
    crop: int = 1,
    mask: Optional[torch.Tensor] = None,
    get_best_scale: bool = True,
    mask_thr: float = 0.5,
    best_scale_clamp_thr: float = 1e-4,
    use_disparity: bool = False,
    disparity_eps: float = 1e-4,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Evaluate the depth error between the prediction `pred` and the ground
    truth `gt`.

    Args:
        pred: A tensor of shape (N, 1, H, W) denoting the predicted depth maps.
        gt: A tensor of shape (N, 1, H, W) denoting the ground truth depth maps.
        crop: The number of pixels to crop from the border.
        mask: A mask denoting the valid regions of the gt depth.
        get_best_scale: If `True`, estimates a scaling factor of the predicted depth
            that yields the best mean squared error between `pred` and `gt`.
            This is typically enabled for cases where predicted reconstructions
            are inherently defined up to an arbitrary scaling factor.
        mask_thr: A constant used to threshold the `mask` to specify the valid
            regions.
        best_scale_clamp_thr: The threshold for clamping the divisor in best
            scale estimation.
        use_disparity: If `True`, the errors are computed between inverse
            ("disparity") values of `pred` and `gt` instead of raw depths,
            with the scale re-estimated in disparity space.
        disparity_eps: Epsilon added to the denominator when converting
            depth to disparity.

    Returns:
        mse_depth: Mean squared error between `pred` and `gt`, per batch
            element, averaged over the valid (gt > 0) pixels.
        abs_depth: Mean absolute difference between `pred` and `gt`, per batch
            element, averaged over the valid (gt > 0) pixels.
    """

    # chuck out the border
    if crop > 0:
        gt = gt[:, :, crop:-crop, crop:-crop]
        pred = pred[:, :, crop:-crop, crop:-crop]

    if mask is not None:
        # mult gt by mask; pixels outside the mask become 0 and hence invalid
        if crop > 0:
            mask = mask[:, :, crop:-crop, crop:-crop]
        gt = gt * (mask > mask_thr).float()

    # validity mask: only pixels with positive gt depth count
    dmask = (gt > 0.0).float()
    dmask_mass = torch.clamp(dmask.sum((1, 2, 3)), 1e-4)

    if get_best_scale:
        # mult preds by a scalar "scale_best"
        # s.t. we get best possible mse error
        scale_best = estimate_depth_scale_factor(pred, gt, dmask, best_scale_clamp_thr)
        pred = pred * scale_best[:, None, None, None]
    if use_disparity:
        gt = torch.div(1.0, (gt + disparity_eps))
        pred = torch.div(1.0, (pred + disparity_eps))
        # re-estimate the scale in disparity space (gradient detached)
        scale_best = estimate_depth_scale_factor(
            pred, gt, dmask, best_scale_clamp_thr
        ).detach()
        pred = pred * scale_best[:, None, None, None]

    df = gt - pred

    # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
    mse_depth = (dmask * (df**2)).sum((1, 2, 3)) / dmask_mass
    abs_depth = (dmask * df.abs()).sum((1, 2, 3)) / dmask_mass

    return mse_depth, abs_depth
+ """ + mse = calc_mse(x, y, mask=mask) + psnr = torch.log10(mse.clamp(1e-10)) * (-10.0) + return psnr + + +def calc_mse( + x: torch.Tensor, + y: torch.Tensor, + mask: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + Calculates the mean square error between tensors `x` and `y`. + """ + if mask is None: + # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. + return torch.mean((x - y) ** 2) + else: + # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`. + return (((x - y) ** 2) * mask).sum() / mask.expand_as(x).sum().clamp(1e-5) + + +def calc_bce( + pred: torch.Tensor, + gt: torch.Tensor, + equal_w: bool = True, + pred_eps: float = 0.01, + mask: Optional[torch.Tensor] = None, + lerp_bound: Optional[float] = None, + pred_logits: bool = False, +) -> torch.Tensor: + """ + Calculates the binary cross entropy. + """ + if pred_eps > 0.0: + # up/low bound the predictions + pred = torch.clamp(pred, pred_eps, 1.0 - pred_eps) + + if mask is None: + mask = torch.ones_like(gt) + + if equal_w: + mask_fg = (gt > 0.5).float() * mask + mask_bg = (1 - mask_fg) * mask + weight = mask_fg / mask_fg.sum().clamp(1.0) + mask_bg / mask_bg.sum().clamp(1.0) + # weight sum should be at this point ~2 + # pyre-fixme[58]: `/` is not supported for operand types `int` and `Tensor`. 
+ weight = weight * (weight.numel() / weight.sum().clamp(1.0)) + else: + weight = torch.ones_like(gt) * mask + + if lerp_bound is not None: + # binary_cross_entropy_lerp requires pred to be in [0, 1] + if pred_logits: + pred = F.sigmoid(pred) + + return binary_cross_entropy_lerp(pred, gt, weight, lerp_bound) + else: + if pred_logits: + loss = F.binary_cross_entropy_with_logits( + pred, + gt, + reduction="none", + weight=weight, + ) + else: + loss = F.binary_cross_entropy(pred, gt, reduction="none", weight=weight) + + return loss.mean() + + +def binary_cross_entropy_lerp( + pred: torch.Tensor, + gt: torch.Tensor, + weight: torch.Tensor, + lerp_bound: float, +): + """ + Binary cross entropy which avoids exploding gradients by linearly + extrapolating the log function for log(1-pred) mad log(pred) whenever + pred or 1-pred is smaller than lerp_bound. + """ + loss = log_lerp(1 - pred, lerp_bound) * (1 - gt) + log_lerp(pred, lerp_bound) * gt + loss_reduced = -(loss * weight).sum() / weight.sum().clamp(1e-4) + return loss_reduced + + +def log_lerp(x: torch.Tensor, b: float): + """ + Linearly extrapolated log for x < b. + """ + assert b > 0 + return torch.where(x >= b, x.log(), math.log(b) + (x - b) / b) + + +def rgb_l1( + pred: torch.Tensor, target: torch.Tensor, mask: Optional[torch.Tensor] = None +) -> torch.Tensor: + """ + Calculates the mean absolute error between the predicted colors `pred` + and ground truth colors `target`. + """ + if mask is None: + mask = torch.ones_like(pred[:, :1]) + return ((pred - target).abs() * mask).sum(dim=(1, 2, 3)) / mask.sum( + dim=(1, 2, 3) + ).clamp(1) + + +def huber(dfsq: torch.Tensor, scaling: float = 0.03) -> torch.Tensor: + """ + Calculates the huber function of the input squared error `dfsq`. + The function smoothly transitions from a region with unit gradient + to a hyperbolic function at `dfsq=scaling`. 
+ """ + loss = (safe_sqrt(1 + dfsq / (scaling * scaling), eps=1e-4) - 1) * scaling + return loss + + +def neg_iou_loss( + predict: torch.Tensor, + target: torch.Tensor, + mask: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + This is a great loss because it emphasizes on the active + regions of the predict and targets + """ + return 1.0 - iou(predict, target, mask=mask) + + +def safe_sqrt(A: torch.Tensor, eps: float = 1e-4) -> torch.Tensor: + """ + performs safe differentiable sqrt + """ + return (torch.clamp(A, float(0)) + eps).sqrt() + + +def iou( + predict: torch.Tensor, + target: torch.Tensor, + mask: Optional[torch.Tensor] = None, +) -> torch.Tensor: + """ + This is a great loss because it emphasizes on the active + regions of the predict and targets + """ + dims = tuple(range(predict.dim())[1:]) + if mask is not None: + predict = predict * mask + target = target * mask + intersect = (predict * target).sum(dims) + union = (predict + target - predict * target).sum(dims) + 1e-4 + return (intersect / union).sum() / intersect.numel() + + +def beta_prior(pred: torch.Tensor, cap: float = 0.1) -> torch.Tensor: + if cap <= 0.0: + raise ValueError("capping should be positive to avoid unbound loss") + + min_value = math.log(cap) + math.log(cap + 1.0) + return (torch.log(pred + cap) + torch.log(1.0 - pred + cap)).mean() - min_value diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/tools/model_io.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/tools/model_io.py new file mode 100644 index 0000000000000000000000000000000000000000..d7942d0c9aacad995c6bcebdf7c0b13a0e925a45 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/tools/model_io.py @@ -0,0 +1,175 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

import glob
import logging
import os
import shutil
import tempfile
from typing import Optional

import torch


logger = logging.getLogger(__name__)


def load_stats(flstats):
    """Load a `Stats` object from `flstats`; returns None if the file is missing."""
    # local import avoids a circular dependency with the stats module
    from pytorch3d.implicitron.tools.stats import Stats

    if not os.path.isfile(flstats):
        return None

    return Stats.load(flstats)


def get_model_path(fl) -> str:
    """Return the model weights path corresponding to checkpoint stem `fl`."""
    fl = os.path.splitext(fl)[0]
    flmodel = "%s.pth" % fl
    return flmodel


def get_optimizer_path(fl) -> str:
    """Return the optimizer state path corresponding to checkpoint stem `fl`."""
    fl = os.path.splitext(fl)[0]
    flopt = "%s_opt.pth" % fl
    return flopt


def get_stats_path(fl, eval_results: bool = False) -> str:
    """
    Return the stats path corresponding to checkpoint stem `fl`.

    With `eval_results=True`, looks for an existing `stats_test_2.jgz` /
    `stats_test.jgz` next to `fl` and falls back to `stats_test.jgz`.
    """
    fl = os.path.splitext(fl)[0]
    if not eval_results:
        return "%s_stats.jgz" % fl
    for postfix in ("_2", ""):
        flstats = os.path.join(os.path.dirname(fl), f"stats_test{postfix}.jgz")
        if os.path.isfile(flstats):
            break
    # `flstats` is always bound here: the loop body runs at least once
    return flstats


def safe_save_model(model, stats, fl, optimizer=None, cfg=None) -> None:
    """
    This functions stores model files safely so that no model files exist on the
    file system in case the saving procedure gets interrupted.

    This is done first by saving the model files to a temporary directory followed
    by (atomic) moves to the target location. Note, that this can still result
    in a corrupt set of model files in case interruption happens while performing
    the moves. It is however quite improbable that a crash would occur right at
    this time.
    """
    logger.info(f"saving model files safely to {fl}")
    # first store everything to a tmpdir
    with tempfile.TemporaryDirectory() as tmpdir:
        tmpfl = os.path.join(tmpdir, os.path.split(fl)[-1])
        stored_tmp_fls = save_model(model, stats, tmpfl, optimizer=optimizer, cfg=cfg)
        tgt_fls = [
            (
                os.path.join(os.path.split(fl)[0], os.path.split(stored_fl)[-1])
                if (stored_fl is not None)
                else None
            )
            for stored_fl in stored_tmp_fls
        ]
        # then move from the tmpdir to the right location
        for stored_fl, tgt_fl in zip(stored_tmp_fls, tgt_fls):
            if tgt_fl is None:
                continue
            shutil.move(stored_fl, tgt_fl)


def save_model(model, stats, fl, optimizer=None, cfg=None):
    """
    Save model weights, stats and (optionally) optimizer state next to `fl`.

    Returns:
        (flstats, flmodel, flopt): stored paths; `flopt` is None when no
        optimizer was given.
    """
    flstats = get_stats_path(fl)
    flmodel = get_model_path(fl)
    logger.info("saving model to %s" % flmodel)
    torch.save(model.state_dict(), flmodel)
    flopt = None
    if optimizer is not None:
        flopt = get_optimizer_path(fl)
        logger.info("saving optimizer to %s" % flopt)
        torch.save(optimizer.state_dict(), flopt)
    logger.info("saving model stats to %s" % flstats)
    stats.save(flstats)

    return flstats, flmodel, flopt


def save_stats(stats, fl, cfg=None):
    """Save only the stats object next to `fl`; returns the stored path."""
    flstats = get_stats_path(fl)
    logger.info("saving model stats to %s" % flstats)
    stats.save(flstats)
    return flstats


def load_model(fl, map_location: Optional[dict]):
    """
    Load model weights, stats and (optionally) optimizer state for
    checkpoint stem `fl`.

    Returns:
        (model_state_dict, stats, optimizer_state_or_None)
    """
    flstats = get_stats_path(fl)
    flmodel = get_model_path(fl)
    flopt = get_optimizer_path(fl)
    # weights_only=True avoids arbitrary code execution on untrusted files
    model_state_dict = torch.load(flmodel, map_location=map_location, weights_only=True)
    stats = load_stats(flstats)
    if os.path.isfile(flopt):
        optimizer = torch.load(flopt, map_location=map_location, weights_only=True)
    else:
        optimizer = None

    return model_state_dict, stats, optimizer


def parse_epoch_from_model_path(model_path) -> int:
    """Extract the integer epoch from a `model_epoch_XXXXXXXX.pth` path."""
    return int(
        os.path.split(model_path)[-1].replace(".pth", "").replace("model_epoch_", "")
    )


def get_checkpoint(exp_dir, epoch):
    """Return the canonical checkpoint path for `epoch` inside `exp_dir`."""
    fl = os.path.join(exp_dir, "model_epoch_%08d.pth" % epoch)
    return fl


def find_last_checkpoint(
    exp_dir, any_path: bool = False, all_checkpoints: bool = False
):
    """
    Find checkpoint files in `exp_dir`.

    Args:
        exp_dir: Directory to search.
        any_path: Also accept stats/optimizer files as evidence of a checkpoint.
        all_checkpoints: Return the sorted list of all checkpoint paths instead
            of just the last one.

    Returns:
        None when nothing is found; otherwise a `.pth` path (or list of paths).
    """
    if any_path:
        exts = [".pth", "_stats.jgz", "_opt.pth"]
    else:
        exts = [".pth"]

    fls = []
    ext = exts[0]
    for ext in exts:
        # glob.escape protects against special characters in exp_dir
        fls = sorted(
            glob.glob(
                os.path.join(glob.escape(exp_dir), "model_epoch_" + "[0-9]" * 8 + ext)
            )
        )
        if len(fls) > 0:
            break
    if len(fls) == 0:
        return None
    if all_checkpoints:
        return [f[0 : -len(ext)] + ".pth" for f in fls]
    return fls[-1][0 : -len(ext)] + ".pth"


def purge_epoch(exp_dir, epoch) -> None:
    """Delete the model/optimizer/stats files of `epoch` from `exp_dir`, if present."""
    model_path = get_checkpoint(exp_dir, epoch)

    for file_path in [
        model_path,
        get_optimizer_path(model_path),
        get_stats_path(model_path),
    ]:
        if os.path.isfile(file_path):
            logger.info("deleting %s" % file_path)
            os.remove(file_path)
# pyre-unsafe


from typing import cast, Optional, Tuple

import torch
import torch.nn.functional as Fu
from pytorch3d.renderer import (
    AlphaCompositor,
    NDCMultinomialRaysampler,
    PointsRasterizationSettings,
    PointsRasterizer,
    ray_bundle_to_ray_points,
)
from pytorch3d.renderer.cameras import CamerasBase
from pytorch3d.structures import Pointclouds


def get_rgbd_point_cloud(
    camera: CamerasBase,
    image_rgb: torch.Tensor,
    depth_map: torch.Tensor,
    mask: Optional[torch.Tensor] = None,
    mask_thr: float = 0.5,
    *,
    euclidean: bool = False,
) -> Pointclouds:
    """
    Given a batch of images, depths, masks and cameras, generate a single colored
    point cloud by unprojecting depth maps and coloring with the source
    pixel colors.

    Arguments:
        camera: Batch of N cameras
        image_rgb: Batch of N images of shape (N, C, H, W).
            For RGB images C=3.
        depth_map: Batch of N depth maps of shape (N, 1, H', W').
            Only positive values here are used to generate points.
            If euclidean=False (default) this contains perpendicular distances
            from each point to the camera plane (z-values).
            If euclidean=True, this contains distances from each point to
            the camera center.
        mask: If provided, batch of N masks of the same shape as depth_map.
            If provided, values in depth_map are ignored if the corresponding
            element of mask is smaller than mask_thr.
        mask_thr: used in interpreting mask
        euclidean: used in interpreting depth_map.

    Returns:
        Pointclouds object containing one point cloud.
    """
    imh, imw = depth_map.shape[2:]

    # convert the depth maps to point clouds using the grid ray sampler;
    # one unit-length ray per pixel is emitted and then re-scaled by the
    # per-pixel depth via `_replace(lengths=...)`
    pts_3d = ray_bundle_to_ray_points(
        NDCMultinomialRaysampler(
            image_width=imw,
            image_height=imh,
            n_pts_per_ray=1,
            min_depth=1.0,
            max_depth=1.0,
            unit_directions=euclidean,
        )(camera)._replace(lengths=depth_map[:, 0, ..., None])
    )

    # keep only pixels with positive depth (and, optionally, a high-enough mask)
    pts_mask = depth_map > 0.0
    if mask is not None:
        pts_mask *= mask > mask_thr
    pts_mask = pts_mask.reshape(-1)

    pts_3d = pts_3d.reshape(-1, 3)[pts_mask]

    # resample colors to the depth-map resolution before gathering
    pts_colors = torch.nn.functional.interpolate(
        image_rgb,
        size=[imh, imw],
        mode="bilinear",
        align_corners=False,
    )
    pts_colors = pts_colors.permute(0, 2, 3, 1).reshape(-1, image_rgb.shape[1])[
        pts_mask
    ]

    # all batch elements are merged into a single point cloud
    return Pointclouds(points=pts_3d[None], features=pts_colors[None])


def render_point_cloud_pytorch3d(
    camera,
    point_cloud,
    render_size: Tuple[int, int],
    point_radius: float = 0.03,
    topk: int = 10,
    eps: float = 1e-2,
    bg_color=None,
    bin_size: Optional[int] = None,
    **kwargs,
):
    """
    Render a point cloud with the PyTorch3D point rasterizer and alpha
    compositor.

    Returns:
        (data_rendered, render_mask, depth_rendered) — feature, opacity and
        depth images of size `render_size`.
    """
    # feature dimension
    featdim = point_cloud.features_packed().shape[-1]

    # move to the camera coordinates; using identity cameras in the renderer
    point_cloud = _transform_points(camera, point_cloud, eps, **kwargs)
    camera_trivial = camera.clone()
    camera_trivial.R[:] = torch.eye(3)
    camera_trivial.T *= 0.0

    # heuristic: coarse binning for large renders unless explicitly overridden
    bin_size = (
        bin_size
        if bin_size is not None
        else (64 if int(max(render_size)) > 1024 else None)
    )
    rasterizer = PointsRasterizer(
        cameras=camera_trivial,
        raster_settings=PointsRasterizationSettings(
            image_size=render_size,
            radius=point_radius,
            points_per_pixel=topk,
            bin_size=bin_size,
        ),
    )

    fragments = rasterizer(point_cloud, **kwargs)

    # Construct weights based on the distance of a point to the true point.
    # However, this could be done differently: e.g. predicted as opposed
    # to a function of the weights.
    r = rasterizer.raster_settings.radius

    # set up the blending weights
    dists2 = fragments.dists
    weights = 1 - dists2 / (r * r)
    # invalid fragments have idx == -1; zero their weights out
    ok = cast(torch.BoolTensor, (fragments.idx >= 0)).float()

    weights = weights * ok

    fragments_prm = fragments.idx.long().permute(0, 3, 1, 2)
    weights_prm = weights.permute(0, 3, 1, 2)
    images = AlphaCompositor()(
        fragments_prm,
        weights_prm,
        point_cloud.features_packed().permute(1, 0),
        background_color=bg_color if bg_color is not None else [0.0] * featdim,
        **kwargs,
    )

    # get the depths ...
    # weighted_fs[b,c,i,j] = sum_k cum_alpha_k * features[c,pointsidx[b,k,i,j]]
    # cum_alpha_k = alphas[b,k,i,j] * prod_l=0..k-1 (1 - alphas[b,l,i,j])
    cumprod = torch.cumprod(1 - weights, dim=-1)
    cumprod = torch.cat((torch.ones_like(cumprod[..., :1]), cumprod[..., :-1]), dim=-1)
    depths = (weights * cumprod * fragments.zbuf).sum(dim=-1)
    # add the rendering mask
    render_mask = -torch.prod(1.0 - weights, dim=-1) + 1.0

    # cat depths and render mask
    rendered_blob = torch.cat((images, depths[:, None], render_mask[:, None]), dim=1)

    # reshape back
    rendered_blob = Fu.interpolate(
        rendered_blob,
        size=tuple(render_size),
        mode="bilinear",
        align_corners=False,
    )

    data_rendered, depth_rendered, render_mask = rendered_blob.split(
        [rendered_blob.shape[1] - 2, 1, 1],
        dim=1,
    )

    return data_rendered, render_mask, depth_rendered


def _signed_clamp(x, eps):
    # clamp |x| from below by eps while keeping the sign; x == 0 maps to +eps
    sign = x.sign() + (x == 0.0).type_as(x)
    x_clamp = sign * torch.clamp(x.abs(), eps)
    return x_clamp


def _transform_points(cameras, point_clouds, eps, **kwargs):
    # move the padded points into view coordinates and clamp depths away
    # from zero so the subsequent projection is numerically stable
    pts_world = point_clouds.points_padded()
    pts_view = cameras.get_world_to_view_transform(**kwargs).transform_points(
        pts_world, eps=eps
    )
    # it is crucial to actually clamp the points as well ...
    pts_view = torch.cat(
        (pts_view[..., :-1], _signed_clamp(pts_view[..., -1:], eps)), dim=-1
    )
    point_clouds = point_clouds.update_padded(pts_view)
    return point_clouds


# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

import math
from typing import Optional, Tuple

import pytorch3d

import torch
from pytorch3d.ops import packed_to_padded
from pytorch3d.renderer import PerspectiveCameras
from pytorch3d.structures import Pointclouds

from .point_cloud_utils import render_point_cloud_pytorch3d


@torch.no_grad()
def rasterize_sparse_ray_bundle(
    ray_bundle: "pytorch3d.implicitron.models.renderer.base.ImplicitronRayBundle",
    features: torch.Tensor,
    image_size_hw: Tuple[int, int],
    depth: torch.Tensor,
    masks: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """
    Rasterizes sparse features corresponding to the coordinates defined by
    the rays in the bundle.

    Args:
        ray_bundle: ray bundle object with B x ... x 2 pixel coordinates,
            it can be packed.
        features: B x ... x C tensor containing per-point rendered features.
        image_size_hw: Tuple[image_height, image_width] containing
            the size of rasterized image.
        depth: B x ... x 1 tensor containing per-point rendered depth.
        masks: B x ... x 1 tensor containing the alpha mask of the
            rendered features.

    Returns:
        - image_render: B x C x H x W tensor of rasterized features
        - depths_render: B x 1 x H x W tensor of rasterized depth maps
        - masks_render: B x 1 x H x W tensor of opacities after splatting
    """
    # Flatten the features and xy locations.
    # depth is appended as an extra channel so it is splatted together
    # with the features in a single rasterization pass
    features_depth_ras = torch.cat(
        (features.flatten(1, -2), depth.flatten(1, -2)), dim=-1
    )
    xys = ray_bundle.xys
    masks_ras = None
    if ray_bundle.is_packed():
        # handle the ragged (packed) case: pad per-camera rays to a common
        # length and build a validity mask from the per-camera ray counts
        camera_counts = ray_bundle.camera_counts
        assert camera_counts is not None
        xys, first_idxs, _ = ray_bundle.get_padded_xys()
        masks_ras = (
            torch.arange(xys.shape[1], device=xys.device)[:, None]
            < camera_counts[:, None, None]
        )

        max_size = torch.max(camera_counts).item()
        features_depth_ras = packed_to_padded(
            features_depth_ras[:, 0], first_idxs, max_size
        )
        if masks is not None:
            padded_mask = packed_to_padded(masks.flatten(1, -1), first_idxs, max_size)
            masks_ras = padded_mask * masks_ras

    xys_ras = xys.flatten(1, -2)

    if masks_ras is None:
        assert not ray_bundle.is_packed()
        masks_ras = masks.flatten(1, -2) if masks is not None else None

    if min(*image_size_hw) <= 0:
        raise ValueError(
            "Need to specify a positive output_size_hw for bundle rasterisation."
        )

    # Estimate the rasterization point radius so that we approximately fill
    # the whole image given the number of rasterized points.
    pt_radius = 2.0 / math.sqrt(xys.shape[1])

    # Rasterize the samples.
    features_depth_render, masks_render = rasterize_mc_samples(
        xys_ras,
        features_depth_ras,
        image_size_hw,
        radius=pt_radius,
        masks=masks_ras,
    )
    # split the extra depth channel back off the rendered blob
    images_render = features_depth_render[:, :-1]
    depths_render = features_depth_render[:, -1:]
    return images_render, depths_render, masks_render


def rasterize_mc_samples(
    xys: torch.Tensor,
    feats: torch.Tensor,
    image_size_hw: Tuple[int, int],
    radius: float = 0.03,
    topk: int = 5,
    masks: Optional[torch.Tensor] = None,
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Rasterizes Monte-Carlo sampled features back onto the image.

    Specifically, the code uses the PyTorch3D point rasterizer to render
    a z-flat point cloud composed of the xy MC locations and their features.

    Args:
        xys: B x N x 2 2D point locations in PyTorch3D NDC convention
        feats: B x N x dim tensor containing per-point rendered features.
        image_size_hw: Tuple[image_height, image_width] containing
            the size of rasterized image.
        radius: Rasterization point radius.
        topk: The maximum z-buffer size for the PyTorch3D point cloud rasterizer.
        masks: B x N x 1 tensor containing the alpha mask of the
            rendered features.
    """

    if masks is None:
        masks = torch.ones_like(xys[..., :1])

    # append the mask as an extra feature channel so its splatted value
    # can be recovered after rendering
    feats = torch.cat((feats, masks), dim=-1)
    # place all points on the z=1 plane ("z-flat" cloud)
    pointclouds = Pointclouds(
        points=torch.cat([xys, torch.ones_like(xys[..., :1])], dim=-1),
        features=feats,
    )

    data_rendered, render_mask, _ = render_point_cloud_pytorch3d(
        PerspectiveCameras(device=feats.device),
        pointclouds,
        render_size=image_size_hw,
        point_radius=radius,
        topk=topk,
    )

    # split off the splatted mask channel and combine it with the opacity
    data_rendered, masks_pt = data_rendered.split(
        [data_rendered.shape[1] - 1, 1], dim=1
    )
    render_mask = masks_pt * render_mask

    return data_rendered, render_mask


# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe

import gzip
import json
import logging
import time
import warnings
from collections.abc import Iterable
from itertools import cycle

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import colors as mcolors
from pytorch3d.implicitron.tools.vis_utils import get_visdom_connection

logger = logging.getLogger(__name__)


class AverageMeter:
    """Computes and stores the average and current value"""

    def __init__(self):
        # history[e] is the list of per-update values logged during epoch e
        self.history = []
        self.reset()

    def reset(self):
        # resets the running statistics only; `history` is preserved
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1, epoch=0):
        # make sure the history is of the same len as epoch
        while len(self.history) <= epoch:
            self.history.append([])

        self.history[epoch].append(val / n)
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def get_epoch_averages(self, epoch=-1):
        # Mean of the values logged in `epoch`; with epoch==-1, a list of
        # per-epoch means (NaN for epochs with no entries). None if empty.
        if len(self.history) == 0:  # no stats here
            return None
        elif epoch == -1:
            return [
                (float(np.array(x).mean()) if len(x) > 0 else float("NaN"))
                for x in self.history
            ]
        else:
            return float(np.array(self.history[epoch]).mean())

    def get_all_values(self):
        # flat array of every value ever logged, across epochs
        all_vals = [np.array(x) for x in self.history]
        all_vals = np.concatenate(all_vals)
        return all_vals

    def get_epoch(self):
        # number of epochs the meter has seen so far
        return len(self.history)

    @staticmethod
    def from_json_str(json_str):
        # alternate constructor: restore the meter from a JSON string
        self = AverageMeter()
        self.__dict__.update(json.loads(json_str))
        return self


class Stats:
    # TODO: update this with context manager
    """
    stats logging object useful for gathering statistics of training a deep net in pytorch
    Example::

        # init stats structure that logs statistics 'objective' and 'top1e'
        stats = Stats( ('objective','top1e') )
        network = init_net()  # init a pytorch module (=neural network)
        dataloader = init_dataloader()  # init a dataloader
        for epoch in range(10):
            # start of epoch -> call new_epoch
            stats.new_epoch()

            # iterate over batches
            for batch in dataloader:

                output = network(batch)  # run and save into a dict of output variables

                # stats.update() automatically parses the 'objective' and 'top1e' from
                # the "output" dict and stores this into the db
                stats.update(output)
                # prints the metric averages over given epoch
                str_out = stats.get_status_string()
                logger.info(str_out)
            # stores the training plots into '/tmp/epoch_stats.pdf'
            # and plots into a visdom server running at localhost (if running)
            stats.plot_stats(plot_file='/tmp/epoch_stats.pdf')

    """

    def __init__(
        self,
        log_vars,
        epoch=-1,
        visdom_env="main",
        do_plot=True,
        plot_file=None,
        visdom_server="http://localhost",
        visdom_port=8097,
    ):
        # log_vars: names of the statistics tracked by this object
        self.log_vars = log_vars
        self.visdom_env = visdom_env
        self.visdom_server = visdom_server
        self.visdom_port = visdom_port
        self.plot_file = plot_file
        self.do_plot = do_plot
        self.hard_reset(epoch=epoch)
        # timestamp of the last update(); used by the "sec/it" statistic
        self._t_last_update = None

    @staticmethod
    def from_json_str(json_str):
        # alternate constructor: restore a Stats object from a JSON string
        self = Stats([])
        # load the global state
        self.__dict__.update(json.loads(json_str))
        # recover the AverageMeters
        for stat_set in self.stats:
            self.stats[stat_set] = {
                log_var: AverageMeter.from_json_str(log_vals_json_str)
                for log_var, log_vals_json_str in self.stats[stat_set].items()
            }
        return self

    @staticmethod
    def load(flpath, postfix=".jgz"):
        # load a Stats object from a gzipped-json file
        flpath = _get_postfixed_filename(flpath, postfix)
        with gzip.open(flpath, "r") as fin:
            data = json.loads(fin.read().decode("utf-8"))
        return Stats.from_json_str(data)

    def save(self, flpath, postfix=".jgz"):
        flpath = _get_postfixed_filename(flpath, postfix)
        # store into a gzipped-json
        with gzip.open(flpath, "w") as fout:
            fout.write(json.dumps(self, cls=StatsJSONEncoder).encode("utf-8"))

    # some sugar to be used with "with stats:" at the beginning of the epoch
    def __enter__(self):
        if self.do_plot and self.epoch >= 0:
            self.plot_stats(self.visdom_env)
        self.new_epoch()

    def __exit__(self, type, value, traceback):
        # skip plotting when the epoch body raised (incl. KeyboardInterrupt)
        iserr = type is not None and issubclass(type, Exception)
        iserr = iserr or (type is KeyboardInterrupt)
        if iserr:
            logger.error("error inside 'with' block")
            return
        if self.do_plot:
            self.plot_stats(self.visdom_env)

    def reset(self):  # to be called after each epoch
        stat_sets = list(self.stats.keys())
        logger.debug(f"stats: epoch {self.epoch} - reset")
        self.it = {k: -1 for k in stat_sets}
        for stat_set in stat_sets:
            for stat in self.stats[stat_set]:
                self.stats[stat_set][stat].reset()

    def hard_reset(self, epoch=-1):  # to be called during object __init__
        self.epoch = epoch
        logger.debug(f"stats: epoch {self.epoch} - hard reset")
        self.stats = {}

        # reset
        self.reset()

    def new_epoch(self):
        logger.debug(f"stats: new epoch {(self.epoch + 1)}")
        self.epoch += 1
        self.reset()  # zero the stats + increase epoch counter

    def gather_value(self, val):
        # convert a scalar / tensor statistic to a plain float
        if isinstance(val, (float, int)):
            val = float(val)
        else:
            val = val.data.cpu().numpy()
            val = float(val.sum())
        return val

    def add_log_vars(self, added_log_vars):
        # register extra statistics after construction
        for add_log_var in added_log_vars:
            if add_log_var not in self.stats:
                logger.debug(f"Adding {add_log_var}")
                self.log_vars.append(add_log_var)

    def update(self, preds, time_start=None, freeze_iter=False, stat_set="train"):
        # gather one iteration worth of statistics from the `preds` dict
        if self.epoch == -1:  # uninitialized
            logger.warning(
                "epoch==-1 means uninitialized stats structure -> new_epoch() called"
            )
            self.new_epoch()

        if stat_set not in self.stats:
            self.stats[stat_set] = {}
            self.it[stat_set] = -1

        if not freeze_iter:
            self.it[stat_set] += 1

        epoch = self.epoch

        for stat in self.log_vars:
            if stat not in self.stats[stat_set]:
                self.stats[stat_set][stat] = AverageMeter()

            if stat == "sec/it":  # compute speed
                if time_start is None:
                    time_per_it = 0.0
                else:
                    # time since the previous update (or since time_start
                    # on the very first call)
                    now = time.time()
                    time_per_it = now - (self._t_last_update or time_start)
                    self._t_last_update = now
                val = time_per_it
            else:
                if stat in preds:
                    try:
                        val = self.gather_value(preds[stat])
                    except KeyError:
                        raise ValueError(
                            "could not extract prediction %s\
                            from the prediction dictionary"
                            % stat
                        ) from None
                else:
                    val = None

            if val is not None:
                self.stats[stat_set][stat].update(val, epoch=epoch, n=1)

    def get_epoch_averages(self, epoch=None):
        # Per-stat-set dict of epoch averages; epoch=None means current epoch,
        # epoch==-1 returns averages for all completed epochs.
        stat_sets = list(self.stats.keys())

        if epoch is None:
            epoch = self.epoch
        if epoch == -1:
            epoch = list(range(self.epoch))

        outvals = {}
        for stat_set in stat_sets:
            outvals[stat_set] = {
                "epoch": epoch,
                "it": self.it[stat_set],
                "epoch_max": self.epoch,
            }

            for stat in self.stats[stat_set].keys():
                if self.stats[stat_set][stat].count == 0:
                    continue
                if isinstance(epoch, Iterable):
                    avgs = self.stats[stat_set][stat].get_epoch_averages()
                    avgs = [avgs[e] for e in epoch]
                else:
                    avgs = self.stats[stat_set][stat].get_epoch_averages(epoch=epoch)
                outvals[stat_set][stat] = avgs

        return outvals

    def print(
        self,
        max_it=None,
        stat_set="train",
        vars_print=None,
        get_str=False,
        skip_nan=False,
        stat_format=lambda s: s.replace("loss_", "").replace("prev_stage_", "ps_"),
    ):
        """
        stats.print() is deprecated. Please use get_status_string() instead.
        example:
            str_out = stats.get_status_string()
            logger.info(str_out)
        """

        epoch = self.epoch
        stats = self.stats

        str_out = ""

        it = self.it[stat_set]
        stat_str = ""
        stats_print = sorted(stats[stat_set].keys())
        for stat in stats_print:
            if stats[stat_set][stat].count == 0:
                continue
            if skip_nan and not np.isfinite(stats[stat_set][stat].avg):
                continue
            stat_str += " {0:.12}: {1:1.3f} |".format(
                stat_format(stat), stats[stat_set][stat].avg
            )

        head_str = "[%s] | epoch %3d | it %5d" % (stat_set, epoch, it)
        if max_it:
            head_str += "/ %d" % max_it

        str_out = "%s | %s" % (head_str, stat_str)

        if get_str:
            return str_out
        else:
            warnings.warn(
                "get_str=False is deprecated."
                "Please enable this flag to get receive the output string.",
                DeprecationWarning,
            )
            print(str_out)

    def get_status_string(
        self,
        max_it=None,
        stat_set="train",
        vars_print=None,
        skip_nan=False,
        stat_format=lambda s: s.replace("loss_", "").replace("prev_stage_", "ps_"),
    ):
        # preferred replacement for print(); returns the formatted status line
        return self.print(
            max_it=max_it,
            stat_set=stat_set,
            vars_print=vars_print,
            get_str=True,
            skip_nan=skip_nan,
            stat_format=stat_format,
        )

    def plot_stats(
        self, visdom_env=None, plot_file=None, visdom_server=None, visdom_port=None
    ):
        # Plot the per-epoch averages of every logged stat, to visdom (if a
        # server is reachable) and/or to a matplotlib pdf `plot_file`.
        # use the cached visdom env if none supplied
        if visdom_env is None:
            visdom_env = self.visdom_env
        if visdom_server is None:
            visdom_server = self.visdom_server
        if visdom_port is None:
            visdom_port = self.visdom_port
        if plot_file is None:
            plot_file = self.plot_file

        stat_sets = list(self.stats.keys())

        logger.debug(
            f"printing charts to visdom env '{visdom_env}' ({visdom_server}:{visdom_port})"
        )

        novisdom = False

        viz = get_visdom_connection(server=visdom_server, port=visdom_port)
        if viz is None or not viz.check_connection():
            logger.info("no visdom server! -> skipping visdom plots")
            novisdom = True

        lines = []

        # plot metrics
        if not novisdom:
            viz.close(env=visdom_env, win=None)

        for stat in self.log_vars:
            vals = []
            stat_sets_now = []
            for stat_set in stat_sets:
                val = self.stats[stat_set][stat].get_epoch_averages()
                if val is None:
                    continue
                else:
                    val = np.array(val).reshape(-1)
                    stat_sets_now.append(stat_set)
                    vals.append(val)

            if len(vals) == 0:
                continue

            lines.append((stat_sets_now, stat, vals))

        if not novisdom:
            for tmodes, stat, vals in lines:
                title = "%s" % stat
                opts = {"title": title, "legend": list(tmodes)}
                for i, (tmode, val) in enumerate(zip(tmodes, vals)):
                    update = "append" if i > 0 else None
                    # only finite values can be sent to visdom
                    valid = np.where(np.isfinite(val))[0]
                    if len(valid) == 0:
                        continue
                    x = np.arange(len(val))
                    viz.line(
                        Y=val[valid],
                        X=x[valid],
                        env=visdom_env,
                        opts=opts,
                        win=f"stat_plot_{title}",
                        name=tmode,
                        update=update,
                    )

        if plot_file:
            logger.info(f"plotting stats to {plot_file}")
            ncol = 3
            nrow = int(np.ceil(float(len(lines)) / ncol))
            matplotlib.rcParams.update({"font.size": 5})
            color = cycle(plt.cm.tab10(np.linspace(0, 1, 10)))
            fig = plt.figure(1)
            plt.clf()
            for idx, (tmodes, stat, vals) in enumerate(lines):
                c = next(color)
                plt.subplot(nrow, ncol, idx + 1)
                plt.gca()
                for vali, vals_ in enumerate(vals):
                    # darken the color slightly for each additional stat set
                    c_ = c * (1.0 - float(vali) * 0.3)
                    valid = np.where(np.isfinite(vals_))[0]
                    if len(valid) == 0:
                        continue
                    x = np.arange(len(vals_))
                    plt.plot(x[valid], vals_[valid], c=c_, linewidth=1)
                plt.ylabel(stat)
                plt.xlabel("epoch")
                plt.gca().yaxis.label.set_color(c[0:3] * 0.75)
                plt.legend(tmodes)
                gcolor = np.array(mcolors.to_rgba("lightgray"))
                grid_params = {"visible": True, "color": gcolor}
                plt.grid(**grid_params, which="major", linestyle="-", linewidth=0.4)
                plt.grid(**grid_params, which="minor", linestyle="--", linewidth=0.2)
                plt.minorticks_on()

            plt.tight_layout()
            plt.show()
            try:
                fig.savefig(plot_file)
+ except PermissionError: + warnings.warn("Cant dump stats due to insufficient permissions!") + + def synchronize_logged_vars(self, log_vars, default_val=float("NaN")): + stat_sets = list(self.stats.keys()) + + # remove the additional log_vars + for stat_set in stat_sets: + for stat in self.stats[stat_set].keys(): + if stat not in log_vars: + logger.warning(f"additional stat {stat_set}:{stat} -> removing") + + self.stats[stat_set] = { + stat: v for stat, v in self.stats[stat_set].items() if stat in log_vars + } + + self.log_vars = log_vars # !!! + + for stat_set in stat_sets: + for stat in log_vars: + if stat not in self.stats[stat_set]: + logger.info( + "missing stat %s:%s -> filling with default values (%1.2f)" + % (stat_set, stat, default_val) + ) + elif len(self.stats[stat_set][stat].history) != self.epoch + 1: + h = self.stats[stat_set][stat].history + if len(h) == 0: # just never updated stat ... skip + continue + else: + logger.info( + "incomplete stat %s:%s -> reseting with default values (%1.2f)" + % (stat_set, stat, default_val) + ) + else: + continue + + self.stats[stat_set][stat] = AverageMeter() + self.stats[stat_set][stat].reset() + + lastep = self.epoch + 1 + for ep in range(lastep): + self.stats[stat_set][stat].update(default_val, n=1, epoch=ep) + epoch_generated = self.stats[stat_set][stat].get_epoch() + assert epoch_generated == self.epoch + 1, ( + "bad epoch of synchronized log_var! 
%d vs %d" + % ( + self.epoch + 1, + epoch_generated, + ) + ) + + +class StatsJSONEncoder(json.JSONEncoder): + def default(self, o): + if isinstance(o, (AverageMeter, Stats)): + enc = self.encode(o.__dict__) + return enc + else: + raise TypeError( + f"Object of type {o.__class__.__name__} " f"is not JSON serializable" + ) + + +def _get_postfixed_filename(fl, postfix): + return fl if fl.endswith(postfix) else fl + postfix diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/tools/utils.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/tools/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b556d1eba47489c2c1dfc0e3af56b570bf9a80b2 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/tools/utils.py @@ -0,0 +1,207 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + + +import collections +import dataclasses +import time +from contextlib import contextmanager +from typing import Any, Callable, Dict, Iterable, Iterator + +import torch + + +@contextmanager +def evaluating(net: torch.nn.Module): + """Temporarily switch to evaluation mode.""" + istrain = net.training + try: + net.eval() + yield net + finally: + if istrain: + net.train() + + +def try_to_cuda(t: Any) -> Any: + """ + Try to move the input variable `t` to a cuda device. + + Args: + t: Input. + + Returns: + t_cuda: `t` moved to a cuda device, if supported. + """ + try: + t = t.cuda() + except AttributeError: + pass + return t + + +def try_to_cpu(t: Any) -> Any: + """ + Try to move the input variable `t` to a cpu device. + + Args: + t: Input. + + Returns: + t_cpu: `t` moved to a cpu device, if supported. 
+ """ + try: + t = t.cpu() + except AttributeError: + pass + return t + + +def dict_to_cuda(batch: Dict[Any, Any]) -> Dict[Any, Any]: + """ + Move all values in a dictionary to cuda if supported. + + Args: + batch: Input dict. + + Returns: + batch_cuda: `batch` moved to a cuda device, if supported. + """ + return {k: try_to_cuda(v) for k, v in batch.items()} + + +def dict_to_cpu(batch): + """ + Move all values in a dictionary to cpu if supported. + + Args: + batch: Input dict. + + Returns: + batch_cpu: `batch` moved to a cpu device, if supported. + """ + return {k: try_to_cpu(v) for k, v in batch.items()} + + +def dataclass_to_cuda_(obj): + """ + Move all contents of a dataclass to cuda inplace if supported. + + Args: + batch: Input dataclass. + + Returns: + batch_cuda: `batch` moved to a cuda device, if supported. + """ + for f in dataclasses.fields(obj): + setattr(obj, f.name, try_to_cuda(getattr(obj, f.name))) + return obj + + +def dataclass_to_cpu_(obj): + """ + Move all contents of a dataclass to cpu inplace if supported. + + Args: + batch: Input dataclass. + + Returns: + batch_cuda: `batch` moved to a cpu device, if supported. + """ + for f in dataclasses.fields(obj): + setattr(obj, f.name, try_to_cpu(getattr(obj, f.name))) + return obj + + +# TODO: test it +def cat_dataclass(batch, tensor_collator: Callable): + """ + Concatenate all fields of a list of dataclasses `batch` to a single + dataclass object using `tensor_collator`. + + Args: + batch: Input list of dataclasses. + + Returns: + concatenated_batch: All elements of `batch` concatenated to a single + dataclass object. + tensor_collator: The function used to concatenate tensor fields. 
+ """ + + elem = batch[0] + collated = {} + + for f in dataclasses.fields(elem): + elem_f = getattr(elem, f.name) + if elem_f is None: + collated[f.name] = None + elif torch.is_tensor(elem_f): + collated[f.name] = tensor_collator([getattr(e, f.name) for e in batch]) + elif dataclasses.is_dataclass(elem_f): + collated[f.name] = cat_dataclass( + [getattr(e, f.name) for e in batch], tensor_collator + ) + elif isinstance(elem_f, collections.abc.Mapping): + collated[f.name] = { + k: ( + tensor_collator([getattr(e, f.name)[k] for e in batch]) + if elem_f[k] is not None + else None + ) + for k in elem_f + } + else: + raise ValueError("Unsupported field type for concatenation") + + return type(elem)(**collated) + + +def recursive_visitor(it: Iterable[Any]) -> Iterator[Any]: + for x in it: + if isinstance(x, Iterable) and not isinstance(x, (str, bytes)): + yield from recursive_visitor(x) + else: + yield x + + +def get_inlier_indicators( + tensor: torch.Tensor, dim: int, outlier_rate: float +) -> torch.Tensor: + remove_elements = int(min(outlier_rate, 1.0) * tensor.shape[dim] / 2) + hi = torch.topk(tensor, remove_elements, dim=dim).indices.tolist() + lo = torch.topk(-tensor, remove_elements, dim=dim).indices.tolist() + remove_indices = set(recursive_visitor([hi, lo])) + keep_indices = tensor.new_ones(tensor.shape[dim : dim + 1], dtype=torch.bool) + keep_indices[list(remove_indices)] = False + return keep_indices + + +class Timer: + """ + A simple class for timing execution. 
+ + Example:: + + with Timer(): + print("This print statement is timed.") + + """ + + def __init__(self, name="timer", quiet=False): + self.name = name + self.quiet = quiet + + def __enter__(self): + self.start = time.time() + return self + + def __exit__(self, *args): + self.end = time.time() + self.interval = self.end - self.start + if not self.quiet: + print("%20s: %1.6f sec" % (self.name, self.interval)) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/tools/video_writer.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/tools/video_writer.py new file mode 100644 index 0000000000000000000000000000000000000000..4969466ae7f0cd553349029a12958b14b9c5abd9 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/tools/video_writer.py @@ -0,0 +1,215 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +import os +import shutil +import subprocess +import tempfile +import warnings +from typing import Optional, Tuple, Union + +import matplotlib +import matplotlib.pyplot as plt +import numpy as np +import torch + +from PIL import Image + +_NO_TORCHVISION = False +try: + import torchvision +except ImportError: + _NO_TORCHVISION = True + + +_DEFAULT_FFMPEG = os.environ.get("FFMPEG", "ffmpeg") + +matplotlib.use("Agg") + + +class VideoWriter: + """ + A class for exporting videos. 
+ """ + + def __init__( + self, + cache_dir: Optional[str] = None, + ffmpeg_bin: str = _DEFAULT_FFMPEG, + out_path: str = "/tmp/video.mp4", + fps: int = 20, + output_format: str = "visdom", + rmdir_allowed: bool = False, + use_torchvision_video_writer: bool = False, + **kwargs, + ) -> None: + """ + Args: + cache_dir: A directory for storing the video frames. If `None`, + a temporary directory will be used. + ffmpeg_bin: The path to an `ffmpeg` executable. + out_path: The path to the output video. + fps: The speed of the generated video in frames-per-second. + output_format: Format of the output video. Currently only `"visdom"` + is supported. + rmdir_allowed: If `True` delete and create `cache_dir` in case + it is not empty. + use_torchvision_video_writer: If `True` use `torchvision.io.write_video` + to write the video + """ + self.rmdir_allowed = rmdir_allowed + self.output_format = output_format + self.fps = fps + self.out_path = out_path + self.cache_dir = cache_dir + self.ffmpeg_bin = ffmpeg_bin + self.use_torchvision_video_writer = use_torchvision_video_writer + self.frames = [] + self.regexp = "frame_%08d.png" + self.frame_num = 0 + + if self.use_torchvision_video_writer: + assert not _NO_TORCHVISION, "torchvision not available" + + if self.cache_dir is not None: + self.tmp_dir = None + if os.path.isdir(self.cache_dir): + if rmdir_allowed: + shutil.rmtree(self.cache_dir) + else: + warnings.warn( + f"Warning: cache directory not empty ({self.cache_dir})." + ) + os.makedirs(self.cache_dir, exist_ok=True) + else: + self.tmp_dir = tempfile.TemporaryDirectory() + self.cache_dir = self.tmp_dir.name + + def write_frame( + self, + frame: Union[matplotlib.figure.Figure, np.ndarray, Image.Image, str], + resize: Optional[Union[float, Tuple[int, int]]] = None, + ) -> None: + """ + Write a frame to the video. + + Args: + frame: An object containing the frame image. 
+ resize: Either a floating defining the image rescaling factor + or a 2-tuple defining the size of the output image. + """ + + # pyre-fixme[6]: For 1st argument expected `Union[PathLike[str], str]` but + # got `Optional[str]`. + outfile = os.path.join(self.cache_dir, self.regexp % self.frame_num) + + if isinstance(frame, matplotlib.figure.Figure): + plt.savefig(outfile) + im = Image.open(outfile) + elif isinstance(frame, np.ndarray): + if frame.dtype in (np.float64, np.float32, float): + frame = (np.transpose(frame, (1, 2, 0)) * 255.0).astype(np.uint8) + im = Image.fromarray(frame) + elif isinstance(frame, Image.Image): + im = frame + elif isinstance(frame, str): + im = Image.open(frame).convert("RGB") + else: + raise ValueError("Cant convert type %s" % str(type(frame))) + + if im is not None: + if resize is not None: + if isinstance(resize, float): + resize = [int(resize * s) for s in im.size] + else: + resize = im.size + # make sure size is divisible by 2 + resize = tuple([resize[i] + resize[i] % 2 for i in (0, 1)]) + + im = im.resize(resize, Image.LANCZOS)  # ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter + im.save(outfile) + + self.frames.append(outfile) + self.frame_num += 1 + + def get_video(self, quiet: bool = True) -> str: + """ + Generate the video from the written frames. + + Args: + quiet: If `True`, suppresses logging messages. + + Returns: + video_path: The path to the generated video if any frames were added. + Otherwise returns an empty string. + """ + if self.frame_num == 0: + return "" + + # pyre-fixme[6]: For 1st argument expected `Union[PathLike[str], str]` but + # got `Optional[str]`. 
+ regexp = os.path.join(self.cache_dir, self.regexp) + + if self.output_format == "visdom": # works for ppt too + # Video codec parameters + video_codec = "h264" + crf = "18" + b = "2000k" + pix_fmt = "yuv420p" + + if self.use_torchvision_video_writer: + torchvision.io.write_video( + self.out_path, + torch.stack( + [torch.from_numpy(np.array(Image.open(f))) for f in self.frames] + ), + fps=self.fps, + video_codec=video_codec, + options={"crf": crf, "b": b, "pix_fmt": pix_fmt}, + ) + + else: + if shutil.which(self.ffmpeg_bin) is None: + raise ValueError( + f"Cannot find ffmpeg as `{self.ffmpeg_bin}`. " + + "Please set FFMPEG in the environment or ffmpeg_bin on this class." + ) + + args = [ + self.ffmpeg_bin, + "-r", + str(self.fps), + "-i", + regexp, + "-vcodec", + video_codec, + "-f", + "mp4", + "-y", + "-crf", + crf, + "-b", + b, + "-pix_fmt", + pix_fmt, + self.out_path, + ] + if quiet: + subprocess.check_call( + args, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL + ) + else: + subprocess.check_call(args) + else: + raise ValueError("no such output type %s" % str(self.output_format)) + + return self.out_path + + def __del__(self) -> None: + if self.tmp_dir is not None: + self.tmp_dir.cleanup() diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/tools/vis_utils.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/tools/vis_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..662c138ccbb1900084daa3be1cede76501ee1b2e --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/implicitron/tools/vis_utils.py @@ -0,0 +1,189 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + +import logging +from typing import Any, Dict, Optional, Tuple, TYPE_CHECKING + +import torch + +if TYPE_CHECKING: + from visdom import Visdom + + +logger = logging.getLogger(__name__) + + +def get_visdom_env(visdom_env: str, exp_dir: str) -> str: + """ + Parse out visdom environment name from the input config. + + Args: + visdom_env: Name of the wisdom environment, could be empty string. + exp_dir: Root experiment directory. + + Returns: + visdom_env: The name of the visdom environment. If the given visdom_env is + empty, return the name of the bottom directory in exp_dir. + """ + if len(visdom_env) == 0: + visdom_env = exp_dir.split("/")[-1] + else: + visdom_env = visdom_env + return visdom_env + + +# TODO: a proper singleton +_viz_singleton = None + + +def get_visdom_connection( + server: str = "http://localhost", + port: int = 8097, +) -> Optional["Visdom"]: + """ + Obtain a connection to a visdom server if visdom is installed. + + Args: + server: Server address. + port: Server port. + + Returns: + connection: The connection object. + """ + try: + from visdom import Visdom + except ImportError: + logger.debug("Cannot load visdom") + return None + + if server == "None": + return None + + global _viz_singleton + if _viz_singleton is None: + _viz_singleton = Visdom(server=server, port=port) + return _viz_singleton + + +def visualize_basics( + viz: "Visdom", + preds: Dict[str, Any], + visdom_env_imgs: str, + title: str = "", + visualize_preds_keys: Tuple[str, ...] = ( + "image_rgb", + "images_render", + "fg_probability", + "masks_render", + "depths_render", + "depth_map", + ), + store_history: bool = False, +) -> None: + """ + Visualize basic outputs of a `GenericModel` to visdom. + + Args: + viz: The visdom object. + preds: A dictionary containing `GenericModel` outputs. + visdom_env_imgs: Target visdom environment name. + title: The title of produced visdom window. + visualize_preds_keys: The list of keys of `preds` for visualization. 
+ store_history: Store the history buffer in visdom windows. + """ + imout = {} + for k in visualize_preds_keys: + if k not in preds or preds[k] is None: + logger.info(f"cant show {k}") + continue + v = preds[k].cpu().detach().clone() + if k.startswith("depth"): + # divide by 95th percentile + normfac = ( + v.view(v.shape[0], -1) + .topk(k=int(0.05 * (v.numel() // v.shape[0])), dim=-1) + .values[:, -1] + ) + v = v / normfac[:, None, None, None].clamp(1e-4) + if v.shape[1] == 1: + v = v.repeat(1, 3, 1, 1) + v = torch.nn.functional.interpolate( + v, + scale_factor=( + 600.0 + if ( + "_eval" in visdom_env_imgs + and k in ("images_render", "depths_render") + ) + else 200.0 + ) + / v.shape[2], + mode="bilinear", + ) + imout[k] = v + + # TODO: handle errors on the outside + try: + imout = {"all": torch.cat(list(imout.values()), dim=2)} + except RuntimeError as e: + print("cant cat!", e.args) + + for k, v in imout.items(): + viz.images( + v.clamp(0.0, 1.0), + win=k, + env=visdom_env_imgs, + opts={"title": title + "_" + k, "store_history": store_history}, + ) + + +def make_depth_image( + depths: torch.Tensor, + masks: torch.Tensor, + max_quantile: float = 0.98, + min_quantile: float = 0.02, + min_out_depth: float = 0.1, + max_out_depth: float = 0.9, +) -> torch.Tensor: + """ + Convert a batch of depth maps to a grayscale image. + + Args: + depths: A tensor of shape `(B, 1, H, W)` containing a batch of depth maps. + masks: A tensor of shape `(B, 1, H, W)` containing a batch of foreground masks. + max_quantile: The quantile of the input depth values which will + be mapped to `max_out_depth`. + min_quantile: The quantile of the input depth values which will + be mapped to `min_out_depth`. + min_out_depth: The minimal value in each depth map will be assigned this color. + max_out_depth: The maximal value in each depth map will be assigned this color. + + Returns: + depth_image: A tensor of shape `(B, 1, H, W)` a batch of grayscale + depth images. 
+ """ + normfacs = [] + for d, m in zip(depths, masks): + ok = (d.view(-1) > 1e-6) * (m.view(-1) > 0.5) + if ok.sum() <= 1: + logger.info("empty depth!") + normfacs.append(torch.zeros(2).type_as(depths)) + continue + dok = d.view(-1)[ok].view(-1) + _maxk = max(int(round((1 - max_quantile) * (dok.numel()))), 1) + _mink = max(int(round(min_quantile * (dok.numel()))), 1) + normfac_max = dok.topk(k=_maxk, dim=-1).values[-1] + normfac_min = dok.topk(k=_mink, dim=-1, largest=False).values[-1] + normfacs.append(torch.stack([normfac_min, normfac_max])) + normfacs = torch.stack(normfacs) + _min, _max = (normfacs[:, 0].view(-1, 1, 1, 1), normfacs[:, 1].view(-1, 1, 1, 1)) + depths = (depths - _min) / (_max - _min).clamp(1e-4) + depths = ( + (depths * (max_out_depth - min_out_depth) + min_out_depth) * masks.float() + ).clamp(0.0, 1.0) + return depths diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/loss/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/loss/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..94b012c45290aa772c9905b4cc7d1c80d1e8736a --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/loss/__init__.py @@ -0,0 +1,20 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + + +from .chamfer import chamfer_distance + +from .mesh_edge_loss import mesh_edge_loss + +from .mesh_laplacian_smoothing import mesh_laplacian_smoothing + +from .mesh_normal_consistency import mesh_normal_consistency +from .point_mesh_distance import point_mesh_edge_distance, point_mesh_face_distance + + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/loss/chamfer.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/loss/chamfer.py new file mode 100644 index 0000000000000000000000000000000000000000..7d7876690301ef55f5d84632592f9c156e455015 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/loss/chamfer.py @@ -0,0 +1,292 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from typing import Union + +import torch +import torch.nn.functional as F +from pytorch3d.ops.knn import knn_gather, knn_points +from pytorch3d.structures.pointclouds import Pointclouds + + +def _validate_chamfer_reduction_inputs( + batch_reduction: Union[str, None], point_reduction: Union[str, None] +) -> None: + """Check the requested reductions are valid. + + Args: + batch_reduction: Reduction operation to apply for the loss across the + batch, can be one of ["mean", "sum"] or None. + point_reduction: Reduction operation to apply for the loss across the + points, can be one of ["mean", "sum"] or None. 
+ """ + if batch_reduction is not None and batch_reduction not in ["mean", "sum"]: + raise ValueError('batch_reduction must be one of ["mean", "sum"] or None') + if point_reduction is not None and point_reduction not in ["mean", "sum", "max"]: + raise ValueError( + 'point_reduction must be one of ["mean", "sum", "max"] or None' + ) + if point_reduction is None and batch_reduction is not None: + raise ValueError("Batch reduction must be None if point_reduction is None") + + +def _handle_pointcloud_input( + points: Union[torch.Tensor, Pointclouds], + lengths: Union[torch.Tensor, None], + normals: Union[torch.Tensor, None], +): + """ + If points is an instance of Pointclouds, retrieve the padded points tensor + along with the number of points per batch and the padded normals. + Otherwise, return the input points (and normals) with the number of points per cloud + set to the size of the second dimension of `points`. + """ + if isinstance(points, Pointclouds): + X = points.points_padded() + lengths = points.num_points_per_cloud() + normals = points.normals_padded() # either a tensor or None + elif torch.is_tensor(points): + if points.ndim != 3: + raise ValueError("Expected points to be of shape (N, P, D)") + X = points + if lengths is not None: + if lengths.ndim != 1 or lengths.shape[0] != X.shape[0]: + raise ValueError("Expected lengths to be of shape (N,)") + if lengths.max() > X.shape[1]: + raise ValueError("A length value was too long") + if lengths is None: + lengths = torch.full( + (X.shape[0],), X.shape[1], dtype=torch.int64, device=points.device + ) + if normals is not None and normals.ndim != 3: + raise ValueError("Expected normals to be of shape (N, P, 3") + else: + raise ValueError( + "The input pointclouds should be either " + + "Pointclouds objects or torch.Tensor of shape " + + "(minibatch, num_points, 3)." 
+ ) + return X, lengths, normals + + +def _chamfer_distance_single_direction( + x, + y, + x_lengths, + y_lengths, + x_normals, + y_normals, + weights, + point_reduction: Union[str, None], + norm: int, + abs_cosine: bool, +): + return_normals = x_normals is not None and y_normals is not None + + N, P1, D = x.shape + + # Check if inputs are heterogeneous and create a lengths mask. + is_x_heterogeneous = (x_lengths != P1).any() + x_mask = ( + torch.arange(P1, device=x.device)[None] >= x_lengths[:, None] + ) # shape [N, P1] + if y.shape[0] != N or y.shape[2] != D: + raise ValueError("y does not have the correct shape.") + if weights is not None: + if weights.size(0) != N: + raise ValueError("weights must be of shape (N,).") + if not (weights >= 0).all(): + raise ValueError("weights cannot be negative.") + if weights.sum() == 0.0: + weights = weights.view(N, 1) + return ((x.sum((1, 2)) * weights) * 0.0, (x.sum((1, 2)) * weights) * 0.0) + + cham_norm_x = x.new_zeros(()) + + x_nn = knn_points(x, y, lengths1=x_lengths, lengths2=y_lengths, norm=norm, K=1) + cham_x = x_nn.dists[..., 0] # (N, P1) + + if is_x_heterogeneous: + cham_x[x_mask] = 0.0 + + if weights is not None: + cham_x *= weights.view(N, 1) + + if return_normals: + # Gather the normals using the indices and keep only value for k=0 + x_normals_near = knn_gather(y_normals, x_nn.idx, y_lengths)[..., 0, :] + + cosine_sim = F.cosine_similarity(x_normals, x_normals_near, dim=2, eps=1e-6) + # If abs_cosine, ignore orientation and take the absolute value of the cosine sim. 
+ cham_norm_x = 1 - (torch.abs(cosine_sim) if abs_cosine else cosine_sim) + + if is_x_heterogeneous: + cham_norm_x[x_mask] = 0.0 + + if weights is not None: + cham_norm_x *= weights.view(N, 1) + + if point_reduction == "max": + assert not return_normals + cham_x = cham_x.max(1).values # (N,) + elif point_reduction is not None: + # Apply point reduction + cham_x = cham_x.sum(1) # (N,) + if return_normals: + cham_norm_x = cham_norm_x.sum(1) # (N,) + if point_reduction == "mean": + x_lengths_clamped = x_lengths.clamp(min=1) + cham_x /= x_lengths_clamped + if return_normals: + cham_norm_x /= x_lengths_clamped + + cham_dist = cham_x + cham_normals = cham_norm_x if return_normals else None + return cham_dist, cham_normals + + +def _apply_batch_reduction( + cham_x, cham_norm_x, weights, batch_reduction: Union[str, None] +): + if batch_reduction is None: + return (cham_x, cham_norm_x) + # batch_reduction == "sum" + N = cham_x.shape[0] + cham_x = cham_x.sum() + if cham_norm_x is not None: + cham_norm_x = cham_norm_x.sum() + if batch_reduction == "mean": + if weights is None: + div = max(N, 1) + elif weights.sum() == 0.0: + div = 1 + else: + div = weights.sum() + cham_x /= div + if cham_norm_x is not None: + cham_norm_x /= div + return (cham_x, cham_norm_x) + + +def chamfer_distance( + x, + y, + x_lengths=None, + y_lengths=None, + x_normals=None, + y_normals=None, + weights=None, + batch_reduction: Union[str, None] = "mean", + point_reduction: Union[str, None] = "mean", + norm: int = 2, + single_directional: bool = False, + abs_cosine: bool = True, +): + """ + Chamfer distance between two pointclouds x and y. + + Args: + x: FloatTensor of shape (N, P1, D) or a Pointclouds object representing + a batch of point clouds with at most P1 points in each batch element, + batch size N and feature dimension D. 
+ y: FloatTensor of shape (N, P2, D) or a Pointclouds object representing + a batch of point clouds with at most P2 points in each batch element, + batch size N and feature dimension D. + x_lengths: Optional LongTensor of shape (N,) giving the number of points in each + cloud in x. + y_lengths: Optional LongTensor of shape (N,) giving the number of points in each + cloud in y. + x_normals: Optional FloatTensor of shape (N, P1, D). + y_normals: Optional FloatTensor of shape (N, P2, D). + weights: Optional FloatTensor of shape (N,) giving weights for + batch elements for reduction operation. + batch_reduction: Reduction operation to apply for the loss across the + batch, can be one of ["mean", "sum"] or None. + point_reduction: Reduction operation to apply for the loss across the + points, can be one of ["mean", "sum", "max"] or None. Using "max" leads to the + Hausdorff distance. + norm: int indicates the norm used for the distance. Supports 1 for L1 and 2 for L2. + single_directional: If False (default), loss comes from both the distance between + each point in x and its nearest neighbor in y and each point in y and its nearest + neighbor in x. If True, loss is the distance between each point in x and its + nearest neighbor in y. + abs_cosine: If False, loss_normals is from one minus the cosine similarity. + If True (default), loss_normals is from one minus the absolute value of the + cosine similarity, which means that exactly opposite normals are considered + equivalent to exactly matching normals, i.e. sign does not matter. + + Returns: + 2-element tuple containing + + - **loss**: Tensor giving the reduced distance between the pointclouds + in x and the pointclouds in y. If point_reduction is None, a 2-element + tuple of Tensors containing forward and backward loss terms shaped (N, P1) + and (N, P2) (if single_directional is False) or a Tensor containing loss + terms shaped (N, P1) (if single_directional is True) is returned. 
+ - **loss_normals**: Tensor giving the reduced cosine distance of normals + between pointclouds in x and pointclouds in y. Returns None if + x_normals and y_normals are None. If point_reduction is None, a 2-element + tuple of Tensors containing forward and backward loss terms shaped (N, P1) + and (N, P2) (if single_directional is False) or a Tensor containing loss + terms shaped (N, P1) (if single_directional is True) is returned. + """ + _validate_chamfer_reduction_inputs(batch_reduction, point_reduction) + + if not ((norm == 1) or (norm == 2)): + raise ValueError("Support for 1 or 2 norm.") + + if point_reduction == "max" and (x_normals is not None or y_normals is not None): + raise ValueError('Normals must be None if point_reduction is "max"') + + x, x_lengths, x_normals = _handle_pointcloud_input(x, x_lengths, x_normals) + y, y_lengths, y_normals = _handle_pointcloud_input(y, y_lengths, y_normals) + + cham_x, cham_norm_x = _chamfer_distance_single_direction( + x, + y, + x_lengths, + y_lengths, + x_normals, + y_normals, + weights, + point_reduction, + norm, + abs_cosine, + ) + if single_directional: + loss = cham_x + loss_normals = cham_norm_x + else: + cham_y, cham_norm_y = _chamfer_distance_single_direction( + y, + x, + y_lengths, + x_lengths, + y_normals, + x_normals, + weights, + point_reduction, + norm, + abs_cosine, + ) + if point_reduction == "max": + loss = torch.maximum(cham_x, cham_y) + loss_normals = None + elif point_reduction is not None: + loss = cham_x + cham_y + if cham_norm_x is not None: + loss_normals = cham_norm_x + cham_norm_y + else: + loss_normals = None + else: + loss = (cham_x, cham_y) + if cham_norm_x is not None: + loss_normals = (cham_norm_x, cham_norm_y) + else: + loss_normals = None + return _apply_batch_reduction(loss, loss_normals, weights, batch_reduction) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/loss/mesh_edge_loss.py 
b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/loss/mesh_edge_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..615fe5072b4411405c0f8e3fbf1b78ea9814d6c9 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/loss/mesh_edge_loss.py @@ -0,0 +1,52 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +import torch + + +def mesh_edge_loss(meshes, target_length: float = 0.0): + """ + Computes mesh edge length regularization loss averaged across all meshes + in a batch. Each mesh contributes equally to the final loss, regardless of + the number of edges per mesh in the batch by weighting each mesh with the + inverse number of edges. For example, if mesh 3 (out of N) has only E=4 + edges, then the loss for each edge in mesh 3 should be multiplied by 1/E to + contribute to the final loss. + + Args: + meshes: Meshes object with a batch of meshes. + target_length: Resting value for the edge length. + + Returns: + loss: Average loss across the batch. Returns 0 if meshes contains + no meshes or all empty meshes. + """ + if meshes.isempty(): + return torch.tensor( + [0.0], dtype=torch.float32, device=meshes.device, requires_grad=True + ) + + N = len(meshes) + edges_packed = meshes.edges_packed() # (sum(E_n), 3) + verts_packed = meshes.verts_packed() # (sum(V_n), 3) + edge_to_mesh_idx = meshes.edges_packed_to_mesh_idx() # (sum(E_n), ) + num_edges_per_mesh = meshes.num_edges_per_mesh() # N + + # Determine the weight for each edge based on the number of edges in the + # mesh it corresponds to. + # TODO (nikhilar) Find a faster way of computing the weights for each edge + # as this is currently a bottleneck for meshes with a large number of faces. 
+ weights = num_edges_per_mesh.gather(0, edge_to_mesh_idx) + weights = 1.0 / weights.float() + + verts_edges = verts_packed[edges_packed] + v0, v1 = verts_edges.unbind(1) + loss = ((v0 - v1).norm(dim=1, p=2) - target_length) ** 2.0 + loss = loss * weights + + return loss.sum() / N diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/loss/mesh_laplacian_smoothing.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/loss/mesh_laplacian_smoothing.py new file mode 100644 index 0000000000000000000000000000000000000000..02037841d74f849c026e98192be3275be20522d7 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/loss/mesh_laplacian_smoothing.py @@ -0,0 +1,137 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + + +import torch +from pytorch3d.ops import cot_laplacian + + +def mesh_laplacian_smoothing(meshes, method: str = "uniform"): + r""" + Computes the laplacian smoothing objective for a batch of meshes. + This function supports three variants of Laplacian smoothing, + namely with uniform weights("uniform"), with cotangent weights ("cot"), + and cotangent curvature ("cotcurv").For more details read [1, 2]. + + Args: + meshes: Meshes object with a batch of meshes. + method: str specifying the method for the laplacian. + Returns: + loss: Average laplacian smoothing loss across the batch. + Returns 0 if meshes contains no meshes or all empty meshes. + + Consider a mesh M = (V, F), with verts of shape Nx3 and faces of shape Mx3. 
+ The Laplacian matrix L is a NxN tensor such that LV gives a tensor of vectors: + for a uniform Laplacian, LuV[i] points to the centroid of its neighboring + vertices, a cotangent Laplacian LcV[i] is known to be an approximation of + the surface normal, while the curvature variant LckV[i] scales the normals + by the discrete mean curvature. For vertex i, assume S[i] is the set of + neighboring vertices to i, a_ij and b_ij are the "outside" angles in the + two triangles connecting vertex v_i and its neighboring vertex v_j + for j in S[i], as seen in the diagram below. + + .. code-block:: python + + a_ij + /\ + / \ + / \ + / \ + v_i /________\ v_j + \ / + \ / + \ / + \ / + \/ + b_ij + + The definition of the Laplacian is LV[i] = sum_j w_ij (v_j - v_i) + For the uniform variant, w_ij = 1 / |S[i]| + For the cotangent variant, + w_ij = (cot a_ij + cot b_ij) / (sum_k cot a_ik + cot b_ik) + For the cotangent curvature, w_ij = (cot a_ij + cot b_ij) / (4 A[i]) + where A[i] is the sum of the areas of all triangles containing vertex v_i. + + There is a nice trigonometry identity to compute cotangents. Consider a triangle + with side lengths A, B, C and angles a, b, c. + + .. code-block:: python + + c + /|\ + / | \ + / | \ + B / H| \ A + / | \ + / | \ + /a_____|_____b\ + C + + Then cot a = (B^2 + C^2 - A^2) / 4 * area + We know that area = CH/2, and by the law of cosines we have + + A^2 = B^2 + C^2 - 2BC cos a => B^2 + C^2 - A^2 = 2BC cos a + + Putting these together, we get: + + B^2 + C^2 - A^2 2BC cos a + _______________ = _________ = (B/H) cos a = cos a / sin a = cot a + 4 * area 2CH + + + [1] Desbrun et al, "Implicit fairing of irregular meshes using diffusion + and curvature flow", SIGGRAPH 1999. + + [2] Nealan et al, "Laplacian Mesh Optimization", Graphite 2006. 
+ """ + + if meshes.isempty(): + return torch.tensor( + [0.0], dtype=torch.float32, device=meshes.device, requires_grad=True + ) + + N = len(meshes) + verts_packed = meshes.verts_packed() # (sum(V_n), 3) + faces_packed = meshes.faces_packed() # (sum(F_n), 3) + num_verts_per_mesh = meshes.num_verts_per_mesh() # (N,) + verts_packed_idx = meshes.verts_packed_to_mesh_idx() # (sum(V_n),) + weights = num_verts_per_mesh.gather(0, verts_packed_idx) # (sum(V_n),) + weights = 1.0 / weights.float() + + # We don't want to backprop through the computation of the Laplacian; + # just treat it as a magic constant matrix that is used to transform + # verts into normals + with torch.no_grad(): + if method == "uniform": + L = meshes.laplacian_packed() + elif method in ["cot", "cotcurv"]: + L, inv_areas = cot_laplacian(verts_packed, faces_packed) + if method == "cot": + norm_w = torch.sparse.sum(L, dim=1).to_dense().view(-1, 1) + idx = norm_w > 0 + # pyre-fixme[58]: `/` is not supported for operand types `float` and + # `Tensor`. + norm_w[idx] = 1.0 / norm_w[idx] + else: + L_sum = torch.sparse.sum(L, dim=1).to_dense().view(-1, 1) + norm_w = 0.25 * inv_areas + else: + raise ValueError("Method should be one of {uniform, cot, cotcurv}") + + if method == "uniform": + loss = L.mm(verts_packed) + elif method == "cot": + # pyre-fixme[61]: `norm_w` is undefined, or not always defined. + loss = L.mm(verts_packed) * norm_w - verts_packed + elif method == "cotcurv": + # pyre-fixme[61]: `norm_w` may not be initialized here. 
+ loss = (L.mm(verts_packed) - L_sum * verts_packed) * norm_w + loss = loss.norm(dim=1) + + loss = loss * weights + return loss.sum() / N diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/loss/mesh_normal_consistency.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/loss/mesh_normal_consistency.py new file mode 100644 index 0000000000000000000000000000000000000000..5466df30d19a62bd90984acdb91847f3c412eeab --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/loss/mesh_normal_consistency.py @@ -0,0 +1,134 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +import torch +from pytorch3d import _C + + +def mesh_normal_consistency(meshes): + r""" + Computes the normal consistency of each mesh in meshes. + We compute the normal consistency for each pair of neighboring faces. + If e = (v0, v1) is the connecting edge of two neighboring faces f0 and f1, + then the normal consistency between f0 and f1 + + .. code-block:: python + + a + /\ + / \ + / f0 \ + / \ + v0 /____e___\ v1 + \ / + \ / + \ f1 / + \ / + \/ + b + + The normal consistency is + + .. code-block:: python + + nc(f0, f1) = 1 - cos(n0, n1) + + where cos(n0, n1) = n0^n1 / ||n0|| / ||n1|| is the cosine of the angle + between the normals n0 and n1, and + + n0 = (v1 - v0) x (a - v0) + n1 = - (v1 - v0) x (b - v0) = (b - v0) x (v1 - v0) + + This means that if nc(f0, f1) = 0 then n0 and n1 point to the same + direction, while if nc(f0, f1) = 2 then n0 and n1 point opposite direction. + + .. note:: + For well-constructed meshes the assumption that only two faces share an + edge is true. This assumption could make the implementation easier and faster. + This implementation does not follow this assumption. 
All the faces sharing e, + which can be any in number, are discovered. + + Args: + meshes: Meshes object with a batch of meshes. + + Returns: + loss: Average normal consistency across the batch. + Returns 0 if meshes contains no meshes or all empty meshes. + """ + if meshes.isempty(): + return torch.tensor( + [0.0], dtype=torch.float32, device=meshes.device, requires_grad=True + ) + + N = len(meshes) + verts_packed = meshes.verts_packed() # (sum(V_n), 3) + faces_packed = meshes.faces_packed() # (sum(F_n), 3) + edges_packed = meshes.edges_packed() # (sum(E_n), 2) + verts_packed_to_mesh_idx = meshes.verts_packed_to_mesh_idx() # (sum(V_n),) + face_to_edge = meshes.faces_packed_to_edges_packed() # (sum(F_n), 3) + E = edges_packed.shape[0] # sum(E_n) + F = faces_packed.shape[0] # sum(F_n) + + # We don't want gradients for the following operation. The goal is to + # find for each edge e all the vertices associated with e. In the example + # above, the vertices associated with e are (a, b), i.e. the points connected + # on faces to e. + with torch.no_grad(): + edge_idx = face_to_edge.reshape(F * 3) # (3 * F,) indexes into edges + vert_idx = ( + faces_packed.view(1, F, 3).expand(3, F, 3).transpose(0, 1).reshape(3 * F, 3) + ) + edge_idx, edge_sort_idx = edge_idx.sort() + vert_idx = vert_idx[edge_sort_idx] + + # In well constructed meshes each edge is shared by precisely 2 faces + # However, in many meshes, this assumption is not always satisfied. + # We want to find all faces that share an edge, a number which can + # vary and which depends on the topology. + # In particular, we find the vertices not on the edge on the shared faces. + # In the example above, we want to associate edge e with vertices a and b. + # This operation is done more efficiently in cpu with lists. + # TODO(gkioxari) find a better way to do this. + + # edge_idx represents the index of the edge for each vertex. We can count + # the number of vertices which are associated with each edge. 
+ # There can be a different number for each edge. + edge_num = edge_idx.bincount(minlength=E) + + # This calculates all pairs of vertices which are opposite to the same edge. + vert_edge_pair_idx = _C.mesh_normal_consistency_find_verts(edge_num.cpu()).to( + edge_num.device + ) + + if vert_edge_pair_idx.shape[0] == 0: + return torch.tensor( + [0.0], dtype=torch.float32, device=meshes.device, requires_grad=True + ) + + v0_idx = edges_packed[edge_idx, 0] + v0 = verts_packed[v0_idx] + v1_idx = edges_packed[edge_idx, 1] + v1 = verts_packed[v1_idx] + + # two of the following cross products are zeros as they are cross product + # with either (v1-v0)x(v1-v0) or (v1-v0)x(v0-v0) + n_temp0 = (v1 - v0).cross(verts_packed[vert_idx[:, 0]] - v0, dim=1) + n_temp1 = (v1 - v0).cross(verts_packed[vert_idx[:, 1]] - v0, dim=1) + n_temp2 = (v1 - v0).cross(verts_packed[vert_idx[:, 2]] - v0, dim=1) + n = n_temp0 + n_temp1 + n_temp2 + n0 = n[vert_edge_pair_idx[:, 0]] + n1 = -n[vert_edge_pair_idx[:, 1]] + loss = 1 - torch.cosine_similarity(n0, n1, dim=1) + + verts_packed_to_mesh_idx = verts_packed_to_mesh_idx[vert_idx[:, 0]] + verts_packed_to_mesh_idx = verts_packed_to_mesh_idx[vert_edge_pair_idx[:, 0]] + num_normals = verts_packed_to_mesh_idx.bincount(minlength=N) + weights = 1.0 / num_normals[verts_packed_to_mesh_idx].float() + + loss = loss * weights + return loss.sum() / N diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/loss/point_mesh_distance.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/loss/point_mesh_distance.py new file mode 100644 index 0000000000000000000000000000000000000000..b30cd05e88544c2b255107bae6001c12fbfdf307 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/loss/point_mesh_distance.py @@ -0,0 +1,398 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. 
+# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from pytorch3d import _C +from pytorch3d.structures import Meshes, Pointclouds +from torch.autograd import Function +from torch.autograd.function import once_differentiable + + +""" +This file defines distances between meshes and pointclouds. +The functions make use of the definition of a distance between a point and +an edge segment or the distance of a point and a triangle (face). + +The exact mathematical formulations and implementations of these +distances can be found in `csrc/utils/geometry_utils.cuh`. +""" + +_DEFAULT_MIN_TRIANGLE_AREA: float = 5e-3 + + +# PointFaceDistance +class _PointFaceDistance(Function): + """ + Torch autograd Function wrapper PointFaceDistance Cuda implementation + """ + + @staticmethod + def forward( + ctx, + points, + points_first_idx, + tris, + tris_first_idx, + max_points, + min_triangle_area=_DEFAULT_MIN_TRIANGLE_AREA, + ): + """ + Args: + ctx: Context object used to calculate gradients. + points: FloatTensor of shape `(P, 3)` + points_first_idx: LongTensor of shape `(N,)` indicating the first point + index in each example in the batch + tris: FloatTensor of shape `(T, 3, 3)` of triangular faces. The `t`-th + triangular face is spanned by `(tris[t, 0], tris[t, 1], tris[t, 2])` + tris_first_idx: LongTensor of shape `(N,)` indicating the first face + index in each example in the batch + max_points: Scalar equal to maximum number of points in the batch + min_triangle_area: (float, defaulted) Triangles of area less than this + will be treated as points/lines. + Returns: + dists: FloatTensor of shape `(P,)`, where `dists[p]` is the squared + euclidean distance of `p`-th point to the closest triangular face + in the corresponding example in the batch + idxs: LongTensor of shape `(P,)` indicating the closest triangular face + in the corresponding example in the batch. 
+ + `dists[p]` is + `d(points[p], tris[idxs[p], 0], tris[idxs[p], 1], tris[idxs[p], 2])` + where `d(u, v0, v1, v2)` is the distance of point `u` from the triangular + face `(v0, v1, v2)` + + """ + dists, idxs = _C.point_face_dist_forward( + points, + points_first_idx, + tris, + tris_first_idx, + max_points, + min_triangle_area, + ) + ctx.save_for_backward(points, tris, idxs) + ctx.min_triangle_area = min_triangle_area + return dists + + @staticmethod + @once_differentiable + def backward(ctx, grad_dists): + grad_dists = grad_dists.contiguous() + points, tris, idxs = ctx.saved_tensors + min_triangle_area = ctx.min_triangle_area + grad_points, grad_tris = _C.point_face_dist_backward( + points, tris, idxs, grad_dists, min_triangle_area + ) + return grad_points, None, grad_tris, None, None, None + + +point_face_distance = _PointFaceDistance.apply + + +# FacePointDistance +class _FacePointDistance(Function): + """ + Torch autograd Function wrapper FacePointDistance Cuda implementation + """ + + @staticmethod + def forward( + ctx, + points, + points_first_idx, + tris, + tris_first_idx, + max_tris, + min_triangle_area=_DEFAULT_MIN_TRIANGLE_AREA, + ): + """ + Args: + ctx: Context object used to calculate gradients. + points: FloatTensor of shape `(P, 3)` + points_first_idx: LongTensor of shape `(N,)` indicating the first point + index in each example in the batch + tris: FloatTensor of shape `(T, 3, 3)` of triangular faces. The `t`-th + triangular face is spanned by `(tris[t, 0], tris[t, 1], tris[t, 2])` + tris_first_idx: LongTensor of shape `(N,)` indicating the first face + index in each example in the batch + max_tris: Scalar equal to maximum number of faces in the batch + min_triangle_area: (float, defaulted) Triangles of area less than this + will be treated as points/lines. 
+ Returns: + dists: FloatTensor of shape `(T,)`, where `dists[t]` is the squared + euclidean distance of `t`-th triangular face to the closest point in the + corresponding example in the batch + idxs: LongTensor of shape `(T,)` indicating the closest point in the + corresponding example in the batch. + + `dists[t] = d(points[idxs[t]], tris[t, 0], tris[t, 1], tris[t, 2])`, + where `d(u, v0, v1, v2)` is the distance of point `u` from the triangular + face `(v0, v1, v2)`. + """ + dists, idxs = _C.face_point_dist_forward( + points, points_first_idx, tris, tris_first_idx, max_tris, min_triangle_area + ) + ctx.save_for_backward(points, tris, idxs) + ctx.min_triangle_area = min_triangle_area + return dists + + @staticmethod + @once_differentiable + def backward(ctx, grad_dists): + grad_dists = grad_dists.contiguous() + points, tris, idxs = ctx.saved_tensors + min_triangle_area = ctx.min_triangle_area + grad_points, grad_tris = _C.face_point_dist_backward( + points, tris, idxs, grad_dists, min_triangle_area + ) + return grad_points, None, grad_tris, None, None, None + + +face_point_distance = _FacePointDistance.apply + + +# PointEdgeDistance +class _PointEdgeDistance(Function): + """ + Torch autograd Function wrapper PointEdgeDistance Cuda implementation + """ + + @staticmethod + def forward(ctx, points, points_first_idx, segms, segms_first_idx, max_points): + """ + Args: + ctx: Context object used to calculate gradients. + points: FloatTensor of shape `(P, 3)` + points_first_idx: LongTensor of shape `(N,)` indicating the first point + index for each example in the mesh + segms: FloatTensor of shape `(S, 2, 3)` of edge segments. 
The `s`-th + edge segment is spanned by `(segms[s, 0], segms[s, 1])` + segms_first_idx: LongTensor of shape `(N,)` indicating the first edge + index for each example in the mesh + max_points: Scalar equal to maximum number of points in the batch + Returns: + dists: FloatTensor of shape `(P,)`, where `dists[p]` is the squared + euclidean distance of `p`-th point to the closest edge in the + corresponding example in the batch + idxs: LongTensor of shape `(P,)` indicating the closest edge in the + corresponding example in the batch. + + `dists[p] = d(points[p], segms[idxs[p], 0], segms[idxs[p], 1])`, + where `d(u, v0, v1)` is the distance of point `u` from the edge segment + spanned by `(v0, v1)`. + """ + dists, idxs = _C.point_edge_dist_forward( + points, points_first_idx, segms, segms_first_idx, max_points + ) + ctx.save_for_backward(points, segms, idxs) + return dists + + @staticmethod + @once_differentiable + def backward(ctx, grad_dists): + grad_dists = grad_dists.contiguous() + points, segms, idxs = ctx.saved_tensors + grad_points, grad_segms = _C.point_edge_dist_backward( + points, segms, idxs, grad_dists + ) + return grad_points, None, grad_segms, None, None + + +point_edge_distance = _PointEdgeDistance.apply + + +# EdgePointDistance +class _EdgePointDistance(Function): + """ + Torch autograd Function wrapper EdgePointDistance Cuda implementation + """ + + @staticmethod + def forward(ctx, points, points_first_idx, segms, segms_first_idx, max_segms): + """ + Args: + ctx: Context object used to calculate gradients. + points: FloatTensor of shape `(P, 3)` + points_first_idx: LongTensor of shape `(N,)` indicating the first point + index for each example in the mesh + segms: FloatTensor of shape `(S, 2, 3)` of edge segments. 
The `s`-th + edge segment is spanned by `(segms[s, 0], segms[s, 1])` + segms_first_idx: LongTensor of shape `(N,)` indicating the first edge + index for each example in the mesh + max_segms: Scalar equal to maximum number of edges in the batch + Returns: + dists: FloatTensor of shape `(S,)`, where `dists[s]` is the squared + euclidean distance of `s`-th edge to the closest point in the + corresponding example in the batch + idxs: LongTensor of shape `(S,)` indicating the closest point in the + corresponding example in the batch. + + `dists[s] = d(points[idxs[s]], edges[s, 0], edges[s, 1])`, + where `d(u, v0, v1)` is the distance of point `u` from the segment + spanned by `(v0, v1)`. + """ + dists, idxs = _C.edge_point_dist_forward( + points, points_first_idx, segms, segms_first_idx, max_segms + ) + ctx.save_for_backward(points, segms, idxs) + return dists + + @staticmethod + @once_differentiable + def backward(ctx, grad_dists): + grad_dists = grad_dists.contiguous() + points, segms, idxs = ctx.saved_tensors + grad_points, grad_segms = _C.edge_point_dist_backward( + points, segms, idxs, grad_dists + ) + return grad_points, None, grad_segms, None, None + + +edge_point_distance = _EdgePointDistance.apply + + +def point_mesh_edge_distance(meshes: Meshes, pcls: Pointclouds): + """ + Computes the distance between a pointcloud and a mesh within a batch. + Given a pair `(mesh, pcl)` in the batch, we define the distance to be the + sum of two distances, namely `point_edge(mesh, pcl) + edge_point(mesh, pcl)` + + `point_edge(mesh, pcl)`: Computes the squared distance of each point p in pcl + to the closest edge segment in mesh and averages across all points in pcl + `edge_point(mesh, pcl)`: Computes the squared distance of each edge segment in mesh + to the closest point in pcl and averages across all edges in mesh. + + The above distance functions are applied for all `(mesh, pcl)` pairs in the batch + and then averaged across the batch. 
+ + Args: + meshes: A Meshes data structure containing N meshes + pcls: A Pointclouds data structure containing N pointclouds + + Returns: + loss: The `point_edge(mesh, pcl) + edge_point(mesh, pcl)` distance + between all `(mesh, pcl)` in a batch averaged across the batch. + """ + if len(meshes) != len(pcls): + raise ValueError("meshes and pointclouds must be equal sized batches") + N = len(meshes) + + # packed representation for pointclouds + points = pcls.points_packed() # (P, 3) + points_first_idx = pcls.cloud_to_packed_first_idx() + max_points = pcls.num_points_per_cloud().max().item() + + # packed representation for edges + verts_packed = meshes.verts_packed() + edges_packed = meshes.edges_packed() + segms = verts_packed[edges_packed] # (S, 2, 3) + segms_first_idx = meshes.mesh_to_edges_packed_first_idx() + max_segms = meshes.num_edges_per_mesh().max().item() + + # point to edge distance: shape (P,) + point_to_edge = point_edge_distance( + points, points_first_idx, segms, segms_first_idx, max_points + ) + + # weight each example by the inverse of number of points in the example + point_to_cloud_idx = pcls.packed_to_cloud_idx() # (sum(P_i), ) + num_points_per_cloud = pcls.num_points_per_cloud() # (N,) + weights_p = num_points_per_cloud.gather(0, point_to_cloud_idx) + # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`. 
+ weights_p = 1.0 / weights_p.float() + point_to_edge = point_to_edge * weights_p + point_dist = point_to_edge.sum() / N + + # edge to edge distance: shape (S,) + edge_to_point = edge_point_distance( + points, points_first_idx, segms, segms_first_idx, max_segms + ) + + # weight each example by the inverse of number of edges in the example + segm_to_mesh_idx = meshes.edges_packed_to_mesh_idx() # (sum(S_n),) + num_segms_per_mesh = meshes.num_edges_per_mesh() # (N,) + weights_s = num_segms_per_mesh.gather(0, segm_to_mesh_idx) + weights_s = 1.0 / weights_s.float() + edge_to_point = edge_to_point * weights_s + edge_dist = edge_to_point.sum() / N + + return point_dist + edge_dist + + +def point_mesh_face_distance( + meshes: Meshes, + pcls: Pointclouds, + min_triangle_area: float = _DEFAULT_MIN_TRIANGLE_AREA, +): + """ + Computes the distance between a pointcloud and a mesh within a batch. + Given a pair `(mesh, pcl)` in the batch, we define the distance to be the + sum of two distances, namely `point_face(mesh, pcl) + face_point(mesh, pcl)` + + `point_face(mesh, pcl)`: Computes the squared distance of each point p in pcl + to the closest triangular face in mesh and averages across all points in pcl + `face_point(mesh, pcl)`: Computes the squared distance of each triangular face in + mesh to the closest point in pcl and averages across all faces in mesh. + + The above distance functions are applied for all `(mesh, pcl)` pairs in the batch + and then averaged across the batch. + + Args: + meshes: A Meshes data structure containing N meshes + pcls: A Pointclouds data structure containing N pointclouds + min_triangle_area: (float, defaulted) Triangles of area less than this + will be treated as points/lines. + + Returns: + loss: The `point_face(mesh, pcl) + face_point(mesh, pcl)` distance + between all `(mesh, pcl)` in a batch averaged across the batch. 
+ """ + + if len(meshes) != len(pcls): + raise ValueError("meshes and pointclouds must be equal sized batches") + N = len(meshes) + + # packed representation for pointclouds + points = pcls.points_packed() # (P, 3) + points_first_idx = pcls.cloud_to_packed_first_idx() + max_points = pcls.num_points_per_cloud().max().item() + + # packed representation for faces + verts_packed = meshes.verts_packed() + faces_packed = meshes.faces_packed() + tris = verts_packed[faces_packed] # (T, 3, 3) + tris_first_idx = meshes.mesh_to_faces_packed_first_idx() + max_tris = meshes.num_faces_per_mesh().max().item() + + # point to face distance: shape (P,) + point_to_face = point_face_distance( + points, points_first_idx, tris, tris_first_idx, max_points, min_triangle_area + ) + + # weight each example by the inverse of number of points in the example + point_to_cloud_idx = pcls.packed_to_cloud_idx() # (sum(P_i),) + num_points_per_cloud = pcls.num_points_per_cloud() # (N,) + weights_p = num_points_per_cloud.gather(0, point_to_cloud_idx) + # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`. 
+ weights_p = 1.0 / weights_p.float() + point_to_face = point_to_face * weights_p + point_dist = point_to_face.sum() / N + + # face to point distance: shape (T,) + face_to_point = face_point_distance( + points, points_first_idx, tris, tris_first_idx, max_tris, min_triangle_area + ) + + # weight each example by the inverse of number of faces in the example + tri_to_mesh_idx = meshes.faces_packed_to_mesh_idx() # (sum(T_n),) + num_tris_per_mesh = meshes.num_faces_per_mesh() # (N, ) + weights_t = num_tris_per_mesh.gather(0, tri_to_mesh_idx) + weights_t = 1.0 / weights_t.float() + face_to_point = face_to_point * weights_t + face_dist = face_to_point.sum() / N + + return point_dist + face_dist diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/camera_utils.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/camera_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e56563156098ffeacd74ad692069a46838006f37 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/camera_utils.py @@ -0,0 +1,209 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from typing import Sequence, Tuple + +import torch +from pytorch3d.transforms import Transform3d + +from .cameras import CamerasBase + + +def camera_to_eye_at_up( + world_to_view_transform: Transform3d, +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Given a world to view transform, return the eye, at and up vectors which + represent its position. + + For example, if cam is a camera object, then after running + + .. 
code-block:: + + from cameras import look_at_view_transform + eye, at, up = camera_to_eye_at_up(cam.get_world_to_view_transform()) + R, T = look_at_view_transform(eye=eye, at=at, up=up) + + any other camera created from R and T will have the same world to view + transform as cam. + + Also, given a camera position R and T, then after running: + + .. code-block:: + + from cameras import get_world_to_view_transform, look_at_view_transform + eye, at, up = camera_to_eye_at_up(get_world_to_view_transform(R=R, T=T)) + R2, T2 = look_at_view_transform(eye=eye, at=at, up=up) + + R2 will equal R and T2 will equal T. + + Args: + world_to_view_transform: Transform3d representing the extrinsic + transformation of N cameras. + + Returns: + eye: FloatTensor of shape [N, 3] representing the camera centers in world space. + at: FloatTensor of shape [N, 3] representing points in world space directly in + front of the cameras e.g. the positions of objects to be viewed by the + cameras. + up: FloatTensor of shape [N, 3] representing vectors in world space which + when projected on to the camera plane point upwards. + """ + cam_trans = world_to_view_transform.inverse() + # In the PyTorch3D right handed coordinate system, the camera in view space + # is always at the origin looking along the +z axis. + + # The up vector is not a position so cannot be transformed with + # transform_points. However the position eye+up above the camera + # (whose position vector in the camera coordinate frame is an up vector) + # can be transformed with transform_points. 
+ eye_at_up_view = torch.tensor( + [[0, 0, 0], [0, 0, 1], [0, 1, 0]], dtype=torch.float32, device=cam_trans.device + ) + eye_at_up_world = cam_trans.transform_points(eye_at_up_view).reshape(-1, 3, 3) + + eye, at, up_plus_eye = eye_at_up_world.unbind(1) + up = up_plus_eye - eye + return eye, at, up + + +def rotate_on_spot( + R: torch.Tensor, T: torch.Tensor, rotation: torch.Tensor +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Given a camera position as R and T (batched or not), + and a rotation matrix (batched or not) + return a new R and T representing camera position(s) + in the same location but rotated on the spot by the + given rotation. In particular the new world to view + rotation will be the previous one followed by the inverse + of the given rotation. + + For example, adding the following lines before constructing a camera + will make the camera point a little to the right of where it + otherwise would have been. + + .. code-block:: + + from math import radians + from pytorch3d.transforms import axis_angle_to_matrix + angles = [0, radians(10), 0] + rotation = axis_angle_to_matrix(torch.FloatTensor(angles)) + R, T = rotate_on_spot(R, T, rotation) + + Note here that if you have a column vector, then when you + premultiply it by this `rotation` (see the rotation_conversions doc), + then it will be rotated anticlockwise if facing the -y axis. + In our context, where we postmultiply row vectors to transform them, + `rotation` will rotate the camera clockwise around the -y axis + (i.e. when looking down), which is a turn to the right. + + If angles was [radians(10), 0, 0], the camera would get pointed + up a bit instead. + + If angles was [0, 0, radians(10)], the camera would be rotated anticlockwise + a bit, so the image would appear rotated clockwise from how it + otherwise would have been. + + If you want to translate the camera from the origin in camera + coordinates, this is simple and does not need a separate function. 
+ In particular, a translation by X = [a, b, c] would cause + the camera to move a units left, b units up, and c units + forward. This is achieved by using T-X in place of T. + + Args: + R: FloatTensor of shape [3, 3] or [N, 3, 3] + T: FloatTensor of shape [3] or [N, 3] + rotation: FloatTensor of shape [3, 3] or [n, 3, 3] + where if neither n nor N is 1, then n and N must be equal. + + Returns: + R: FloatTensor of shape [max(N, n), 3, 3] + T: FloatTensor of shape [max(N, n), 3] + """ + if R.ndim == 2: + R = R[None] + if T.ndim == 1: + T = T[None] + if rotation.ndim == 2: + rotation = rotation[None] + + if R.ndim != 3 or R.shape[1:] != (3, 3): + raise ValueError("Invalid R") + if T.ndim != 2 or T.shape[1] != 3: + raise ValueError("Invalid T") + if rotation.ndim != 3 or rotation.shape[1:] != (3, 3): + raise ValueError("Invalid rotation") + + new_R = R @ rotation.transpose(1, 2) + old_RT = torch.bmm(R, T[:, :, None]) + new_T = torch.matmul(new_R.transpose(1, 2), old_RT)[:, :, 0] + + return new_R, new_T + + +def join_cameras_as_batch(cameras_list: Sequence[CamerasBase]) -> CamerasBase: + """ + Create a batched cameras object by concatenating a list of input + cameras objects. All the tensor attributes will be joined along + the batch dimension. + + Args: + cameras_list: List of camera classes all of the same type and + on the same device. Each represents one or more cameras. + Returns: + cameras: single batched cameras object of the same + type as all the objects in the input list. 
+ """ + # Get the type and fields to join from the first camera in the batch + c0 = cameras_list[0] + fields = c0._FIELDS + shared_fields = c0._SHARED_FIELDS + + if not all(isinstance(c, CamerasBase) for c in cameras_list): + raise ValueError("cameras in cameras_list must inherit from CamerasBase") + + if not all(type(c) is type(c0) for c in cameras_list[1:]): + raise ValueError("All cameras must be of the same type") + + if not all(c.device == c0.device for c in cameras_list[1:]): + raise ValueError("All cameras in the batch must be on the same device") + + # Concat the fields to make a batched tensor + kwargs = {} + kwargs["device"] = c0.device + + for field in fields: + field_not_none = [(getattr(c, field) is not None) for c in cameras_list] + if not any(field_not_none): + continue + if not all(field_not_none): + raise ValueError(f"Attribute {field} is inconsistently present") + + attrs_list = [getattr(c, field) for c in cameras_list] + + if field in shared_fields: + # Only needs to be set once + if not all(a == attrs_list[0] for a in attrs_list): + raise ValueError(f"Attribute {field} is not constant across inputs") + + # e.g. 
"in_ndc" is set as attribute "_in_ndc" on the class + # but provided as "in_ndc" in the input args + if field.startswith("_"): + field = field[1:] + + kwargs[field] = attrs_list[0] + elif isinstance(attrs_list[0], torch.Tensor): + # In the init, all inputs will be converted to + # batched tensors before set as attributes + # Join as a tensor along the batch dimension + kwargs[field] = torch.cat(attrs_list, dim=0) + else: + raise ValueError(f"Field {field} type is not supported for batching") + + return c0.__class__(**kwargs) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/cameras.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/cameras.py new file mode 100644 index 0000000000000000000000000000000000000000..db4842dcb3b1caeeb2a93ad009397c70f9640f7d --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/cameras.py @@ -0,0 +1,1883 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +import math +import warnings +from typing import Any, Dict, List, Optional, Sequence, Tuple, Union + +import numpy as np +import torch +import torch.nn.functional as F +from pytorch3d.common.datatypes import Device +from pytorch3d.transforms import Rotate, Transform3d, Translate + +from .utils import convert_to_tensors_and_broadcast, TensorProperties + + +# Default values for rotation and translation matrices. 
+_R = torch.eye(3)[None] # (1, 3, 3) +_T = torch.zeros(1, 3) # (1, 3) + +# An input which is a float per batch element +_BatchFloatType = Union[float, Sequence[float], torch.Tensor] + +# one or two floats per batch element +_FocalLengthType = Union[ + float, Sequence[Tuple[float]], Sequence[Tuple[float, float]], torch.Tensor +] + + +class CamerasBase(TensorProperties): + """ + `CamerasBase` implements a base class for all cameras. + + For cameras, there are four different coordinate systems (or spaces) + - World coordinate system: This is the system the object lives - the world. + - Camera view coordinate system: This is the system that has its origin on + the camera and the Z-axis perpendicular to the image plane. + In PyTorch3D, we assume that +X points left, and +Y points up and + +Z points out from the image plane. + The transformation from world --> view happens after applying a rotation (R) + and translation (T) + - NDC coordinate system: This is the normalized coordinate system that confines + points in a volume the rendered part of the object or scene, also known as + view volume. For square images, given the PyTorch3D convention, (+1, +1, znear) + is the top left near corner, and (-1, -1, zfar) is the bottom right far + corner of the volume. + The transformation from view --> NDC happens after applying the camera + projection matrix (P) if defined in NDC space. + For non square images, we scale the points such that smallest side + has range [-1, 1] and the largest side has range [-u, u], with u > 1. + - Screen coordinate system: This is another representation of the view volume with + the XY coordinates defined in image space instead of a normalized space. + + An illustration of the coordinate systems can be found in pytorch3d/docs/notes/cameras.md. 
+ + CameraBase defines methods that are common to all camera models: + - `get_camera_center` that returns the optical center of the camera in + world coordinates + - `get_world_to_view_transform` which returns a 3D transform from + world coordinates to the camera view coordinates (R, T) + - `get_full_projection_transform` which composes the projection + transform (P) with the world-to-view transform (R, T) + - `transform_points` which takes a set of input points in world coordinates and + projects to the space the camera is defined in (NDC or screen) + - `get_ndc_camera_transform` which defines the transform from screen/NDC to + PyTorch3D's NDC space + - `transform_points_ndc` which takes a set of points in world coordinates and + projects them to PyTorch3D's NDC space + - `transform_points_screen` which takes a set of points in world coordinates and + projects them to screen space + + For each new camera, one should implement the `get_projection_transform` + routine that returns the mapping from camera view coordinates to camera + coordinates (NDC or screen). + + Another useful function that is specific to each camera model is + `unproject_points` which sends points from camera coordinates (NDC or screen) + back to camera view or world coordinates depending on the `world_coordinates` + boolean argument of the function. + """ + + # Used in __getitem__ to index the relevant fields + # When creating a new camera, this should be set in the __init__ + _FIELDS: Tuple[str, ...] = () + + # Names of fields which are a constant property of the whole batch, rather + # than themselves a batch of data. + # When joining objects into a batch, they will have to agree. + _SHARED_FIELDS: Tuple[str, ...] = () + + def get_projection_transform(self, **kwargs): + """ + Calculate the projective transformation matrix. + + Args: + **kwargs: parameters for the projection can be passed in as keyword + arguments to override the default values set in `__init__`. 
+ + Return: + a `Transform3d` object which represents a batch of projection + matrices of shape (N, 3, 3) + """ + raise NotImplementedError() + + def unproject_points(self, xy_depth: torch.Tensor, **kwargs): + """ + Transform input points from camera coordinates (NDC or screen) + to the world / camera coordinates. + + Each of the input points `xy_depth` of shape (..., 3) is + a concatenation of the x, y location and its depth. + + For instance, for an input 2D tensor of shape `(num_points, 3)` + `xy_depth` takes the following form: + `xy_depth[i] = [x[i], y[i], depth[i]]`, + for a each point at an index `i`. + + The following example demonstrates the relationship between + `transform_points` and `unproject_points`: + + .. code-block:: python + + cameras = # camera object derived from CamerasBase + xyz = # 3D points of shape (batch_size, num_points, 3) + # transform xyz to the camera view coordinates + xyz_cam = cameras.get_world_to_view_transform().transform_points(xyz) + # extract the depth of each point as the 3rd coord of xyz_cam + depth = xyz_cam[:, :, 2:] + # project the points xyz to the camera + xy = cameras.transform_points(xyz)[:, :, :2] + # append depth to xy + xy_depth = torch.cat((xy, depth), dim=2) + # unproject to the world coordinates + xyz_unproj_world = cameras.unproject_points(xy_depth, world_coordinates=True) + print(torch.allclose(xyz, xyz_unproj_world)) # True + # unproject to the camera coordinates + xyz_unproj = cameras.unproject_points(xy_depth, world_coordinates=False) + print(torch.allclose(xyz_cam, xyz_unproj)) # True + + Args: + xy_depth: torch tensor of shape (..., 3). + world_coordinates: If `True`, unprojects the points back to world + coordinates using the camera extrinsics `R` and `T`. + `False` ignores `R` and `T` and unprojects to + the camera view coordinates. + from_ndc: If `False` (default), assumes xy part of input is in + NDC space if self.in_ndc(), otherwise in screen space. 
If + `True`, assumes xy is in NDC space even if the camera + is defined in screen space. + + Returns + new_points: unprojected points with the same shape as `xy_depth`. + """ + raise NotImplementedError() + + def get_camera_center(self, **kwargs) -> torch.Tensor: + """ + Return the 3D location of the camera optical center + in the world coordinates. + + Args: + **kwargs: parameters for the camera extrinsics can be passed in + as keyword arguments to override the default values + set in __init__. + + Setting R or T here will update the values set in init as these + values may be needed later on in the rendering pipeline e.g. for + lighting calculations. + + Returns: + C: a batch of 3D locations of shape (N, 3) denoting + the locations of the center of each camera in the batch. + """ + w2v_trans = self.get_world_to_view_transform(**kwargs) + P = w2v_trans.inverse().get_matrix() + # the camera center is the translation component (the first 3 elements + # of the last row) of the inverted world-to-view + # transform (4x4 RT matrix) + C = P[:, 3, :3] + return C + + def get_world_to_view_transform(self, **kwargs) -> Transform3d: + """ + Return the world-to-view transform. + + Args: + **kwargs: parameters for the camera extrinsics can be passed in + as keyword arguments to override the default values + set in __init__. + + Setting R and T here will update the values set in init as these + values may be needed later on in the rendering pipeline e.g. for + lighting calculations. + + Returns: + A Transform3d object which represents a batch of transforms + of shape (N, 3, 3) + """ + R: torch.Tensor = kwargs.get("R", self.R) + T: torch.Tensor = kwargs.get("T", self.T) + # pyre-fixme[16]: `CamerasBase` has no attribute `R`. + self.R = R + # pyre-fixme[16]: `CamerasBase` has no attribute `T`. 
+ self.T = T + world_to_view_transform = get_world_to_view_transform(R=R, T=T) + return world_to_view_transform + + def get_full_projection_transform(self, **kwargs) -> Transform3d: + """ + Return the full world-to-camera transform composing the + world-to-view and view-to-camera transforms. + If camera is defined in NDC space, the projected points are in NDC space. + If camera is defined in screen space, the projected points are in screen space. + + Args: + **kwargs: parameters for the projection transforms can be passed in + as keyword arguments to override the default values + set in __init__. + + Setting R and T here will update the values set in init as these + values may be needed later on in the rendering pipeline e.g. for + lighting calculations. + + Returns: + a Transform3d object which represents a batch of transforms + of shape (N, 3, 3) + """ + # pyre-fixme[16]: `CamerasBase` has no attribute `R`. + self.R: torch.Tensor = kwargs.get("R", self.R) + # pyre-fixme[16]: `CamerasBase` has no attribute `T`. + self.T: torch.Tensor = kwargs.get("T", self.T) + world_to_view_transform = self.get_world_to_view_transform(R=self.R, T=self.T) + view_to_proj_transform = self.get_projection_transform(**kwargs) + return world_to_view_transform.compose(view_to_proj_transform) + + def transform_points( + self, points, eps: Optional[float] = None, **kwargs + ) -> torch.Tensor: + """ + Transform input points from world to camera space. + If camera is defined in NDC space, the projected points are in NDC space. + If camera is defined in screen space, the projected points are in screen space. + + For `CamerasBase.transform_points`, setting `eps > 0` + stabilizes gradients since it leads to avoiding division + by excessively low numbers for points close to the camera plane. + + Args: + points: torch tensor of shape (..., 3). + eps: If eps!=None, the argument is used to clamp the + divisor in the homogeneous normalization of the points + transformed to the ndc space. 
Please see + `transforms.Transform3d.transform_points` for details. + + For `CamerasBase.transform_points`, setting `eps > 0` + stabilizes gradients since it leads to avoiding division + by excessively low numbers for points close to the + camera plane. + + Returns + new_points: transformed points with the same shape as the input. + """ + world_to_proj_transform = self.get_full_projection_transform(**kwargs) + return world_to_proj_transform.transform_points(points, eps=eps) + + def get_ndc_camera_transform(self, **kwargs) -> Transform3d: + """ + Returns the transform from camera projection space (screen or NDC) to NDC space. + For cameras that can be specified in screen space, this transform + allows points to be converted from screen to NDC space. + The default transform scales the points from [0, W]x[0, H] + to [-1, 1]x[-u, u] or [-u, u]x[-1, 1] where u > 1 is the aspect ratio of the image. + This function should be modified per camera definitions if need be, + e.g. for Perspective/Orthographic cameras we provide a custom implementation. + This transform assumes PyTorch3D coordinate system conventions for + both the NDC space and the input points. + + This transform interfaces with the PyTorch3D renderer which assumes + input points to the renderer to be in NDC space. + """ + if self.in_ndc(): + return Transform3d(device=self.device, dtype=torch.float32) + else: + # For custom cameras which can be defined in screen space, + # users might have to implement the screen to NDC transform based + # on the definition of the camera parameters. + # See PerspectiveCameras/OrthographicCameras for an example. + # We don't flip xy because we assume that world points are in + # PyTorch3D coordinates, and thus conversion from screen to ndc + # is a mere scaling from image to [-1, 1] scale. 
+ image_size = kwargs.get("image_size", self.get_image_size()) + return get_screen_to_ndc_transform( + self, with_xyflip=False, image_size=image_size + ) + + def transform_points_ndc( + self, points, eps: Optional[float] = None, **kwargs + ) -> torch.Tensor: + """ + Transforms points from PyTorch3D world/camera space to NDC space. + Input points follow the PyTorch3D coordinate system conventions: +X left, +Y up. + Output points are in NDC space: +X left, +Y up, origin at image center. + + Args: + points: torch tensor of shape (..., 3). + eps: If eps!=None, the argument is used to clamp the + divisor in the homogeneous normalization of the points + transformed to the ndc space. Please see + `transforms.Transform3d.transform_points` for details. + + For `CamerasBase.transform_points`, setting `eps > 0` + stabilizes gradients since it leads to avoiding division + by excessively low numbers for points close to the + camera plane. + + Returns + new_points: transformed points with the same shape as the input. + """ + world_to_ndc_transform = self.get_full_projection_transform(**kwargs) + if not self.in_ndc(): + to_ndc_transform = self.get_ndc_camera_transform(**kwargs) + world_to_ndc_transform = world_to_ndc_transform.compose(to_ndc_transform) + + return world_to_ndc_transform.transform_points(points, eps=eps) + + def transform_points_screen( + self, points, eps: Optional[float] = None, with_xyflip: bool = True, **kwargs + ) -> torch.Tensor: + """ + Transforms points from PyTorch3D world/camera space to screen space. + Input points follow the PyTorch3D coordinate system conventions: +X left, +Y up. + Output points are in screen space: +X right, +Y down, origin at top left corner. + + Args: + points: torch tensor of shape (..., 3). + eps: If eps!=None, the argument is used to clamp the + divisor in the homogeneous normalization of the points + transformed to the ndc space. Please see + `transforms.Transform3d.transform_points` for details. 
+ + For `CamerasBase.transform_points`, setting `eps > 0` + stabilizes gradients since it leads to avoiding division + by excessively low numbers for points close to the + camera plane. + with_xyflip: If True, flip x and y directions. In world/camera/ndc coords, + +x points to the left and +y up. If with_xyflip is true, in screen + coords +x points right, and +y down, following the usual RGB image + convention. Warning: do not set to False unless you know what you're + doing! + + Returns + new_points: transformed points with the same shape as the input. + """ + points_ndc = self.transform_points_ndc(points, eps=eps, **kwargs) + image_size = kwargs.get("image_size", self.get_image_size()) + return get_ndc_to_screen_transform( + self, with_xyflip=with_xyflip, image_size=image_size + ).transform_points(points_ndc, eps=eps) + + def clone(self): + """ + Returns a copy of `self`. + """ + cam_type = type(self) + other = cam_type(device=self.device) + return super().clone(other) + + def is_perspective(self): + raise NotImplementedError() + + def in_ndc(self): + """ + Specifies whether the camera is defined in NDC space + or in screen (image) space + """ + raise NotImplementedError() + + def get_znear(self): + return getattr(self, "znear", None) + + def get_image_size(self): + """ + Returns the image size, if provided, expected in the form of (height, width) + The image size is used for conversion of projected points to screen coordinates. + """ + return getattr(self, "image_size", None) + + def __getitem__( + self, index: Union[int, List[int], torch.BoolTensor, torch.LongTensor] + ) -> "CamerasBase": + """ + Override for the __getitem__ method in TensorProperties which needs to be + refactored. + + Args: + index: an integer index, list/tensor of integer indices, or tensor of boolean + indicators used to filter all the fields in the cameras given by self._FIELDS. + Returns: + an instance of the current cameras class with only the values at the selected index. 
+ """ + + kwargs = {} + + tensor_types = { + # pyre-fixme[16]: Module `cuda` has no attribute `BoolTensor`. + "bool": (torch.BoolTensor, torch.cuda.BoolTensor), + # pyre-fixme[16]: Module `cuda` has no attribute `LongTensor`. + "long": (torch.LongTensor, torch.cuda.LongTensor), + } + if not isinstance( + index, (int, list, *tensor_types["bool"], *tensor_types["long"]) + ) or ( + isinstance(index, list) + and not all(isinstance(i, int) and not isinstance(i, bool) for i in index) + ): + msg = ( + "Invalid index type, expected int, List[int] or Bool/LongTensor; got %r" + ) + raise ValueError(msg % type(index)) + + if isinstance(index, int): + index = [index] + + if isinstance(index, tensor_types["bool"]): + # pyre-fixme[16]: Item `List` of `Union[List[int], BoolTensor, + # LongTensor]` has no attribute `ndim`. + # pyre-fixme[16]: Item `List` of `Union[List[int], BoolTensor, + # LongTensor]` has no attribute `shape`. + if index.ndim != 1 or index.shape[0] != len(self): + raise ValueError( + # pyre-fixme[16]: Item `List` of `Union[List[int], BoolTensor, + # LongTensor]` has no attribute `shape`. + f"Boolean index of shape {index.shape} does not match cameras" + ) + elif max(index) >= len(self): + raise IndexError(f"Index {max(index)} is out of bounds for select cameras") + + for field in self._FIELDS: + val = getattr(self, field, None) + if val is None: + continue + + # e.g. 
"in_ndc" is set as attribute "_in_ndc" on the class + # but provided as "in_ndc" on initialization + if field.startswith("_"): + field = field[1:] + + if isinstance(val, (str, bool)): + kwargs[field] = val + elif isinstance(val, torch.Tensor): + # In the init, all inputs will be converted to + # tensors before setting as attributes + kwargs[field] = val[index] + else: + raise ValueError(f"Field {field} type is not supported for indexing") + + kwargs["device"] = self.device + return self.__class__(**kwargs) + + +############################################################ +# Field of View Camera Classes # +############################################################ + + +def OpenGLPerspectiveCameras( + znear: _BatchFloatType = 1.0, + zfar: _BatchFloatType = 100.0, + aspect_ratio: _BatchFloatType = 1.0, + fov: _BatchFloatType = 60.0, + degrees: bool = True, + R: torch.Tensor = _R, + T: torch.Tensor = _T, + device: Device = "cpu", +) -> "FoVPerspectiveCameras": + """ + OpenGLPerspectiveCameras has been DEPRECATED. Use FoVPerspectiveCameras instead. + Preserving OpenGLPerspectiveCameras for backward compatibility. + """ + + warnings.warn( + """OpenGLPerspectiveCameras is deprecated, + Use FoVPerspectiveCameras instead. + OpenGLPerspectiveCameras will be removed in future releases.""", + PendingDeprecationWarning, + ) + + return FoVPerspectiveCameras( + znear=znear, + zfar=zfar, + aspect_ratio=aspect_ratio, + fov=fov, + degrees=degrees, + R=R, + T=T, + device=device, + ) + + +class FoVPerspectiveCameras(CamerasBase): + """ + A class which stores a batch of parameters to generate a batch of + projection matrices by specifying the field of view. + The definitions of the parameters follow the OpenGL perspective camera. + + The extrinsics of the camera (R and T matrices) can also be set in the + initializer or passed in to `get_full_projection_transform` to get + the full transformation from world -> ndc. 
+ + The `transform_points` method calculates the full world -> ndc transform + and then applies it to the input points. + + The transforms can also be returned separately as Transform3d objects. + + * Setting the Aspect Ratio for Non Square Images * + + If the desired output image size is non square (i.e. a tuple of (H, W) where H != W) + the aspect ratio needs special consideration: There are two aspect ratios + to be aware of: + - the aspect ratio of each pixel + - the aspect ratio of the output image + The `aspect_ratio` setting in the FoVPerspectiveCameras sets the + pixel aspect ratio. When using this camera with the differentiable rasterizer + be aware that in the rasterizer we assume square pixels, but allow + variable image aspect ratio (i.e rectangle images). + + In most cases you will want to set the camera `aspect_ratio=1.0` + (i.e. square pixels) and only vary the output image dimensions in pixels + for rasterization. + """ + + # For __getitem__ + _FIELDS = ( + "K", + "znear", + "zfar", + "aspect_ratio", + "fov", + "R", + "T", + "degrees", + ) + + _SHARED_FIELDS = ("degrees",) + + def __init__( + self, + znear: _BatchFloatType = 1.0, + zfar: _BatchFloatType = 100.0, + aspect_ratio: _BatchFloatType = 1.0, + fov: _BatchFloatType = 60.0, + degrees: bool = True, + R: torch.Tensor = _R, + T: torch.Tensor = _T, + K: Optional[torch.Tensor] = None, + device: Device = "cpu", + ) -> None: + """ + + Args: + znear: near clipping plane of the view frustrum. + zfar: far clipping plane of the view frustrum. + aspect_ratio: aspect ratio of the image pixels. + 1.0 indicates square pixels. + fov: field of view angle of the camera. + degrees: bool, set to True if fov is specified in degrees. 
R: Rotation matrix of shape (N, 3, 3) + T: Translation matrix of shape (N, 3) + K: (optional) A calibration matrix of shape (N, 4, 4) + If provided, don't need znear, zfar, fov, aspect_ratio, degrees + device: Device (as str or torch.device) + """ + # The initializer formats all inputs to torch tensors and broadcasts + # all the inputs to have the same batch dimension where necessary. + super().__init__( + device=device, + znear=znear, + zfar=zfar, + aspect_ratio=aspect_ratio, + fov=fov, + R=R, + T=T, + K=K, + ) + + # No need to convert to tensor or broadcast. + self.degrees = degrees + + def compute_projection_matrix( + self, znear, zfar, fov, aspect_ratio, degrees: bool + ) -> torch.Tensor: + """ + Compute the calibration matrix K of shape (N, 4, 4) + + Args: + znear: near clipping plane of the view frustrum. + zfar: far clipping plane of the view frustrum. + fov: field of view angle of the camera. + aspect_ratio: aspect ratio of the image pixels. + 1.0 indicates square pixels. + degrees: bool, set to True if fov is specified in degrees. + + Returns: + torch.FloatTensor of the calibration matrix with shape (N, 4, 4) + """ + K = torch.zeros((self._N, 4, 4), device=self.device, dtype=torch.float32) + ones = torch.ones((self._N), dtype=torch.float32, device=self.device) + if degrees: + fov = (np.pi / 180) * fov + + if not torch.is_tensor(fov): + fov = torch.tensor(fov, device=self.device) + tanHalfFov = torch.tan((fov / 2)) + max_y = tanHalfFov * znear + min_y = -max_y + max_x = max_y * aspect_ratio + min_x = -max_x + + # NOTE: In OpenGL the projection matrix changes the handedness of the + # coordinate frame. i.e the NDC space positive z direction is the + # camera space negative z direction. This is because the sign of the z + # in the projection matrix is set to -1.0. + # In pytorch3d we maintain a right handed coordinate system throughout + # so the z sign is 1.0. 
+ z_sign = 1.0 + + # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`. + K[:, 0, 0] = 2.0 * znear / (max_x - min_x) + # pyre-fixme[58]: `/` is not supported for operand types `float` and `Tensor`. + K[:, 1, 1] = 2.0 * znear / (max_y - min_y) + K[:, 0, 2] = (max_x + min_x) / (max_x - min_x) + K[:, 1, 2] = (max_y + min_y) / (max_y - min_y) + K[:, 3, 2] = z_sign * ones + + # NOTE: This maps the z coordinate from [0, 1] where z = 0 if the point + # is at the near clipping plane and z = 1 when the point is at the far + # clipping plane. + K[:, 2, 2] = z_sign * zfar / (zfar - znear) + K[:, 2, 3] = -(zfar * znear) / (zfar - znear) + + return K + + def get_projection_transform(self, **kwargs) -> Transform3d: + """ + Calculate the perspective projection matrix with a symmetric + viewing frustrum. Use column major order. + The viewing frustrum will be projected into ndc, s.t. + (max_x, max_y) -> (+1, +1) + (min_x, min_y) -> (-1, -1) + + Args: + **kwargs: parameters for the projection can be passed in as keyword + arguments to override the default values set in `__init__`. + + Return: + a Transform3d object which represents a batch of projection + matrices of shape (N, 4, 4) + + .. 
code-block:: python + + h1 = (max_y + min_y)/(max_y - min_y) + w1 = (max_x + min_x)/(max_x - min_x) + tanhalffov = tan((fov/2)) + s1 = 1/tanhalffov + s2 = 1/(tanhalffov * (aspect_ratio)) + + # To map z to the range [0, 1] use: + f1 = far / (far - near) + f2 = -(far * near) / (far - near) + + # Projection matrix + K = [ + [s1, 0, w1, 0], + [0, s2, h1, 0], + [0, 0, f1, f2], + [0, 0, 1, 0], + ] + """ + K = kwargs.get("K", self.K) + if K is not None: + if K.shape != (self._N, 4, 4): + msg = "Expected K to have shape of (%r, 4, 4)" + raise ValueError(msg % (self._N)) + else: + K = self.compute_projection_matrix( + kwargs.get("znear", self.znear), + kwargs.get("zfar", self.zfar), + kwargs.get("fov", self.fov), + kwargs.get("aspect_ratio", self.aspect_ratio), + kwargs.get("degrees", self.degrees), + ) + + # Transpose the projection matrix as PyTorch3D transforms use row vectors. + transform = Transform3d( + matrix=K.transpose(1, 2).contiguous(), device=self.device + ) + return transform + + def unproject_points( + self, + xy_depth: torch.Tensor, + world_coordinates: bool = True, + scaled_depth_input: bool = False, + **kwargs, + ) -> torch.Tensor: + """>! + FoV cameras further allow for passing depth in world units + (`scaled_depth_input=False`) or in the [0, 1]-normalized units + (`scaled_depth_input=True`) + + Args: + scaled_depth_input: If `True`, assumes the input depth is in + the [0, 1]-normalized units. If `False` the input depth is in + the world units. 
+ """ + + # obtain the relevant transformation to ndc + if world_coordinates: + to_ndc_transform = self.get_full_projection_transform() + else: + to_ndc_transform = self.get_projection_transform() + + if scaled_depth_input: + # the input is scaled depth, so we don't have to do anything + xy_sdepth = xy_depth + else: + # parse out important values from the projection matrix + K_matrix = self.get_projection_transform(**kwargs.copy()).get_matrix() + # parse out f1, f2 from K_matrix + unsqueeze_shape = [1] * xy_depth.dim() + unsqueeze_shape[0] = K_matrix.shape[0] + f1 = K_matrix[:, 2, 2].reshape(unsqueeze_shape) + f2 = K_matrix[:, 3, 2].reshape(unsqueeze_shape) + # get the scaled depth + sdepth = (f1 * xy_depth[..., 2:3] + f2) / xy_depth[..., 2:3] + # concatenate xy + scaled depth + xy_sdepth = torch.cat((xy_depth[..., 0:2], sdepth), dim=-1) + + # unproject with inverse of the projection + unprojection_transform = to_ndc_transform.inverse() + return unprojection_transform.transform_points(xy_sdepth) + + def is_perspective(self): + return True + + def in_ndc(self): + return True + + +def OpenGLOrthographicCameras( + znear: _BatchFloatType = 1.0, + zfar: _BatchFloatType = 100.0, + top: _BatchFloatType = 1.0, + bottom: _BatchFloatType = -1.0, + left: _BatchFloatType = -1.0, + right: _BatchFloatType = 1.0, + scale_xyz=((1.0, 1.0, 1.0),), # (1, 3) + R: torch.Tensor = _R, + T: torch.Tensor = _T, + device: Device = "cpu", +) -> "FoVOrthographicCameras": + """ + OpenGLOrthographicCameras has been DEPRECATED. Use FoVOrthographicCameras instead. + Preserving OpenGLOrthographicCameras for backward compatibility. + """ + + warnings.warn( + """OpenGLOrthographicCameras is deprecated, + Use FoVOrthographicCameras instead. 
+ OpenGLOrthographicCameras will be removed in future releases.""", + PendingDeprecationWarning, + ) + + return FoVOrthographicCameras( + znear=znear, + zfar=zfar, + max_y=top, + min_y=bottom, + max_x=right, + min_x=left, + scale_xyz=scale_xyz, + R=R, + T=T, + device=device, + ) + + +class FoVOrthographicCameras(CamerasBase): + """ + A class which stores a batch of parameters to generate a batch of + projection matrices by specifying the field of view. + The definitions of the parameters follow the OpenGL orthographic camera. + """ + + # For __getitem__ + _FIELDS = ( + "K", + "znear", + "zfar", + "R", + "T", + "max_y", + "min_y", + "max_x", + "min_x", + "scale_xyz", + ) + + def __init__( + self, + znear: _BatchFloatType = 1.0, + zfar: _BatchFloatType = 100.0, + max_y: _BatchFloatType = 1.0, + min_y: _BatchFloatType = -1.0, + max_x: _BatchFloatType = 1.0, + min_x: _BatchFloatType = -1.0, + scale_xyz=((1.0, 1.0, 1.0),), # (1, 3) + R: torch.Tensor = _R, + T: torch.Tensor = _T, + K: Optional[torch.Tensor] = None, + device: Device = "cpu", + ): + """ + + Args: + znear: near clipping plane of the view frustrum. + zfar: far clipping plane of the view frustrum. + max_y: maximum y coordinate of the frustrum. + min_y: minimum y coordinate of the frustrum. + max_x: maximum x coordinate of the frustrum. + min_x: minimum x coordinate of the frustrum + scale_xyz: scale factors for each axis of shape (N, 3). + R: Rotation matrix of shape (N, 3, 3). + T: Translation of shape (N, 3). + K: (optional) A calibration matrix of shape (N, 4, 4) + If provided, don't need znear, zfar, max_y, min_y, max_x, min_x, scale_xyz + device: torch.device or string. + + Only need to set min_x, max_x, min_y, max_y for viewing frustrums + which are non symmetric about the origin. + """ + # The initializer formats all inputs to torch tensors and broadcasts + # all the inputs to have the same batch dimension where necessary. 
super().__init__( + device=device, + znear=znear, + zfar=zfar, + max_y=max_y, + min_y=min_y, + max_x=max_x, + min_x=min_x, + scale_xyz=scale_xyz, + R=R, + T=T, + K=K, + ) + + def compute_projection_matrix( + self, znear, zfar, max_x, min_x, max_y, min_y, scale_xyz + ) -> torch.Tensor: + """ + Compute the calibration matrix K of shape (N, 4, 4) + + Args: + znear: near clipping plane of the view frustrum. + zfar: far clipping plane of the view frustrum. + max_x: maximum x coordinate of the frustrum. + min_x: minimum x coordinate of the frustrum + max_y: maximum y coordinate of the frustrum. + min_y: minimum y coordinate of the frustrum. + scale_xyz: scale factors for each axis of shape (N, 3). + """ + K = torch.zeros((self._N, 4, 4), dtype=torch.float32, device=self.device) + ones = torch.ones((self._N), dtype=torch.float32, device=self.device) + # NOTE: OpenGL flips handedness of coordinate system between camera + # space and NDC space so z sign is -ve. In PyTorch3D we maintain a + # right handed coordinate system throughout. + z_sign = +1.0 + + K[:, 0, 0] = (2.0 / (max_x - min_x)) * scale_xyz[:, 0] + K[:, 1, 1] = (2.0 / (max_y - min_y)) * scale_xyz[:, 1] + K[:, 0, 3] = -(max_x + min_x) / (max_x - min_x) + K[:, 1, 3] = -(max_y + min_y) / (max_y - min_y) + K[:, 3, 3] = ones + + # NOTE: This maps the z coordinate to the range [0, 1] and replaces + # the OpenGL z normalization to [-1, 1] + K[:, 2, 2] = z_sign * (1.0 / (zfar - znear)) * scale_xyz[:, 2] + K[:, 2, 3] = -znear / (zfar - znear) + + return K + + def get_projection_transform(self, **kwargs) -> Transform3d: + """ + Calculate the orthographic projection matrix. + Use column major order. + + Args: + **kwargs: parameters for the projection can be passed in to + override the default values set in __init__. + Return: + a Transform3d object which represents a batch of projection + matrices of shape (N, 4, 4) + + .. 
code-block:: python + + scale_x = 2 / (max_x - min_x) + scale_y = 2 / (max_y - min_y) + scale_z = 2 / (far-near) + mid_x = (max_x + min_x) / (max_x - min_x) + mix_y = (max_y + min_y) / (max_y - min_y) + mid_z = (far + near) / (far - near) + + K = [ + [scale_x, 0, 0, -mid_x], + [0, scale_y, 0, -mix_y], + [0, 0, -scale_z, -mid_z], + [0, 0, 0, 1], + ] + """ + K = kwargs.get("K", self.K) + if K is not None: + if K.shape != (self._N, 4, 4): + msg = "Expected K to have shape of (%r, 4, 4)" + raise ValueError(msg % (self._N)) + else: + K = self.compute_projection_matrix( + kwargs.get("znear", self.znear), + kwargs.get("zfar", self.zfar), + kwargs.get("max_x", self.max_x), + kwargs.get("min_x", self.min_x), + kwargs.get("max_y", self.max_y), + kwargs.get("min_y", self.min_y), + kwargs.get("scale_xyz", self.scale_xyz), + ) + + transform = Transform3d( + matrix=K.transpose(1, 2).contiguous(), device=self.device + ) + return transform + + def unproject_points( + self, + xy_depth: torch.Tensor, + world_coordinates: bool = True, + scaled_depth_input: bool = False, + **kwargs, + ) -> torch.Tensor: + """>! + FoV cameras further allow for passing depth in world units + (`scaled_depth_input=False`) or in the [0, 1]-normalized units + (`scaled_depth_input=True`) + + Args: + scaled_depth_input: If `True`, assumes the input depth is in + the [0, 1]-normalized units. If `False` the input depth is in + the world units. 
+ """ + + if world_coordinates: + to_ndc_transform = self.get_full_projection_transform(**kwargs.copy()) + else: + to_ndc_transform = self.get_projection_transform(**kwargs.copy()) + + if scaled_depth_input: + # the input depth is already scaled + xy_sdepth = xy_depth + else: + # we have to obtain the scaled depth first + K = self.get_projection_transform(**kwargs).get_matrix() + unsqueeze_shape = [1] * K.dim() + unsqueeze_shape[0] = K.shape[0] + mid_z = K[:, 3, 2].reshape(unsqueeze_shape) + scale_z = K[:, 2, 2].reshape(unsqueeze_shape) + scaled_depth = scale_z * xy_depth[..., 2:3] + mid_z + # cat xy and scaled depth + xy_sdepth = torch.cat((xy_depth[..., :2], scaled_depth), dim=-1) + # finally invert the transform + unprojection_transform = to_ndc_transform.inverse() + return unprojection_transform.transform_points(xy_sdepth) + + def is_perspective(self): + return False + + def in_ndc(self): + return True + + +############################################################ +# MultiView Camera Classes # +############################################################ +""" +Note that the MultiView Cameras accept parameters in NDC space. +""" + + +def SfMPerspectiveCameras( + focal_length: _FocalLengthType = 1.0, + principal_point=((0.0, 0.0),), + R: torch.Tensor = _R, + T: torch.Tensor = _T, + device: Device = "cpu", +) -> "PerspectiveCameras": + """ + SfMPerspectiveCameras has been DEPRECATED. Use PerspectiveCameras instead. + Preserving SfMPerspectiveCameras for backward compatibility. + """ + + warnings.warn( + """SfMPerspectiveCameras is deprecated, + Use PerspectiveCameras instead. 
+ SfMPerspectiveCameras will be removed in future releases.""", + PendingDeprecationWarning, + ) + + return PerspectiveCameras( + focal_length=focal_length, + principal_point=principal_point, + R=R, + T=T, + device=device, + ) + + +class PerspectiveCameras(CamerasBase): + """ + A class which stores a batch of parameters to generate a batch of + transformation matrices using the multi-view geometry convention for + perspective camera. + + Parameters for this camera are specified in NDC if `in_ndc` is set to True. + If parameters are specified in screen space, `in_ndc` must be set to False. + """ + + # For __getitem__ + _FIELDS = ( + "K", + "R", + "T", + "focal_length", + "principal_point", + "_in_ndc", # arg is in_ndc but attribute set as _in_ndc + "image_size", + ) + + _SHARED_FIELDS = ("_in_ndc",) + + def __init__( + self, + focal_length: _FocalLengthType = 1.0, + principal_point=((0.0, 0.0),), + R: torch.Tensor = _R, + T: torch.Tensor = _T, + K: Optional[torch.Tensor] = None, + device: Device = "cpu", + in_ndc: bool = True, + image_size: Optional[Union[List, Tuple, torch.Tensor]] = None, + ) -> None: + """ + + Args: + focal_length: Focal length of the camera in world units. + A tensor of shape (N, 1) or (N, 2) for + square and non-square pixels respectively. + principal_point: xy coordinates of the center of + the principal point of the camera in pixels. + A tensor of shape (N, 2). + in_ndc: True if camera parameters are specified in NDC. + If camera parameters are in screen space, it must + be set to False. + R: Rotation matrix of shape (N, 3, 3) + T: Translation matrix of shape (N, 3) + K: (optional) A calibration matrix of shape (N, 4, 4) + If provided, don't need focal_length, principal_point + image_size: (height, width) of image size. + A tensor of shape (N, 2) or a list/tuple. Required for screen cameras. 
+ device: torch.device or string + """ + # The initializer formats all inputs to torch tensors and broadcasts + # all the inputs to have the same batch dimension where necessary. + kwargs = {"image_size": image_size} if image_size is not None else {} + super().__init__( + device=device, + focal_length=focal_length, + principal_point=principal_point, + R=R, + T=T, + K=K, + _in_ndc=in_ndc, + **kwargs, # pyre-ignore + ) + if image_size is not None: + if (self.image_size < 1).any(): # pyre-ignore + raise ValueError("Image_size provided has invalid values") + else: + self.image_size = None + + # When focal length is provided as one value, expand to + # create (N, 2) shape tensor + if self.focal_length.ndim == 1: # (N,) + self.focal_length = self.focal_length[:, None] # (N, 1) + self.focal_length = self.focal_length.expand(-1, 2) # (N, 2) + + def get_projection_transform(self, **kwargs) -> Transform3d: + """ + Calculate the projection matrix using the + multi-view geometry convention. + + Args: + **kwargs: parameters for the projection can be passed in as keyword + arguments to override the default values set in __init__. + + Returns: + A `Transform3d` object with a batch of `N` projection transforms. + + .. 
code-block:: python + + fx = focal_length[:, 0] + fy = focal_length[:, 1] + px = principal_point[:, 0] + py = principal_point[:, 1] + + K = [ + [fx, 0, px, 0], + [0, fy, py, 0], + [0, 0, 0, 1], + [0, 0, 1, 0], + ] + """ + K = kwargs.get("K", self.K) + if K is not None: + if K.shape != (self._N, 4, 4): + msg = "Expected K to have shape of (%r, 4, 4)" + raise ValueError(msg % (self._N)) + else: + K = _get_sfm_calibration_matrix( + self._N, + self.device, + kwargs.get("focal_length", self.focal_length), + kwargs.get("principal_point", self.principal_point), + orthographic=False, + ) + + transform = Transform3d( + matrix=K.transpose(1, 2).contiguous(), device=self.device + ) + return transform + + def unproject_points( + self, + xy_depth: torch.Tensor, + world_coordinates: bool = True, + from_ndc: bool = False, + **kwargs, + ) -> torch.Tensor: + """ + Args: + from_ndc: If `False` (default), assumes xy part of input is in + NDC space if self.in_ndc(), otherwise in screen space. If + `True`, assumes xy is in NDC space even if the camera + is defined in screen space. + """ + if world_coordinates: + to_camera_transform = self.get_full_projection_transform(**kwargs) + else: + to_camera_transform = self.get_projection_transform(**kwargs) + if from_ndc: + to_camera_transform = to_camera_transform.compose( + self.get_ndc_camera_transform() + ) + + unprojection_transform = to_camera_transform.inverse() + xy_inv_depth = torch.cat( + # pyre-fixme[6]: For 1st argument expected `Union[List[Tensor], + # tuple[Tensor, ...]]` but got `Tuple[Tensor, float]`. + # pyre-fixme[58]: `/` is not supported for operand types `float` and + # `Tensor`. 
+ (xy_depth[..., :2], 1.0 / xy_depth[..., 2:3]), + dim=-1, # type: ignore + ) + return unprojection_transform.transform_points(xy_inv_depth) + + def get_principal_point(self, **kwargs) -> torch.Tensor: + """ + Return the camera's principal point + + Args: + **kwargs: parameters for the camera extrinsics can be passed in + as keyword arguments to override the default values + set in __init__. + """ + proj_mat = self.get_projection_transform(**kwargs).get_matrix() + return proj_mat[:, 2, :2] + + def get_ndc_camera_transform(self, **kwargs) -> Transform3d: + """ + Returns the transform from camera projection space (screen or NDC) to NDC space. + If the camera is defined already in NDC space, the transform is identity. + For cameras defined in screen space, we adjust the principal point computation + which is defined in the image space (commonly) and scale the points to NDC space. + + This transform leaves the depth unchanged. + + Important: This transforms assumes PyTorch3D conventions for the input points, + i.e. +X left, +Y up. + """ + if self.in_ndc(): + ndc_transform = Transform3d(device=self.device, dtype=torch.float32) + else: + # when cameras are defined in screen/image space, the principal point is + # provided in the (+X right, +Y down), aka image, coordinate system. + # Since input points are defined in the PyTorch3D system (+X left, +Y up), + # we need to adjust for the principal point transform. 
+ pr_point_fix = torch.zeros( + (self._N, 4, 4), device=self.device, dtype=torch.float32 + ) + pr_point_fix[:, 0, 0] = 1.0 + pr_point_fix[:, 1, 1] = 1.0 + pr_point_fix[:, 2, 2] = 1.0 + pr_point_fix[:, 3, 3] = 1.0 + pr_point_fix[:, :2, 3] = -2.0 * self.get_principal_point(**kwargs) + pr_point_fix_transform = Transform3d( + matrix=pr_point_fix.transpose(1, 2).contiguous(), device=self.device + ) + image_size = kwargs.get("image_size", self.get_image_size()) + screen_to_ndc_transform = get_screen_to_ndc_transform( + self, with_xyflip=False, image_size=image_size + ) + ndc_transform = pr_point_fix_transform.compose(screen_to_ndc_transform) + + return ndc_transform + + def is_perspective(self): + return True + + def in_ndc(self): + return self._in_ndc + + +def SfMOrthographicCameras( + focal_length: _FocalLengthType = 1.0, + principal_point=((0.0, 0.0),), + R: torch.Tensor = _R, + T: torch.Tensor = _T, + device: Device = "cpu", +) -> "OrthographicCameras": + """ + SfMOrthographicCameras has been DEPRECATED. Use OrthographicCameras instead. + Preserving SfMOrthographicCameras for backward compatibility. + """ + + warnings.warn( + """SfMOrthographicCameras is deprecated, + Use OrthographicCameras instead. + SfMOrthographicCameras will be removed in future releases.""", + PendingDeprecationWarning, + ) + + return OrthographicCameras( + focal_length=focal_length, + principal_point=principal_point, + R=R, + T=T, + device=device, + ) + + +class OrthographicCameras(CamerasBase): + """ + A class which stores a batch of parameters to generate a batch of + transformation matrices using the multi-view geometry convention for + orthographic camera. + + Parameters for this camera are specified in NDC if `in_ndc` is set to True. + If parameters are specified in screen space, `in_ndc` must be set to False. 
+ """ + + # For __getitem__ + _FIELDS = ( + "K", + "R", + "T", + "focal_length", + "principal_point", + "_in_ndc", + "image_size", + ) + + _SHARED_FIELDS = ("_in_ndc",) + + def __init__( + self, + focal_length: _FocalLengthType = 1.0, + principal_point=((0.0, 0.0),), + R: torch.Tensor = _R, + T: torch.Tensor = _T, + K: Optional[torch.Tensor] = None, + device: Device = "cpu", + in_ndc: bool = True, + image_size: Optional[Union[List, Tuple, torch.Tensor]] = None, + ) -> None: + """ + + Args: + focal_length: Focal length of the camera in world units. + A tensor of shape (N, 1) or (N, 2) for + square and non-square pixels respectively. + principal_point: xy coordinates of the center of + the principal point of the camera in pixels. + A tensor of shape (N, 2). + in_ndc: True if camera parameters are specified in NDC. + If False, then camera parameters are in screen space. + R: Rotation matrix of shape (N, 3, 3) + T: Translation matrix of shape (N, 3) + K: (optional) A calibration matrix of shape (N, 4, 4) + If provided, don't need focal_length, principal_point, image_size + image_size: (height, width) of image size. + A tensor of shape (N, 2) or list/tuple. Required for screen cameras. + device: torch.device or string + """ + # The initializer formats all inputs to torch tensors and broadcasts + # all the inputs to have the same batch dimension where necessary. 
+ kwargs = {"image_size": image_size} if image_size is not None else {} + super().__init__( + device=device, + focal_length=focal_length, + principal_point=principal_point, + R=R, + T=T, + K=K, + _in_ndc=in_ndc, + **kwargs, # pyre-ignore + ) + if image_size is not None: + if (self.image_size < 1).any(): # pyre-ignore + raise ValueError("Image_size provided has invalid values") + else: + self.image_size = None + + # When focal length is provided as one value, expand to + # create (N, 2) shape tensor + if self.focal_length.ndim == 1: # (N,) + self.focal_length = self.focal_length[:, None] # (N, 1) + self.focal_length = self.focal_length.expand(-1, 2) # (N, 2) + + def get_projection_transform(self, **kwargs) -> Transform3d: + """ + Calculate the projection matrix using + the multi-view geometry convention. + + Args: + **kwargs: parameters for the projection can be passed in as keyword + arguments to override the default values set in __init__. + + Returns: + A `Transform3d` object with a batch of `N` projection transforms. + + .. 
code-block:: python + + fx = focal_length[:,0] + fy = focal_length[:,1] + px = principal_point[:,0] + py = principal_point[:,1] + + K = [ + [fx, 0, 0, px], + [0, fy, 0, py], + [0, 0, 1, 0], + [0, 0, 0, 1], + ] + """ + K = kwargs.get("K", self.K) + if K is not None: + if K.shape != (self._N, 4, 4): + msg = "Expected K to have shape of (%r, 4, 4)" + raise ValueError(msg % (self._N)) + else: + K = _get_sfm_calibration_matrix( + self._N, + self.device, + kwargs.get("focal_length", self.focal_length), + kwargs.get("principal_point", self.principal_point), + orthographic=True, + ) + + transform = Transform3d( + matrix=K.transpose(1, 2).contiguous(), device=self.device + ) + return transform + + def unproject_points( + self, + xy_depth: torch.Tensor, + world_coordinates: bool = True, + from_ndc: bool = False, + **kwargs, + ) -> torch.Tensor: + """ + Args: + from_ndc: If `False` (default), assumes xy part of input is in + NDC space if self.in_ndc(), otherwise in screen space. If + `True`, assumes xy is in NDC space even if the camera + is defined in screen space. + """ + if world_coordinates: + to_camera_transform = self.get_full_projection_transform(**kwargs) + else: + to_camera_transform = self.get_projection_transform(**kwargs) + if from_ndc: + to_camera_transform = to_camera_transform.compose( + self.get_ndc_camera_transform() + ) + + unprojection_transform = to_camera_transform.inverse() + return unprojection_transform.transform_points(xy_depth) + + def get_principal_point(self, **kwargs) -> torch.Tensor: + """ + Return the camera's principal point + + Args: + **kwargs: parameters for the camera extrinsics can be passed in + as keyword arguments to override the default values + set in __init__. + """ + proj_mat = self.get_projection_transform(**kwargs).get_matrix() + return proj_mat[:, 3, :2] + + def get_ndc_camera_transform(self, **kwargs) -> Transform3d: + """ + Returns the transform from camera projection space (screen or NDC) to NDC space. 
+ If the camera is defined already in NDC space, the transform is identity. + For cameras defined in screen space, we adjust the principal point computation + which is defined in the image space (commonly) and scale the points to NDC space. + + Important: This transforms assumes PyTorch3D conventions for the input points, + i.e. +X left, +Y up. + """ + if self.in_ndc(): + ndc_transform = Transform3d(device=self.device, dtype=torch.float32) + else: + # when cameras are defined in screen/image space, the principal point is + # provided in the (+X right, +Y down), aka image, coordinate system. + # Since input points are defined in the PyTorch3D system (+X left, +Y up), + # we need to adjust for the principal point transform. + pr_point_fix = torch.zeros( + (self._N, 4, 4), device=self.device, dtype=torch.float32 + ) + pr_point_fix[:, 0, 0] = 1.0 + pr_point_fix[:, 1, 1] = 1.0 + pr_point_fix[:, 2, 2] = 1.0 + pr_point_fix[:, 3, 3] = 1.0 + pr_point_fix[:, :2, 3] = -2.0 * self.get_principal_point(**kwargs) + pr_point_fix_transform = Transform3d( + matrix=pr_point_fix.transpose(1, 2).contiguous(), device=self.device + ) + image_size = kwargs.get("image_size", self.get_image_size()) + screen_to_ndc_transform = get_screen_to_ndc_transform( + self, with_xyflip=False, image_size=image_size + ) + ndc_transform = pr_point_fix_transform.compose(screen_to_ndc_transform) + + return ndc_transform + + def is_perspective(self): + return False + + def in_ndc(self): + return self._in_ndc + + +################################################ +# Helper functions for cameras # +################################################ + + +def _get_sfm_calibration_matrix( + N: int, + device: Device, + focal_length, + principal_point, + orthographic: bool = False, +) -> torch.Tensor: + """ + Returns a calibration matrix of a perspective/orthographic camera. + + Args: + N: Number of cameras. + focal_length: Focal length of the camera. 
+ principal_point: xy coordinates of the center of + the principal point of the camera in pixels. + orthographic: Boolean specifying if the camera is orthographic or not + + The calibration matrix `K` is set up as follows: + + .. code-block:: python + + fx = focal_length[:,0] + fy = focal_length[:,1] + px = principal_point[:,0] + py = principal_point[:,1] + + for orthographic==True: + K = [ + [fx, 0, 0, px], + [0, fy, 0, py], + [0, 0, 1, 0], + [0, 0, 0, 1], + ] + else: + K = [ + [fx, 0, px, 0], + [0, fy, py, 0], + [0, 0, 0, 1], + [0, 0, 1, 0], + ] + + Returns: + A calibration matrix `K` of the SfM-conventioned camera + of shape (N, 4, 4). + """ + + if not torch.is_tensor(focal_length): + focal_length = torch.tensor(focal_length, device=device) + + if focal_length.ndim in (0, 1) or focal_length.shape[1] == 1: + fx = fy = focal_length + else: + fx, fy = focal_length.unbind(1) + + if not torch.is_tensor(principal_point): + principal_point = torch.tensor(principal_point, device=device) + + px, py = principal_point.unbind(1) + + K = fx.new_zeros(N, 4, 4) + K[:, 0, 0] = fx + K[:, 1, 1] = fy + if orthographic: + K[:, 0, 3] = px + K[:, 1, 3] = py + K[:, 2, 2] = 1.0 + K[:, 3, 3] = 1.0 + else: + K[:, 0, 2] = px + K[:, 1, 2] = py + K[:, 3, 2] = 1.0 + K[:, 2, 3] = 1.0 + + return K + + +################################################ +# Helper functions for world to view transforms +################################################ + + +def get_world_to_view_transform( + R: torch.Tensor = _R, T: torch.Tensor = _T +) -> Transform3d: + """ + This function returns a Transform3d representing the transformation + matrix to go from world space to view space by applying a rotation and + a translation. + + PyTorch3D uses the same convention as Hartley & Zisserman. 
+ I.e., for camera extrinsic parameters R (rotation) and T (translation), + we map a 3D point `X_world` in world coordinates to + a point `X_cam` in camera coordinates with: + `X_cam = X_world R + T` + + Args: + R: (N, 3, 3) matrix representing the rotation. + T: (N, 3) matrix representing the translation. + + Returns: + a Transform3d object which represents the composed RT transformation. + + """ + # TODO: also support the case where RT is specified as one matrix + # of shape (N, 4, 4). + + if T.shape[0] != R.shape[0]: + msg = "Expected R, T to have the same batch dimension; got %r, %r" + raise ValueError(msg % (R.shape[0], T.shape[0])) + if T.dim() != 2 or T.shape[1:] != (3,): + msg = "Expected T to have shape (N, 3); got %r" + raise ValueError(msg % repr(T.shape)) + if R.dim() != 3 or R.shape[1:] != (3, 3): + msg = "Expected R to have shape (N, 3, 3); got %r" + raise ValueError(msg % repr(R.shape)) + + # Create a Transform3d object + T_ = Translate(T, device=T.device) + R_ = Rotate(R, device=R.device) + return R_.compose(T_) + + +def camera_position_from_spherical_angles( + distance: float, + elevation: float, + azimuth: float, + degrees: bool = True, + device: Device = "cpu", +) -> torch.Tensor: + """ + Calculate the location of the camera based on the distance away from + the target point, the elevation and azimuth angles. + + Args: + distance: distance of the camera from the object. + elevation, azimuth: angles. + The inputs distance, elevation and azimuth can be one of the following + - Python scalar + - Torch scalar + - Torch tensor of shape (N) or (1) + degrees: bool, whether the angles are specified in degrees or radians. + device: str or torch.device, device for new tensors to be placed on. + + The vectors are broadcast against each other so they all have shape (N, 1). + + Returns: + camera_position: (N, 3) xyz location of the camera. 
+ """ + broadcasted_args = convert_to_tensors_and_broadcast( + distance, elevation, azimuth, device=device + ) + dist, elev, azim = broadcasted_args + if degrees: + elev = math.pi / 180.0 * elev + azim = math.pi / 180.0 * azim + x = dist * torch.cos(elev) * torch.sin(azim) + y = dist * torch.sin(elev) + z = dist * torch.cos(elev) * torch.cos(azim) + camera_position = torch.stack([x, y, z], dim=1) + if camera_position.dim() == 0: + camera_position = camera_position.view(1, -1) # add batch dim. + return camera_position.view(-1, 3) + + +def look_at_rotation( + camera_position, at=((0, 0, 0),), up=((0, 1, 0),), device: Device = "cpu" +) -> torch.Tensor: + """ + This function takes a vector 'camera_position' which specifies the location + of the camera in world coordinates and two vectors `at` and `up` which + indicate the position of the object and the up directions of the world + coordinate system respectively. The object is assumed to be centered at + the origin. + + The output is a rotation matrix representing the transformation + from world coordinates -> view coordinates. + + Args: + camera_position: position of the camera in world coordinates + at: position of the object in world coordinates + up: vector specifying the up direction in the world coordinate frame. + + The inputs camera_position, at and up can each be a + - 3 element tuple/list + - torch tensor of shape (1, 3) + - torch tensor of shape (N, 3) + + The vectors are broadcast against each other so they all have shape (N, 3). 
+ + Returns: + R: (N, 3, 3) batched rotation matrices + """ + # Format input and broadcast + broadcasted_args = convert_to_tensors_and_broadcast( + camera_position, at, up, device=device + ) + camera_position, at, up = broadcasted_args + for t, n in zip([camera_position, at, up], ["camera_position", "at", "up"]): + if t.shape[-1] != 3: + msg = "Expected arg %s to have shape (N, 3); got %r" + raise ValueError(msg % (n, t.shape)) + z_axis = F.normalize(at - camera_position, eps=1e-5) + x_axis = F.normalize(torch.cross(up, z_axis, dim=1), eps=1e-5) + y_axis = F.normalize(torch.cross(z_axis, x_axis, dim=1), eps=1e-5) + is_close = torch.isclose(x_axis, torch.tensor(0.0), atol=5e-3).all( + dim=1, keepdim=True + ) + if is_close.any(): + replacement = F.normalize(torch.cross(y_axis, z_axis, dim=1), eps=1e-5) + x_axis = torch.where(is_close, replacement, x_axis) + R = torch.cat((x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), dim=1) + return R.transpose(1, 2) + + +def look_at_view_transform( + dist: _BatchFloatType = 1.0, + elev: _BatchFloatType = 0.0, + azim: _BatchFloatType = 0.0, + degrees: bool = True, + eye: Optional[Union[Sequence, torch.Tensor]] = None, + at=((0, 0, 0),), # (1, 3) + up=((0, 1, 0),), # (1, 3) + device: Device = "cpu", +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + This function returns a rotation and translation matrix + to apply the 'Look At' transformation from world -> view coordinates [0]. + + Args: + dist: distance of the camera from the object + elev: angle in degrees or radians. This is the angle between the + vector from the object to the camera, and the horizontal plane y = 0 (xz-plane). + azim: angle in degrees or radians. The vector from the object to + the camera is projected onto a horizontal plane y = 0. + azim is the angle between the projected vector and a + reference vector at (0, 0, 1) on the reference plane (the horizontal plane). + dist, elev and azim can be of shape (1), (N). 
+ degrees: boolean flag to indicate if the elevation and azimuth + angles are specified in degrees or radians. + eye: the position of the camera(s) in world coordinates. If eye is not + None, it will override the camera position derived from dist, elev, azim. + up: the direction of the x axis in the world coordinate system. + at: the position of the object(s) in world coordinates. + eye, up and at can be of shape (1, 3) or (N, 3). + + Returns: + 2-element tuple containing + + - **R**: the rotation to apply to the points to align with the camera. + - **T**: the translation to apply to the points to align with the camera. + + References: + [0] https://www.scratchapixel.com + """ + + if eye is not None: + broadcasted_args = convert_to_tensors_and_broadcast(eye, at, up, device=device) + eye, at, up = broadcasted_args + C = eye + else: + broadcasted_args = convert_to_tensors_and_broadcast( + dist, elev, azim, at, up, device=device + ) + dist, elev, azim, at, up = broadcasted_args + C = ( + camera_position_from_spherical_angles( + dist, elev, azim, degrees=degrees, device=device + ) + + at + ) + + R = look_at_rotation(C, at, up, device=device) + T = -torch.bmm(R.transpose(1, 2), C[:, :, None])[:, :, 0] + return R, T + + +def get_ndc_to_screen_transform( + cameras, + with_xyflip: bool = False, + image_size: Optional[Union[List, Tuple, torch.Tensor]] = None, +) -> Transform3d: + """ + PyTorch3D NDC to screen conversion. + Conversion from PyTorch3D's NDC space (+X left, +Y up) to screen/image space + (+X right, +Y down, origin top left). + + Args: + cameras + with_xyflip: flips x- and y-axis if set to True. + Optional kwargs: + image_size: ((height, width),) specifying the height, width + of the image. If not provided, it reads it from cameras. 
+ + We represent the NDC to screen conversion as a Transform3d + with projection matrix + + K = [ + [s, 0, 0, cx], + [0, s, 0, cy], + [0, 0, 1, 0], + [0, 0, 0, 1], + ] + + """ + # We require the image size, which is necessary for the transform + if image_size is None: + msg = "For NDC to screen conversion, image_size=(height, width) needs to be specified." + raise ValueError(msg) + + K = torch.zeros((cameras._N, 4, 4), device=cameras.device, dtype=torch.float32) + if not torch.is_tensor(image_size): + image_size = torch.tensor(image_size, device=cameras.device) + image_size = image_size.view(-1, 2) # of shape (1 or B)x2 + height, width = image_size.unbind(1) + + # For non square images, we scale the points such that smallest side + # has range [-1, 1] and the largest side has range [-u, u], with u > 1. + # This convention is consistent with the PyTorch3D renderer + scale = (image_size.min(dim=1).values - 0.0) / 2.0 + + K[:, 0, 0] = scale + K[:, 1, 1] = scale + K[:, 0, 3] = -1.0 * (width - 0.0) / 2.0 + K[:, 1, 3] = -1.0 * (height - 0.0) / 2.0 + K[:, 2, 2] = 1.0 + K[:, 3, 3] = 1.0 + + # Transpose the projection matrix as PyTorch3D transforms use row vectors. + transform = Transform3d( + matrix=K.transpose(1, 2).contiguous(), device=cameras.device + ) + + if with_xyflip: + # flip x, y axis + xyflip = torch.eye(4, device=cameras.device, dtype=torch.float32) + xyflip[0, 0] = -1.0 + xyflip[1, 1] = -1.0 + xyflip = xyflip.view(1, 4, 4).expand(cameras._N, -1, -1) + xyflip_transform = Transform3d( + matrix=xyflip.transpose(1, 2).contiguous(), device=cameras.device + ) + transform = transform.compose(xyflip_transform) + return transform + + +def get_screen_to_ndc_transform( + cameras, + with_xyflip: bool = False, + image_size: Optional[Union[List, Tuple, torch.Tensor]] = None, +) -> Transform3d: + """ + Screen to PyTorch3D NDC conversion. + Conversion from screen/image space (+X right, +Y down, origin top left) + to PyTorch3D's NDC space (+X left, +Y up). 
+ + Args: + cameras + with_xyflip: flips x- and y-axis if set to True. + Optional kwargs: + image_size: ((height, width),) specifying the height, width + of the image. If not provided, it reads it from cameras. + + We represent the screen to NDC conversion as a Transform3d + with projection matrix + + K = [ + [1/s, 0, 0, cx/s], + [ 0, 1/s, 0, cy/s], + [ 0, 0, 1, 0], + [ 0, 0, 0, 1], + ] + + """ + transform = get_ndc_to_screen_transform( + cameras, + with_xyflip=with_xyflip, + image_size=image_size, + ).inverse() + return transform + + +def try_get_projection_transform( + cameras: CamerasBase, cameras_kwargs: Dict[str, Any] +) -> Optional[Transform3d]: + """ + Try block to get projection transform from cameras and cameras_kwargs. + + Args: + cameras: cameras instance, can be linear cameras or nonliear cameras + cameras_kwargs: camera parameters to be passed to cameras + + Returns: + If the camera implemented projection_transform, return the + projection transform; Otherwise, return None + """ + + transform = None + try: + transform = cameras.get_projection_transform(**cameras_kwargs) + except NotImplementedError: + pass + return transform diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bf8ced062dafb53a046e673e7e1d1c4bd5a85061 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/__init__.py @@ -0,0 +1,40 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + +from .clip import ( + clip_faces, + ClipFrustum, + ClippedFaces, + convert_clipped_rasterization_to_original_faces, +) + +from .rasterize_meshes import rasterize_meshes +from .rasterizer import MeshRasterizer, RasterizationSettings +from .renderer import MeshRenderer, MeshRendererWithFragments +from .shader import ( # DEPRECATED + BlendParams, + HardFlatShader, + HardGouraudShader, + HardPhongShader, + SoftGouraudShader, + SoftPhongShader, + SoftSilhouetteShader, + SplatterPhongShader, + TexturedSoftPhongShader, +) +from .shading import gouraud_shading, phong_shading +from .textures import ( # DEPRECATED + Textures, + TexturesAtlas, + TexturesBase, + TexturesUV, + TexturesVertex, +) + + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/clip.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/clip.py new file mode 100644 index 0000000000000000000000000000000000000000..3c4b7cca349e3fba1cb258c55d8ab5f5b15bdefd --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/clip.py @@ -0,0 +1,728 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + +from typing import Any, List, Optional, Tuple + +import torch + + +""" +Mesh clipping is done before rasterization and is implemented using 4 cases +(these will be referred to throughout the functions below) + +Case 1: the triangle is completely in front of the clipping plane (it is left + unchanged) +Case 2: the triangle is completely behind the clipping plane (it is culled) +Case 3: the triangle has exactly two vertices behind the clipping plane (it is + clipped into a smaller triangle) +Case 4: the triangle has exactly one vertex behind the clipping plane (it is clipped + into a smaller quadrilateral and divided into two triangular faces) + +After rasterization, the Fragments from the clipped/modified triangles +are mapped back to the triangles in the original mesh. The indices, +barycentric coordinates and distances are all relative to original mesh triangles. + +NOTE: It is assumed that all z-coordinates are in world coordinates (not NDC +coordinates), while x and y coordinates may be in NDC/screen coordinates +(i.e after applying a projective transform e.g. cameras.transform_points(points)). +""" + + +class ClippedFaces: + """ + Helper class to store the data for the clipped version of a Meshes object + (face_verts, mesh_to_face_first_idx, num_faces_per_mesh) along with + conversion information (faces_clipped_to_unclipped_idx, barycentric_conversion, + faces_clipped_to_conversion_idx, clipped_faces_neighbor_idx) required to convert + barycentric coordinates from rasterization of the clipped Meshes to barycentric + coordinates in terms of the unclipped Meshes. + + Args: + face_verts: FloatTensor of shape (F_clipped, 3, 3) giving the verts of + each of the clipped faces + mesh_to_face_first_idx: an tensor of shape (N,), where N is the number of meshes + in the batch. The ith element stores the index into face_verts + of the first face of the ith mesh. + num_faces_per_mesh: a tensor of shape (N,) storing the number of faces in each mesh. 
+ faces_clipped_to_unclipped_idx: (F_clipped,) shaped LongTensor mapping each clipped + face back to the face in faces_unclipped (i.e. the faces in the original meshes + obtained using meshes.faces_packed()) + barycentric_conversion: (T, 3, 3) FloatTensor, where barycentric_conversion[i, :, k] + stores the barycentric weights in terms of the world coordinates of the original + (big) unclipped triangle for the kth vertex in the clipped (small) triangle. + If the rasterizer then expresses some NDC coordinate in terms of barycentric + world coordinates for the clipped (small) triangle as alpha_clipped[i,:], + alpha_unclipped[i, :] = barycentric_conversion[i, :, :]*alpha_clipped[i, :] + faces_clipped_to_conversion_idx: (F_clipped,) shaped LongTensor mapping each clipped + face to the applicable row of barycentric_conversion (or set to -1 if conversion is + not needed). + clipped_faces_neighbor_idx: LongTensor of shape (F_clipped,) giving the index of the + neighboring face for each case 4 triangle. e.g. for a case 4 face with f split + into two triangles (t1, t2): clipped_faces_neighbor_idx[t1_idx] = t2_idx. + Faces which are not clipped and subdivided are set to -1 (i.e cases 1/2/3). 
+ """ + + __slots__ = [ + "face_verts", + "mesh_to_face_first_idx", + "num_faces_per_mesh", + "faces_clipped_to_unclipped_idx", + "barycentric_conversion", + "faces_clipped_to_conversion_idx", + "clipped_faces_neighbor_idx", + ] + + def __init__( + self, + face_verts: torch.Tensor, + mesh_to_face_first_idx: torch.Tensor, + num_faces_per_mesh: torch.Tensor, + faces_clipped_to_unclipped_idx: Optional[torch.Tensor] = None, + barycentric_conversion: Optional[torch.Tensor] = None, + faces_clipped_to_conversion_idx: Optional[torch.Tensor] = None, + clipped_faces_neighbor_idx: Optional[torch.Tensor] = None, + ) -> None: + self.face_verts = face_verts + self.mesh_to_face_first_idx = mesh_to_face_first_idx + self.num_faces_per_mesh = num_faces_per_mesh + self.faces_clipped_to_unclipped_idx = faces_clipped_to_unclipped_idx + self.barycentric_conversion = barycentric_conversion + self.faces_clipped_to_conversion_idx = faces_clipped_to_conversion_idx + self.clipped_faces_neighbor_idx = clipped_faces_neighbor_idx + + +class ClipFrustum: + """ + Helper class to store the information needed to represent a view frustum + (left, right, top, bottom, znear, zfar), which is used to clip or cull triangles. + Values left as None mean that culling should not be performed for that axis. + The parameters perspective_correct, cull, and z_clip_value are used to define + behavior for clipping triangles to the frustum. 
+ + Args: + left: NDC coordinate of the left clipping plane (along x axis) + right: NDC coordinate of the right clipping plane (along x axis) + top: NDC coordinate of the top clipping plane (along y axis) + bottom: NDC coordinate of the bottom clipping plane (along y axis) + znear: world space z coordinate of the near clipping plane + zfar: world space z coordinate of the far clipping plane + perspective_correct: should be set to True for a perspective camera + cull: if True, triangles outside the frustum should be culled + z_clip_value: if not None, then triangles should be clipped (possibly into + smaller triangles) such that z >= z_clip_value. This avoids projections + that go to infinity as z->0 + """ + + __slots__ = [ + "left", + "right", + "top", + "bottom", + "znear", + "zfar", + "perspective_correct", + "cull", + "z_clip_value", + ] + + def __init__( + self, + left: Optional[float] = None, + right: Optional[float] = None, + top: Optional[float] = None, + bottom: Optional[float] = None, + znear: Optional[float] = None, + zfar: Optional[float] = None, + perspective_correct: bool = False, + cull: bool = True, + z_clip_value: Optional[float] = None, + ) -> None: + self.left = left + self.right = right + self.top = top + self.bottom = bottom + self.znear = znear + self.zfar = zfar + self.perspective_correct = perspective_correct + self.cull = cull + self.z_clip_value = z_clip_value + + +def _get_culled_faces(face_verts: torch.Tensor, frustum: ClipFrustum) -> torch.Tensor: + """ + Helper function used to find all the faces in Meshes which are + fully outside the view frustum. A face is culled if all 3 vertices are outside + the same axis of the view frustum. + + Args: + face_verts: An (F,3,3) tensor, where F is the number of faces in + the packed representation of Meshes. The 2nd dimension represents the 3 vertices + of a triangle, and the 3rd dimension stores the xyz locations of each + vertex. 
+ frustum: An instance of the ClipFrustum class with the information on the + position of the clipping planes. + + Returns: + faces_culled: boolean tensor of size F specifying whether or not each face should be + culled. + """ + clipping_planes = ( + (frustum.left, 0, "<"), + (frustum.right, 0, ">"), + (frustum.top, 1, "<"), + (frustum.bottom, 1, ">"), + (frustum.znear, 2, "<"), + (frustum.zfar, 2, ">"), + ) + faces_culled = torch.zeros( + [face_verts.shape[0]], dtype=torch.bool, device=face_verts.device + ) + for plane in clipping_planes: + clip_value, axis, op = plane + # If clip_value is None then don't clip along that plane + if frustum.cull and clip_value is not None: + if op == "<": + verts_clipped = face_verts[:, axis] < clip_value + else: + verts_clipped = face_verts[:, axis] > clip_value + + # If all verts are clipped then face is outside the frustum + faces_culled |= verts_clipped.sum(1) == 3 + + return faces_culled + + +def _find_verts_intersecting_clipping_plane( + face_verts: torch.Tensor, + p1_face_ind: torch.Tensor, + clip_value: float, + perspective_correct: bool, +) -> Tuple[Tuple[Any, Any, Any, Any, Any], List[Any]]: + r""" + Helper function to find the vertices used to form a new triangle for case 3/case 4 faces. + + Given a list of triangles that are already known to intersect the clipping plane, + solve for the two vertices p4 and p5 where the edges of the triangle intersects the + clipping plane. + + p1 + /\ + / \ + / t \ + _____________p4/______\p5__________ clip_value + / \ + /____ \ + p2 ---____\p3 + + Args: + face_verts: An (F,3,3) tensor, where F is the number of faces in + the packed representation of the Meshes, the 2nd dimension represents + the 3 vertices of the face, and the 3rd dimension stores the xyz locations of each + vertex. The z-coordinates must be represented in world coordinates, while + the xy-coordinates may be in NDC/screen coordinates (i.e. after projection). 
        p1_face_ind: A tensor of shape (N,) with values in the range of 0 to 2. In each
            case 3/case 4 triangle, two vertices are on the same side of the
            clipping plane and the 3rd is on the other side. p1_face_ind stores the index of
            the vertex that is not on the same side as any other vertex in the triangle.
        clip_value: Float, the z-value defining where to clip the triangle.
        perspective_correct: Bool, Should be set to true if a perspective camera was
            used and xy-coordinates of face_verts_unclipped are in NDC/screen coordinates.

    Returns:
        A 2-tuple
            p: (p1, p2, p3, p4, p5)
            p_barycentric: (p1_bary, p2_bary, p3_bary, p4_bary, p5_bary)

        Each of p1...p5 is an (F,3) tensor of the xyz locations of the 5 points in the
        diagram above for case 3/case 4 faces. Each p1_bary...p5_bary is an (F, 3) tensor
        storing the barycentric weights used to encode p1...p5 in terms of the original
        unclipped triangle.
    """

    # Let T be the number of triangles in face_verts (note that these correspond to the
    # subset of case 3 or case 4 triangles). p1_face_ind, p2_face_ind, and p3_face_ind
    # are (T,) tensors with values in the range of 0 to 2. p1_face_ind stores the index
    # of the vertex that is not on the same side as any other vertex in the triangle, and
    # p2_face_ind and p3_face_ind are the indices of the other two vertices preserving
    # the same counterclockwise or clockwise ordering
    T = face_verts.shape[0]
    p2_face_ind = torch.remainder(p1_face_ind + 1, 3)
    p3_face_ind = torch.remainder(p1_face_ind + 2, 3)

    # p1, p2, p3 are (T, 3) tensors storing the corresponding (x, y, z) coordinates
    # of p1_face_ind, p2_face_ind, p3_face_ind
    p1 = face_verts.gather(1, p1_face_ind[:, None, None].expand(-1, -1, 3)).squeeze(1)
    p2 = face_verts.gather(1, p2_face_ind[:, None, None].expand(-1, -1, 3)).squeeze(1)
    p3 = face_verts.gather(1, p3_face_ind[:, None, None].expand(-1, -1, 3)).squeeze(1)

    ##################################
    # Solve for intersection point p4
    ##################################

    # p4 is a (T, 3) tensor: the point on the segment between p1 and p2 that
    # intersects the clipping plane.
    # Solve for the weight w2 such that p1.z*(1-w2) + p2.z*w2 = clip_value.
    # Then interpolate p4 = p1*(1-w2) + p2*w2 where it is assumed that z-coordinates
    # are expressed in world coordinates (since we want to clip z in world coordinates).
    w2 = (p1[:, 2] - clip_value) / (p1[:, 2] - p2[:, 2])
    p4 = p1 * (1 - w2[:, None]) + p2 * w2[:, None]
    if perspective_correct:
        # It is assumed that all z-coordinates are in world coordinates (not NDC
        # coordinates), while x and y coordinates may be in NDC/screen coordinates.
        # If x and y are in NDC/screen coordinates and a projective transform was used
        # in a perspective camera, then we effectively want to:
        #   1. Convert back to world coordinates (by multiplying by z)
        #   2. Interpolate using w2
        #   3. Convert back to NDC/screen coordinates (by dividing by the new
        #      z=clip_value)
        p1_world = p1[:, :2] * p1[:, 2:3]
        p2_world = p2[:, :2] * p2[:, 2:3]
        p4[:, :2] = (p1_world * (1 - w2[:, None]) + p2_world * w2[:, None]) / clip_value

    ##################################
    # Solve for intersection point p5
    ##################################

    # p5 is a (T, 3) tensor representing the point on the segment between p1 and p3 that
    # intersects the clipping plane.
    # Solve for the weight w3 such that p1.z * (1-w3) + p3.z * w3 = clip_value,
    # and then interpolate p5 = p1 * (1-w3) + p3 * w3
    w3 = (p1[:, 2] - clip_value) / (p1[:, 2] - p3[:, 2])
    # NOTE(review): w3 is detached here (no gradient flows through this interpolation
    # weight) while w2 above is not - confirm whether this asymmetry is intentional.
    w3 = w3.detach()
    p5 = p1 * (1 - w3[:, None]) + p3 * w3[:, None]
    if perspective_correct:
        # Again if using a perspective camera, convert back to world coordinates,
        # interpolate and convert back
        p1_world = p1[:, :2] * p1[:, 2:3]
        p3_world = p3[:, :2] * p3[:, 2:3]
        p5[:, :2] = (p1_world * (1 - w3[:, None]) + p3_world * w3[:, None]) / clip_value

    # Set the barycentric coordinates of p1,p2,p3,p4,p5 in terms of the original
    # unclipped triangle in face_verts.
    T_idx = torch.arange(T, device=face_verts.device)
    p_barycentric = [torch.zeros((T, 3), device=face_verts.device) for i in range(5)]
    # p1/p2/p3 are original vertices -> one-hot weights; p4/p5 are interpolated on
    # the p1-p2 and p1-p3 edges respectively.
    p_barycentric[0][(T_idx, p1_face_ind)] = 1
    p_barycentric[1][(T_idx, p2_face_ind)] = 1
    p_barycentric[2][(T_idx, p3_face_ind)] = 1
    p_barycentric[3][(T_idx, p1_face_ind)] = 1 - w2
    p_barycentric[3][(T_idx, p2_face_ind)] = w2
    p_barycentric[4][(T_idx, p1_face_ind)] = 1 - w3
    p_barycentric[4][(T_idx, p3_face_ind)] = w3

    p = (p1, p2, p3, p4, p5)

    return p, p_barycentric


###################
# Main Entry point
###################
def clip_faces(
    face_verts_unclipped: torch.Tensor,
    mesh_to_face_first_idx: torch.Tensor,
    num_faces_per_mesh: torch.Tensor,
    frustum: ClipFrustum,
) -> ClippedFaces:
    """
    Clip a mesh to the portion contained within a view frustum and with z > z_clip_value.

    There are two types of clipping:
      1) Cull triangles that are completely outside the view frustum. This is purely
         to save computation by reducing the number of triangles that need to be
         rasterized.
      2) Clip triangles into the portion of the triangle where z > z_clip_value. The
         clipped region may be a quadrilateral, which results in splitting a triangle
         into two triangles. This does not save computation, but is necessary to
         correctly rasterize using perspective cameras for triangles that pass through
         z <= 0, because NDC/screen coordinates go to infinity at z=0.

    Args:
        face_verts_unclipped: An (F, 3, 3) tensor, where F is the number of faces in
            the packed representation of Meshes, the 2nd dimension represents the 3 vertices
            of the triangle, and the 3rd dimension stores the xyz locations of each
            vertex. The z-coordinates must be represented in world coordinates, while
            the xy-coordinates may be in NDC/screen coordinates
        mesh_to_face_first_idx: an tensor of shape (N,), where N is the number of meshes
            in the batch. The ith element stores the index into face_verts_unclipped
            of the first face of the ith mesh.
        num_faces_per_mesh: a tensor of shape (N,) storing the number of faces in each mesh.
        frustum: a ClipFrustum object defining the frustum used to cull faces.

    Returns:
        clipped_faces: ClippedFaces object storing a clipped version of the Meshes
            along with tensors that can be used to convert barycentric coordinates
            returned by rasterization of the clipped meshes into barycentric
            coordinates for the unclipped meshes.
    """
    F = face_verts_unclipped.shape[0]
    device = face_verts_unclipped.device

    # Triangles completely outside the view frustum will be culled
    # faces_culled is of shape (F,)
    faces_culled = _get_culled_faces(face_verts_unclipped, frustum)

    # Triangles that are partially behind the z clipping plane will be clipped to
    # smaller triangles
    z_clip_value = frustum.z_clip_value
    perspective_correct = frustum.perspective_correct
    if z_clip_value is not None:
        # (F, 3) tensor (where F is the number of triangles) marking whether each vertex
        # in a triangle is behind the clipping plane
        faces_clipped_verts = face_verts_unclipped[:, :, 2] < z_clip_value

        # (F,) dim tensor containing the number of clipped vertices in each triangle
        faces_num_clipped_verts = faces_clipped_verts.sum(1)
    else:
        faces_num_clipped_verts = torch.zeros([F], device=device)

    # If no triangles need to be clipped or culled, avoid unnecessary computation
    # and return early
    if faces_num_clipped_verts.sum().item() == 0 and faces_culled.sum().item() == 0:
        return ClippedFaces(
            face_verts=face_verts_unclipped,
            mesh_to_face_first_idx=mesh_to_face_first_idx,
            num_faces_per_mesh=num_faces_per_mesh,
        )

    #####################################################################################
    # Classify faces into the 4 relevant cases:
    #   1) The triangle is completely in front of the clipping plane (it is left
    #      unchanged)
    #   2) The triangle is completely behind the clipping plane (it is culled)
    #   3) The triangle has exactly two vertices behind the clipping plane (it is
    #      clipped into a smaller triangle)
    #   4) The triangle has exactly one vertex behind the clipping plane (it is clipped
    #      into a smaller quadrilateral and split into two triangles)
    #####################################################################################

    faces_unculled = ~faces_culled
    # Case 1: no clipped verts or culled faces
    cases1_unclipped = (faces_num_clipped_verts == 0) & faces_unculled
    case1_unclipped_idx = cases1_unclipped.nonzero(as_tuple=True)[0]
    # Case 2: all verts clipped
    case2_unclipped = (faces_num_clipped_verts == 3) | faces_culled
    # Case 3: two verts clipped
    case3_unclipped = (faces_num_clipped_verts == 2) & faces_unculled
    case3_unclipped_idx = case3_unclipped.nonzero(as_tuple=True)[0]
    # Case 4: one vert clipped
    case4_unclipped = (faces_num_clipped_verts == 1) & faces_unculled
    case4_unclipped_idx = case4_unclipped.nonzero(as_tuple=True)[0]

    # faces_unclipped_to_clipped_idx is an (F,) dim tensor storing the index of each
    # face to the corresponding face in face_verts_clipped.
    # Each case 2 triangle will be culled (deleted from face_verts_clipped),
    # while each case 4 triangle will be split into two smaller triangles
    # (replaced by two consecutive triangles in face_verts_clipped)

    # case2_unclipped is an (F,) dim 0/1 tensor of all the case2 faces
    # case4_unclipped is an (F,) dim 0/1 tensor of all the case4 faces
    faces_delta = case4_unclipped.int() - case2_unclipped.int()
    # faces_delta_cum gives the per face change in index. Faces which are
    # clipped in the original mesh are mapped to the closest non clipped face
    # in face_verts_clipped (this doesn't matter as they are not used
    # during rasterization anyway).
    faces_delta_cum = faces_delta.cumsum(0) - faces_delta
    delta = 1 + case4_unclipped.int() - case2_unclipped.int()
    faces_unclipped_to_clipped_idx = delta.cumsum(0) - delta

    ###########################################
    # Allocate tensors for the output Meshes.
    # These will then be filled in for each case.
    ###########################################
    F_clipped = (
        F
        # pyre-fixme[58]: `+` is not supported for operand types `int` and
        #  `Union[bool, float, int]`.
        + faces_delta_cum[-1].item()
        # pyre-fixme[58]: `+` is not supported for operand types `int` and
        #  `Union[bool, float, int]`.
        + faces_delta[-1].item()
    )  # Total number of faces in the new Meshes
    face_verts_clipped = torch.zeros(
        (F_clipped, 3, 3), dtype=face_verts_unclipped.dtype, device=device
    )
    faces_clipped_to_unclipped_idx = torch.zeros(
        [F_clipped], dtype=torch.int64, device=device
    )

    # Update version of mesh_to_face_first_idx and num_faces_per_mesh applicable to
    # face_verts_clipped
    mesh_to_face_first_idx_clipped = faces_unclipped_to_clipped_idx[
        mesh_to_face_first_idx
    ]
    F_clipped_t = torch.full([1], F_clipped, dtype=torch.int64, device=device)
    num_faces_next = torch.cat((mesh_to_face_first_idx_clipped[1:], F_clipped_t))
    num_faces_per_mesh_clipped = num_faces_next - mesh_to_face_first_idx_clipped

    ################# Start Case 1 ########################################

    # Case 1: Triangles are fully visible, copy unchanged triangles into the
    # appropriate position in the new list of faces
    case1_clipped_idx = faces_unclipped_to_clipped_idx[case1_unclipped_idx]
    face_verts_clipped[case1_clipped_idx] = face_verts_unclipped[case1_unclipped_idx]
    faces_clipped_to_unclipped_idx[case1_clipped_idx] = case1_unclipped_idx

    # If no triangles need to be clipped but some triangles were culled, avoid
    # unnecessary clipping computation
    if case3_unclipped_idx.shape[0] + case4_unclipped_idx.shape[0] == 0:
        return ClippedFaces(
            face_verts=face_verts_clipped,
            mesh_to_face_first_idx=mesh_to_face_first_idx_clipped,
            num_faces_per_mesh=num_faces_per_mesh_clipped,
            faces_clipped_to_unclipped_idx=faces_clipped_to_unclipped_idx,
        )

    ################# End Case 1 ##########################################

    ################# Start Case 3 ########################################

    # Case 3: exactly two vertices are behind the camera, clipping the triangle into a
    # triangle. In the diagram below, we clip the bottom part of the triangle, and add
    # new vertices p4 and p5 by intersecting with the clipping plane. The updated
    # triangle is the triangle between p4, p1, p5
    #
    #                    p1 (unclipped vertex)
    #                    /\
    #                   /  \
    #                  / t  \
    #    ____________p4/______\p5__________ clip_value
    #    xxxxxxxxxxxxx/        \xxxxxxxxxxxxxxxxxxxxxxxxxxx
    #    xxxxxxxxxxxx/____       \xxxxxxxxxxxxxxxxxxxxxxxxx
    #    xxxxxxxxx p2 xxxx---____ \p3 xxxxxxxxxxxxxxxxxxxxx
    #    xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
    faces_case3 = face_verts_unclipped[case3_unclipped_idx]

    # index (0, 1, or 2) of the vertex in front of the clipping plane
    # pyre-fixme[61]: `faces_clipped_verts` is undefined, or not always defined.
    p1_face_ind = torch.where(~faces_clipped_verts[case3_unclipped_idx])[1]

    # Solve for the points p4, p5 that intersect the clipping plane
    p, p_barycentric = _find_verts_intersecting_clipping_plane(
        faces_case3, p1_face_ind, z_clip_value, perspective_correct
    )

    p1, _, _, p4, p5 = p
    p1_barycentric, _, _, p4_barycentric, p5_barycentric = p_barycentric

    # Store clipped triangle
    case3_clipped_idx = faces_unclipped_to_clipped_idx[case3_unclipped_idx]
    t_barycentric = torch.stack((p4_barycentric, p5_barycentric, p1_barycentric), 2)
    face_verts_clipped[case3_clipped_idx] = torch.stack((p4, p5, p1), 1)
    faces_clipped_to_unclipped_idx[case3_clipped_idx] = case3_unclipped_idx

    ################# End Case 3 ##########################################

    ################# Start Case 4 ########################################

    # Case 4: exactly one vertex is behind the camera, clip the triangle into a
    # quadrilateral. In the diagram below, we clip the bottom part of the triangle,
    # and add new vertices p4 and p5 by intersecting with the clipping plane. The
    # unclipped region is a quadrilateral, which is split into two triangles:
    #   t1: p4, p2, p5
    #   t2: p5, p2, p3
    #
    #    p3_____________________p2
    #      \              __--/
    #       \    t2   __--   /
    #        \    __--  t1  /
    # _______p5\__--_______/p4_________clip_value
    # xxxxxxxxxx\         /xxxxxxxxxxxxxxxxxx
    # xxxxxxxxxxx\       /xxxxxxxxxxxxxxxxxxx
    # xxxxxxxxxxxx\     /xxxxxxxxxxxxxxxxxxxx
    # xxxxxxxxxxxxx\   /xxxxxxxxxxxxxxxxxxxxx
    # xxxxxxxxxxxxxx\ /xxxxxxxxxxxxxxxxxxxxxx
    #                p1 (clipped vertex)

    faces_case4 = face_verts_unclipped[case4_unclipped_idx]

    # index (0, 1, or 2) of the vertex behind the clipping plane
    # pyre-fixme[61]: `faces_clipped_verts` is undefined, or not always defined.
    p1_face_ind = torch.where(faces_clipped_verts[case4_unclipped_idx])[1]

    # Solve for the points p4, p5 that intersect the clipping plane
    p, p_barycentric = _find_verts_intersecting_clipping_plane(
        faces_case4, p1_face_ind, z_clip_value, perspective_correct
    )
    _, p2, p3, p4, p5 = p
    _, p2_barycentric, p3_barycentric, p4_barycentric, p5_barycentric = p_barycentric

    # Store clipped triangles: each case 4 face occupies two consecutive slots
    # in face_verts_clipped.
    case4_clipped_idx = faces_unclipped_to_clipped_idx[case4_unclipped_idx]
    face_verts_clipped[case4_clipped_idx] = torch.stack((p4, p2, p5), 1)
    face_verts_clipped[case4_clipped_idx + 1] = torch.stack((p5, p2, p3), 1)
    t1_barycentric = torch.stack((p4_barycentric, p2_barycentric, p5_barycentric), 2)
    t2_barycentric = torch.stack((p5_barycentric, p2_barycentric, p3_barycentric), 2)
    faces_clipped_to_unclipped_idx[case4_clipped_idx] = case4_unclipped_idx
    faces_clipped_to_unclipped_idx[case4_clipped_idx + 1] = case4_unclipped_idx

    ##################### End Case 4 #########################

    # Triangles that were clipped (case 3 & case 4) will require conversion of
    # barycentric coordinates from being in terms of the smaller clipped triangle to in terms
    # of the original big triangle. If there are T clipped triangles,
    # barycentric_conversion is a (T, 3, 3) tensor, where barycentric_conversion[i, :, k]
    # stores the barycentric weights in terms of the world coordinates of the original
    # (big) triangle for the kth vertex in the clipped (small) triangle. If our
    # rasterizer then expresses some NDC coordinate in terms of barycentric
    # world coordinates for the clipped (small) triangle as alpha_clipped[i,:],
    # alpha_unclipped[i, :] = barycentric_conversion[i, :, :]*alpha_clipped[i, :]
    barycentric_conversion = torch.cat((t_barycentric, t1_barycentric, t2_barycentric))

    # faces_clipped_to_conversion_idx is an (F_clipped,) shape tensor mapping each output
    # face to the applicable row of barycentric_conversion (or set to -1 if conversion is
    # not needed)
    faces_to_convert_idx = torch.cat(
        (case3_clipped_idx, case4_clipped_idx, case4_clipped_idx + 1), 0
    )
    barycentric_idx = torch.arange(
        barycentric_conversion.shape[0], dtype=torch.int64, device=device
    )
    faces_clipped_to_conversion_idx = torch.full(
        [F_clipped], -1, dtype=torch.int64, device=device
    )
    faces_clipped_to_conversion_idx[faces_to_convert_idx] = barycentric_idx

    # clipped_faces_neighbor_idx is an (F_clipped,) dim tensor.
    # For case 4 clipped triangles (where a big triangle is split in two smaller triangles),
    # store the index of the neighboring clipped triangle.
    # This will be needed because if the soft rasterizer includes both
    # triangles in the list of top K nearest triangles, we
    # should only use the one with the smaller distance.
    clipped_faces_neighbor_idx = torch.full(
        [F_clipped], -1, dtype=torch.int64, device=device
    )
    clipped_faces_neighbor_idx[case4_clipped_idx] = case4_clipped_idx + 1
    clipped_faces_neighbor_idx[case4_clipped_idx + 1] = case4_clipped_idx

    clipped_faces = ClippedFaces(
        face_verts=face_verts_clipped,
        mesh_to_face_first_idx=mesh_to_face_first_idx_clipped,
        num_faces_per_mesh=num_faces_per_mesh_clipped,
        faces_clipped_to_unclipped_idx=faces_clipped_to_unclipped_idx,
        barycentric_conversion=barycentric_conversion,
        faces_clipped_to_conversion_idx=faces_clipped_to_conversion_idx,
        clipped_faces_neighbor_idx=clipped_faces_neighbor_idx,
    )
    return clipped_faces


def convert_clipped_rasterization_to_original_faces(
    pix_to_face_clipped, bary_coords_clipped, clipped_faces: ClippedFaces
) -> Tuple[torch.Tensor, torch.Tensor]:
    """
    Convert rasterization Fragments (expressed as pix_to_face_clipped,
    bary_coords_clipped) of clipped Meshes computed using clip_faces()
    to the corresponding rasterization Fragments where barycentric coordinates and
    face indices are in terms of the original unclipped Meshes. The distances are
    handled in the rasterizer C++/CUDA kernels (i.e. for Cases 1/3 the distance
    can be used directly and for Case 4 triangles the distance of the pixel to
    the closest of the two subdivided triangles is used).

    Args:
        pix_to_face_clipped: LongTensor of shape (N, image_size, image_size,
            faces_per_pixel) giving the indices of the nearest faces at each pixel,
            sorted in ascending z-order. Concretely
            ``pix_to_face_clipped[n, y, x, k] = f`` means that ``faces_verts_clipped[f]``
            is the kth closest face (in the z-direction) to pixel (y, x). Pixels that
            are hit by fewer than faces_per_pixel are padded with -1.
        bary_coords_clipped: FloatTensor of shape
            (N, image_size, image_size, faces_per_pixel, 3) giving the barycentric
            coordinates in world coordinates of the nearest faces at each pixel, sorted
            in ascending z-order. Concretely, if ``pix_to_face_clipped[n, y, x, k] = f``
            then ``[w0, w1, w2] = bary_coords_clipped[n, y, x, k]`` gives the
            barycentric coords for pixel (y, x) relative to the face defined by
            ``unproject(face_verts_clipped[f])``. Pixels hit by fewer than
            faces_per_pixel are padded with -1.
        clipped_faces: an instance of ClippedFaces class giving the auxillary variables
            for converting rasterization outputs from clipped to unclipped Meshes.

    Returns:
        2-tuple: (pix_to_face_unclipped, bary_coords_unclipped) with the same
        definitions as (pix_to_face_clipped, bary_coords_clipped) except that they
        pertain to faces_verts_unclipped instead of faces_verts_clipped (i.e the
        original meshes as opposed to the modified meshes). Distances are handled
        in the rasterizer kernels, so no dists tensor is returned here.
    """
    faces_clipped_to_unclipped_idx = clipped_faces.faces_clipped_to_unclipped_idx

    # If no clipping then return inputs
    if (
        faces_clipped_to_unclipped_idx is None
        or faces_clipped_to_unclipped_idx.numel() == 0
    ):
        return pix_to_face_clipped, bary_coords_clipped

    device = pix_to_face_clipped.device

    # Convert pix_to_face indices to now refer to the faces in the unclipped Meshes.
    # Init empty tensor to fill in all the background values which have pix_to_face=-1.
    empty = torch.full(pix_to_face_clipped.shape, -1, device=device, dtype=torch.int64)
    pix_to_face_unclipped = torch.where(
        pix_to_face_clipped != -1,
        faces_clipped_to_unclipped_idx[pix_to_face_clipped],
        empty,
    )

    # For triangles that were clipped into smaller triangle(s), convert barycentric
    # coordinates from being in terms of the clipped triangle to being in terms of the
    # original unclipped triangle.

    # barycentric_conversion is a (T, 3, 3) tensor such that
    # alpha_unclipped[i, :] = barycentric_conversion[i, :, :]*alpha_clipped[i, :]
    barycentric_conversion = clipped_faces.barycentric_conversion

    # faces_clipped_to_conversion_idx is an (F_clipped,) shape tensor mapping each output
    # face to the applicable row of barycentric_conversion (or set to -1 if conversion is
    # not needed)
    faces_clipped_to_conversion_idx = clipped_faces.faces_clipped_to_conversion_idx

    if barycentric_conversion is not None:
        bary_coords_unclipped = bary_coords_clipped.clone()

        # Select the subset of faces that require conversion, where N is the sum
        # number of case3/case4 triangles that are in the closest k triangles to some
        # rasterized pixel.
        pix_to_conversion_idx = torch.where(
            pix_to_face_clipped != -1,
            faces_clipped_to_conversion_idx[pix_to_face_clipped],
            empty,
        )
        faces_to_convert_mask = pix_to_conversion_idx != -1
        N = faces_to_convert_mask.sum().item()

        # Expand to (N, H, W, K, 3) to be the same shape as barycentric coordinates
        faces_to_convert_mask_expanded = faces_to_convert_mask[:, :, :, :, None].expand(
            -1, -1, -1, -1, 3
        )

        # An (N,) dim tensor of indices into barycentric_conversion
        conversion_idx_subset = pix_to_conversion_idx[faces_to_convert_mask]

        # An (N, 3, 1) tensor of barycentric coordinates in terms of the clipped triangles
        bary_coords_clipped_subset = bary_coords_clipped[faces_to_convert_mask_expanded]
        bary_coords_clipped_subset = bary_coords_clipped_subset.reshape((N, 3, 1))

        # An (N, 3, 3) tensor storing matrices to convert from clipped to unclipped
        # barycentric coordinates
        bary_conversion_subset = barycentric_conversion[conversion_idx_subset]

        # An (N, 3, 1) tensor of barycentric coordinates in terms of the unclipped triangle
        bary_coords_unclipped_subset = bary_conversion_subset.bmm(
            bary_coords_clipped_subset
        )

        bary_coords_unclipped_subset = bary_coords_unclipped_subset.reshape([N * 3])
        bary_coords_unclipped[faces_to_convert_mask_expanded] = (
            bary_coords_unclipped_subset
        )

        # dists for case 4 faces will be handled in the rasterizer
        # so no need to modify them here.
    else:
        bary_coords_unclipped = bary_coords_clipped

    return pix_to_face_unclipped, bary_coords_unclipped
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/rasterize_meshes.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/rasterize_meshes.py
new file mode 100644
index 0000000000000000000000000000000000000000..88c28fd8b064078baad265a0b65ea3a29caee80b
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/rasterize_meshes.py
@@ -0,0 +1,767 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe


from typing import List, Optional, Tuple, Union

import numpy as np
import torch
from pytorch3d import _C

from ..utils import parse_image_size

from .clip import (
    clip_faces,
    ClipFrustum,
    convert_clipped_rasterization_to_original_faces,
)


# TODO make the epsilon user configurable
kEpsilon = 1e-8

# Maximum number of faces per bins for
# coarse-to-fine rasterization
kMaxFacesPerBin = 22


def rasterize_meshes(
    meshes,
    image_size: Union[int, List[int], Tuple[int, int]] = 256,
    blur_radius: float = 0.0,
    faces_per_pixel: int = 8,
    bin_size: Optional[int] = None,
    max_faces_per_bin: Optional[int] = None,
    perspective_correct: bool = False,
    clip_barycentric_coords: bool = False,
    cull_backfaces: bool = False,
    z_clip_value: Optional[float] = None,
    cull_to_frustum: bool = False,
):
    """
    Rasterize a batch of meshes given the shape of the desired output image.
+ Each mesh is rasterized onto a separate image of shape + (H, W) if `image_size` is a tuple or (image_size, image_size) if it + is an int. + + If the desired image size is non square (i.e. a tuple of (H, W) where H != W) + the aspect ratio needs special consideration. There are two aspect ratios + to be aware of: + - the aspect ratio of each pixel + - the aspect ratio of the output image + The camera can be used to set the pixel aspect ratio. In the rasterizer, + we assume square pixels, but variable image aspect ratio (i.e rectangle images). + + In most cases you will want to set the camera aspect ratio to + 1.0 (i.e. square pixels) and only vary the + `image_size` (i.e. the output image dimensions in pixels). + + Args: + meshes: A Meshes object representing a batch of meshes, batch size N. + image_size: Size in pixels of the output image to be rasterized. + Can optionally be a tuple of (H, W) in the case of non square images. + blur_radius: Float distance in the range [0, 2] used to expand the face + bounding boxes for rasterization. Setting blur radius + results in blurred edges around the shape instead of a + hard boundary. Set to 0 for no blur. + faces_per_pixel (Optional): Number of faces to save per pixel, returning + the nearest faces_per_pixel points along the z-axis. + bin_size: Size of bins to use for coarse-to-fine rasterization. Setting + bin_size=0 uses naive rasterization; setting bin_size=None attempts to + set it heuristically based on the shape of the input. This should not + affect the output, but can affect the speed of the forward pass. + max_faces_per_bin: Only applicable when using coarse-to-fine rasterization + (bin_size > 0); this is the maximum number of faces allowed within each + bin. This should not affect the output values, but can affect + the memory usage in the forward pass. + perspective_correct: Bool, Whether to apply perspective correction when computing + barycentric coordinates for pixels. 
This should be set to True if a perspective + camera is used. + clip_barycentric_coords: Whether, after any perspective correction is applied + but before the depth is calculated (e.g. for z clipping), + to "correct" a location outside the face (i.e. with a negative + barycentric coordinate) to a position on the edge of the face. + cull_backfaces: Bool, Whether to only rasterize mesh faces which are + visible to the camera. This assumes that vertices of + front-facing triangles are ordered in an anti-clockwise + fashion, and triangles that face away from the camera are + in a clockwise order relative to the current view + direction. NOTE: This will only work if the mesh faces are + consistently defined with counter-clockwise ordering when + viewed from the outside. + z_clip_value: if not None, then triangles will be clipped (and possibly + subdivided into smaller triangles) such that z >= z_clip_value. + This avoids camera projections that go to infinity as z->0. + Default is None as clipping affects rasterization speed and + should only be turned on if explicitly needed. + See clip.py for all the extra computation that is required. + cull_to_frustum: if True, triangles outside the view frustum will be culled. + Culling involves removing all faces which fall outside view frustum. + Default is False so that it is turned on only when needed. + + Returns: + 4-element tuple containing + + - **pix_to_face**: LongTensor of shape + (N, image_size, image_size, faces_per_pixel) + giving the indices of the nearest faces at each pixel, + sorted in ascending z-order. + Concretely ``pix_to_face[n, y, x, k] = f`` means that + ``faces_verts[f]`` is the kth closest face (in the z-direction) + to pixel (y, x). Pixels that are hit by fewer than + faces_per_pixel are padded with -1. + - **zbuf**: FloatTensor of shape (N, image_size, image_size, faces_per_pixel) + giving the NDC z-coordinates of the nearest faces at each pixel, + sorted in ascending z-order. 
+ Concretely, if ``pix_to_face[n, y, x, k] = f`` then + ``zbuf[n, y, x, k] = face_verts[f, 2]``. Pixels hit by fewer than + faces_per_pixel are padded with -1. + - **barycentric**: FloatTensor of shape + (N, image_size, image_size, faces_per_pixel, 3) + giving the barycentric coordinates in NDC units of the + nearest faces at each pixel, sorted in ascending z-order. + Concretely, if ``pix_to_face[n, y, x, k] = f`` then + ``[w0, w1, w2] = barycentric[n, y, x, k]`` gives + the barycentric coords for pixel (y, x) relative to the face + defined by ``face_verts[f]``. Pixels hit by fewer than + faces_per_pixel are padded with -1. + - **pix_dists**: FloatTensor of shape + (N, image_size, image_size, faces_per_pixel) + giving the signed Euclidean distance (in NDC units) in the + x/y plane of each point closest to the pixel. Concretely if + ``pix_to_face[n, y, x, k] = f`` then ``pix_dists[n, y, x, k]`` is the + squared distance between the pixel (y, x) and the face given + by vertices ``face_verts[f]``. Pixels hit with fewer than + ``faces_per_pixel`` are padded with -1. + + In the case that image_size is a tuple of (H, W) then the outputs + will be of shape `(N, H, W, ...)`. + """ + verts_packed = meshes.verts_packed() + faces_packed = meshes.faces_packed() + face_verts = verts_packed[faces_packed] + mesh_to_face_first_idx = meshes.mesh_to_faces_packed_first_idx() + num_faces_per_mesh = meshes.num_faces_per_mesh() + + # In the case that H != W use the max image size to set the bin_size + # to accommodate the num bins constraint in the coarse rasterizer. + # If the ratio of H:W is large this might cause issues as the smaller + # dimension will have fewer bins. + # TODO: consider a better way of setting the bin size. 
+ im_size = parse_image_size(image_size) + max_image_size = max(*im_size) + + clipped_faces_neighbor_idx = None + + if z_clip_value is not None or cull_to_frustum: + # Cull faces outside the view frustum, and clip faces that are partially + # behind the camera into the portion of the triangle in front of the + # camera. This may change the number of faces + frustum = ClipFrustum( + left=-1, + right=1, + top=-1, + bottom=1, + perspective_correct=perspective_correct, + z_clip_value=z_clip_value, + cull=cull_to_frustum, + ) + clipped_faces = clip_faces( + face_verts, mesh_to_face_first_idx, num_faces_per_mesh, frustum=frustum + ) + face_verts = clipped_faces.face_verts + mesh_to_face_first_idx = clipped_faces.mesh_to_face_first_idx + num_faces_per_mesh = clipped_faces.num_faces_per_mesh + + # For case 4 clipped triangles (where a big triangle is split in two smaller triangles), + # need the index of the neighboring clipped triangle as only one can be in + # in the top K closest faces in the rasterization step. + clipped_faces_neighbor_idx = clipped_faces.clipped_faces_neighbor_idx + + if clipped_faces_neighbor_idx is None: + # Set to the default which is all -1s. + clipped_faces_neighbor_idx = torch.full( + size=(face_verts.shape[0],), + fill_value=-1, + device=meshes.device, + dtype=torch.int64, + ) + + # TODO: Choose naive vs coarse-to-fine based on mesh size and image size. + if bin_size is None: + if not verts_packed.is_cuda: + # Binned CPU rasterization is not supported. + bin_size = 0 + else: + # TODO better heuristics for bin size. 
class _RasterizeFaceVerts(torch.autograd.Function):
    """
    Torch autograd wrapper for forward and backward pass of rasterize_meshes
    implemented in C++/CUDA.

    Args:
        face_verts: Tensor of shape (F, 3, 3) giving (packed) vertex positions
            for faces in all the meshes in the batch. Concretely,
            face_verts[f, i] = [x, y, z] gives the coordinates for the
            ith vertex of the fth face. These vertices are expected to
            be in NDC coordinates in the range [-1, 1].
        mesh_to_face_first_idx: LongTensor of shape (N) giving the index in
            faces_verts of the first face in each mesh in
            the batch.
        num_faces_per_mesh: LongTensor of shape (N) giving the number of faces
            for each mesh in the batch.
        image_size, blur_radius, faces_per_pixel: same as rasterize_meshes.
        perspective_correct: same as rasterize_meshes.
        cull_backfaces: same as rasterize_meshes.

    Returns:
        same as rasterize_meshes function.
    """

    @staticmethod
    def forward(
        ctx,
        face_verts: torch.Tensor,
        mesh_to_face_first_idx: torch.Tensor,
        num_faces_per_mesh: torch.Tensor,
        clipped_faces_neighbor_idx: torch.Tensor,
        image_size: Union[List[int], Tuple[int, int]] = (256, 256),
        blur_radius: float = 0.01,
        faces_per_pixel: int = 0,
        bin_size: int = 0,
        max_faces_per_bin: int = 0,
        perspective_correct: bool = False,
        clip_barycentric_coords: bool = False,
        cull_backfaces: bool = False,
        # NOTE(review): z_clip_value and cull_to_frustum are accepted here but
        # not forwarded to the C++/CUDA kernel below; clipping and culling are
        # done by clip_faces() in rasterize_meshes before apply() is called.
        z_clip_value: Optional[float] = None,
        cull_to_frustum: bool = True,
    ):
        # pyre-fixme[16]: Module `pytorch3d` has no attribute `_C`.
        pix_to_face, zbuf, barycentric_coords, dists = _C.rasterize_meshes(
            face_verts,
            mesh_to_face_first_idx,
            num_faces_per_mesh,
            clipped_faces_neighbor_idx,
            image_size,
            blur_radius,
            faces_per_pixel,
            bin_size,
            max_faces_per_bin,
            perspective_correct,
            clip_barycentric_coords,
            cull_backfaces,
        )

        # Only face_verts and pix_to_face are needed by the backward pass.
        ctx.save_for_backward(face_verts, pix_to_face)
        # pix_to_face holds integer face indices; no gradient flows through it.
        ctx.mark_non_differentiable(pix_to_face)
        ctx.perspective_correct = perspective_correct
        ctx.clip_barycentric_coords = clip_barycentric_coords
        return pix_to_face, zbuf, barycentric_coords, dists

    @staticmethod
    def backward(ctx, grad_pix_to_face, grad_zbuf, grad_barycentric_coords, grad_dists):
        # Only face_verts is differentiable; every other forward input is
        # configuration (sizes, flags) or an integer index tensor, so its
        # gradient slot stays None.
        grad_face_verts = None
        grad_mesh_to_face_first_idx = None
        grad_num_faces_per_mesh = None
        grad_clipped_faces_neighbor_idx = None
        grad_image_size = None
        grad_radius = None
        grad_faces_per_pixel = None
        grad_bin_size = None
        grad_max_faces_per_bin = None
        grad_perspective_correct = None
        grad_clip_barycentric_coords = None
        grad_cull_backfaces = None
        face_verts, pix_to_face = ctx.saved_tensors
        # grad_pix_to_face is unused: pix_to_face was marked non-differentiable
        # in forward.
        grad_face_verts = _C.rasterize_meshes_backward(
            face_verts,
            pix_to_face,
            grad_zbuf,
            grad_barycentric_coords,
            grad_dists,
            ctx.perspective_correct,
            ctx.clip_barycentric_coords,
        )
        # One gradient per argument actually passed to apply() in
        # rasterize_meshes (12 positional args; the two defaulted trailing
        # parameters are never supplied there).
        grads = (
            grad_face_verts,
            grad_mesh_to_face_first_idx,
            grad_num_faces_per_mesh,
            grad_clipped_faces_neighbor_idx,
            grad_image_size,
            grad_radius,
            grad_faces_per_pixel,
            grad_bin_size,
            grad_max_faces_per_bin,
            grad_perspective_correct,
            grad_clip_barycentric_coords,
            grad_cull_backfaces,
        )
        return grads
def non_square_ndc_range(S1, S2):
    """
    Compute the NDC extent of image dimension S1 given the other image
    dimension S2. For non square images the NDC range is scaled to keep the
    aspect ratio: the smaller dimension always spans 2.0 NDC units and the
    larger one spans proportionally more.

    Args:
        S1: dimension along which the NDC range is needed
        S2: the other image dimension

    Returns:
        ndc_range: NDC range for dimension S1
    """
    if S1 <= S2:
        return 2.0
    return (S1 / S2) * 2.0


def pix_to_non_square_ndc(i, S1, S2):
    """
    Map a pixel index i along dimension S1 to the NDC coordinate of the
    pixel center.

    The default NDC range is [-1, 1]; for non square images the range of the
    larger dimension is scaled by the ratio of the two dimensions so pixels
    stay square, e.g. for image size (H, W) = (64, 128):
        Height NDC range: [-1, 1]
        Width NDC range: [-2, 2]

    Args:
        i: pixel position along axis S1
        S1: dimension along which i is given
        S2: the other image dimension

    Returns:
        pixel: NDC coordinate of point i for dimension S1
    """
    # NDC: x-offset + (i * pixel_width + half_pixel_width)
    span = non_square_ndc_range(S1, S2)
    half_span = span / 2.0
    return (span * i + half_span) / S1 - half_span
+ """ + N = len(meshes) + H, W = image_size if isinstance(image_size, tuple) else (image_size, image_size) + + K = faces_per_pixel + device = meshes.device + + verts_packed = meshes.verts_packed() + faces_packed = meshes.faces_packed() + faces_verts = verts_packed[faces_packed] + mesh_to_face_first_idx = meshes.mesh_to_faces_packed_first_idx() + num_faces_per_mesh = meshes.num_faces_per_mesh() + + if z_clip_value is not None or cull_to_frustum: + # Cull faces outside the view frustum, and clip faces that are partially + # behind the camera into the portion of the triangle in front of the + # camera. This may change the number of faces + frustum = ClipFrustum( + left=-1, + right=1, + top=-1, + bottom=1, + perspective_correct=perspective_correct, + z_clip_value=z_clip_value, + cull=cull_to_frustum, + ) + clipped_faces = clip_faces( + faces_verts, mesh_to_face_first_idx, num_faces_per_mesh, frustum=frustum + ) + faces_verts = clipped_faces.face_verts + mesh_to_face_first_idx = clipped_faces.mesh_to_face_first_idx + num_faces_per_mesh = clipped_faces.num_faces_per_mesh + + # Initialize output tensors. + face_idxs = torch.full( + (N, H, W, K), fill_value=-1, dtype=torch.int64, device=device + ) + zbuf = torch.full((N, H, W, K), fill_value=-1, dtype=torch.float32, device=device) + bary_coords = torch.full( + (N, H, W, K, 3), fill_value=-1, dtype=torch.float32, device=device + ) + pix_dists = torch.full( + (N, H, W, K), fill_value=-1, dtype=torch.float32, device=device + ) + + # Calculate all face bounding boxes. + x_mins = torch.min(faces_verts[:, :, 0], dim=1, keepdim=True).values + x_maxs = torch.max(faces_verts[:, :, 0], dim=1, keepdim=True).values + y_mins = torch.min(faces_verts[:, :, 1], dim=1, keepdim=True).values + y_maxs = torch.max(faces_verts[:, :, 1], dim=1, keepdim=True).values + z_mins = torch.min(faces_verts[:, :, 2], dim=1, keepdim=True).values + + # Expand by blur radius. 
+ x_mins = x_mins - np.sqrt(blur_radius) - kEpsilon + x_maxs = x_maxs + np.sqrt(blur_radius) + kEpsilon + y_mins = y_mins - np.sqrt(blur_radius) - kEpsilon + y_maxs = y_maxs + np.sqrt(blur_radius) + kEpsilon + + # Loop through meshes in the batch. + for n in range(N): + face_start_idx = mesh_to_face_first_idx[n] + face_stop_idx = face_start_idx + num_faces_per_mesh[n] + + # Iterate through the horizontal lines of the image from top to bottom. + for yi in range(H): + # Y coordinate of one end of the image. Reverse the ordering + # of yi so that +Y is pointing up in the image. + yfix = H - 1 - yi + yf = pix_to_non_square_ndc(yfix, H, W) + + # Iterate through pixels on this horizontal line, left to right. + for xi in range(W): + # X coordinate of one end of the image. Reverse the ordering + # of xi so that +X is pointing to the left in the image. + xfix = W - 1 - xi + xf = pix_to_non_square_ndc(xfix, W, H) + top_k_points = [] + + # Check whether each face in the mesh affects this pixel. + for f in range(face_start_idx, face_stop_idx): + face = faces_verts[f].squeeze() + v0, v1, v2 = face.unbind(0) + + face_area = edge_function(v0, v1, v2) + + # Ignore triangles facing away from the camera. + back_face = face_area < 0 + if cull_backfaces and back_face: + continue + + # Ignore faces which have zero area. + if face_area == 0.0: + continue + + outside_bbox = ( + xf < x_mins[f] + or xf > x_maxs[f] + or yf < y_mins[f] + or yf > y_maxs[f] + ) + + # Faces with at least one vertex behind the camera won't + # render correctly and should be removed or clipped before + # calling the rasterizer + if z_mins[f] < kEpsilon: + continue + + # Check if pixel is outside of face bbox. + if outside_bbox: + continue + + # Compute barycentric coordinates and pixel z distance. 
+ pxy = torch.tensor([xf, yf], dtype=torch.float32, device=device) + + bary = barycentric_coordinates(pxy, v0[:2], v1[:2], v2[:2]) + if perspective_correct: + z0, z1, z2 = v0[2], v1[2], v2[2] + l0, l1, l2 = bary[0], bary[1], bary[2] + top0 = l0 * z1 * z2 + top1 = z0 * l1 * z2 + top2 = z0 * z1 * l2 + bot = top0 + top1 + top2 + bary = torch.stack([top0 / bot, top1 / bot, top2 / bot]) + + # Check if inside before clipping + inside = all(x > 0.0 for x in bary) + + # Barycentric clipping + if clip_barycentric_coords: + bary = barycentric_coordinates_clip(bary) + # use clipped barycentric coords to calculate the z value + pz = bary[0] * v0[2] + bary[1] * v1[2] + bary[2] * v2[2] + + # Check if point is behind the image. + if pz < 0: + continue + + # Calculate signed 2D distance from point to face. + # Points inside the triangle have negative distance. + dist = point_triangle_distance(pxy, v0[:2], v1[:2], v2[:2]) + + # Add an epsilon to prevent errors when comparing distance + # to blur radius. + if not inside and dist >= blur_radius: + continue + + # Handle the case where a face (f) partially behind the image plane is + # clipped to a quadrilateral and then split into two faces (t1, t2). + top_k_idx = -1 + if ( + clipped_faces_neighbor_idx is not None + and clipped_faces_neighbor_idx[f] != -1 + ): + neighbor_idx = clipped_faces_neighbor_idx[f] + # See if neighbor_idx is in top_k and find index + top_k_idx = [ + i + for i, val in enumerate(top_k_points) + if val[1] == neighbor_idx + ] + top_k_idx = top_k_idx[0] if len(top_k_idx) > 0 else -1 + + if top_k_idx != -1 and dist < top_k_points[top_k_idx][3]: + # Overwrite the neighbor with current face info + top_k_points[top_k_idx] = (pz, f, bary, dist, inside) + else: + # Handle as a normal face + top_k_points.append((pz, f, bary, dist, inside)) + + top_k_points.sort() + if len(top_k_points) > K: + top_k_points = top_k_points[:K] + + # Save to output tensors. 
def edge_function(p, v0, v1):
    r"""
    Signed parallelogram area test: decides on which side of the directed
    2D edge (v0 -> v1) the point p lies.

    Args:
        p: (x, y) coordinates of a point.
        v0, v1: (x, y) coordinates of the end points of the edge.

    Returns:
        area: the signed area of the parallelogram spanned by

    .. code-block:: python

            B = p - v0
            A = v1 - v0

                 v1 ________
                   /\        /
                A /  \      /
                 /    \    /
             v0 /______\/
                    B    p

        i.e. the cross product A x B. A positive value means p is on the
        right side of the edge, a negative value means the left side:

    .. code-block:: python

                     v1
                    /
                   /
               -  /  +
                 /
                /
              v0
    """
    edge_x = v1[0] - v0[0]
    edge_y = v1[1] - v0[1]
    to_p_x = p[0] - v0[0]
    to_p_y = p[1] - v0[1]
    return to_p_x * edge_y - to_p_y * edge_x
def barycentric_coordinates_clip(bary):
    """
    Clamp negative barycentric coordinates to 0.0 and renormalize so the
    three coordinates sum to 1. With blur_radius > 0 a face can be recorded
    as overlapping a pixel that lies outside it, in which case at least one
    barycentric coordinate is negative; clipping keeps texture and z-buffer
    interpolation inside the face.

    Args:
        bary: tuple of barycentric coordinates

    Returns
        bary_clip: (w0, w1, w2) barycentric coordinates with no negative values.
    """
    # Clamp each coordinate at zero, then renormalize; the lower bound on the
    # sum guards against division by zero when all coordinates were negative.
    clipped = [torch.clamp(coord, min=0.0) for coord in (bary[0], bary[1], bary[2])]
    total = torch.clamp(clipped[0] + clipped[1] + clipped[2], min=1e-5)
    w0_clip, w1_clip, w2_clip = (coord / total for coord in clipped)

    return (w0_clip, w1_clip, w2_clip)


def barycentric_coordinates(p, v0, v1, v2):
    """
    Compute the barycentric coordinates of a point relative to a triangle.

    Args:
        p: Coordinates of a point.
        v0, v1, v2: Coordinates of the triangle vertices.

    Returns
        bary: (w0, w1, w2) barycentric coordinates in the range [0, 1].
    """
    # Twice the signed face area; kEpsilon guards against division by zero
    # for degenerate triangles.
    double_area = edge_function(v2, v0, v1) + kEpsilon
    w0 = edge_function(p, v1, v2) / double_area
    w1 = edge_function(p, v2, v0) / double_area
    w2 = edge_function(p, v0, v1) / double_area
    return (w0, w1, w2)
+ """ + if p.shape != v0.shape != v1.shape: + raise ValueError("All points must have the same number of coordinates") + + v1v0 = v1 - v0 + l2 = v1v0.dot(v1v0) # |v1 - v0|^2 + if l2 <= kEpsilon: + return (p - v1).dot(p - v1) # v0 == v1 + + t = v1v0.dot(p - v0) / l2 + t = torch.clamp(t, min=0.0, max=1.0) + p_proj = v0 + t * v1v0 + delta_p = p_proj - p + return delta_p.dot(delta_p) + + +def point_triangle_distance(p, v0, v1, v2): + """ + Return shortest distance between a point and a triangle. + + Args: + p: Coordinates of a point. + v0, v1, v2: Coordinates of the three triangle vertices. + + Returns: + shortest absolute distance from the point to the triangle. + """ + if p.shape != v0.shape != v1.shape != v2.shape: + raise ValueError("All points must have the same number of coordinates") + + e01_dist = point_line_distance(p, v0, v1) + e02_dist = point_line_distance(p, v0, v2) + e12_dist = point_line_distance(p, v1, v2) + edge_dists_min = torch.min(torch.min(e01_dist, e02_dist), e12_dist) + + return edge_dists_min diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/rasterizer.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/rasterizer.py new file mode 100644 index 0000000000000000000000000000000000000000..0e5c9f4dd00e96258286c56e28e48585a16a7ab7 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/rasterizer.py @@ -0,0 +1,273 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
@dataclass(frozen=True)
class Fragments:
    """
    Immutable container for the outputs of a rasterizer. `detach()` returns a
    copy whose floating point buffers are cut off from the computational
    graph, stopping gradients from flowing back through the rasterizer.

    Members:
        pix_to_face:
            LongTensor of shape (N, image_size, image_size, faces_per_pixel)
            giving the indices of the nearest faces at each pixel, sorted in
            ascending z-order. Concretely ``pix_to_face[n, y, x, k] = f``
            means ``faces_verts[f]`` is the kth closest face (in z) to pixel
            (y, x). Pixels hit by fewer than faces_per_pixel are padded
            with -1.

        zbuf:
            FloatTensor of shape (N, image_size, image_size, faces_per_pixel)
            giving the NDC z-coordinates of the nearest faces at each pixel,
            sorted in ascending z-order: if ``pix_to_face[n, y, x, k] = f``
            then ``zbuf[n, y, x, k] = face_verts[f, 2]``. Padded with -1.

        bary_coords:
            FloatTensor of shape
            (N, image_size, image_size, faces_per_pixel, 3) giving the
            barycentric coordinates (in NDC units) of the nearest faces at
            each pixel, sorted in ascending z-order: if
            ``pix_to_face[n, y, x, k] = f`` then
            ``[w0, w1, w2] = barycentric[n, y, x, k]`` are the barycentric
            coords of pixel (y, x) relative to ``face_verts[f]``.
            Padded with -1.

        dists:
            FloatTensor of shape (N, image_size, image_size, faces_per_pixel)
            giving the signed squared x/y-plane distance (in NDC units) from
            each pixel to its closest point on the face, or None.
            Padded with -1.
    """

    pix_to_face: torch.Tensor
    zbuf: torch.Tensor
    bary_coords: torch.Tensor
    dists: Optional[torch.Tensor]

    def detach(self) -> "Fragments":
        """Return a Fragments whose float tensors are detached from autograd.

        pix_to_face holds integer indices (never differentiable) and is
        reused as-is.
        """
        detached_dists = None if self.dists is None else self.dists.detach()
        return Fragments(
            pix_to_face=self.pix_to_face,
            zbuf=self.zbuf.detach(),
            bary_coords=self.bary_coords.detach(),
            dists=detached_dists,
        )
@dataclass
class RasterizationSettings:
    """
    Class to store the mesh rasterization params with defaults

    Members:
        image_size: Either common height and width or (height, width), in pixels.
        blur_radius: Float distance in the range [0, 2] used to expand the face
            bounding boxes for rasterization. Setting blur radius
            results in blurred edges around the shape instead of a
            hard boundary. Set to 0 for no blur.
        faces_per_pixel: (int) Number of faces to keep track of per pixel.
            We return the nearest faces_per_pixel faces along the z-axis.
        bin_size: Size of bins to use for coarse-to-fine rasterization. Setting
            bin_size=0 uses naive rasterization; setting bin_size=None attempts
            to set it heuristically based on the shape of the input. This should
            not affect the output, but can affect the speed of the forward pass.
        max_faces_opengl: Max number of faces in any mesh we will rasterize. Used only by
            MeshRasterizerOpenGL to pre-allocate OpenGL memory.
        max_faces_per_bin: Only applicable when using coarse-to-fine
            rasterization (bin_size != 0); this is the maximum number of faces
            allowed within each bin. This should not affect the output values,
            but can affect the memory usage in the forward pass.
            Setting max_faces_per_bin=None attempts to set with a heuristic.
        perspective_correct: Whether to apply perspective correction when
            computing barycentric coordinates for pixels.
            None (default) means make correction if the camera uses perspective.
        clip_barycentric_coords: Whether, after any perspective correction
            is applied but before the depth is calculated (e.g. for
            z clipping), to "correct" a location outside the face (i.e. with
            a negative barycentric coordinate) to a position on the edge of the
            face. None (default) means clip if blur_radius > 0, which is a condition
            under which such outside-face-points are likely.
        cull_backfaces: Whether to only rasterize mesh faces which are
            visible to the camera. This assumes that vertices of
            front-facing triangles are ordered in an anti-clockwise
            fashion, and triangles that face away from the camera are
            in a clockwise order relative to the current view
            direction. NOTE: This will only work if the mesh faces are
            consistently defined with counter-clockwise ordering when
            viewed from the outside.
        z_clip_value: if not None, then triangles will be clipped (and possibly
            subdivided into smaller triangles) such that z >= z_clip_value.
            This avoids camera projections that go to infinity as z->0.
            Default is None as clipping affects rasterization speed and
            should only be turned on if explicitly needed.
            See clip.py for all the extra computation that is required.
        cull_to_frustum: Whether to cull triangles outside the view frustum.
            Culling involves removing all faces which fall outside view frustum.
            Default is False for performance as often not needed.
    """

    image_size: Union[int, Tuple[int, int]] = 256
    blur_radius: float = 0.0
    faces_per_pixel: int = 1
    bin_size: Optional[int] = None
    max_faces_opengl: int = 10_000_000
    max_faces_per_bin: Optional[int] = None
    perspective_correct: Optional[bool] = None
    clip_barycentric_coords: Optional[bool] = None
    cull_backfaces: bool = False
    z_clip_value: Optional[float] = None
    cull_to_frustum: bool = False
+ """ + cameras = kwargs.get("cameras", self.cameras) + if cameras is None: + msg = "Cameras must be specified either at initialization \ + or in the forward pass of MeshRasterizer" + raise ValueError(msg) + + n_cameras = len(cameras) + if n_cameras != 1 and n_cameras != len(meshes_world): + msg = "Wrong number (%r) of cameras for %r meshes" + raise ValueError(msg % (n_cameras, len(meshes_world))) + + verts_world = meshes_world.verts_padded() + + # NOTE: Retaining view space z coordinate for now. + # TODO: Revisit whether or not to transform z coordinate to [-1, 1] or + # [0, 1] range. + eps = kwargs.get("eps", None) + verts_view = cameras.get_world_to_view_transform(**kwargs).transform_points( + verts_world, eps=eps + ) + to_ndc_transform = cameras.get_ndc_camera_transform(**kwargs) + projection_transform = try_get_projection_transform(cameras, kwargs) + if projection_transform is not None: + projection_transform = projection_transform.compose(to_ndc_transform) + verts_ndc = projection_transform.transform_points(verts_view, eps=eps) + else: + # Call transform_points instead of explicitly composing transforms to handle + # the case, where camera class does not have a projection matrix form. + verts_proj = cameras.transform_points(verts_world, eps=eps) + verts_ndc = to_ndc_transform.transform_points(verts_proj, eps=eps) + + verts_ndc[..., 2] = verts_view[..., 2] + meshes_ndc = meshes_world.update_padded(new_verts_padded=verts_ndc) + return meshes_ndc + + def forward(self, meshes_world, **kwargs) -> Fragments: + """ + Args: + meshes_world: a Meshes object representing a batch of meshes with + coordinates in world space. + Returns: + Fragments: Rasterization outputs as a named tuple. + """ + meshes_proj = self.transform(meshes_world, **kwargs) + raster_settings = kwargs.get("raster_settings", self.raster_settings) + + # By default, turn on clip_barycentric_coords if blur_radius > 0. 
+ # When blur_radius > 0, a face can be matched to a pixel that is outside the + # face, resulting in negative barycentric coordinates. + clip_barycentric_coords = raster_settings.clip_barycentric_coords + if clip_barycentric_coords is None: + clip_barycentric_coords = raster_settings.blur_radius > 0.0 + + # If not specified, infer perspective_correct and z_clip_value from the camera + cameras = kwargs.get("cameras", self.cameras) + if raster_settings.perspective_correct is not None: + perspective_correct = raster_settings.perspective_correct + else: + perspective_correct = cameras.is_perspective() + if raster_settings.z_clip_value is not None: + z_clip = raster_settings.z_clip_value + else: + znear = cameras.get_znear() + if isinstance(znear, torch.Tensor): + znear = znear.min().item() + z_clip = None if not perspective_correct or znear is None else znear / 2 + + # By default, turn on clip_barycentric_coords if blur_radius > 0. + # When blur_radius > 0, a face can be matched to a pixel that is outside the + # face, resulting in negative barycentric coordinates. 
+ + pix_to_face, zbuf, bary_coords, dists = rasterize_meshes( + meshes_proj, + image_size=raster_settings.image_size, + blur_radius=raster_settings.blur_radius, + faces_per_pixel=raster_settings.faces_per_pixel, + bin_size=raster_settings.bin_size, + max_faces_per_bin=raster_settings.max_faces_per_bin, + clip_barycentric_coords=clip_barycentric_coords, + perspective_correct=perspective_correct, + cull_backfaces=raster_settings.cull_backfaces, + z_clip_value=z_clip, + cull_to_frustum=raster_settings.cull_to_frustum, + ) + + return Fragments( + pix_to_face=pix_to_face, + zbuf=zbuf, + bary_coords=bary_coords, + dists=dists, + ) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/renderer.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/renderer.py new file mode 100644 index 0000000000000000000000000000000000000000..5b623243a2a5660505fb6476bb31f5d15615d362 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/renderer.py @@ -0,0 +1,112 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from typing import Tuple + +import torch +import torch.nn as nn + +from ...structures.meshes import Meshes + +# A renderer class should be initialized with a +# function for rasterization and a function for shading. +# The rasterizer should: +# - transform inputs from world -> screen space +# - rasterize inputs +# - return fragments +# The shader can take fragments as input along with any other properties of +# the scene and generate images. + +# E.g. 
rasterize inputs and then shade +# +# fragments = self.rasterize(meshes) +# images = self.shader(fragments, meshes) +# return images + + +class MeshRenderer(nn.Module): + """ + A class for rendering a batch of heterogeneous meshes. The class should + be initialized with a rasterizer (a MeshRasterizer or a MeshRasterizerOpenGL) + and shader class which each have a forward function. + """ + + def __init__(self, rasterizer, shader) -> None: + super().__init__() + self.rasterizer = rasterizer + self.shader = shader + + def to(self, device): + # Rasterizer and shader have submodules which are not of type nn.Module + self.rasterizer.to(device) + self.shader.to(device) + return self + + def forward(self, meshes_world: Meshes, **kwargs) -> torch.Tensor: + """ + Render a batch of images from a batch of meshes by rasterizing and then + shading. + + NOTE: If the blur radius for rasterization is > 0.0, some pixels can + have one or more barycentric coordinates lying outside the range [0, 1]. + For a pixel with out of bounds barycentric coordinates with respect to a + face f, clipping is required before interpolating the texture uv + coordinates and z buffer so that the colors and depths are limited to + the range for the corresponding face. + For this set rasterizer.raster_settings.clip_barycentric_coords=True + """ + fragments = self.rasterizer(meshes_world, **kwargs) + images = self.shader(fragments, meshes_world, **kwargs) + + return images + + +class MeshRendererWithFragments(nn.Module): + """ + A class for rendering a batch of heterogeneous meshes. The class should + be initialized with a rasterizer (a MeshRasterizer or a MeshRasterizerOpenGL) + and shader class which each have a forward function. + + In the forward pass this class returns the `fragments` from which intermediate + values such as the depth map can be easily extracted e.g. + + .. 
code-block:: python + images, fragments = renderer(meshes) + depth = fragments.zbuf + """ + + def __init__(self, rasterizer, shader) -> None: + super().__init__() + self.rasterizer = rasterizer + self.shader = shader + + def to(self, device): + # Rasterizer and shader have submodules which are not of type nn.Module + self.rasterizer.to(device) + self.shader.to(device) + return self + + def forward( + self, meshes_world: Meshes, **kwargs + ) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Render a batch of images from a batch of meshes by rasterizing and then + shading. + + NOTE: If the blur radius for rasterization is > 0.0, some pixels can + have one or more barycentric coordinates lying outside the range [0, 1]. + For a pixel with out of bounds barycentric coordinates with respect to a + face f, clipping is required before interpolating the texture uv + coordinates and z buffer so that the colors and depths are limited to + the range for the corresponding face. + For this set rasterizer.raster_settings.clip_barycentric_coords=True + """ + fragments = self.rasterizer(meshes_world, **kwargs) + images = self.shader(fragments, meshes_world, **kwargs) + + return images, fragments diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/shader.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/shader.py new file mode 100644 index 0000000000000000000000000000000000000000..77aeba91892b02ff91577eb69b4159854e56053e --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/shader.py @@ -0,0 +1,444 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + +import warnings +from typing import Optional + +import torch +import torch.nn as nn + +from ...common.datatypes import Device +from ...structures.meshes import Meshes +from ..blending import ( + BlendParams, + hard_rgb_blend, + sigmoid_alpha_blend, + softmax_rgb_blend, +) +from ..lighting import PointLights +from ..materials import Materials +from ..splatter_blend import SplatterBlender +from ..utils import TensorProperties +from .rasterizer import Fragments +from .shading import ( + _phong_shading_with_pixels, + flat_shading, + gouraud_shading, + phong_shading, +) + + +# A Shader should take as input fragments from the output of rasterization +# along with scene params and output images. A shader could perform operations +# such as: +# - interpolate vertex attributes for all the fragments +# - sample colors from a texture map +# - apply per pixel lighting +# - blend colors across top K faces per pixel. +class ShaderBase(nn.Module): + def __init__( + self, + device: Device = "cpu", + cameras: Optional[TensorProperties] = None, + lights: Optional[TensorProperties] = None, + materials: Optional[Materials] = None, + blend_params: Optional[BlendParams] = None, + ) -> None: + super().__init__() + self.lights = lights if lights is not None else PointLights(device=device) + self.materials = ( + materials if materials is not None else Materials(device=device) + ) + self.cameras = cameras + self.blend_params = blend_params if blend_params is not None else BlendParams() + + def _get_cameras(self, **kwargs): + cameras = kwargs.get("cameras", self.cameras) + if cameras is None: + msg = "Cameras must be specified either at initialization \ + or in the forward pass of the shader." + raise ValueError(msg) + + return cameras + + # pyre-fixme[14]: `to` overrides method defined in `Module` inconsistently. 
+ def to(self, device: Device): + # Manually move to device modules which are not subclasses of nn.Module + cameras = self.cameras + if cameras is not None: + self.cameras = cameras.to(device) + self.materials = self.materials.to(device) + self.lights = self.lights.to(device) + return self + + +class HardPhongShader(ShaderBase): + """ + Per pixel lighting - the lighting model is applied using the interpolated + coordinates and normals for each pixel. The blending function hard assigns + the color of the closest face for each pixel. + + To use the default values, simply initialize the shader with the desired + device e.g. + + .. code-block:: + + shader = HardPhongShader(device=torch.device("cuda:0")) + """ + + def forward(self, fragments: Fragments, meshes: Meshes, **kwargs) -> torch.Tensor: + cameras = super()._get_cameras(**kwargs) + texels = meshes.sample_textures(fragments) + lights = kwargs.get("lights", self.lights) + materials = kwargs.get("materials", self.materials) + blend_params = kwargs.get("blend_params", self.blend_params) + colors = phong_shading( + meshes=meshes, + fragments=fragments, + texels=texels, + lights=lights, + cameras=cameras, + materials=materials, + ) + images = hard_rgb_blend(colors, fragments, blend_params) + return images + + +class SoftPhongShader(ShaderBase): + """ + Per pixel lighting - the lighting model is applied using the interpolated + coordinates and normals for each pixel. The blending function returns the + soft aggregated color using all the faces per pixel. + + To use the default values, simply initialize the shader with the desired + device e.g. + + .. 
code-block:: + + shader = SoftPhongShader(device=torch.device("cuda:0")) + """ + + def forward(self, fragments: Fragments, meshes: Meshes, **kwargs) -> torch.Tensor: + cameras = super()._get_cameras(**kwargs) + texels = meshes.sample_textures(fragments) + lights = kwargs.get("lights", self.lights) + materials = kwargs.get("materials", self.materials) + blend_params = kwargs.get("blend_params", self.blend_params) + colors = phong_shading( + meshes=meshes, + fragments=fragments, + texels=texels, + lights=lights, + cameras=cameras, + materials=materials, + ) + znear = kwargs.get("znear", getattr(cameras, "znear", 1.0)) + zfar = kwargs.get("zfar", getattr(cameras, "zfar", 100.0)) + images = softmax_rgb_blend( + colors, fragments, blend_params, znear=znear, zfar=zfar + ) + return images + + +class HardGouraudShader(ShaderBase): + """ + Per vertex lighting - the lighting model is applied to the vertex colors and + the colors are then interpolated using the barycentric coordinates to + obtain the colors for each pixel. The blending function hard assigns + the color of the closest face for each pixel. + + To use the default values, simply initialize the shader with the desired + device e.g. + + .. code-block:: + + shader = HardGouraudShader(device=torch.device("cuda:0")) + """ + + def forward(self, fragments: Fragments, meshes: Meshes, **kwargs) -> torch.Tensor: + cameras = super()._get_cameras(**kwargs) + lights = kwargs.get("lights", self.lights) + materials = kwargs.get("materials", self.materials) + blend_params = kwargs.get("blend_params", self.blend_params) + + # As Gouraud shading applies the illumination to the vertex + # colors, the interpolated pixel texture is calculated in the + # shading step. In comparison, for Phong shading, the pixel + # textures are computed first after which the illumination is + # applied. 
+ pixel_colors = gouraud_shading( + meshes=meshes, + fragments=fragments, + lights=lights, + cameras=cameras, + materials=materials, + ) + images = hard_rgb_blend(pixel_colors, fragments, blend_params) + return images + + +class SoftGouraudShader(ShaderBase): + """ + Per vertex lighting - the lighting model is applied to the vertex colors and + the colors are then interpolated using the barycentric coordinates to + obtain the colors for each pixel. The blending function returns the + soft aggregated color using all the faces per pixel. + + To use the default values, simply initialize the shader with the desired + device e.g. + + .. code-block:: + + shader = SoftGouraudShader(device=torch.device("cuda:0")) + """ + + def forward(self, fragments: Fragments, meshes: Meshes, **kwargs) -> torch.Tensor: + cameras = super()._get_cameras(**kwargs) + lights = kwargs.get("lights", self.lights) + materials = kwargs.get("materials", self.materials) + pixel_colors = gouraud_shading( + meshes=meshes, + fragments=fragments, + lights=lights, + cameras=cameras, + materials=materials, + ) + znear = kwargs.get("znear", getattr(cameras, "znear", 1.0)) + zfar = kwargs.get("zfar", getattr(cameras, "zfar", 100.0)) + images = softmax_rgb_blend( + pixel_colors, fragments, self.blend_params, znear=znear, zfar=zfar + ) + return images + + +def TexturedSoftPhongShader( + device: Device = "cpu", + cameras: Optional[TensorProperties] = None, + lights: Optional[TensorProperties] = None, + materials: Optional[Materials] = None, + blend_params: Optional[BlendParams] = None, +) -> SoftPhongShader: + """ + TexturedSoftPhongShader class has been DEPRECATED. Use SoftPhongShader instead. + Preserving TexturedSoftPhongShader as a function for backwards compatibility. 
+ """ + warnings.warn( + """TexturedSoftPhongShader is now deprecated; + use SoftPhongShader instead.""", + PendingDeprecationWarning, + ) + return SoftPhongShader( + device=device, + cameras=cameras, + lights=lights, + materials=materials, + blend_params=blend_params, + ) + + +class HardFlatShader(ShaderBase): + """ + Per face lighting - the lighting model is applied using the average face + position and the face normal. The blending function hard assigns + the color of the closest face for each pixel. + + To use the default values, simply initialize the shader with the desired + device e.g. + + .. code-block:: + + shader = HardFlatShader(device=torch.device("cuda:0")) + """ + + def forward(self, fragments: Fragments, meshes: Meshes, **kwargs) -> torch.Tensor: + cameras = super()._get_cameras(**kwargs) + texels = meshes.sample_textures(fragments) + lights = kwargs.get("lights", self.lights) + materials = kwargs.get("materials", self.materials) + blend_params = kwargs.get("blend_params", self.blend_params) + colors = flat_shading( + meshes=meshes, + fragments=fragments, + texels=texels, + lights=lights, + cameras=cameras, + materials=materials, + ) + images = hard_rgb_blend(colors, fragments, blend_params) + return images + + +class SoftSilhouetteShader(nn.Module): + """ + Calculate the silhouette by blending the top K faces for each pixel based + on the 2d euclidean distance of the center of the pixel to the mesh face. + + Use this shader for generating silhouettes similar to SoftRasterizer [0]. + + .. note:: + + To be consistent with SoftRasterizer, initialize the + RasterizationSettings for the rasterizer with + `blur_radius = np.log(1. / 1e-4 - 1.) 
* blend_params.sigma` + + [0] Liu et al, 'Soft Rasterizer: A Differentiable Renderer for Image-based + 3D Reasoning', ICCV 2019 + """ + + def __init__(self, blend_params: Optional[BlendParams] = None) -> None: + super().__init__() + self.blend_params = blend_params if blend_params is not None else BlendParams() + + def forward(self, fragments: Fragments, meshes: Meshes, **kwargs) -> torch.Tensor: + """ + Only want to render the silhouette so RGB values can be ones. + There is no need for lighting or texturing + """ + colors = torch.ones_like(fragments.bary_coords) + blend_params = kwargs.get("blend_params", self.blend_params) + images = sigmoid_alpha_blend(colors, fragments, blend_params) + return images + + +class SplatterPhongShader(ShaderBase): + """ + Per pixel lighting - the lighting model is applied using the interpolated + coordinates and normals for each pixel. The blending function returns the + color aggregated using splats from surrounding pixels (see [0]). + + To use the default values, simply initialize the shader with the desired + device e.g. + + .. code-block:: + + shader = SplatterPhongShader(device=torch.device("cuda:0")) + + [0] Cole, F. et al., "Differentiable Surface Rendering via Non-differentiable + Sampling". 
+ """ + + def __init__(self, **kwargs): + self.splatter_blender = None + super().__init__(**kwargs) + + def to(self, device: Device): + if self.splatter_blender: + self.splatter_blender.to(device) + return super().to(device) + + def forward(self, fragments: Fragments, meshes: Meshes, **kwargs) -> torch.Tensor: + cameras = super()._get_cameras(**kwargs) + texels = meshes.sample_textures(fragments) + lights = kwargs.get("lights", self.lights) + materials = kwargs.get("materials", self.materials) + + colors, pixel_coords_cameras = _phong_shading_with_pixels( + meshes=meshes, + fragments=fragments.detach(), + texels=texels, + lights=lights, + cameras=cameras, + materials=materials, + ) + + if not self.splatter_blender: + # Init only once, to avoid re-computing constants. + N, H, W, K, _ = colors.shape + self.splatter_blender = SplatterBlender((N, H, W, K), colors.device) + + blend_params = kwargs.get("blend_params", self.blend_params) + self.check_blend_params(blend_params) + + images = self.splatter_blender( + colors, + pixel_coords_cameras, + cameras, + fragments.pix_to_face < 0, + kwargs.get("blend_params", self.blend_params), + ) + + return images + + def check_blend_params(self, blend_params): + if blend_params.sigma != 0.5: + warnings.warn( + f"SplatterPhongShader received sigma={blend_params.sigma}. sigma is " + "defined in pixel units, and any value other than 0.5 is highly " + "unexpected. Only use other values if you know what you are doing. " + ) + + +class HardDepthShader(ShaderBase): + """ + Renders the Z distances of the closest face for each pixel. If no face is + found it returns the zfar value of the camera. + + Output from this shader is [N, H, W, 1] since it's only depth. + + To use the default values, simply initialize the shader with the desired + device e.g. + + .. 
code-block:: + + shader = HardDepthShader(device=torch.device("cuda:0")) + """ + + def forward(self, fragments: Fragments, meshes: Meshes, **kwargs) -> torch.Tensor: + cameras = super()._get_cameras(**kwargs) + + zfar = kwargs.get("zfar", getattr(cameras, "zfar", 100.0)) + mask = fragments.pix_to_face[..., 0:1] < 0 + + zbuf = fragments.zbuf[..., 0:1].clone() + zbuf[mask] = zfar + return zbuf + + +class SoftDepthShader(ShaderBase): + """ + Renders the Z distances using an aggregate of the distances of each face + based off of the point distance. If no face is found it returns the zfar + value of the camera. + + Output from this shader is [N, H, W, 1] since it's only depth. + + To use the default values, simply initialize the shader with the desired + device e.g. + + .. code-block:: + + shader = SoftDepthShader(device=torch.device("cuda:0")) + """ + + def forward(self, fragments: Fragments, meshes: Meshes, **kwargs) -> torch.Tensor: + if fragments.dists is None: + raise ValueError("SoftDepthShader requires Fragments.dists to be present.") + + cameras = super()._get_cameras(**kwargs) + + N, H, W, K = fragments.pix_to_face.shape + device = fragments.zbuf.device + mask = fragments.pix_to_face >= 0 + + zfar = kwargs.get("zfar", getattr(cameras, "zfar", 100.0)) + + # Sigmoid probability map based on the distance of the pixel to the face. 
+ prob_map = torch.sigmoid(-fragments.dists / self.blend_params.sigma) * mask + + # append extra face for zfar + dists = torch.cat( + (fragments.zbuf, torch.ones((N, H, W, 1), device=device) * zfar), dim=3 + ) + probs = torch.cat((prob_map, torch.ones((N, H, W, 1), device=device)), dim=3) + + # compute weighting based off of probabilities using cumsum + probs = probs.cumsum(dim=3) + probs = probs.clamp(max=1) + probs = probs.diff(dim=3, prepend=torch.zeros((N, H, W, 1), device=device)) + + return (probs * dists).sum(dim=3).unsqueeze(3) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/shading.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/shading.py new file mode 100644 index 0000000000000000000000000000000000000000..bb113ee12b1682fd6b8e8e9982c2a05562047003 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/shading.py @@ -0,0 +1,225 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + + +from typing import Tuple + +import torch +from pytorch3d.ops import interpolate_face_attributes + +from .textures import TexturesVertex + + +def _apply_lighting( + points, normals, lights, cameras, materials +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Args: + points: torch tensor of shape (N, ..., 3) or (P, 3). + normals: torch tensor of shape (N, ..., 3) or (P, 3) + lights: instance of the Lights class. + cameras: instance of the Cameras class. + materials: instance of the Materials class. 
+ + Returns: + ambient_color: same shape as materials.ambient_color + diffuse_color: same shape as the input points + specular_color: same shape as the input points + """ + light_diffuse = lights.diffuse(normals=normals, points=points) + light_specular = lights.specular( + normals=normals, + points=points, + camera_position=cameras.get_camera_center(), + shininess=materials.shininess, + ) + ambient_color = materials.ambient_color * lights.ambient_color + diffuse_color = materials.diffuse_color * light_diffuse + specular_color = materials.specular_color * light_specular + + if normals.dim() == 2 and points.dim() == 2: + # If given packed inputs remove batch dim in output. + return ( + ambient_color.squeeze(), + diffuse_color.squeeze(), + specular_color.squeeze(), + ) + + if ambient_color.ndim != diffuse_color.ndim: + # Reshape from (N, 3) to have dimensions compatible with + # diffuse_color which is of shape (N, H, W, K, 3) + ambient_color = ambient_color[:, None, None, None, :] + return ambient_color, diffuse_color, specular_color + + +def _phong_shading_with_pixels( + meshes, fragments, lights, cameras, materials, texels +) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Apply per pixel shading. First interpolate the vertex normals and + vertex coordinates using the barycentric coordinates to get the position + and normal at each pixel. Then compute the illumination for each pixel. + The pixel color is obtained by multiplying the pixel textures by the ambient + and diffuse illumination and adding the specular component. + + Args: + meshes: Batch of meshes + fragments: Fragments named tuple with the outputs of rasterization + lights: Lights class containing a batch of lights + cameras: Cameras class containing a batch of cameras + materials: Materials class containing a batch of material properties + texels: texture per pixel of shape (N, H, W, K, 3) + + Returns: + colors: (N, H, W, K, 3) + pixel_coords: (N, H, W, K, 3), camera coordinates of each intersection. 
+ """ + verts = meshes.verts_packed() # (V, 3) + faces = meshes.faces_packed() # (F, 3) + vertex_normals = meshes.verts_normals_packed() # (V, 3) + faces_verts = verts[faces] + faces_normals = vertex_normals[faces] + pixel_coords_in_camera = interpolate_face_attributes( + fragments.pix_to_face, fragments.bary_coords, faces_verts + ) + pixel_normals = interpolate_face_attributes( + fragments.pix_to_face, fragments.bary_coords, faces_normals + ) + ambient, diffuse, specular = _apply_lighting( + pixel_coords_in_camera, pixel_normals, lights, cameras, materials + ) + colors = (ambient + diffuse) * texels + specular + return colors, pixel_coords_in_camera + + +def phong_shading( + meshes, fragments, lights, cameras, materials, texels +) -> torch.Tensor: + """ + Apply per pixel shading. First interpolate the vertex normals and + vertex coordinates using the barycentric coordinates to get the position + and normal at each pixel. Then compute the illumination for each pixel. + The pixel color is obtained by multiplying the pixel textures by the ambient + and diffuse illumination and adding the specular component. + + Args: + meshes: Batch of meshes + fragments: Fragments named tuple with the outputs of rasterization + lights: Lights class containing a batch of lights + cameras: Cameras class containing a batch of cameras + materials: Materials class containing a batch of material properties + texels: texture per pixel of shape (N, H, W, K, 3) + + Returns: + colors: (N, H, W, K, 3) + """ + colors, _ = _phong_shading_with_pixels( + meshes, fragments, lights, cameras, materials, texels + ) + return colors + + +def gouraud_shading(meshes, fragments, lights, cameras, materials) -> torch.Tensor: + """ + Apply per vertex shading. First compute the vertex illumination by applying + ambient, diffuse and specular lighting. 
If vertex color is available, + combine the ambient and diffuse vertex illumination with the vertex color + and add the specular component to determine the vertex shaded color. + Then interpolate the vertex shaded colors using the barycentric coordinates + to get a color per pixel. + + Gouraud shading is only supported for meshes with texture type `TexturesVertex`. + This is because the illumination is applied to the vertex colors. + + Args: + meshes: Batch of meshes + fragments: Fragments named tuple with the outputs of rasterization + lights: Lights class containing a batch of lights parameters + cameras: Cameras class containing a batch of cameras parameters + materials: Materials class containing a batch of material properties + + Returns: + colors: (N, H, W, K, 3) + """ + if not isinstance(meshes.textures, TexturesVertex): + raise ValueError("Mesh textures must be an instance of TexturesVertex") + + faces = meshes.faces_packed() # (F, 3) + verts = meshes.verts_packed() # (V, 3) + verts_normals = meshes.verts_normals_packed() # (V, 3) + verts_colors = meshes.textures.verts_features_packed() # (V, D) + vert_to_mesh_idx = meshes.verts_packed_to_mesh_idx() + + # Format properties of lights and materials so they are compatible + # with the packed representation of the vertices. This transforms + # all tensor properties in the class from shape (N, ...) -> (V, ...) where + # V is the number of packed vertices. If the number of meshes in the + # batch is one then this is not necessary. 
+ if len(meshes) > 1: + lights = lights.clone().gather_props(vert_to_mesh_idx) + cameras = cameras.clone().gather_props(vert_to_mesh_idx) + materials = materials.clone().gather_props(vert_to_mesh_idx) + + # Calculate the illumination at each vertex + ambient, diffuse, specular = _apply_lighting( + verts, verts_normals, lights, cameras, materials + ) + + verts_colors_shaded = verts_colors * (ambient + diffuse) + specular + face_colors = verts_colors_shaded[faces] + colors = interpolate_face_attributes( + fragments.pix_to_face, fragments.bary_coords, face_colors + ) + return colors + + +def flat_shading(meshes, fragments, lights, cameras, materials, texels) -> torch.Tensor: + """ + Apply per face shading. Use the average face position and the face normals + to compute the ambient, diffuse and specular lighting. Apply the ambient + and diffuse color to the pixel color and add the specular component to + determine the final pixel color. + + Args: + meshes: Batch of meshes + fragments: Fragments named tuple with the outputs of rasterization + lights: Lights class containing a batch of lights parameters + cameras: Cameras class containing a batch of cameras parameters + materials: Materials class containing a batch of material properties + texels: texture per pixel of shape (N, H, W, K, 3) + + Returns: + colors: (N, H, W, K, 3) + """ + verts = meshes.verts_packed() # (V, 3) + faces = meshes.faces_packed() # (F, 3) + face_normals = meshes.faces_normals_packed() # (F, 3) + faces_verts = verts[faces] + face_coords = faces_verts.mean(dim=-2) # (F, 3, XYZ) mean xyz across verts + + # Replace empty pixels in pix_to_face with 0 in order to interpolate. 
+ mask = fragments.pix_to_face == -1 + pix_to_face = fragments.pix_to_face.clone() + pix_to_face[mask] = 0 + + N, H, W, K = pix_to_face.shape + idx = pix_to_face.view(N * H * W * K, 1).expand(N * H * W * K, 3) + + # gather pixel coords + pixel_coords = face_coords.gather(0, idx).view(N, H, W, K, 3) + pixel_coords[mask] = 0.0 + # gather pixel normals + pixel_normals = face_normals.gather(0, idx).view(N, H, W, K, 3) + pixel_normals[mask] = 0.0 + + # Calculate the illumination at each face + ambient, diffuse, specular = _apply_lighting( + pixel_coords, pixel_normals, lights, cameras, materials + ) + colors = (ambient + diffuse) * texels + specular + return colors diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/textures.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/textures.py new file mode 100644 index 0000000000000000000000000000000000000000..abeadf3f010445e15194159f88bfa03733ab92dd --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/textures.py @@ -0,0 +1,1937 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +import itertools +import warnings +from typing import Dict, List, Optional, Tuple, TYPE_CHECKING, Union + +import torch +import torch.nn.functional as F +from pytorch3d.ops import interpolate_face_attributes +from pytorch3d.structures.utils import list_to_packed, list_to_padded, padded_to_list +from torch.nn.functional import interpolate + +from .utils import pack_unique_rectangles, PackedRectangle, Rectangle + + +# This file contains classes and helper functions for texturing. 
+# There are three types of textures: TexturesVertex, TexturesAtlas +# and TexturesUV which inherit from a base textures class TexturesBase. +# +# Each texture class has a method 'sample_textures' to sample a +# value given barycentric coordinates. +# +# All the textures accept either list or padded inputs. The values +# are stored as either per face values (TexturesAtlas, TexturesUV), +# or per face vertex features (TexturesVertex). + + +def _list_to_padded_wrapper( + x: List[torch.Tensor], + pad_size: Union[list, tuple, None] = None, + pad_value: float = 0.0, +) -> torch.Tensor: + r""" + This is a wrapper function for + pytorch3d.structures.utils.list_to_padded function which only accepts + 3-dimensional inputs. + + For this use case, the input x is of shape (F, 3, ...) where only F + is different for each element in the list + + Transforms a list of N tensors each of shape (Mi, ...) into a single tensor + of shape (N, pad_size, ...), or (N, max(Mi), ...) + if pad_size is None. + + Args: + x: list of Tensors + pad_size: int specifying the size of the first dimension + of the padded tensor + pad_value: float value to be used to fill the padded tensor + + Returns: + x_padded: tensor consisting of padded input tensors + """ + N = len(x) + dims = x[0].ndim + reshape_dims = x[0].shape[1:] + D = torch.prod(torch.tensor(reshape_dims)).item() + x_reshaped = [] + for y in x: + if y.ndim != dims and y.shape[1:] != reshape_dims: + msg = ( + "list_to_padded requires tensors to have the same number of dimensions" + ) + raise ValueError(msg) + # pyre-fixme[6]: For 2nd param expected `int` but got `Union[bool, float, int]`. + x_reshaped.append(y.reshape(-1, D)) + x_padded = list_to_padded(x_reshaped, pad_size=pad_size, pad_value=pad_value) + # pyre-fixme[58]: `+` is not supported for operand types `Tuple[int, int]` and + # `Size`. 
+ return x_padded.reshape((N, -1) + reshape_dims) + + +def _padded_to_list_wrapper( + x: torch.Tensor, split_size: Union[list, tuple, None] = None +) -> List[torch.Tensor]: + r""" + This is a wrapper function for pytorch3d.structures.utils.padded_to_list + which only accepts 3-dimensional inputs. + + For this use case, the input x is of shape (N, F, ...) where F + is the number of faces which is different for each tensor in the batch. + + This function transforms a padded tensor of shape (N, M, ...) into a + list of N tensors of shape (Mi, ...) where (Mi) is specified in + split_size(i), or of shape (M,) if split_size is None. + + Args: + x: padded Tensor + split_size: list of ints defining the number of items for each tensor + in the output list. + + Returns: + x_list: a list of tensors + """ + N, M = x.shape[:2] + reshape_dims = x.shape[2:] + D = torch.prod(torch.tensor(reshape_dims)).item() + # pyre-fixme[6]: For 3rd param expected `int` but got `Union[bool, float, int]`. + x_reshaped = x.reshape(N, M, D) + x_list = padded_to_list(x_reshaped, split_size=split_size) + # pyre-fixme[58]: `+` is not supported for operand types `Tuple[typing.Any]` and + # `Size`. + x_list = [xl.reshape((xl.shape[0],) + reshape_dims) for xl in x_list] + return x_list + + +def _pad_texture_maps( + images: Union[Tuple[torch.Tensor], List[torch.Tensor]], align_corners: bool +) -> torch.Tensor: + """ + Pad all texture images so they have the same height and width. 
+ + Args: + images: list of N tensors of shape (H_i, W_i, C) + align_corners: used for interpolation + + Returns: + tex_maps: Tensor of shape (N, max_H, max_W, C) + """ + tex_maps = [] + max_H = 0 + max_W = 0 + for im in images: + h, w, _C = im.shape + if h > max_H: + max_H = h + if w > max_W: + max_W = w + tex_maps.append(im) + max_shape = (max_H, max_W) + + for i, image in enumerate(tex_maps): + if image.shape[:2] != max_shape: + image_BCHW = image.permute(2, 0, 1)[None] + new_image_BCHW = interpolate( + image_BCHW, + size=max_shape, + mode="bilinear", + align_corners=align_corners, + ) + tex_maps[i] = new_image_BCHW[0].permute(1, 2, 0) + tex_maps = torch.stack(tex_maps, dim=0) # (num_tex_maps, max_H, max_W, C) + return tex_maps + + +def _pad_texture_multiple_maps( + multiple_texture_maps: Union[Tuple[torch.Tensor], List[torch.Tensor]], + align_corners: bool, +) -> torch.Tensor: + """ + Pad all texture images so they have the same height and width. + + Args: + multiple_texture_maps: list of N tensors of shape (M_i, H_i, W_i, C) + M_i : Number of texture maps + align_corners: used for interpolation + + Returns: + tex_maps: Tensor of shape (N, max_M, max_H, max_W, C) + """ + tex_maps = [] + max_M = 0 + max_H = 0 + max_W = 0 + C = 0 + for im in multiple_texture_maps: + m, h, w, C = im.shape + if m > max_M: + max_M = m + if h > max_H: + max_H = h + if w > max_W: + max_W = w + tex_maps.append(im) + max_shape = (max_M, max_H, max_W, C) + max_im_shape = (max_H, max_W) + for i, tms in enumerate(tex_maps): + new_tex_maps = torch.zeros(max_shape) + for j in range(tms.shape[0]): + im = tms[j] + if im.shape[:2] != max_im_shape: + image_BCHW = im.permute(2, 0, 1)[None] + new_image_BCHW = interpolate( + image_BCHW, + size=max_im_shape, + mode="bilinear", + align_corners=align_corners, + ) + new_tex_maps[j] = new_image_BCHW[0].permute(1, 2, 0) + else: + new_tex_maps[j] = im + tex_maps[i] = new_tex_maps + tex_maps = torch.stack(tex_maps, dim=0) # (N, max_M, max_H, max_W, C) + return 
tex_maps + + +# A base class for defining a batch of textures +# with helper methods. +# This is also useful to have so that inside `Meshes` +# we can allow the input textures to be any texture +# type which is an instance of the base class. +class TexturesBase: + def isempty(self): + if self._N is not None and self.valid is not None: + return self._N == 0 or self.valid.eq(False).all() + return False + + def to(self, device): + for k in dir(self): + v = getattr(self, k) + if isinstance(v, (list, tuple)) and all( + torch.is_tensor(elem) for elem in v + ): + v = [elem.to(device) for elem in v] + setattr(self, k, v) + if torch.is_tensor(v) and v.device != device: + setattr(self, k, v.to(device)) + self.device = device + return self + + def _extend(self, N: int, props: List[str]) -> Dict[str, Union[torch.Tensor, List]]: + """ + Create a dict with the specified properties + repeated N times per batch element. + + Args: + N: number of new copies of each texture + in the batch. + props: a List of strings which refer to either + class attributes or class methods which + return tensors or lists. + + Returns: + Dict with the same keys as props. The values are the + extended properties. + """ + if not isinstance(N, int): + raise ValueError("N must be an integer.") + if N <= 0: + raise ValueError("N must be > 0.") + + new_props = {} + for p in props: + t = getattr(self, p) + if callable(t): + t = t() # class method + if t is None: + new_props[p] = None + elif isinstance(t, list): + if not all(isinstance(elem, (int, float)) for elem in t): + raise ValueError("Extend only supports lists of scalars") + t = [[ti] * N for ti in t] + new_props[p] = list(itertools.chain(*t)) + elif torch.is_tensor(t): + new_props[p] = t.repeat_interleave(N, dim=0) + else: + raise ValueError( + f"Property {p} has unsupported type {type(t)}." + "Only tensors and lists are supported." 
                )
        return new_props

    def _getitem(self, index: Union[int, slice], props: List[str]):
        """
        Helper function for __getitem__: slice each property in `props`
        by `index` (int, slice, list of ints, or bool/int tensor).
        """
        new_props = {}
        if isinstance(index, (int, slice)):
            for p in props:
                t = getattr(self, p)
                if callable(t):
                    t = t()  # class method
                new_props[p] = t[index] if t is not None else None
        elif isinstance(index, list):
            index = torch.tensor(index)
        if isinstance(index, torch.Tensor):
            if index.dtype == torch.bool:
                # Convert a boolean mask into a plain list of indices.
                index = index.nonzero()
                index = index.squeeze(1) if index.numel() > 0 else index
                index = index.tolist()
            for p in props:
                t = getattr(self, p)
                if callable(t):
                    t = t()  # class method
                new_props[p] = [t[i] for i in index] if t is not None else None
        return new_props

    def sample_textures(self) -> torch.Tensor:
        """
        Different texture classes sample textures in different ways
        e.g. for vertex textures, the values at each vertex
        are interpolated across the face using the barycentric
        coordinates.
        Each texture class should implement a sample_textures
        method to take the `fragments` from rasterization.
        Using `fragments.pix_to_face` and `fragments.bary_coords`
        this function should return the sampled texture values for
        each pixel in the output image.
        """
        raise NotImplementedError()

    def submeshes(
        self,
        vertex_ids_list: List[List[torch.LongTensor]],
        faces_ids_list: List[List[torch.LongTensor]],
    ) -> "TexturesBase":
        """
        Extract sub-textures used for submeshing.
        """
        raise NotImplementedError(f"{self.__class__} does not support submeshes")

    def faces_verts_textures_packed(self) -> torch.Tensor:
        """
        Returns the texture for each vertex for each face in the mesh.
        For N meshes, this function returns sum(Fi)x3xC where Fi is the
        number of faces in the i-th mesh and C is the dimension of
        the feature (C = 3 for RGB textures).
        You can use the utils function in structures.utils to convert the
        packed representation to a list or padded.
        """
        raise NotImplementedError()

    def clone(self) -> "TexturesBase":
        """
        Each texture class should implement a method
        to clone all necessary internal tensors.
        """
        raise NotImplementedError()

    def detach(self) -> "TexturesBase":
        """
        Each texture class should implement a method
        to detach all necessary internal tensors.
        """
        raise NotImplementedError()

    def __getitem__(self, index) -> "TexturesBase":
        """
        Each texture class should implement a method
        to get the texture properties for the
        specified elements in the batch.
        The TexturesBase._getitem(i) method
        can be used as a helper function to retrieve the
        class attributes for item i. Then, a new
        instance of the child class can be created with
        the attributes.
        """
        raise NotImplementedError()


def Textures(
    maps: Optional[Union[List[torch.Tensor], torch.Tensor]] = None,
    faces_uvs: Optional[torch.Tensor] = None,
    verts_uvs: Optional[torch.Tensor] = None,
    verts_rgb: Optional[torch.Tensor] = None,
) -> TexturesBase:
    """
    Textures class has been DEPRECATED.
    Preserving Textures as a function for backwards compatibility.

    Args:
        maps: texture map per mesh. This can either be a list of maps
          [(H, W, C)] or a padded tensor of shape (N, H, W, C).
        faces_uvs: (N, F, 3) tensor giving the index into verts_uvs for each
            vertex in the face. Padding value is assumed to be -1.
        verts_uvs: (N, V, 2) tensor giving the uv coordinate per vertex.
        verts_rgb: (N, V, C) tensor giving the color per vertex. Padding
            value is assumed to be -1. (C=3 for RGB.)


    Returns:
        a Textures class which is an instance of TexturesBase e.g. TexturesUV,
        TexturesAtlas, TexturesVertex

    """

    warnings.warn(
        """Textures class is deprecated,
        use TexturesUV, TexturesAtlas, TexturesVertex instead.
        Textures class will be removed in future releases.""",
        PendingDeprecationWarning,
    )

    # Dispatch to the appropriate concrete texture class based on
    # which combination of inputs was provided.
    if faces_uvs is not None and verts_uvs is not None and maps is not None:
        return TexturesUV(maps=maps, faces_uvs=faces_uvs, verts_uvs=verts_uvs)

    if verts_rgb is not None:
        return TexturesVertex(verts_features=verts_rgb)

    raise ValueError(
        "Textures either requires all three of (faces uvs, verts uvs, maps) or verts rgb"
    )


class TexturesAtlas(TexturesBase):
    def __init__(self, atlas: Union[torch.Tensor, List[torch.Tensor]]) -> None:
        """
        A texture representation where each face has a square texture map.
        This is based on the implementation from SoftRasterizer [1].

        Args:
            atlas: (N, F, R, R, C) tensor giving the per face texture map.
                The atlas can be created during obj loading with the
                pytorch3d.io.load_obj function - in the input arguments
                set `create_texture_atlas=True`. The atlas will be
                returned in aux.texture_atlas.


        The padded and list representations of the textures are stored
        and the packed representations is computed on the fly and
        not cached.

        [1] Liu et al, 'Soft Rasterizer: A Differentiable Renderer for Image-based
            3D Reasoning', ICCV 2019
            See also https://github.com/ShichenLiu/SoftRas/issues/21
        """
        if isinstance(atlas, (list, tuple)):
            # Each per-mesh atlas must be (F, R, R, C) with a square texel
            # grid and the same resolution R across the batch.
            correct_format = all(
                (
                    torch.is_tensor(elem)
                    and elem.ndim == 4
                    and elem.shape[1] == elem.shape[2]
                    and elem.shape[1] == atlas[0].shape[1]
                )
                for elem in atlas
            )
            if not correct_format:
                msg = (
                    "Expected atlas to be a list of tensors of shape (F, R, R, C) "
                    "with the same value of R."
                )
                raise ValueError(msg)
            self._atlas_list = atlas
            self._atlas_padded = None
            self.device = torch.device("cpu")

            # These values may be overridden when textures is
            # passed into the Meshes constructor. For more details
            # refer to the __init__ of Meshes.
+ self._N = len(atlas) + self._num_faces_per_mesh = [len(a) for a in atlas] + + if self._N > 0: + self.device = atlas[0].device + + elif torch.is_tensor(atlas): + if atlas.ndim != 5: + msg = "Expected atlas to be of shape (N, F, R, R, C); got %r" + raise ValueError(msg % repr(atlas.ndim)) + self._atlas_padded = atlas + self._atlas_list = None + self.device = atlas.device + + # These values may be overridden when textures is + # passed into the Meshes constructor. For more details + # refer to the __init__ of Meshes. + self._N = len(atlas) + max_F = atlas.shape[1] + self._num_faces_per_mesh = [max_F] * self._N + else: + raise ValueError("Expected atlas to be a tensor or list") + + # The num_faces_per_mesh, N and valid + # are reset inside the Meshes object when textures is + # passed into the Meshes constructor. For more details + # refer to the __init__ of Meshes. + self.valid = torch.ones((self._N,), dtype=torch.bool, device=self.device) + + def clone(self) -> "TexturesAtlas": + tex = self.__class__(atlas=self.atlas_padded().clone()) + if self._atlas_list is not None: + tex._atlas_list = [atlas.clone() for atlas in self._atlas_list] + num_faces = ( + self._num_faces_per_mesh.clone() + if torch.is_tensor(self._num_faces_per_mesh) + else self._num_faces_per_mesh + ) + tex.valid = self.valid.clone() + tex._num_faces_per_mesh = num_faces + return tex + + def detach(self) -> "TexturesAtlas": + tex = self.__class__(atlas=self.atlas_padded().detach()) + if self._atlas_list is not None: + tex._atlas_list = [atlas.detach() for atlas in self._atlas_list] + num_faces = ( + self._num_faces_per_mesh.detach() + if torch.is_tensor(self._num_faces_per_mesh) + else self._num_faces_per_mesh + ) + tex.valid = self.valid.detach() + tex._num_faces_per_mesh = num_faces + return tex + + def __getitem__(self, index) -> "TexturesAtlas": + props = ["atlas_list", "_num_faces_per_mesh"] + new_props = self._getitem(index, props=props) + atlas = new_props["atlas_list"] + if isinstance(atlas, 
list): + # multiple batch elements + new_tex = self.__class__(atlas=atlas) + elif torch.is_tensor(atlas): + # single element + new_tex = self.__class__(atlas=[atlas]) + else: + raise ValueError("Not all values are provided in the correct format") + new_tex._num_faces_per_mesh = new_props["_num_faces_per_mesh"] + return new_tex + + def atlas_padded(self) -> torch.Tensor: + if self._atlas_padded is None: + if self.isempty(): + self._atlas_padded = torch.zeros( + (self._N, 0, 0, 0, 3), dtype=torch.float32, device=self.device + ) + else: + self._atlas_padded = _list_to_padded_wrapper( + self._atlas_list, pad_value=0.0 + ) + return self._atlas_padded + + def atlas_list(self) -> List[torch.Tensor]: + if self._atlas_list is None: + if self.isempty(): + self._atlas_padded = [ + torch.empty((0, 0, 0, 3), dtype=torch.float32, device=self.device) + ] * self._N + self._atlas_list = _padded_to_list_wrapper( + self._atlas_padded, split_size=self._num_faces_per_mesh + ) + return self._atlas_list + + def atlas_packed(self) -> torch.Tensor: + if self.isempty(): + return torch.zeros( + (self._N, 0, 0, 3), dtype=torch.float32, device=self.device + ) + atlas_list = self.atlas_list() + return list_to_packed(atlas_list)[0] + + def extend(self, N: int) -> "TexturesAtlas": + new_props = self._extend(N, ["atlas_padded", "_num_faces_per_mesh"]) + new_tex = self.__class__(atlas=new_props["atlas_padded"]) + new_tex._num_faces_per_mesh = new_props["_num_faces_per_mesh"] + return new_tex + + # pyre-fixme[14]: `sample_textures` overrides method defined in `TexturesBase` + # inconsistently. + def sample_textures(self, fragments, **kwargs) -> torch.Tensor: + """ + This is similar to a nearest neighbor sampling and involves a + discretization step. The barycentric coordinates from + rasterization are used to find the nearest grid cell in the texture + atlas and the RGB is returned as the color. 
        This means that this step is differentiable with respect to the RGB
        values of the texture atlas but not differentiable with respect to the
        barycentric coordinates.

        TODO: Add a different sampling mode which interpolates the barycentric
        coordinates to sample the texture and will be differentiable w.r.t
        the barycentric coordinates.

        Args:
            fragments:
                The outputs of rasterization. From this we use

                - pix_to_face: LongTensor of shape (N, H, W, K) specifying the indices
                  of the faces (in the packed representation) which
                  overlap each pixel in the image.
                - barycentric_coords: FloatTensor of shape (N, H, W, K, 3) specifying
                  the barycentric coordinates of each pixel
                  relative to the faces (in the packed
                  representation) which overlap the pixel.

        Returns:
            texels: (N, H, W, K, C)
        """
        N, H, W, K = fragments.pix_to_face.shape
        atlas_packed = self.atlas_packed()
        R = atlas_packed.shape[1]
        bary = fragments.bary_coords
        pix_to_face = fragments.pix_to_face

        # Only the first two barycentric coordinates are needed to locate
        # a texel in the R x R atlas grid.
        bary_w01 = bary[..., :2]
        # pyre-fixme[16]: `bool` has no attribute `__getitem__`.
        mask = (pix_to_face < 0)[..., None]
        # Zero out barycentrics for background pixels (pix_to_face == -1).
        bary_w01 = torch.where(mask, torch.zeros_like(bary_w01), bary_w01)
        # If barycentric coordinates are > 1.0 (in the case of
        # blur_radius > 0.0), wxy might be > R. We need to clamp this
        # index to R-1 to index into the texture atlas.
        w_xy = (bary_w01 * R).to(torch.int64).clamp(max=R - 1)  # (N, H, W, K, 2)

        # Each atlas cell is split into two triangles; `below_diag` selects
        # the lower-left triangle, otherwise indices are mirrored into the
        # upper-right triangle of the texel grid.
        below_diag = (
            bary_w01.sum(dim=-1) * R - w_xy.float().sum(dim=-1)
        ) <= 1.0  # (N, H, W, K)
        w_x, w_y = w_xy.unbind(-1)
        w_x = torch.where(below_diag, w_x, (R - 1 - w_x))
        w_y = torch.where(below_diag, w_y, (R - 1 - w_y))

        texels = atlas_packed[pix_to_face, w_y, w_x]
        # Zero out colors for background pixels.
        texels = texels * (pix_to_face >= 0)[..., None].float()

        return texels

    def submeshes(
        self,
        vertex_ids_list: List[List[torch.LongTensor]],
        faces_ids_list: List[List[torch.LongTensor]],
    ) -> "TexturesAtlas":
        """
        Extract a sub-texture for use in a submesh.

        If the meshes batch corresponding to this TextureAtlas contains
        `n = len(faces_ids_list)` meshes, then self.atlas_list()
        will be of length n. After submeshing, we obtain a batch of
        `k = sum(len(v) for v in atlas_list` submeshes (see Meshes.submeshes). This
        function creates a corresponding TexturesAtlas object with `atlas_list`
        of length `k`.
        """
        if len(faces_ids_list) != len(self.atlas_list()):
            raise IndexError(
                "faces_ids_list must be of " "the same length as atlas_list."
            )

        sub_features = []
        for atlas, faces_ids in zip(self.atlas_list(), faces_ids_list):
            for faces_ids_submesh in faces_ids:
                sub_features.append(atlas[faces_ids_submesh])

        return self.__class__(sub_features)

    def faces_verts_textures_packed(self) -> torch.Tensor:
        """
        Samples texture from each vertex for each face in the mesh.
        For N meshes with {Fi} number of faces, it returns a
        tensor of shape sum(Fi)x3xC (C = 3 for RGB).
        You can use the utils function in structures.utils to convert the
        packed representation to a list or padded.
        """
        atlas_packed = self.atlas_packed()
        # assume each face consists of (v0, v1, v2).
        # to sample from the atlas we only need the first two barycentric coordinates.
        # for details on how this texture sample works refer to the sample_textures function.
        t0 = atlas_packed[:, 0, -1]  # corresponding to v0 with bary = (1, 0)
        t1 = atlas_packed[:, -1, 0]  # corresponding to v1 with bary = (0, 1)
        t2 = atlas_packed[:, 0, 0]  # corresponding to v2 with bary = (0, 0)
        return torch.stack((t0, t1, t2), dim=1)

    def join_batch(self, textures: List["TexturesAtlas"]) -> "TexturesAtlas":
        """
        Join the list of textures given by `textures` to
        self to create a batch of textures. Return a new
        TexturesAtlas object with the combined textures.

        Args:
            textures: List of TexturesAtlas objects

        Returns:
            new_tex: TexturesAtlas object with the combined
            textures from self and the list `textures`.
+ """ + tex_types_same = all(isinstance(tex, TexturesAtlas) for tex in textures) + if not tex_types_same: + raise ValueError("All textures must be of type TexturesAtlas.") + + atlas_list = [] + atlas_list += self.atlas_list() + num_faces_per_mesh = self._num_faces_per_mesh.copy() + for tex in textures: + atlas_list += tex.atlas_list() + num_faces_per_mesh += tex._num_faces_per_mesh + new_tex = self.__class__(atlas=atlas_list) + new_tex._num_faces_per_mesh = num_faces_per_mesh + return new_tex + + def join_scene(self) -> "TexturesAtlas": + """ + Return a new TexturesAtlas amalgamating the batch. + """ + return self.__class__(atlas=[torch.cat(self.atlas_list())]) + + def check_shapes( + self, batch_size: int, max_num_verts: int, max_num_faces: int + ) -> bool: + """ + Check if the dimensions of the atlas match that of the mesh faces + """ + # (N, F) should be the same + return self.atlas_padded().shape[0:2] == (batch_size, max_num_faces) + + +class TexturesUV(TexturesBase): + def __init__( + self, + maps: Union[torch.Tensor, List[torch.Tensor]], + faces_uvs: Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor]], + verts_uvs: Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor]], + *, + maps_ids: Optional[ + Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor]] + ] = None, + padding_mode: str = "border", + align_corners: bool = True, + sampling_mode: str = "bilinear", + ) -> None: + """ + Textures are represented as a per mesh texture map and uv coordinates for each + vertex in each face. NOTE: this class only supports one texture map per mesh. + + Args: + maps: Either (1) a texture map per mesh. This can either be a list of maps + [(H, W, C)] or a padded tensor of shape (N, H, W, C). + For RGB, C = 3. In this case maps_ids must be None. + Or (2) a set of M texture maps per mesh. This can either be a list of sets + [(M, H, W, C)] or a padded tensor of shape (N, M, H, W, C). + For RGB, C = 3. 
In this case maps_ids must be provided to + identify which is relevant to each face. + faces_uvs: (N, F, 3) LongTensor giving the index into verts_uvs + for each face + verts_uvs: (N, V, 2) tensor giving the uv coordinates per vertex + (a FloatTensor with values between 0 and 1). + maps_ids: Used if there are to be multiple maps per face. + This can be either a list of map_ids [(F,)] + or a long tensor of shape (N, F) giving the id of the texture map + for each face. If maps_ids is present, the maps has an extra dimension M + (so maps_padded is (N, M, H, W, C) and maps_list has elements of + shape (M, H, W, C)). + Specifically, the color + of a vertex V is given by an average of + maps_padded[i, maps_ids[i, f], u, v, :] + over u and v integers adjacent to + _verts_uvs_padded[i, _faces_uvs_padded[i, f, 0], :] . + align_corners: If true, the extreme values 0 and 1 for verts_uvs + indicate the centers of the edge pixels in the maps. + padding_mode: padding mode for outside grid values + ("zeros", "border" or "reflection"). + sampling_mode: type of interpolation used to sample the texture. + Corresponds to the mode parameter in PyTorch's + grid_sample ("nearest" or "bilinear"). + + The align_corners and padding_mode arguments correspond to the arguments + of the `grid_sample` torch function. There is an informative illustration of + the two align_corners options at + https://discuss.pytorch.org/t/22663/9 . + + An example of how the indexing into the maps, with align_corners=True, + works is as follows. + If maps[i] has shape [1001, 101] and the value of verts_uvs[i][j] + is [0.4, 0.3], then a value of j in faces_uvs[i] means a vertex + whose color is given by maps[i][700, 40]. padding_mode affects what + happens if a value in verts_uvs is less than 0 or greater than 1. + Note that increasing a value in verts_uvs[..., 0] increases an index + in maps, whereas increasing a value in verts_uvs[..., 1] _decreases_ + an _earlier_ index in maps. 

        If align_corners=False, an example would be as follows.
        If maps[i] has shape [1000, 100] and the value of verts_uvs[i][j]
        is [0.405, 0.2995], then a value of j in faces_uvs[i] means a vertex
        whose color is given by maps[i][700, 40].
        When align_corners=False, padding_mode even matters for values in
        verts_uvs slightly above 0 or slightly below 1. In this case, the
        padding_mode matters if the first value is outside the interval
        [0.0005, 0.9995] or if the second is outside the interval
        [0.005, 0.995].
        """
        self.padding_mode = padding_mode
        self.align_corners = align_corners
        self.sampling_mode = sampling_mode
        if isinstance(faces_uvs, (list, tuple)):
            for fv in faces_uvs:
                if fv.ndim != 2 or fv.shape[-1] != 3:
                    msg = "Expected faces_uvs to be of shape (F, 3); got %r"
                    raise ValueError(msg % repr(fv.shape))
            self._faces_uvs_list = faces_uvs
            self._faces_uvs_padded = None
            self.device = torch.device("cpu")

            # These values may be overridden when textures is
            # passed into the Meshes constructor. For more details
            # refer to the __init__ of Meshes.
            self._N = len(faces_uvs)
            self._num_faces_per_mesh = [len(fv) for fv in faces_uvs]

            if self._N > 0:
                self.device = faces_uvs[0].device

        elif torch.is_tensor(faces_uvs):
            if faces_uvs.ndim != 3 or faces_uvs.shape[-1] != 3:
                msg = "Expected faces_uvs to be of shape (N, F, 3); got %r"
                raise ValueError(msg % repr(faces_uvs.shape))
            self._faces_uvs_padded = faces_uvs
            self._faces_uvs_list = None
            self.device = faces_uvs.device

            # These values may be overridden when textures is
            # passed into the Meshes constructor. For more details
            # refer to the __init__ of Meshes.
            self._N = len(faces_uvs)
            max_F = faces_uvs.shape[1]
            self._num_faces_per_mesh = [max_F] * self._N
        else:
            raise ValueError("Expected faces_uvs to be a tensor or list")

        if isinstance(verts_uvs, (list, tuple)):
            for fv in verts_uvs:
                if fv.ndim != 2 or fv.shape[-1] != 2:
                    msg = "Expected verts_uvs to be of shape (V, 2); got %r"
                    raise ValueError(msg % repr(fv.shape))
            self._verts_uvs_list = verts_uvs
            self._verts_uvs_padded = None

            if len(verts_uvs) != self._N:
                raise ValueError(
                    "verts_uvs and faces_uvs must have the same batch dimension"
                )
            if not all(v.device == self.device for v in verts_uvs):
                raise ValueError("verts_uvs and faces_uvs must be on the same device")

        elif torch.is_tensor(verts_uvs):
            if (
                verts_uvs.ndim != 3
                or verts_uvs.shape[-1] != 2
                or verts_uvs.shape[0] != self._N
            ):
                msg = "Expected verts_uvs to be of shape (N, V, 2); got %r"
                raise ValueError(msg % repr(verts_uvs.shape))
            self._verts_uvs_padded = verts_uvs
            self._verts_uvs_list = None

            if verts_uvs.device != self.device:
                raise ValueError("verts_uvs and faces_uvs must be on the same device")
        else:
            raise ValueError("Expected verts_uvs to be a tensor or list")

        # Validate/normalize the optional per-face map ids before the maps,
        # since _format_maps_padded branches on whether maps_ids is present.
        self._maps_ids_padded, self._maps_ids_list = self._format_maps_ids(maps_ids)

        if isinstance(maps, (list, tuple)):
            self._maps_list = maps
        else:
            self._maps_list = None
        self._maps_padded = self._format_maps_padded(maps)

        if self._maps_padded.device != self.device:
            raise ValueError("maps must be on the same device as verts/faces uvs.")
        self.valid = torch.ones((self._N,), dtype=torch.bool, device=self.device)

    def _format_maps_ids(
        self,
        maps_ids: Optional[
            Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor]]
        ],
    ) -> Tuple[
        Optional[torch.Tensor], Optional[Union[List[torch.Tensor], Tuple[torch.Tensor]]]
    ]:
        # Returns (maps_ids_padded, maps_ids_list); either entry may be None
        # depending on which representation the caller supplied.
        if maps_ids is None:
            return None, None
        elif isinstance(maps_ids, (list, tuple)):
            for mid in maps_ids:
                if mid.ndim != 1:
                    msg = "Expected maps_ids to be of shape (F,); got %r"
                    raise ValueError(msg % repr(mid.shape))
            if len(maps_ids) != self._N:
                raise ValueError(
                    "map_ids, faces_uvs and verts_uvs must have the same batch dimension"
                )
            if not all(mid.device == self.device for mid in maps_ids):
                raise ValueError(
                    "maps_ids and verts/faces uvs must be on the same device"
                )

            if not all(
                mid.shape[0] == nfm
                for mid, nfm in zip(maps_ids, self._num_faces_per_mesh)
            ):
                raise ValueError(
                    "map_ids and faces_uvs must have the same number of faces per mesh"
                )
            # NOTE(review): this device check duplicates the one a few lines
            # above — one of the two is redundant.
            if not all(mid.device == self.device for mid in maps_ids):
                raise ValueError(
                    "maps_ids and verts/faces uvs must be on the same device"
                )
            # Empty batch: there is nothing to pad.
            if not self._num_faces_per_mesh:
                return torch.Tensor(), maps_ids
            return list_to_padded(maps_ids, pad_value=0), maps_ids
        elif isinstance(maps_ids, torch.Tensor):
            if maps_ids.ndim != 2 or maps_ids.shape[0] != self._N:
                msg = "Expected maps_ids to be of shape (N, F); got %r"
                raise ValueError(msg % repr(maps_ids.shape))
            maps_ids_padded = maps_ids
            max_F = max(self._num_faces_per_mesh)
            if not maps_ids.shape[1] == max_F:
                raise ValueError(
                    "map_ids and faces_uvs must have the same number of faces per mesh"
                )
            if maps_ids.device != self.device:
                raise ValueError(
                    "maps_ids and verts/faces uvs must be on the same device"
                )
            return maps_ids_padded, None
        raise ValueError("Expected maps_ids to be a tensor or list")

    def _format_maps_padded(
        self, maps: Union[torch.Tensor, List[torch.Tensor]]
    ) -> torch.Tensor:
        # Expected rank depends on whether multiple maps per mesh are used:
        # (N, H, W, C) without maps_ids, (N, M, H, W, C) with maps_ids.
        maps_ids_none = self._maps_ids_padded is None
        if isinstance(maps, torch.Tensor):
            if not maps_ids_none:
                if maps.ndim != 5 or maps.shape[0] != self._N:
                    msg = "Expected maps to be of shape (N, M, H, W, C); got %r"
                    raise ValueError(msg % repr(maps.shape))
            elif maps.ndim != 4 or maps.shape[0] != self._N:
                msg = "Expected maps to be of shape (N, H, W, C); got %r"
                raise ValueError(msg % repr(maps.shape))
            return maps

        if isinstance(maps, (list, tuple)):
            if len(maps) != self._N:
                raise ValueError("Expected one texture map per mesh in the batch.")
            if self._N > 0:
                ndim = 3 if maps_ids_none else 4
                if not all(map.ndim == ndim for map in maps):
                    raise ValueError("Invalid number of dimensions in texture maps")
                if not all(map.shape[-1] == maps[0].shape[-1] for map in maps):
                    raise ValueError("Inconsistent number of channels in maps")
                maps_padded = (
                    _pad_texture_maps(maps, align_corners=self.align_corners)
                    if maps_ids_none
                    else _pad_texture_multiple_maps(
                        maps, align_corners=self.align_corners
                    )
                )
            else:
                if maps_ids_none:
                    maps_padded = torch.empty(
                        (self._N, 0, 0, 3), dtype=torch.float32, device=self.device
                    )
                else:
                    maps_padded = torch.empty(
                        (self._N, 0, 0, 0, 3), dtype=torch.float32, device=self.device
                    )
            return maps_padded

        raise ValueError("Expected maps to be a tensor or list of tensors.")

    def clone(self) -> "TexturesUV":
        """Deep-copy all internal tensors, preserving sampling settings."""
        tex = self.__class__(
            self.maps_padded().clone(),
            self.faces_uvs_padded().clone(),
            self.verts_uvs_padded().clone(),
            maps_ids=(
                self._maps_ids_padded.clone()
                if self._maps_ids_padded is not None
                else None
            ),
            align_corners=self.align_corners,
            padding_mode=self.padding_mode,
            sampling_mode=self.sampling_mode,
        )
        if self._maps_list is not None:
            tex._maps_list = [m.clone() for m in self._maps_list]
        if self._verts_uvs_list is not None:
            tex._verts_uvs_list = [v.clone() for v in self._verts_uvs_list]
        if self._faces_uvs_list is not None:
            tex._faces_uvs_list = [f.clone() for f in self._faces_uvs_list]
        if self._maps_ids_list is not None:
            tex._maps_ids_list = [f.clone() for f in self._maps_ids_list]
        num_faces = (
            self._num_faces_per_mesh.clone()
            if torch.is_tensor(self._num_faces_per_mesh)
            else self._num_faces_per_mesh
        )
        tex._num_faces_per_mesh = num_faces
        tex.valid = self.valid.clone()
        return tex

    def detach(self) -> "TexturesUV":
        """Return a copy with all internal tensors detached from autograd."""
        tex = self.__class__(
            self.maps_padded().detach(),
            self.faces_uvs_padded().detach(),
            self.verts_uvs_padded().detach(),
            maps_ids=(
                self._maps_ids_padded.detach()
                if self._maps_ids_padded is not None
                else None
            ),
            align_corners=self.align_corners,
            padding_mode=self.padding_mode,
            sampling_mode=self.sampling_mode,
        )
        if self._maps_list is not None:
            tex._maps_list = [m.detach() for m in self._maps_list]
        if self._verts_uvs_list is not None:
            tex._verts_uvs_list = [v.detach() for v in self._verts_uvs_list]
        if self._faces_uvs_list is not None:
            tex._faces_uvs_list = [f.detach() for f in self._faces_uvs_list]
        if self._maps_ids_list is not None:
            tex._maps_ids_list = [mi.detach() for mi in self._maps_ids_list]
        num_faces = (
            self._num_faces_per_mesh.detach()
            if torch.is_tensor(self._num_faces_per_mesh)
            else self._num_faces_per_mesh
        )
        tex._num_faces_per_mesh = num_faces
        tex.valid = self.valid.detach()
        return tex

    def __getitem__(self, index) -> "TexturesUV":
        """Select a sub-batch of textures by int/slice/list/tensor index."""
        props = [
            "faces_uvs_list",
            "verts_uvs_list",
            "maps_list",
            "maps_ids_list",
            "_num_faces_per_mesh",
        ]
        new_props = self._getitem(index, props)
        faces_uvs = new_props["faces_uvs_list"]
        verts_uvs = new_props["verts_uvs_list"]
        maps = new_props["maps_list"]
        maps_ids = new_props["maps_ids_list"]

        # if index has multiple values then faces/verts/maps may be a list of tensors
        if all(isinstance(f, (list, tuple)) for f in [faces_uvs, verts_uvs, maps]):
            if maps_ids is not None and not isinstance(maps_ids, (list, tuple)):
                raise ValueError(
                    "Maps ids are not in the correct format expected list or tuple"
                )
            new_tex = self.__class__(
                faces_uvs=faces_uvs,
                verts_uvs=verts_uvs,
                maps=maps,
                maps_ids=maps_ids,
                padding_mode=self.padding_mode,
                align_corners=self.align_corners,
                sampling_mode=self.sampling_mode,
            )
        elif all(torch.is_tensor(f) for f in [faces_uvs, verts_uvs, maps]):
            # single element: rewrap tensors as one-element lists
            if maps_ids is not None and not torch.is_tensor(maps_ids):
                raise ValueError(
                    "Maps ids are not in the correct format expected tensor"
                )
            new_tex = self.__class__(
                faces_uvs=[faces_uvs],
                verts_uvs=[verts_uvs],
                maps=[maps],
                maps_ids=[maps_ids] if maps_ids is not None else None,
                padding_mode=self.padding_mode,
                align_corners=self.align_corners,
                sampling_mode=self.sampling_mode,
            )
        else:
            raise ValueError("Not all values are provided in the correct format")
        new_tex._num_faces_per_mesh = new_props["_num_faces_per_mesh"]
        return new_tex

    def faces_uvs_padded(self) -> torch.Tensor:
        if self._faces_uvs_padded is None:
            if self.isempty():
                # NOTE(review): faces_uvs holds indices, yet the empty
                # placeholder uses dtype=torch.float32 — confirm whether
                # int64 was intended (callers index with these values).
                self._faces_uvs_padded = torch.zeros(
                    (self._N, 0, 3), dtype=torch.float32, device=self.device
                )
            else:
                self._faces_uvs_padded = list_to_padded(
                    self._faces_uvs_list, pad_value=0.0
                )
        return self._faces_uvs_padded

    def faces_uvs_list(self) -> List[torch.Tensor]:
        if self._faces_uvs_list is None:
            if self.isempty():
                self._faces_uvs_list = [
                    torch.empty((0, 3), dtype=torch.float32, device=self.device)
                ] * self._N
            else:
                self._faces_uvs_list = padded_to_list(
                    self._faces_uvs_padded, split_size=self._num_faces_per_mesh
                )
        return self._faces_uvs_list

    def verts_uvs_padded(self) -> torch.Tensor:
        if self._verts_uvs_padded is None:
            if self.isempty():
                self._verts_uvs_padded = torch.zeros(
                    (self._N, 0, 2), dtype=torch.float32, device=self.device
                )
            else:
                self._verts_uvs_padded = list_to_padded(
                    self._verts_uvs_list, pad_value=0.0
                )
        return self._verts_uvs_padded

    def verts_uvs_list(self) -> List[torch.Tensor]:
        if self._verts_uvs_list is None:
            if self.isempty():
                self._verts_uvs_list = [
                    torch.empty((0, 2), dtype=torch.float32, device=self.device)
                ] * self._N
            else:
                # The number of vertices in the mesh and in verts_uvs can differ
                # e.g. if a vertex is shared between 3 faces, it can
                # have up to 3 different uv coordinates.
                self._verts_uvs_list = list(self._verts_uvs_padded.unbind(0))
        return self._verts_uvs_list

    def maps_ids_padded(self) -> Optional[torch.Tensor]:
        # None when this texture uses a single map per mesh.
        return self._maps_ids_padded

    def maps_ids_list(self) -> Optional[List[torch.Tensor]]:
        if self._maps_ids_list is not None:
            return self._maps_ids_list
        elif self._maps_ids_padded is not None:
            # NOTE(review): unbind returns a tuple, not a List as annotated.
            return self._maps_ids_padded.unbind(0)
        else:
            return None

    # Currently only the padded maps are used.
    def maps_padded(self) -> torch.Tensor:
        return self._maps_padded

    def maps_list(self) -> List[torch.Tensor]:
        if self._maps_list is not None:
            return self._maps_list
        return self._maps_padded.unbind(0)

    def extend(self, N: int) -> "TexturesUV":
        """Repeat each batch element N times (see TexturesBase._extend)."""
        new_props = self._extend(
            N,
            [
                "maps_padded",
                "verts_uvs_padded",
                "faces_uvs_padded",
                "maps_ids_padded",
                "_num_faces_per_mesh",
            ],
        )
        new_tex = self.__class__(
            maps=new_props["maps_padded"],
            faces_uvs=new_props["faces_uvs_padded"],
            verts_uvs=new_props["verts_uvs_padded"],
            maps_ids=new_props["maps_ids_padded"],
            padding_mode=self.padding_mode,
            align_corners=self.align_corners,
            sampling_mode=self.sampling_mode,
        )

        new_tex._num_faces_per_mesh = new_props["_num_faces_per_mesh"]
        return new_tex

    # pyre-fixme[14]: `sample_textures` overrides method defined in `TexturesBase`
    #  inconsistently.
    def sample_textures(self, fragments, **kwargs) -> torch.Tensor:
        """
        Interpolate a 2D texture map using uv vertex texture coordinates for each
        face in the mesh. First interpolate the vertex uvs using barycentric coordinates
        for each pixel in the rasterized output. Then interpolate the texture map
        using the uv coordinate for each pixel.

        Args:
            fragments:
                The outputs of rasterization. From this we use

                - pix_to_face: LongTensor of shape (N, H, W, K) specifying the indices
                  of the faces (in the packed representation) which
                  overlap each pixel in the image.
                - barycentric_coords: FloatTensor of shape (N, H, W, K, 3) specifying
                  the barycentric coordinates of each pixel
                  relative to the faces (in the packed
                  representation) which overlap the pixel.

        Returns:
            texels: tensor of shape (N, H, W, K, C) giving the interpolated
            texture for each pixel in the rasterized image.
        """
        if self.isempty():
            faces_verts_uvs = torch.zeros(
                (self._N, 3, 2), dtype=torch.float32, device=self.device
            )
        else:
            # Gather the uv coordinate of each of the 3 vertices of each face.
            packing_list = [
                i[j] for i, j in zip(self.verts_uvs_list(), self.faces_uvs_list())
            ]
            faces_verts_uvs = torch.cat(packing_list)

        # pixel_uvs: (N, H, W, K, 2)
        pixel_uvs = interpolate_face_attributes(
            fragments.pix_to_face, fragments.bary_coords, faces_verts_uvs
        )

        N, H_out, W_out, K = fragments.pix_to_face.shape

        texture_maps = self.maps_padded()
        maps_ids_padded = self.maps_ids_padded()
        if maps_ids_padded is None:
            # Single map per mesh: sample with 2D grid_sample.
            # pixel_uvs: (N, H, W, K, 2) -> (N, K, H, W, 2) -> (NK, H, W, 2)
            pixel_uvs = pixel_uvs.permute(0, 3, 1, 2, 4).reshape(N * K, H_out, W_out, 2)
            N, H_in, W_in, C = texture_maps.shape  # 3 for RGB

            # textures.map:
            #   (N, H, W, C) -> (N, C, H, W) -> (1, N, C, H, W)
            #   -> expand (K, N, C, H, W) -> reshape (N*K, C, H, W)
            texture_maps = (
                texture_maps.permute(0, 3, 1, 2)[None, ...]
                .expand(K, -1, -1, -1, -1)
                .transpose(0, 1)
                .reshape(N * K, C, H_in, W_in)
            )
            # Textures: (N*K, C, H, W), pixel_uvs: (N*K, H, W, 2)
            # Now need to format the pixel uvs and the texture map correctly!
            # From pytorch docs, grid_sample takes `grid` and `input`:
            #   grid specifies the sampling pixel locations normalized by
            #   the input spatial dimensions It should have most
            #   values in the range of [-1, 1]. Values x = -1, y = -1
            #   is the left-top pixel of input, and values x = 1, y = 1 is the
            #   right-bottom pixel of input.

            # map to a range of [-1, 1] and flip the y axis
            pixel_uvs = torch.lerp(
                pixel_uvs.new_tensor([-1.0, 1.0]),
                pixel_uvs.new_tensor([1.0, -1.0]),
                pixel_uvs,
            )

            if texture_maps.device != pixel_uvs.device:
                texture_maps = texture_maps.to(pixel_uvs.device)
            texels = F.grid_sample(
                texture_maps,
                pixel_uvs,
                mode=self.sampling_mode,
                align_corners=self.align_corners,
                padding_mode=self.padding_mode,
            )
            # texels now has shape (NK, C, H_out, W_out)
            texels = texels.reshape(N, K, C, H_out, W_out).permute(0, 3, 4, 1, 2)
            return texels
        else:
            # Multiple maps per mesh: treat the map index as a third sampling
            # dimension and use 5D grid_sample.
            # We have maps_ids_padded: (N, F), textures_map: (N, M, Hi, Wi, C),
            # fragments.pix_to_face: (N, Ho, Wo, K)
            # Get pixel_to_map_ids: (N, K, Ho, Wo) by indexing pix_to_face into maps_ids
            N, M, H_in, W_in, C = texture_maps.shape  # 3 for RGB

            # Background pixels (pix_to_face == -1) are redirected to face 0
            # to keep the gather in bounds.
            mask = fragments.pix_to_face < 0
            pix_to_face = fragments.pix_to_face.clone()
            pix_to_face[mask] = 0

            # NOTE(review): this gather assumes the packed face indices in
            # pix_to_face align with the flattened padded maps_ids (stride
            # max_F per mesh); for batches with heterogeneous face counts the
            # packed and padded offsets differ — confirm this is safe here.
            pixel_to_map_ids = (
                maps_ids_padded.flatten()
                .gather(0, pix_to_face.flatten())
                .view(N, H_out, W_out, K, 1)
                .permute(0, 3, 1, 2, 4)
            )  # N x K x H_out x W_out x 1

            # Normalize between -1 and 1 with M (number of maps)
            # NOTE(review): divides by (M - 1) — this is a division by zero
            # when M == 1; confirm whether M >= 2 is guaranteed upstream.
            pixel_to_map_ids = (2.0 * pixel_to_map_ids.float() / float(M - 1)) - 1
            pixel_uvs = pixel_uvs.permute(0, 3, 1, 2, 4)
            # map uvs to [-1, 1] and flip the y axis
            pixel_uvs = torch.lerp(
                pixel_uvs.new_tensor([-1.0, 1.0]),
                pixel_uvs.new_tensor([1.0, -1.0]),
                pixel_uvs,
            )  # N x K x H_out x W_out x 2

            # N x K x H_out x W_out x 3
            pixel_uvms = torch.cat((pixel_uvs, pixel_to_map_ids), dim=4)
            # (N, M, H, W, C) -> (N, C, M, H, W)
            texture_maps = texture_maps.permute(0, 4, 1, 2, 3)
            if texture_maps.device != pixel_uvs.device:
                texture_maps = texture_maps.to(pixel_uvs.device)
            texels = F.grid_sample(
                texture_maps,
                pixel_uvms,
                mode=self.sampling_mode,
                align_corners=self.align_corners,
                padding_mode=self.padding_mode,
            )
            # (N, C, K, H_out, W_out) -> (N, H_out, W_out, K, C)
            texels = texels.permute(0, 3, 4, 2, 1).contiguous()
            return texels

    def
faces_verts_textures_packed(self) -> torch.Tensor: + """ + Samples texture from each vertex and for each face in the mesh. + For N meshes with {Fi} number of faces, it returns a + tensor of shape sum(Fi)x3xC (C = 3 for RGB). + You can use the utils function in structures.utils to convert the + packed representation to a list or padded. + """ + if self.isempty(): + return torch.zeros( + (0, 3, self.maps_padded().shape[-1]), + dtype=torch.float32, + device=self.device, + ) + else: + packing_list = [ + i[j] for i, j in zip(self.verts_uvs_list(), self.faces_uvs_list()) + ] + faces_verts_uvs = _list_to_padded_wrapper( + packing_list, pad_value=0.0 + ) # Nxmax(Fi)x3x2 + # map to a range of [-1, 1] and flip the y axis + faces_verts_uvs = torch.lerp( + faces_verts_uvs.new_tensor([-1.0, 1.0]), + faces_verts_uvs.new_tensor([1.0, -1.0]), + faces_verts_uvs, + ) + texture_maps = self.maps_padded() # NxHxWxC or NxMxHxWxC + maps_ids_padded = self.maps_ids_padded() + if maps_ids_padded is None: + texture_maps = texture_maps.permute(0, 3, 1, 2) # NxCxHxW + else: + M = texture_maps.shape[1] + # (N, M, H, W, C) -> (N, C, M, H, W) + texture_maps = texture_maps.permute(0, 4, 1, 2, 3) + # expand maps_ids to (N, F, 3, 1) + maps_ids_padded = maps_ids_padded[:, :, None, None].expand(-1, -1, 3, -1) + maps_ids_padded = (2.0 * maps_ids_padded.float() / float(M - 1)) - 1.0 + + # (N, F, 3, 2+1) -> (N, 1, F, 3, 3) + faces_verts_uvs = torch.cat( + (faces_verts_uvs, maps_ids_padded), dim=3 + ).unsqueeze(1) + # (N, M, H, W, C) -> (N, C, H, W, M) + # texture_maps = texture_maps.permute(0, 4, 2, 3, 1) + textures = F.grid_sample( + texture_maps, + faces_verts_uvs, + mode=self.sampling_mode, + align_corners=self.align_corners, + padding_mode=self.padding_mode, + ) # (N, C, max(Fi), 3) + if maps_ids_padded is not None: + textures = textures.squeeze(dim=2) + # (N, C, max(Fi), 3) -> (N, max(Fi), 3, C) + textures = textures.permute(0, 2, 3, 1) + textures = _padded_to_list_wrapper( + textures, 
split_size=self._num_faces_per_mesh + ) # list of N {Fix3xC} tensors + return list_to_packed(textures)[0] + + def join_batch(self, textures: List["TexturesUV"]) -> "TexturesUV": + """ + Join the list of textures given by `textures` to + self to create a batch of textures. Return a new + TexturesUV object with the combined textures. + + Args: + textures: List of TexturesUV objects + + Returns: + new_tex: TexturesUV object with the combined + textures from self and the list `textures`. + """ + if self.maps_ids_padded() is not None: + # TODO + raise NotImplementedError( + "join_batch does not support TexturesUV with multiple maps" + ) + tex_types_same = all(isinstance(tex, TexturesUV) for tex in textures) + if not tex_types_same: + raise ValueError("All textures must be of type TexturesUV.") + + padding_modes_same = all( + tex.padding_mode == self.padding_mode for tex in textures + ) + if not padding_modes_same: + raise ValueError("All textures must have the same padding_mode.") + align_corners_same = all( + tex.align_corners == self.align_corners for tex in textures + ) + if not align_corners_same: + raise ValueError("All textures must have the same align_corners value.") + sampling_mode_same = all( + tex.sampling_mode == self.sampling_mode for tex in textures + ) + if not sampling_mode_same: + raise ValueError("All textures must have the same sampling_mode.") + + verts_uvs_list = [] + faces_uvs_list = [] + maps_list = [] + faces_uvs_list += self.faces_uvs_list() + verts_uvs_list += self.verts_uvs_list() + maps_list += self.maps_list() + num_faces_per_mesh = self._num_faces_per_mesh.copy() + for tex in textures: + verts_uvs_list += tex.verts_uvs_list() + faces_uvs_list += tex.faces_uvs_list() + num_faces_per_mesh += tex._num_faces_per_mesh + maps_list += tex.maps_list() + + new_tex = self.__class__( + maps=maps_list, + faces_uvs=faces_uvs_list, + verts_uvs=verts_uvs_list, + padding_mode=self.padding_mode, + align_corners=self.align_corners, + 
sampling_mode=self.sampling_mode, + ) + new_tex._num_faces_per_mesh = num_faces_per_mesh + return new_tex + + def _place_map_into_single_map( + self, single_map: torch.Tensor, map_: torch.Tensor, location: PackedRectangle + ) -> None: + """ + Copy map into a larger tensor single_map at the destination specified by location. + If align_corners is False, we add the needed border around the destination. + + Used by join_scene. + + Args: + single_map: (total_H, total_W, C) + map_: (H, W, C) source data + location: where to place map + """ + do_flip = location.flipped + source = map_.transpose(0, 1) if do_flip else map_ + border_width = 0 if self.align_corners else 1 + lower_u = location.x + border_width + lower_v = location.y + border_width + upper_u = lower_u + source.shape[0] + upper_v = lower_v + source.shape[1] + single_map[lower_u:upper_u, lower_v:upper_v] = source + + if self.padding_mode != "zeros" and not self.align_corners: + single_map[lower_u - 1, lower_v:upper_v] = single_map[ + lower_u, lower_v:upper_v + ] + single_map[upper_u, lower_v:upper_v] = single_map[ + upper_u - 1, lower_v:upper_v + ] + single_map[lower_u:upper_u, lower_v - 1] = single_map[ + lower_u:upper_u, lower_v + ] + single_map[lower_u:upper_u, upper_v] = single_map[ + lower_u:upper_u, upper_v - 1 + ] + single_map[lower_u - 1, lower_v - 1] = single_map[lower_u, lower_v] + single_map[lower_u - 1, upper_v] = single_map[lower_u, upper_v - 1] + single_map[upper_u, lower_v - 1] = single_map[upper_u - 1, lower_v] + single_map[upper_u, upper_v] = single_map[upper_u - 1, upper_v - 1] + + def join_scene(self) -> "TexturesUV": + """ + Return a new TexturesUV amalgamating the batch. + + We calculate a large single map which contains the original maps, + and find verts_uvs to point into it. This will not replicate + behavior of padding for verts_uvs values outside [0,1]. + + If align_corners=False, we need to add an artificial border around + every map. 
+ + We use the function `pack_unique_rectangles` to provide a layout for + the single map. This means that if self was created with a list of maps, + and to() has not been called, and there were two maps which were exactly + the same tensor object, then they will become the same data in the unified map. + _place_map_into_single_map is used to copy the maps into the single map. + The merging of verts_uvs and faces_uvs is handled locally in this function. + """ + if self.maps_ids_padded() is not None: + # TODO + raise NotImplementedError("join_scene does not support multiple maps.") + maps = self.maps_list() + heights_and_widths = [] + extra_border = 0 if self.align_corners else 2 + for map_ in maps: + heights_and_widths.append( + Rectangle( + map_.shape[0] + extra_border, map_.shape[1] + extra_border, id(map_) + ) + ) + merging_plan = pack_unique_rectangles(heights_and_widths) + C = maps[0].shape[-1] + single_map = maps[0].new_zeros((*merging_plan.total_size, C)) + verts_uvs = self.verts_uvs_list() + verts_uvs_merged = [] + + for map_, loc, uvs in zip(maps, merging_plan.locations, verts_uvs): + new_uvs = uvs.clone() + if loc.is_first: + self._place_map_into_single_map(single_map, map_, loc) + do_flip = loc.flipped + x_shape = map_.shape[1] if do_flip else map_.shape[0] + y_shape = map_.shape[0] if do_flip else map_.shape[1] + + if do_flip: + # Here we have flipped / transposed the map. + # In uvs, the y values are decreasing from 1 to 0 and the x + # values increase from 0 to 1. We subtract all values from 1 + # as the x's become y's and the y's become x's. + new_uvs = 1.0 - new_uvs[:, [1, 0]] + if TYPE_CHECKING: + new_uvs = torch.Tensor(new_uvs) + + # If align_corners is True, then an index of x (where x is in + # the range 0 .. map_.shape[1]-1) in one of the input maps + # was hit by a u of x/(map_.shape[1]-1). 
+ # That x is located at the index loc[1] + x in the single_map, and + # to hit that we need u to equal (loc[1] + x) / (total_size[1]-1) + # so the old u should be mapped to + # { u*(map_.shape[1]-1) + loc[1] } / (total_size[1]-1) + + # Also, an index of y (where y is in + # the range 0 .. map_.shape[0]-1) in one of the input maps + # was hit by a v of 1 - y/(map_.shape[0]-1). + # That y is located at the index loc[0] + y in the single_map, and + # to hit that we need v to equal 1 - (loc[0] + y) / (total_size[0]-1) + # so the old v should be mapped to + # 1 - { (1-v)*(map_.shape[0]-1) + loc[0] } / (total_size[0]-1) + # = + # { v*(map_.shape[0]-1) + total_size[0] - map.shape[0] - loc[0] } + # / (total_size[0]-1) + + # If align_corners is False, then an index of x (where x is in + # the range 1 .. map_.shape[1]-2) in one of the input maps + # was hit by a u of (x+0.5)/(map_.shape[1]). + # That x is located at the index loc[1] + 1 + x in the single_map, + # (where the 1 is for the border) + # and to hit that we need u to equal (loc[1] + 1 + x + 0.5) / (total_size[1]) + # so the old u should be mapped to + # { loc[1] + 1 + u*map_.shape[1]-0.5 + 0.5 } / (total_size[1]) + # = { loc[1] + 1 + u*map_.shape[1] } / (total_size[1]) + + # Also, an index of y (where y is in + # the range 1 .. map_.shape[0]-2) in one of the input maps + # was hit by a v of 1 - (y+0.5)/(map_.shape[0]). + # That y is located at the index loc[0] + 1 + y in the single_map, + # (where the 1 is for the border) + # and to hit that we need v to equal 1 - (loc[0] + 1 + y + 0.5) / (total_size[0]) + # so the old v should be mapped to + # 1 - { loc[0] + 1 + (1-v)*map_.shape[0]-0.5 + 0.5 } / (total_size[0]) + # = { total_size[0] - loc[0] -1 - (1-v)*map_.shape[0] } + # / (total_size[0]) + # = { total_size[0] - loc[0] - map.shape[0] - 1 + v*map_.shape[0] } + # / (total_size[0]) + + # We change the y's in new_uvs for the scaling of height, + # and the x's for the scaling of width. 
+ # That is why the 1's and 0's are mismatched in these lines. + one_if_align = 1 if self.align_corners else 0 + one_if_not_align = 1 - one_if_align + denom_x = merging_plan.total_size[0] - one_if_align + scale_x = x_shape - one_if_align + denom_y = merging_plan.total_size[1] - one_if_align + scale_y = y_shape - one_if_align + new_uvs[:, 1] *= scale_x / denom_x + new_uvs[:, 1] += ( + merging_plan.total_size[0] - x_shape - loc.x - one_if_not_align + ) / denom_x + new_uvs[:, 0] *= scale_y / denom_y + new_uvs[:, 0] += (loc.y + one_if_not_align) / denom_y + + verts_uvs_merged.append(new_uvs) + + faces_uvs_merged = [] + offset = 0 + for faces_uvs_, verts_uvs_ in zip(self.faces_uvs_list(), verts_uvs): + faces_uvs_merged.append(offset + faces_uvs_) + offset += verts_uvs_.shape[0] + + return self.__class__( + maps=[single_map], + faces_uvs=[torch.cat(faces_uvs_merged)], + verts_uvs=[torch.cat(verts_uvs_merged)], + align_corners=self.align_corners, + padding_mode=self.padding_mode, + sampling_mode=self.sampling_mode, + ) + + def centers_for_image(self, index: int) -> torch.Tensor: + """ + Return the locations in the texture map which correspond to the given + verts_uvs, for one of the meshes. This is potentially useful for + visualizing the data. See the texturesuv_image_matplotlib and + texturesuv_image_PIL functions. + + Args: + index: batch index of the mesh whose centers to return. + + Returns: + centers: coordinates of points in the texture image + - a FloatTensor of shape (V,2) + """ + if self.maps_ids_padded() is not None: + # TODO: invent a visualization for the multiple maps case + raise NotImplementedError("This function does not support multiple maps.") + if self._N != 1: + raise ValueError( + "This function only supports plotting textures for one mesh." 
+            )
+        texture_image = self.maps_padded()
+        verts_uvs = self.verts_uvs_list()[index][None]
+        _, H, W, _3 = texture_image.shape
+        coord1 = torch.arange(W).expand(H, W)
+        coord2 = torch.arange(H)[:, None].expand(H, W)
+        coords = torch.stack([coord1, coord2])[None]
+        with torch.no_grad():
+            # Get xy cartesian coordinates based on the uv coordinates
+            centers = F.grid_sample(
+                torch.flip(coords.to(texture_image), [2]),
+                # Convert from [0, 1] -> [-1, 1] range expected by grid sample
+                verts_uvs[:, None] * 2.0 - 1,
+                mode=self.sampling_mode,
+                align_corners=self.align_corners,
+                padding_mode=self.padding_mode,
+            ).cpu()
+        centers = centers[0, :, 0].T
+        return centers
+
+    def check_shapes(
+        self, batch_size: int, max_num_verts: int, max_num_faces: int
+    ) -> bool:
+        """
+        Check if the dimensions of the verts/faces uvs match that of the mesh
+        """
+        # (N, F) should be the same
+        # (N, V) is not guaranteed to be the same
+        return (self.faces_uvs_padded().shape[0:2] == (batch_size, max_num_faces)) and (
+            self.verts_uvs_padded().shape[0] == batch_size
+        )
+
+    def submeshes(
+        self,
+        vertex_ids_list: List[List[torch.LongTensor]],
+        faces_ids_list: List[List[torch.LongTensor]],
+    ) -> "TexturesUV":
+        """
+        Extract a sub-texture for use in a submesh.
+
+        If the meshes batch corresponding to this TexturesUV contains
+        `n = len(faces_ids_list)` meshes, then self.faces_uvs_padded()
+        will be of length n. After submeshing, we obtain a batch of
+        `k = sum(len(f) for f in faces_ids_list)` submeshes (see Meshes.submeshes). This
+        function creates a corresponding TexturesUV object with `faces_uvs_padded`
+        of length `k`.
+
+        Args:
+            vertex_ids_list: Not used when submeshing TexturesUV.
+
+            faces_ids_list: A list of length equal to self.faces_uvs_padded(). Each
+                element is a LongTensor listing the face ids that the submesh keeps in
+                each respective mesh.
+
+
+        Returns:
+            A TexturesUV in which faces_uvs_padded, verts_uvs_padded, and maps_padded
+            have length sum(len(faces) for faces in faces_ids_list)
+        """
+        if self.maps_ids_padded() is not None:
+            # TODO
+            raise NotImplementedError("This function does not support multiple maps.")
+        if len(faces_ids_list) != len(self.faces_uvs_padded()):
+            raise IndexError(
+                "faces_uvs_padded must be of " "the same length as face_ids_list."
+            )
+
+        sub_faces_uvs, sub_verts_uvs, sub_maps = [], [], []
+        for faces_ids, faces_uvs, verts_uvs, map_ in zip(
+            faces_ids_list,
+            self.faces_uvs_padded(),
+            self.verts_uvs_padded(),
+            self.maps_padded(),
+        ):
+            for faces_ids_submesh in faces_ids:
+                sub_faces_uvs.append(faces_uvs[faces_ids_submesh])
+                sub_verts_uvs.append(verts_uvs)
+                sub_maps.append(map_)
+
+        return self.__class__(
+            maps=sub_maps,
+            faces_uvs=sub_faces_uvs,
+            verts_uvs=sub_verts_uvs,
+            padding_mode=self.padding_mode,
+            align_corners=self.align_corners,
+            sampling_mode=self.sampling_mode,
+        )
+
+
+class TexturesVertex(TexturesBase):
+    def __init__(
+        self,
+        verts_features: Union[torch.Tensor, List[torch.Tensor], Tuple[torch.Tensor]],
+    ) -> None:
+        """
+        Batched texture representation where each vertex in a mesh
+        has a C dimensional feature vector.
+
+        Args:
+            verts_features: list of (Vi, C) or (N, V, C) tensor giving a feature
+                vector with arbitrary dimensions for each vertex.
+        """
+        if isinstance(verts_features, (tuple, list)):
+            correct_shape = all(
+                (torch.is_tensor(v) and v.ndim == 2) for v in verts_features
+            )
+            if not correct_shape:
+                raise ValueError(
+                    "Expected verts_features to be a list of tensors of shape (V, C)."
+                )
+
+            self._verts_features_list = verts_features
+            self._verts_features_padded = None
+            self.device = torch.device("cpu")
+
+            # These values may be overridden when textures is
+            # passed into the Meshes constructor. For more details
+            # refer to the __init__ of Meshes.
+ self._N = len(verts_features) + self._num_verts_per_mesh = [len(fv) for fv in verts_features] + + if self._N > 0: + self.device = verts_features[0].device + + elif torch.is_tensor(verts_features): + if verts_features.ndim != 3: + msg = "Expected verts_features to be of shape (N, V, C); got %r" + raise ValueError(msg % repr(verts_features.shape)) + self._verts_features_padded = verts_features + self._verts_features_list = None + self.device = verts_features.device + + # These values may be overridden when textures is + # passed into the Meshes constructor. For more details + # refer to the __init__ of Meshes. + self._N = len(verts_features) + max_F = verts_features.shape[1] + self._num_verts_per_mesh = [max_F] * self._N + else: + raise ValueError("verts_features must be a tensor or list of tensors") + + # This is set inside the Meshes object when textures is + # passed into the Meshes constructor. For more details + # refer to the __init__ of Meshes. + self.valid = torch.ones((self._N,), dtype=torch.bool, device=self.device) + + def clone(self) -> "TexturesVertex": + tex = self.__class__(self.verts_features_padded().clone()) + if self._verts_features_list is not None: + tex._verts_features_list = [f.clone() for f in self._verts_features_list] + tex._num_verts_per_mesh = self._num_verts_per_mesh.copy() + tex.valid = self.valid.clone() + return tex + + def detach(self) -> "TexturesVertex": + tex = self.__class__(self.verts_features_padded().detach()) + if self._verts_features_list is not None: + tex._verts_features_list = [f.detach() for f in self._verts_features_list] + tex._num_verts_per_mesh = self._num_verts_per_mesh.copy() + tex.valid = self.valid.detach() + return tex + + def __getitem__(self, index) -> "TexturesVertex": + props = ["verts_features_list", "_num_verts_per_mesh"] + new_props = self._getitem(index, props) + verts_features = new_props["verts_features_list"] + if isinstance(verts_features, list): + # Handle the case of an empty list + if 
len(verts_features) == 0: + verts_features = torch.empty( + size=(0, 0, 3), + dtype=torch.float32, + device=self.verts_features_padded().device, + ) + new_tex = self.__class__(verts_features=verts_features) + elif torch.is_tensor(verts_features): + new_tex = self.__class__(verts_features=[verts_features]) + else: + raise ValueError("Not all values are provided in the correct format") + new_tex._num_verts_per_mesh = new_props["_num_verts_per_mesh"] + return new_tex + + def verts_features_padded(self) -> torch.Tensor: + if self._verts_features_padded is None: + if self.isempty(): + self._verts_features_padded = torch.zeros( + (self._N, 0, 3, 0), dtype=torch.float32, device=self.device + ) + else: + self._verts_features_padded = list_to_padded( + self._verts_features_list, pad_value=0.0 + ) + return self._verts_features_padded + + def verts_features_list(self) -> List[torch.Tensor]: + if self._verts_features_list is None: + if self.isempty(): + self._verts_features_list = [ + torch.empty((0, 3), dtype=torch.float32, device=self.device) + ] * self._N + else: + self._verts_features_list = padded_to_list( + self._verts_features_padded, split_size=self._num_verts_per_mesh + ) + return self._verts_features_list + + def verts_features_packed(self) -> torch.Tensor: + if self.isempty(): + return torch.zeros((self._N, 3, 0), dtype=torch.float32, device=self.device) + verts_features_list = self.verts_features_list() + return list_to_packed(verts_features_list)[0] + + def extend(self, N: int) -> "TexturesVertex": + new_props = self._extend(N, ["verts_features_padded", "_num_verts_per_mesh"]) + new_tex = self.__class__(verts_features=new_props["verts_features_padded"]) + new_tex._num_verts_per_mesh = new_props["_num_verts_per_mesh"] + return new_tex + + # pyre-fixme[14]: `sample_textures` overrides method defined in `TexturesBase` + # inconsistently. + def sample_textures(self, fragments, faces_packed=None) -> torch.Tensor: + """ + Determine the color for each rasterized face. 
Interpolate the colors for
+        vertices which form the face using the barycentric coordinates.
+        Args:
+            fragments:
+                The outputs of rasterization. From this we use
+
+                - pix_to_face: LongTensor of shape (N, H, W, K) specifying the indices
+                  of the faces (in the packed representation) which
+                  overlap each pixel in the image.
+                - barycentric_coords: FloatTensor of shape (N, H, W, K, 3) specifying
+                  the barycentric coordinates of each pixel
+                  relative to the faces (in the packed
+                  representation) which overlap the pixel.
+
+        Returns:
+            texels: A texture per pixel of shape (N, H, W, K, C).
+            There will be one C dimensional value for each element in
+            fragments.pix_to_face.
+        """
+        verts_features_packed = self.verts_features_packed()
+        faces_verts_features = verts_features_packed[faces_packed]
+
+        texels = interpolate_face_attributes(
+            fragments.pix_to_face, fragments.bary_coords, faces_verts_features
+        )
+        return texels
+
+    def submeshes(
+        self,
+        vertex_ids_list: List[List[torch.LongTensor]],
+        faces_ids_list: List[List[torch.LongTensor]],
+    ) -> "TexturesVertex":
+        """
+        Extract a sub-texture for use in a submesh.
+
+        If the meshes batch corresponding to this TexturesVertex contains
+        `n = len(vertex_ids_list)` meshes, then self.verts_features_list()
+        will be of length n. After submeshing, we obtain a batch of
+        `k = sum(len(v) for v in vertex_ids_list)` submeshes (see Meshes.submeshes). This
+        function creates a corresponding TexturesVertex object with `verts_features_list`
+        of length `k`.
+
+        Args:
+            vertex_ids_list: A list of length equal to self.verts_features_list(). Each
+                element is a LongTensor listing the vertices that the submesh keeps in
+                each respective mesh.
+
+            faces_ids_list: Not used when submeshing TexturesVertex.
+
+        Returns:
+            A TexturesVertex in which verts_features_list has length
+            sum(len(vertices) for vertices in vertex_ids_list). Each element contains
+            vertex features corresponding to the subset of vertices in that submesh.
+ """ + if len(vertex_ids_list) != len(self.verts_features_list()): + raise IndexError( + "verts_features_list must be of " "the same length as vertex_ids_list." + ) + + sub_features = [] + for vertex_ids, features in zip(vertex_ids_list, self.verts_features_list()): + for vertex_ids_submesh in vertex_ids: + sub_features.append(features[vertex_ids_submesh]) + + return self.__class__(sub_features) + + def faces_verts_textures_packed(self, faces_packed=None) -> torch.Tensor: + """ + Samples texture from each vertex and for each face in the mesh. + For N meshes with {Fi} number of faces, it returns a + tensor of shape sum(Fi)x3xC (C = 3 for RGB). + You can use the utils function in structures.utils to convert the + packed representation to a list or padded. + """ + verts_features_packed = self.verts_features_packed() + faces_verts_features = verts_features_packed[faces_packed] + return faces_verts_features + + def join_batch(self, textures: List["TexturesVertex"]) -> "TexturesVertex": + """ + Join the list of textures given by `textures` to + self to create a batch of textures. Return a new + TexturesVertex object with the combined textures. + + Args: + textures: List of TexturesVertex objects + + Returns: + new_tex: TexturesVertex object with the combined + textures from self and the list `textures`. + """ + tex_types_same = all(isinstance(tex, TexturesVertex) for tex in textures) + if not tex_types_same: + raise ValueError("All textures must be of type TexturesVertex.") + + verts_features_list = [] + verts_features_list += self.verts_features_list() + num_verts_per_mesh = self._num_verts_per_mesh.copy() + for tex in textures: + verts_features_list += tex.verts_features_list() + num_verts_per_mesh += tex._num_verts_per_mesh + + new_tex = self.__class__(verts_features=verts_features_list) + new_tex._num_verts_per_mesh = num_verts_per_mesh + return new_tex + + def join_scene(self) -> "TexturesVertex": + """ + Return a new TexturesVertex amalgamating the batch. 
+ """ + return self.__class__(verts_features=[torch.cat(self.verts_features_list())]) + + def check_shapes( + self, batch_size: int, max_num_verts: int, max_num_faces: int + ) -> bool: + """ + Check if the dimensions of the verts features match that of the mesh verts + """ + # (N, V) should be the same + return self.verts_features_padded().shape[:-1] == (batch_size, max_num_verts) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/utils.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..73b91c495b0b94a8d70184e526d67ca64ad6db64 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/mesh/utils.py @@ -0,0 +1,320 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from typing import List, NamedTuple, Tuple + +import torch +from pytorch3d.ops import interpolate_face_attributes + + +def _clip_barycentric_coordinates(bary) -> torch.Tensor: + """ + Args: + bary: barycentric coordinates of shape (...., 3) where `...` represents + an arbitrary number of dimensions + + Returns: + bary: Barycentric coordinates clipped (i.e any values < 0 are set to 0) + and renormalized. We only clip the negative values. Values > 1 will fall + into the [0, 1] range after renormalization. + The output is the same shape as the input. 
+ """ + if bary.shape[-1] != 3: + msg = "Expected barycentric coords to have last dim = 3; got %r" + raise ValueError(msg % (bary.shape,)) + ndims = bary.ndim - 1 + mask = bary.eq(-1).all(dim=-1, keepdim=True).expand(*((-1,) * ndims + (3,))) + clipped = bary.clamp(min=0.0) + clipped[mask] = 0.0 + clipped_sum = torch.clamp(clipped.sum(dim=-1, keepdim=True), min=1e-5) + clipped = clipped / clipped_sum + clipped[mask] = -1.0 + return clipped + + +def _interpolate_zbuf( + pix_to_face: torch.Tensor, barycentric_coords: torch.Tensor, meshes +) -> torch.Tensor: + """ + A helper function to calculate the z buffer for each pixel in the + rasterized output. + + Args: + pix_to_face: LongTensor of shape (N, H, W, K) specifying the indices + of the faces (in the packed representation) which + overlap each pixel in the image. + barycentric_coords: FloatTensor of shape (N, H, W, K, 3) specifying + the barycentric coordinates of each pixel + relative to the faces (in the packed + representation) which overlap the pixel. + meshes: Meshes object representing a batch of meshes. + + Returns: + zbuffer: (N, H, W, K) FloatTensor + """ + verts = meshes.verts_packed() + faces = meshes.faces_packed() + faces_verts_z = verts[faces][..., 2][..., None] # (F, 3, 1) + zbuf = interpolate_face_attributes(pix_to_face, barycentric_coords, faces_verts_z)[ + ..., 0 + ] # (1, H, W, K) + zbuf[pix_to_face == -1] = -1 + return zbuf + + +# ----------- Rectangle Packing -------------------- # + + +class Rectangle(NamedTuple): + xsize: int + ysize: int + identifier: int + + +class PackedRectangle(NamedTuple): + x: int + y: int + flipped: bool + is_first: bool + + +class PackedRectangles(NamedTuple): + total_size: Tuple[int, int] + locations: List[PackedRectangle] + + +# Note the order of members matters here because it determines the queue order. +# We want to place longer rectangles first. 
+class _UnplacedRectangle(NamedTuple): + size: Tuple[int, int] + ind: int + flipped: bool + + +def _try_place_rectangle( + rect: _UnplacedRectangle, + placed_so_far: List[PackedRectangle], + occupied: List[Tuple[int, int]], +) -> bool: + """ + Try to place rect within the current bounding box. + Part of the implementation of pack_rectangles. + + Note that the arguments `placed_so_far` and `occupied` are modified. + + Args: + rect: rectangle to place + placed_so_far: the locations decided upon so far - a list of + (x, y, whether flipped). The nth element is the + location of the nth rectangle if it has been decided. + (modified in place) + occupied: the nodes of the graph of extents of rightmost placed + rectangles - (modified in place) + + Returns: + True on success. + + Example: + (We always have placed the first rectangle horizontally and other + rectangles above it.) + Let's say the placed boxes 1-4 are laid out like this. + The coordinates of the points marked X are stored in occupied. + It is to the right of the X's that we seek to place rect. + + +-----------------------X + |2 | + | +---X + | |4 | + | | | + | +---+X + | |3 | + | | | + +-----------------------+----+------X + y |1 | + ^ | --->x | + | +-----------------------------------+ + + We want to place this rectangle. + + +-+ + |5| + | | + | | = rect + | | + | | + | | + +-+ + + The call will succeed, returning True, leaving us with + + +-----------------------X + |2 | +-X + | +---+|5| + | |4 || | + | | || | + | +---++ | + | |3 | | + | | | | + +-----------------------+----+-+----X + |1 | + | | + +-----------------------------------+ . 
+ + """ + total_width = occupied[0][0] + needed_height = rect.size[1] + current_start_idx = None + current_max_width = 0 + previous_height = 0 + currently_packed = 0 + for idx, interval in enumerate(occupied): + if interval[0] <= total_width - rect.size[0]: + currently_packed += interval[1] - previous_height + current_max_width = max(interval[0], current_max_width) + if current_start_idx is None: + current_start_idx = idx + if currently_packed >= needed_height: + current_max_width = max(interval[0], current_max_width) + placed_so_far[rect.ind] = PackedRectangle( + current_max_width, + occupied[current_start_idx - 1][1], + rect.flipped, + True, + ) + new_occupied = ( + current_max_width + rect.size[0], + occupied[current_start_idx - 1][1] + needed_height, + ) + if currently_packed == needed_height: + occupied[idx] = new_occupied + del occupied[current_start_idx:idx] + elif idx > current_start_idx: + occupied[idx - 1] = new_occupied + del occupied[current_start_idx : (idx - 1)] + else: + occupied.insert(idx, new_occupied) + return True + else: + current_start_idx = None + current_max_width = 0 + currently_packed = 0 + previous_height = interval[1] + return False + + +def pack_rectangles(sizes: List[Tuple[int, int]]) -> PackedRectangles: + """ + Naive rectangle packing in to a large rectangle. Flipping (i.e. rotating + a rectangle by 90 degrees) is allowed. + + This is used to join several uv maps into a single scene, see + TexturesUV.join_scene. + + Args: + sizes: List of sizes of rectangles to pack + + Returns: + total_size: size of total large rectangle + rectangles: location for each of the input rectangles. + This includes whether they are flipped. + The is_first field is always True. 
+ """ + + if len(sizes) < 2: + raise ValueError("Cannot pack less than two boxes") + + queue = [] + for i, size in enumerate(sizes): + if size[0] < size[1]: + queue.append(_UnplacedRectangle((size[1], size[0]), i, True)) + else: + queue.append(_UnplacedRectangle((size[0], size[1]), i, False)) + queue.sort() + placed_so_far = [PackedRectangle(-1, -1, False, False)] * len(sizes) + + biggest = queue.pop() + total_width, current_height = biggest.size + placed_so_far[biggest.ind] = PackedRectangle(0, 0, biggest.flipped, True) + + second = queue.pop() + placed_so_far[second.ind] = PackedRectangle(0, current_height, second.flipped, True) + current_height += second.size[1] + occupied = [biggest.size, (second.size[0], current_height)] + + for rect in reversed(queue): + if _try_place_rectangle(rect, placed_so_far, occupied): + continue + + rotated = _UnplacedRectangle( + (rect.size[1], rect.size[0]), rect.ind, not rect.flipped + ) + if _try_place_rectangle(rotated, placed_so_far, occupied): + continue + + # rect wasn't placed in the current bounding box, + # so we add extra space to fit it in. + placed_so_far[rect.ind] = PackedRectangle(0, current_height, rect.flipped, True) + current_height += rect.size[1] + occupied.append((rect.size[0], current_height)) + + return PackedRectangles((total_width, current_height), placed_so_far) + + +def pack_unique_rectangles(rectangles: List[Rectangle]) -> PackedRectangles: + """ + Naive rectangle packing in to a large rectangle. Flipping (i.e. rotating + a rectangle by 90 degrees) is allowed. Inputs are deduplicated by their + identifier. + + This is a wrapper around pack_rectangles, where inputs come with an + identifier. In particular, it calls pack_rectangles for the deduplicated inputs, + then returns the values for all the inputs. The output for all rectangles with + the same identifier will be the same, except that only the first one will have + the is_first field True. 
+ + This is used to join several uv maps into a single scene, see + TexturesUV.join_scene. + + Args: + rectangles: List of sizes of rectangles to pack + + Returns: + total_size: size of total large rectangle + rectangles: location for each of the input rectangles. + This includes whether they are flipped. + The is_first field is true for the first rectangle + with each identifier. + """ + + if len(rectangles) < 2: + raise ValueError("Cannot pack less than two boxes") + + input_map = {} + input_indices: List[Tuple[int, bool]] = [] + unique_input_sizes: List[Tuple[int, int]] = [] + for rectangle in rectangles: + if rectangle.identifier not in input_map: + unique_index = len(unique_input_sizes) + unique_input_sizes.append((rectangle.xsize, rectangle.ysize)) + input_map[rectangle.identifier] = unique_index + input_indices.append((unique_index, True)) + else: + unique_index = input_map[rectangle.identifier] + input_indices.append((unique_index, False)) + + if len(unique_input_sizes) == 1: + first = [PackedRectangle(0, 0, False, True)] + rest = (len(rectangles) - 1) * [PackedRectangle(0, 0, False, False)] + return PackedRectangles(unique_input_sizes[0], first + rest) + + total_size, unique_locations = pack_rectangles(unique_input_sizes) + full_locations = [] + for input_index, first in input_indices: + full_locations.append(unique_locations[input_index]._replace(is_first=first)) + + return PackedRectangles(total_size, full_locations) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/points/compositor.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/points/compositor.py new file mode 100644 index 0000000000000000000000000000000000000000..14187eee279c330ce5291cfd6ddcc7db8a598841 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/renderer/points/compositor.py @@ -0,0 +1,116 @@ +# Copyright (c) Meta Platforms, Inc. 
and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from typing import List, Optional, Tuple, Union + +import torch +import torch.nn as nn + +from ..compositing import alpha_composite, norm_weighted_sum + + +# A compositor should take as input 3D points and some corresponding information. +# Given this information, the compositor can: +# - blend colors across the top K vertices at a pixel + + +class AlphaCompositor(nn.Module): + """ + Accumulate points using alpha compositing. + """ + + def __init__( + self, background_color: Optional[Union[Tuple, List, torch.Tensor]] = None + ) -> None: + super().__init__() + self.background_color = background_color + + def forward(self, fragments, alphas, ptclds, **kwargs) -> torch.Tensor: + background_color = kwargs.get("background_color", self.background_color) + images = alpha_composite(fragments, alphas, ptclds) + + # images are of shape (N, C, H, W) + # check for background color & feature size C (C=4 indicates rgba) + if background_color is not None: + return _add_background_color_to_images(fragments, images, background_color) + return images + + +class NormWeightedCompositor(nn.Module): + """ + Accumulate points using a normalized weighted sum. 
+ """ + + def __init__( + self, background_color: Optional[Union[Tuple, List, torch.Tensor]] = None + ) -> None: + super().__init__() + self.background_color = background_color + + def forward(self, fragments, alphas, ptclds, **kwargs) -> torch.Tensor: + background_color = kwargs.get("background_color", self.background_color) + images = norm_weighted_sum(fragments, alphas, ptclds) + + # images are of shape (N, C, H, W) + # check for background color & feature size C (C=4 indicates rgba) + if background_color is not None: + return _add_background_color_to_images(fragments, images, background_color) + return images + + +def _add_background_color_to_images(pix_idxs, images, background_color): + """ + Mask pixels in images without corresponding points with a given background_color. + + Args: + pix_idxs: int32 Tensor of shape (N, points_per_pixel, image_size, image_size) + giving the indices of the nearest points at each pixel, sorted in z-order. + images: Tensor of shape (N, 4, image_size, image_size) giving the + accumulated features at each point, where 4 refers to a rgba feature. + background_color: Tensor, list, or tuple with 3 or 4 values indicating the rgb/rgba + value for the new background. Values should be in the interval [0,1]. + Returns: + images: Tensor of shape (N, 4, image_size, image_size), where pixels with + no nearest points have features set to the background color, and other + pixels with accumulated features have unchanged values. 
+ """ + # Initialize background mask + background_mask = pix_idxs[:, 0] < 0 # (N, H, W) + + # Convert background_color to an appropriate tensor and check shape + if not torch.is_tensor(background_color): + background_color = images.new_tensor(background_color) + + if background_color.ndim == 0: + background_color = background_color.expand(images.shape[1]) + + if background_color.ndim > 1: + raise ValueError("Wrong shape of background_color") + + background_color = background_color.to(images) + + # add alpha channel if needed + if background_color.shape[0] + 1 == images.shape[1]: + alpha = images.new_ones(1) + background_color = torch.cat([background_color, alpha]) + + if images.shape[1] != background_color.shape[0]: + raise ValueError( + "Background color has %s channels not %s" + % (background_color.shape[0], images.shape[1]) + ) + + num_background_pixels = background_mask.sum() + + # permute so that features are the last dimension for masked_scatter to work + masked_images = images.permute(0, 2, 3, 1).masked_scatter( + background_mask[..., None], + background_color[None, :].expand(num_background_pixels, -1), + ) + + return masked_images.permute(0, 3, 1, 2) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/utils/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..504dace63d92a8e4a964fc68e5e7f3e42c027b13 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/utils/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + +from .camera_conversions import ( + cameras_from_opencv_projection, + opencv_from_cameras_projection, + pulsar_from_cameras_projection, + pulsar_from_opencv_projection, +) + +from .checkerboard import checkerboard + +from .ico_sphere import ico_sphere + +from .torus import torus + + +__all__ = [k for k in globals().keys() if not k.startswith("_")] diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/utils/camera_conversions.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/utils/camera_conversions.py new file mode 100644 index 0000000000000000000000000000000000000000..b9fa43be089c64f8924cd8335b5a1c2d4b793b4b --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/utils/camera_conversions.py @@ -0,0 +1,159 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from typing import Tuple + +import torch + +from ..renderer import PerspectiveCameras +from ..renderer.camera_conversions import ( + _cameras_from_opencv_projection, + _opencv_from_cameras_projection, + _pulsar_from_cameras_projection, + _pulsar_from_opencv_projection, +) + + +def cameras_from_opencv_projection( + R: torch.Tensor, + tvec: torch.Tensor, + camera_matrix: torch.Tensor, + image_size: torch.Tensor, +) -> PerspectiveCameras: + """ + Converts a batch of OpenCV-conventioned cameras parametrized with the + rotation matrices `R`, translation vectors `tvec`, and the camera + calibration matrices `camera_matrix` to `PerspectiveCameras` in PyTorch3D + convention. 
+ + More specifically, the conversion is carried out such that a projection + of a 3D shape to the OpenCV-conventioned screen of size `image_size` results + in the same image as a projection with the corresponding PyTorch3D camera + to the NDC screen convention of PyTorch3D. + + More specifically, the OpenCV convention projects points to the OpenCV screen + space as follows:: + + x_screen_opencv = camera_matrix @ (R @ x_world + tvec) + + followed by the homogenization of `x_screen_opencv`. + + Note: + The parameters `R, tvec, camera_matrix` correspond to the inputs of + `cv2.projectPoints(x_world, rvec, tvec, camera_matrix, [])`, + where `rvec` is an axis-angle vector that can be obtained from + the rotation matrix `R` expected here by calling the `so3_log_map` function. + Correspondingly, `R` can be obtained from `rvec` by calling `so3_exp_map`. + + Args: + R: A batch of rotation matrices of shape `(N, 3, 3)`. + tvec: A batch of translation vectors of shape `(N, 3)`. + camera_matrix: A batch of camera calibration matrices of shape `(N, 3, 3)`. + image_size: A tensor of shape `(N, 2)` containing the sizes of the images + (height, width) attached to each camera. + + Returns: + cameras_pytorch3d: A batch of `N` cameras in the PyTorch3D convention. + """ + return _cameras_from_opencv_projection(R, tvec, camera_matrix, image_size) + + +def opencv_from_cameras_projection( + cameras: PerspectiveCameras, + image_size: torch.Tensor, +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Converts a batch of `PerspectiveCameras` into OpenCV-convention + rotation matrices `R`, translation vectors `tvec`, and the camera + calibration matrices `camera_matrix`. This operation is exactly the inverse + of `cameras_from_opencv_projection`. 
+ + Note: + The outputs `R, tvec, camera_matrix` correspond to the inputs of + `cv2.projectPoints(x_world, rvec, tvec, camera_matrix, [])`, + where `rvec` is an axis-angle vector that can be obtained from + the rotation matrix `R` output here by calling the `so3_log_map` function. + Correspondingly, `R` can be obtained from `rvec` by calling `so3_exp_map`. + + Args: + cameras: A batch of `N` cameras in the PyTorch3D convention. + image_size: A tensor of shape `(N, 2)` containing the sizes of the images + (height, width) attached to each camera. + return_as_rotmat (bool): If set to True, return the full 3x3 rotation + matrices. Otherwise, return an axis-angle vector (default). + + Returns: + R: A batch of rotation matrices of shape `(N, 3, 3)`. + tvec: A batch of translation vectors of shape `(N, 3)`. + camera_matrix: A batch of camera calibration matrices of shape `(N, 3, 3)`. + """ + return _opencv_from_cameras_projection(cameras, image_size) + + +def pulsar_from_opencv_projection( + R: torch.Tensor, + tvec: torch.Tensor, + camera_matrix: torch.Tensor, + image_size: torch.Tensor, + znear: float = 0.1, +) -> torch.Tensor: + """ + Convert OpenCV style camera parameters to Pulsar style camera parameters. + + Note: + * Pulsar does NOT support different focal lengths for x and y. + For conversion, we use the average of fx and fy. + * The Pulsar renderer MUST use a left-handed coordinate system for this + mapping to work. + * The resulting image will be vertically flipped - which has to be + addressed AFTER rendering by the user. + * The parameters `R, tvec, camera_matrix` correspond to the outputs + of `cv2.decomposeProjectionMatrix`. + + Args: + R: A batch of rotation matrices of shape `(N, 3, 3)`. + tvec: A batch of translation vectors of shape `(N, 3)`. + camera_matrix: A batch of camera calibration matrices of shape `(N, 3, 3)`. + image_size: A tensor of shape `(N, 2)` containing the sizes of the images + (height, width) attached to each camera. 
+ znear (float): The near clipping value to use for Pulsar. + + Returns: + cameras_pulsar: A batch of `N` Pulsar camera vectors in the Pulsar + convention `(N, 13)` (3 translation, 6 rotation, focal_length, sensor_width, + c_x, c_y). + """ + return _pulsar_from_opencv_projection(R, tvec, camera_matrix, image_size, znear) + + +def pulsar_from_cameras_projection( + cameras: PerspectiveCameras, + image_size: torch.Tensor, +) -> torch.Tensor: + """ + Convert PyTorch3D `PerspectiveCameras` to Pulsar style camera parameters. + + Note: + * Pulsar does NOT support different focal lengths for x and y. + For conversion, we use the average of fx and fy. + * The Pulsar renderer MUST use a left-handed coordinate system for this + mapping to work. + * The resulting image will be vertically flipped - which has to be + addressed AFTER rendering by the user. + + Args: + cameras: A batch of `N` cameras in the PyTorch3D convention. + image_size: A tensor of shape `(N, 2)` containing the sizes of the images + (height, width) attached to each camera. + + Returns: + cameras_pulsar: A batch of `N` Pulsar camera vectors in the Pulsar + convention `(N, 13)` (3 translation, 6 rotation, focal_length, sensor_width, + c_x, c_y). + """ + return _pulsar_from_cameras_projection(cameras, image_size) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/utils/checkerboard.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/utils/checkerboard.py new file mode 100644 index 0000000000000000000000000000000000000000..a0d53fd9b3c56bc0f9bccb0424c23b76e16410e0 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/utils/checkerboard.py @@ -0,0 +1,91 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + + +from typing import Optional, Tuple + +import torch +from pytorch3d.common.compat import meshgrid_ij +from pytorch3d.renderer.mesh.textures import TexturesAtlas +from pytorch3d.structures.meshes import Meshes + + +def checkerboard( + radius: int = 4, + color1: Tuple[float, ...] = (0.0, 0.0, 0.0), + color2: Tuple[float, ...] = (1.0, 1.0, 1.0), + device: Optional[torch.types._device] = None, +) -> Meshes: + """ + Returns a mesh of squares in the xy-plane where each unit is one of the two given + colors and adjacent squares have opposite colors. + Args: + radius: how many squares in each direction from the origin + color1: background color + color2: foreground color (must have the same number of channels as color1) + Returns: + new Meshes object containing one mesh. + """ + + if device is None: + device = torch.device("cpu") + if radius < 1: + raise ValueError("radius must be > 0") + + num_verts_per_row = 2 * radius + 1 + + # construct 2D grid of 3D vertices + x = torch.arange(-radius, radius + 1, device=device) + grid_y, grid_x = meshgrid_ij(x, x) + verts = torch.stack( + [grid_x, grid_y, torch.zeros((2 * radius + 1, 2 * radius + 1))], dim=-1 + ) + verts = verts.view(1, -1, 3) + + top_triangle_idx = torch.arange(0, num_verts_per_row * (num_verts_per_row - 1)) + top_triangle_idx = torch.stack( + [ + top_triangle_idx, + top_triangle_idx + 1, + top_triangle_idx + num_verts_per_row + 1, + ], + dim=-1, + ) + + bottom_triangle_idx = top_triangle_idx[:, [0, 2, 1]] + torch.tensor( + [0, 0, num_verts_per_row - 1] + ) + + faces = torch.zeros( + (1, len(top_triangle_idx) + len(bottom_triangle_idx), 3), + dtype=torch.long, + device=device, + ) + faces[0, ::2] = top_triangle_idx + faces[0, 1::2] = bottom_triangle_idx + + # construct range of indices that excludes the boundary to avoid wrong triangles + indexing_range = torch.arange(0, 2 * num_verts_per_row * num_verts_per_row).view( + num_verts_per_row, num_verts_per_row, 2 + ) + indexing_range = 
indexing_range[:-1, :-1] # removes boundaries from list of indices + indexing_range = indexing_range.reshape( + 2 * (num_verts_per_row - 1) * (num_verts_per_row - 1) + ) + + faces = faces[:, indexing_range] + + # adding color + colors = torch.tensor(color1).repeat(2 * num_verts_per_row * num_verts_per_row, 1) + colors[2::4] = torch.tensor(color2) + colors[3::4] = torch.tensor(color2) + colors = colors[None, indexing_range, None, None] + + texture_atlas = TexturesAtlas(colors) + + return Meshes(verts=verts, faces=faces, textures=texture_atlas) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/utils/ico_sphere.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/utils/ico_sphere.py new file mode 100644 index 0000000000000000000000000000000000000000..bc7ca24fd4aac046ebc5aa949b7268ce418a1b28 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/utils/ico_sphere.py @@ -0,0 +1,86 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + + +import torch +from pytorch3d.ops.subdivide_meshes import SubdivideMeshes +from pytorch3d.structures.meshes import Meshes + + +# Vertex coordinates for a level 0 ico-sphere. 
+_ico_verts0 = [ + [-0.5257, 0.8507, 0.0000], + [0.5257, 0.8507, 0.0000], + [-0.5257, -0.8507, 0.0000], + [0.5257, -0.8507, 0.0000], + [0.0000, -0.5257, 0.8507], + [0.0000, 0.5257, 0.8507], + [0.0000, -0.5257, -0.8507], + [0.0000, 0.5257, -0.8507], + [0.8507, 0.0000, -0.5257], + [0.8507, 0.0000, 0.5257], + [-0.8507, 0.0000, -0.5257], + [-0.8507, 0.0000, 0.5257], +] + + +# Faces for level 0 ico-sphere +_ico_faces0 = [ + [0, 11, 5], + [0, 5, 1], + [0, 1, 7], + [0, 7, 10], + [0, 10, 11], + [1, 5, 9], + [5, 11, 4], + [11, 10, 2], + [10, 7, 6], + [7, 1, 8], + [3, 9, 4], + [3, 4, 2], + [3, 2, 6], + [3, 6, 8], + [3, 8, 9], + [4, 9, 5], + [2, 4, 11], + [6, 2, 10], + [8, 6, 7], + [9, 8, 1], +] + + +def ico_sphere(level: int = 0, device=None): + """ + Create verts and faces for a unit ico-sphere, with all faces oriented + consistently. + + Args: + level: integer specifying the number of iterations for subdivision + of the mesh faces. Each additional level will result in four new + faces per face. + device: A torch.device object on which the outputs will be allocated. + + Returns: + Meshes object with verts and faces. 
+ """ + if device is None: + device = torch.device("cpu") + if level < 0: + raise ValueError("level must be >= 0.") + if level == 0: + verts = torch.tensor(_ico_verts0, dtype=torch.float32, device=device) + faces = torch.tensor(_ico_faces0, dtype=torch.int64, device=device) + + else: + mesh = ico_sphere(level - 1, device) + subdivide = SubdivideMeshes() + mesh = subdivide(mesh) + verts = mesh.verts_list()[0] + verts /= verts.norm(p=2, dim=1, keepdim=True) + faces = mesh.faces_list()[0] + return Meshes(verts=[verts], faces=[faces]) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/utils/torus.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/utils/torus.py new file mode 100644 index 0000000000000000000000000000000000000000..853cd895c998e6b762ce2ae25414ede27e54c7e1 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/utils/torus.py @@ -0,0 +1,73 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +from itertools import tee +from math import cos, pi, sin +from typing import Iterator, Optional, Tuple + +import torch +from pytorch3d.structures.meshes import Meshes + + +# Make an iterator over the adjacent pairs: (-1, 0), (0, 1), ..., (N - 2, N - 1) +def _make_pair_range(N: int) -> Iterator[Tuple[int, int]]: + i, j = tee(range(-1, N)) + next(j, None) + return zip(i, j) + + +def torus( + r: float, R: float, sides: int, rings: int, device: Optional[torch.device] = None +) -> Meshes: + """ + Create vertices and faces for a torus. + + Args: + r: Inner radius of the torus. + R: Outer radius of the torus. + sides: Number of inner divisions. + rings: Number of outer divisions. + device: Device on which the outputs will be allocated. 
+ + Returns: + Meshes object with the generated vertices and faces. + """ + if not (sides > 0): + raise ValueError("sides must be > 0.") + if not (rings > 0): + raise ValueError("rings must be > 0.") + device = device if device else torch.device("cpu") + + verts = [] + for i in range(rings): + # phi ranges from 0 to 2 pi (rings - 1) / rings + phi = 2 * pi * i / rings + for j in range(sides): + # theta ranges from 0 to 2 pi (sides - 1) / sides + theta = 2 * pi * j / sides + x = (R + r * cos(theta)) * cos(phi) + y = (R + r * cos(theta)) * sin(phi) + z = r * sin(theta) + # This vertex has index i * sides + j + verts.append([x, y, z]) + + faces = [] + for i0, i1 in _make_pair_range(rings): + index0 = (i0 % rings) * sides + index1 = (i1 % rings) * sides + for j0, j1 in _make_pair_range(sides): + index00 = index0 + (j0 % sides) + index01 = index0 + (j1 % sides) + index10 = index1 + (j0 % sides) + index11 = index1 + (j1 % sides) + faces.append([index00, index10, index11]) + faces.append([index11, index01, index00]) + + verts_list = [torch.tensor(verts, dtype=torch.float32, device=device)] + faces_list = [torch.tensor(faces, dtype=torch.int64, device=device)] + return Meshes(verts_list, faces_list) diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/vis/__init__.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/vis/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..eb0266b9e9ac1877a9084f31aa3207d2f0685753 --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/vis/__init__.py @@ -0,0 +1,23 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. 
+ +# pyre-unsafe + +import warnings + + +try: + from .plotly_vis import get_camera_wireframe, plot_batch_individually, plot_scene +except ModuleNotFoundError as err: + if "plotly" in str(err): + warnings.warn( + "Cannot import plotly-based visualization code." + " Please install plotly to enable (pip install plotly)." + ) + else: + raise + +from .texture_vis import texturesuv_image_matplotlib, texturesuv_image_PIL diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/vis/plotly_vis.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/vis/plotly_vis.py new file mode 100644 index 0000000000000000000000000000000000000000..595a7b5498747adc4a05e6deb5be642e036d90be --- /dev/null +++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/vis/plotly_vis.py @@ -0,0 +1,1055 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. + +# pyre-unsafe + +import warnings +from typing import Dict, List, NamedTuple, Optional, Tuple, Union + +import plotly.graph_objects as go +import torch +from plotly.subplots import make_subplots +from pytorch3d.renderer import ( + HeterogeneousRayBundle, + ray_bundle_to_ray_points, + RayBundle, + TexturesAtlas, + TexturesVertex, +) +from pytorch3d.renderer.camera_utils import camera_to_eye_at_up +from pytorch3d.renderer.cameras import CamerasBase +from pytorch3d.structures import join_meshes_as_scene, Meshes, Pointclouds + + +Struct = Union[CamerasBase, Meshes, Pointclouds, RayBundle, HeterogeneousRayBundle] + + +def _get_len(struct: Union[Struct, List[Struct]]) -> int: # pragma: no cover + """ + Returns the length (usually corresponds to the batch size) of the input structure. 
+ """ + # pyre-ignore[6] + if not _is_ray_bundle(struct): + # pyre-ignore[6] + return len(struct) + if _is_heterogeneous_ray_bundle(struct): + # pyre-ignore[16] + return len(struct.camera_counts) + # pyre-ignore[16] + return len(struct.directions) + + +def _is_ray_bundle(struct: Struct) -> bool: + """ + Args: + struct: Struct object to test + Returns: + True if something is a RayBundle, HeterogeneousRayBundle or + ImplicitronRayBundle, else False + """ + return hasattr(struct, "directions") + + +def _is_heterogeneous_ray_bundle(struct: Union[List[Struct], Struct]) -> bool: + """ + Args: + struct :object to test + Returns: + True if something is a HeterogeneousRayBundle or ImplicitronRayBundle + and cant be reduced to RayBundle else False + """ + # pyre-ignore[16] + return hasattr(struct, "camera_counts") and struct.camera_counts is not None + + +def get_camera_wireframe(scale: float = 0.3): # pragma: no cover + """ + Returns a wireframe of a 3D line-plot of a camera symbol. + """ + a = 0.5 * torch.tensor([-2, 1.5, 4]) + up1 = 0.5 * torch.tensor([0, 1.5, 4]) + up2 = 0.5 * torch.tensor([0, 2, 4]) + b = 0.5 * torch.tensor([2, 1.5, 4]) + c = 0.5 * torch.tensor([-2, -1.5, 4]) + d = 0.5 * torch.tensor([2, -1.5, 4]) + C = torch.zeros(3) + F = torch.tensor([0, 0, 3]) + camera_points = [a, up1, up2, up1, b, d, c, a, C, b, d, C, c, C, F] + lines = torch.stack([x.float() for x in camera_points]) * scale + return lines + + +class AxisArgs(NamedTuple): # pragma: no cover + showgrid: bool = False + zeroline: bool = False + showline: bool = False + ticks: str = "" + showticklabels: bool = False + backgroundcolor: str = "#fff" + showaxeslabels: bool = False + + +class Lighting(NamedTuple): # pragma: no cover + ambient: float = 0.8 + diffuse: float = 1.0 + fresnel: float = 0.0 + specular: float = 0.0 + roughness: float = 0.5 + facenormalsepsilon: float = 1e-6 + vertexnormalsepsilon: float = 1e-12 + + +@torch.no_grad() +def plot_scene( + plots: Dict[str, Dict[str, Struct]], + *, + 
    viewpoint_cameras: Optional[CamerasBase] = None,
    ncols: int = 1,
    camera_scale: float = 0.3,
    pointcloud_max_points: int = 20000,
    pointcloud_marker_size: int = 1,
    raybundle_max_rays: int = 20000,
    raybundle_max_points_per_ray: int = 1000,
    raybundle_ray_point_marker_size: int = 1,
    raybundle_ray_line_width: int = 1,
    **kwargs,
):  # pragma: no cover
    """
    Main function to visualize Cameras, Meshes, Pointclouds, and RayBundle.
    Plots the structs in `plots` into named subplots, with named traces based
    on the dictionary keys. Cameras are rendered at the camera center location
    using a wireframe. Batched structs are drawn as a single trace.

    Args:
        plots: A dict `{subplot_title: {trace_name: struct, ...}, ...}` where
            each struct is a Meshes, Pointclouds, Cameras, RayBundle or
            HeterogeneousRayBundle object to be rendered.
        viewpoint_cameras: an instance of a Cameras object providing a location
            to view the plotly plot from. If the batch size is equal to the
            number of subplots, it is a one to one mapping; if the batch size
            is 1, that viewpoint is used for all subplots. Otherwise the
            viewpoint_cameras will not be used (a warning is issued).
        ncols: the number of subplots per row.
        camera_scale: determines the size of the wireframe used to render cameras.
        pointcloud_max_points: the maximum number of points to plot from a
            pointcloud; above this a random sample of that size is used.
        pointcloud_marker_size: the size of the points rendered by plotly
            when plotting a pointcloud.
        raybundle_max_rays: maximum number of rays of a RayBundle to visualize;
            randomly subsampled without replacement above this.
        raybundle_max_points_per_ray: the maximum number of points per ray to
            visualize; above this a random sample of that size is used.
        raybundle_ray_point_marker_size: the size of the ray points of a
            plotted RayBundle.
        raybundle_ray_line_width: the width of the plotted rays of a RayBundle.
        **kwargs: Accepts `lighting` (a Lighting object), `axis_args` (an
            AxisArgs object applied to all 3 axes) and any of the `xaxis`,
            `yaxis` and `zaxis` dicts which plotly's scene accepts.

    Returns:
        The populated plotly figure.

    For worked examples (multiple subplots, batched structs, viewpoint cameras,
    axis kwargs) see the tutorials in pytorch3d/docs/tutorials, namely
    render_colored_points.ipynb and render_textured_meshes.ipynb.
    """

    subplots = list(plots.keys())
    fig = _gen_fig_with_subplots(len(subplots), ncols, subplots)
    lighting = kwargs.get("lighting", Lighting())._asdict()
    axis_args_dict = kwargs.get("axis_args", AxisArgs())._asdict()

    # Set axis arguments to defaults defined at the top of this file
    x_settings = {**axis_args_dict}
    y_settings = {**axis_args_dict}
    z_settings = {**axis_args_dict}

    # Update the axes with any axis settings passed in as kwargs.
    x_settings.update(**kwargs.get("xaxis", {}))
    y_settings.update(**kwargs.get("yaxis", {}))
    z_settings.update(**kwargs.get("zaxis", {}))

    camera = {
        "up": {
            "x": 0.0,
            "y": 1.0,
            "z": 0.0,
        }  # set the up vector to match PyTorch3D world coordinates conventions
    }
    viewpoints_eye_at_up_world = None
    if viewpoint_cameras:
        n_viewpoint_cameras = len(viewpoint_cameras)
        if n_viewpoint_cameras == len(subplots) or n_viewpoint_cameras == 1:
            # Calculate the vectors eye, at, up in world space
            # to initialize the position of the camera in
            # the plotly figure
            viewpoints_eye_at_up_world = camera_to_eye_at_up(
                viewpoint_cameras.get_world_to_view_transform().cpu()
            )
        else:
            msg = "Invalid number {} of viewpoint cameras were provided. Either 1 \
            or {} cameras are required".format(len(viewpoint_cameras), len(subplots))
            warnings.warn(msg)

    for subplot_idx in range(len(subplots)):
        subplot_name = subplots[subplot_idx]
        traces = plots[subplot_name]
        for trace_name, struct in traces.items():
            # Dispatch on the struct type; each helper adds one plotly trace
            # and grows the subplot's axis bounds to include it.
            if isinstance(struct, Meshes):
                _add_mesh_trace(fig, struct, trace_name, subplot_idx, ncols, lighting)
            elif isinstance(struct, Pointclouds):
                _add_pointcloud_trace(
                    fig,
                    struct,
                    trace_name,
                    subplot_idx,
                    ncols,
                    pointcloud_max_points,
                    pointcloud_marker_size,
                )
            elif isinstance(struct, CamerasBase):
                _add_camera_trace(
                    fig, struct, trace_name, subplot_idx, ncols, camera_scale
                )
            elif _is_ray_bundle(struct):
                _add_ray_bundle_trace(
                    fig,
                    struct,
                    trace_name,
                    subplot_idx,
                    ncols,
                    raybundle_max_rays,
                    raybundle_max_points_per_ray,
                    raybundle_ray_point_marker_size,
                    raybundle_ray_line_width,
                )
            else:
                raise ValueError(
                    "struct {} is not a Cameras, Meshes, Pointclouds,".format(struct)
                    + " , RayBundle or HeterogeneousRayBundle object."
                )

        # Ensure update for every subplot.
        # NOTE(review): this local shadows the enclosing function name
        # `plot_scene`; harmless here since the function is not re-entered.
        plot_scene = "scene" + str(subplot_idx + 1)
        current_layout = fig["layout"][plot_scene]
        xaxis = current_layout["xaxis"]
        yaxis = current_layout["yaxis"]
        zaxis = current_layout["zaxis"]

        # Update the axes with our above default and provided settings.
        xaxis.update(**x_settings)
        yaxis.update(**y_settings)
        zaxis.update(**z_settings)

        # update camera viewpoint if provided
        if viewpoints_eye_at_up_world is not None:
            # Use camera params for batch index or the first camera if only one provided.
            # pyre-fixme[61]: `n_viewpoint_cameras` is undefined, or not always defined.
            viewpoint_idx = min(n_viewpoint_cameras - 1, subplot_idx)

            eye, at, up = (i[viewpoint_idx] for i in viewpoints_eye_at_up_world)
            eye_x, eye_y, eye_z = eye.tolist()
            at_x, at_y, at_z = at.tolist()
            up_x, up_y, up_z = up.tolist()

            # scale camera eye to plotly [-1, 1] ranges
            x_range = xaxis["range"]
            y_range = yaxis["range"]
            z_range = zaxis["range"]

            eye_x = _scale_camera_to_bounds(eye_x, x_range, True)
            eye_y = _scale_camera_to_bounds(eye_y, y_range, True)
            eye_z = _scale_camera_to_bounds(eye_z, z_range, True)

            at_x = _scale_camera_to_bounds(at_x, x_range, True)
            at_y = _scale_camera_to_bounds(at_y, y_range, True)
            at_z = _scale_camera_to_bounds(at_z, z_range, True)

            # `up` is a direction, not a position, so it is only scaled.
            up_x = _scale_camera_to_bounds(up_x, x_range, False)
            up_y = _scale_camera_to_bounds(up_y, y_range, False)
            up_z = _scale_camera_to_bounds(up_z, z_range, False)

            camera["eye"] = {"x": eye_x, "y": eye_y, "z": eye_z}
            camera["center"] = {"x": at_x, "y": at_y, "z": at_z}
            camera["up"] = {"x": up_x, "y": up_y, "z": up_z}

        current_layout.update(
            {
                "xaxis": xaxis,
                "yaxis": yaxis,
                "zaxis": zaxis,
                "aspectmode": "cube",
                "camera": camera,
            }
        )

    return fig


@torch.no_grad()
def plot_batch_individually(
    batched_structs: Union[
        List[Struct],
        Struct,
    ],
    *,
    viewpoint_cameras: Optional[CamerasBase] = None,
    ncols: int = 1,
    extend_struct: bool = True,
    subplot_titles: Optional[List[str]] = None,
    **kwargs,
):  # pragma: no cover
    """
    This is a higher level plotting function than plot_scene, for plotting
    Cameras, Meshes, Pointclouds, and RayBundle in simple cases. The simplest use
    is to plot a single Cameras, Meshes, Pointclouds, or a RayBundle object,
    where you just pass it in as a one element list. This will plot each batch
    element in a separate subplot.

    More generally, you can supply multiple Cameras, Meshes, Pointclouds, or RayBundle
    having the same batch size `n`.
In this case, there will be `n` subplots, + each depicting the corresponding batch element of all the inputs. + + In addition, you can include Cameras, Meshes, Pointclouds, or RayBundle of size 1 in + the input. These will either be rendered in the first subplot + (if extend_struct is False), or in every subplot. + RayBundle includes ImplicitronRayBundle and HeterogeneousRaybundle. + + Args: + batched_structs: a list of Cameras, Meshes, Pointclouds and RayBundle to be + rendered. Each structure's corresponding batch element will be plotted in a + single subplot, resulting in n subplots for a batch of size n. Every struct + should either have the same batch size or be of batch size 1. See extend_struct + and the description above for how batch size 1 structs are handled. Also accepts + a single Cameras, Meshes, Pointclouds, and RayBundle object, which will have + each individual element plotted in its own subplot. + viewpoint_cameras: an instance of a Cameras object providing a location + to view the plotly plot from. If the batch size is equal + to the number of subplots, it is a one to one mapping. + If the batch size is 1, then that viewpoint will be used + for all the subplots will be viewed from that point. + Otherwise, the viewpoint_cameras will not be used. + ncols: the number of subplots per row + extend_struct: if True, indicates that structs of batch size 1 + should be plotted in every subplot. + subplot_titles: strings to name each subplot + **kwargs: keyword arguments which are passed to plot_scene. + See plot_scene documentation for details. + + Example: + + ..code-block::python + + mesh = ... # mesh of batch size 2 + point_cloud = ... 
# point_cloud of batch size 2 + fig = plot_batch_individually([mesh, point_cloud], subplot_titles=["plot1", "plot2"]) + fig.show() + + # this is equivalent to the below figure + fig = plot_scene({ + "plot1": { + "trace1-1": mesh[0], + "trace1-2": point_cloud[0] + }, + "plot2":{ + "trace2-1": mesh[1], + "trace2-2": point_cloud[1] + } + }) + fig.show() + + The above example will render two subplots which each have both a mesh and pointcloud. + For more examples look at the pytorch3d tutorials at `pytorch3d/docs/tutorials`, + in particular the files rendered_color_points.ipynb and rendered_textured_meshes.ipynb. + """ + + # check that every batch is the same size or is size 1 + if _get_len(batched_structs) == 0: + msg = "No structs to plot" + warnings.warn(msg) + return + max_size = 0 + if isinstance(batched_structs, list): + max_size = max(_get_len(s) for s in batched_structs) + for struct in batched_structs: + struct_len = _get_len(struct) + if struct_len not in (1, max_size): + msg = "invalid batch size {} provided: {}".format(struct_len, struct) + raise ValueError(msg) + else: + max_size = _get_len(batched_structs) + + if max_size == 0: + msg = "No data is provided with at least one element" + raise ValueError(msg) + + if subplot_titles: + if len(subplot_titles) != max_size: + msg = "invalid number of subplot titles" + raise ValueError(msg) + + # if we are dealing with HeterogeneousRayBundle of ImplicitronRayBundle create + # first indexes for faster + first_idxs = None + if _is_heterogeneous_ray_bundle(batched_structs): + # pyre-ignore[16] + cumsum = batched_structs.camera_counts.cumsum(dim=0) + first_idxs = torch.cat((cumsum.new_zeros((1,)), cumsum)) + + scene_dictionary = {} + # construct the scene dictionary + for scene_num in range(max_size): + subplot_title = ( + subplot_titles[scene_num] + if subplot_titles + else "subplot " + str(scene_num + 1) + ) + scene_dictionary[subplot_title] = {} + + if isinstance(batched_structs, list): + for i, batched_struct in 
enumerate(batched_structs): + first_idxs = None + if _is_heterogeneous_ray_bundle(batched_structs[i]): + # pyre-ignore[16] + cumsum = batched_struct.camera_counts.cumsum(dim=0) + first_idxs = torch.cat((cumsum.new_zeros((1,)), cumsum)) + # check for whether this struct needs to be extended + batched_struct_len = _get_len(batched_struct) + if i >= batched_struct_len and not extend_struct: + continue + _add_struct_from_batch( + batched_struct, + scene_num, + subplot_title, + scene_dictionary, + i + 1, + first_idxs=first_idxs, + ) + else: # batched_structs is a single struct + _add_struct_from_batch( + batched_structs, + scene_num, + subplot_title, + scene_dictionary, + first_idxs=first_idxs, + ) + + return plot_scene( + scene_dictionary, viewpoint_cameras=viewpoint_cameras, ncols=ncols, **kwargs + ) + + +def _add_struct_from_batch( + batched_struct: Struct, + scene_num: int, + subplot_title: str, + scene_dictionary: Dict[str, Dict[str, Struct]], + trace_idx: int = 1, + first_idxs: Optional[torch.Tensor] = None, +) -> None: # pragma: no cover + """ + Adds the struct corresponding to the given scene_num index to + a provided scene_dictionary to be passed in to plot_scene + + Args: + batched_struct: the batched data structure to add to the dict + scene_num: the subplot from plot_batch_individually which this struct + should be added to + subplot_title: the title of the subplot + scene_dictionary: the dictionary to add the indexed struct to + trace_idx: the trace number, starting at 1 for this struct's trace + """ + struct = None + if isinstance(batched_struct, CamerasBase): + # we can't index directly into camera batches + R, T = batched_struct.R, batched_struct.T + # pyre-fixme[6]: For 1st argument expected + # `pyre_extensions.PyreReadOnly[Sized]` but got `Union[Tensor, Module]`. + r_idx = min(scene_num, len(R) - 1) + # pyre-fixme[6]: For 1st argument expected + # `pyre_extensions.PyreReadOnly[Sized]` but got `Union[Tensor, Module]`. 
+ t_idx = min(scene_num, len(T) - 1) + # pyre-fixme[29]: `Union[(self: TensorBase, indices: Union[None, slice[Any, A... + R = R[r_idx].unsqueeze(0) + # pyre-fixme[29]: `Union[(self: TensorBase, indices: Union[None, slice[Any, A... + T = T[t_idx].unsqueeze(0) + struct = CamerasBase(device=batched_struct.device, R=R, T=T) + elif _is_ray_bundle(batched_struct) and not _is_heterogeneous_ray_bundle( + batched_struct + ): + # for RayBundle we treat the camera count as the batch index + struct_idx = min(scene_num, _get_len(batched_struct) - 1) + + struct = RayBundle( + **{ + attr: getattr(batched_struct, attr)[struct_idx] + for attr in ["origins", "directions", "lengths", "xys"] + } + ) + elif _is_heterogeneous_ray_bundle(batched_struct): + # for RayBundle we treat the camera count as the batch index + struct_idx = min(scene_num, _get_len(batched_struct) - 1) + + struct = RayBundle( + **{ + attr: getattr(batched_struct, attr)[ + # pyre-ignore[16] + first_idxs[struct_idx] : first_idxs[struct_idx + 1] + ] + for attr in ["origins", "directions", "lengths", "xys"] + } + ) + + else: # batched meshes and pointclouds are indexable + struct_idx = min(scene_num, _get_len(batched_struct) - 1) + # pyre-ignore[16] + struct = batched_struct[struct_idx] + trace_name = "trace{}-{}".format(scene_num + 1, trace_idx) + scene_dictionary[subplot_title][trace_name] = struct + + +def _add_mesh_trace( + fig: go.Figure, + meshes: Meshes, + trace_name: str, + subplot_idx: int, + ncols: int, + lighting: Lighting, +) -> None: # pragma: no cover + """ + Adds a trace rendering a Meshes object to the passed in figure, with + a given name and in a specific subplot. + + Args: + fig: plotly figure to add the trace within. + meshes: Meshes object to render. It can be batched. + trace_name: name to label the trace with. + subplot_idx: identifies the subplot, with 0 being the top left. + ncols: the number of subplots per row. + lighting: a Lighting object that specifies the Mesh3D lighting. 
+ """ + + mesh = join_meshes_as_scene(meshes) + mesh = mesh.detach().cpu() + verts = mesh.verts_packed() + faces = mesh.faces_packed() + # If mesh has vertex colors or face colors, use them + # for figure, otherwise use plotly's default colors. + verts_rgb = None + faces_rgb = None + if isinstance(mesh.textures, TexturesVertex): + verts_rgb = mesh.textures.verts_features_packed() + verts_rgb.clamp_(min=0.0, max=1.0) + verts_rgb = torch.tensor(255.0) * verts_rgb + if isinstance(mesh.textures, TexturesAtlas): + atlas = mesh.textures.atlas_packed() + # If K==1 + if atlas.shape[1] == 1 and atlas.shape[3] == 3: + faces_rgb = atlas[:, 0, 0] + + # Reposition the unused vertices to be "inside" the object + # (i.e. they won't be visible in the plot). + verts_used = torch.zeros((verts.shape[0],), dtype=torch.bool) + verts_used[torch.unique(faces)] = True + verts_center = verts[verts_used].mean(0) + verts[~verts_used] = verts_center + + row, col = subplot_idx // ncols + 1, subplot_idx % ncols + 1 + fig.add_trace( + go.Mesh3d( + x=verts[:, 0], + y=verts[:, 1], + z=verts[:, 2], + vertexcolor=verts_rgb, + facecolor=faces_rgb, + i=faces[:, 0], + j=faces[:, 1], + k=faces[:, 2], + lighting=lighting, + name=trace_name, + ), + row=row, + col=col, + ) + + # Access the current subplot's scene configuration + plot_scene = "scene" + str(subplot_idx + 1) + current_layout = fig["layout"][plot_scene] + + # update the bounds of the axes for the current trace + max_expand = (verts.max(0)[0] - verts.min(0)[0]).max() + _update_axes_bounds(verts_center, max_expand, current_layout) + + +def _add_pointcloud_trace( + fig: go.Figure, + pointclouds: Pointclouds, + trace_name: str, + subplot_idx: int, + ncols: int, + max_points_per_pointcloud: int, + marker_size: int, +) -> None: # pragma: no cover + """ + Adds a trace rendering a Pointclouds object to the passed in figure, with + a given name and in a specific subplot. + + Args: + fig: plotly figure to add the trace within. 
+ pointclouds: Pointclouds object to render. It can be batched. + trace_name: name to label the trace with. + subplot_idx: identifies the subplot, with 0 being the top left. + ncols: the number of subplots per row. + max_points_per_pointcloud: the number of points to render, which are randomly sampled. + marker_size: the size of the rendered points + """ + pointclouds = pointclouds.detach().cpu().subsample(max_points_per_pointcloud) + verts = pointclouds.points_packed() + features = pointclouds.features_packed() + + color = None + if features is not None: + if features.shape[1] == 4: # rgba + template = "rgb(%d, %d, %d, %f)" + rgb = (features[:, :3].clamp(0.0, 1.0) * 255).int() + color = [template % (*rgb_, a_) for rgb_, a_ in zip(rgb, features[:, 3])] + + if features.shape[1] == 3: + template = "rgb(%d, %d, %d)" + rgb = (features.clamp(0.0, 1.0) * 255).int() + color = [template % (r, g, b) for r, g, b in rgb] + + row = subplot_idx // ncols + 1 + col = subplot_idx % ncols + 1 + fig.add_trace( + go.Scatter3d( + x=verts[:, 0], + y=verts[:, 1], + z=verts[:, 2], + marker={"color": color, "size": marker_size}, + mode="markers", + name=trace_name, + ), + row=row, + col=col, + ) + + # Access the current subplot's scene configuration + plot_scene = "scene" + str(subplot_idx + 1) + current_layout = fig["layout"][plot_scene] + + # update the bounds of the axes for the current trace + verts_center = verts.mean(0) + max_expand = (verts.max(0)[0] - verts.min(0)[0]).max() + _update_axes_bounds(verts_center, max_expand, current_layout) + + +def _add_camera_trace( + fig: go.Figure, + cameras: CamerasBase, + trace_name: str, + subplot_idx: int, + ncols: int, + camera_scale: float, +) -> None: # pragma: no cover + """ + Adds a trace rendering a Cameras object to the passed in figure, with + a given name and in a specific subplot. + + Args: + fig: plotly figure to add the trace within. + cameras: the Cameras object to render. It can be batched. 
        trace_name: name to label the trace with.
        subplot_idx: identifies the subplot, with 0 being the top left.
        ncols: the number of subplots per row.
        camera_scale: the size of the wireframe used to render the Cameras object.
    """
    cam_wires = get_camera_wireframe(camera_scale).to(cameras.device)
    cam_trans = cameras.get_world_to_view_transform().inverse()
    cam_wires_trans = cam_trans.transform_points(cam_wires).detach().cpu()
    # if batch size is 1, unsqueeze to add dimension
    if len(cam_wires_trans.shape) < 3:
        cam_wires_trans = cam_wires_trans.unsqueeze(0)

    nan_tensor = torch.Tensor([[float("NaN")] * 3])
    all_cam_wires = cam_wires_trans[0]
    for wire in cam_wires_trans[1:]:
        # We combine camera points into a single tensor to plot them in a
        # single trace. The NaNs are inserted between sets of camera
        # points so that the lines drawn by Plotly are not drawn between
        # points that belong to different cameras.
        all_cam_wires = torch.cat((all_cam_wires, nan_tensor, wire))
    x, y, z = all_cam_wires.detach().cpu().numpy().T.astype(float)

    row, col = subplot_idx // ncols + 1, subplot_idx % ncols + 1
    fig.add_trace(
        go.Scatter3d(x=x, y=y, z=z, marker={"size": 1}, name=trace_name),
        row=row,
        col=col,
    )

    # Access the current subplot's scene configuration
    plot_scene = "scene" + str(subplot_idx + 1)
    current_layout = fig["layout"][plot_scene]

    # flatten for bounds calculations
    flattened_wires = cam_wires_trans.flatten(0, 1)
    verts_center = flattened_wires.mean(0)
    max_expand = (flattened_wires.max(0)[0] - flattened_wires.min(0)[0]).max()
    _update_axes_bounds(verts_center, max_expand, current_layout)


def _add_ray_bundle_trace(
    fig: go.Figure,
    ray_bundle: Union[RayBundle, HeterogeneousRayBundle],
    trace_name: str,
    subplot_idx: int,
    ncols: int,
    max_rays: int,
    max_points_per_ray: int,
    marker_size: int,
    line_width: int,
) -> None:  # pragma: no cover
    """
    Adds a trace rendering a ray bundle object
    to the passed in figure, with a given name and in a specific subplot.

    Args:
        fig: plotly figure to add the trace within.
        ray_bundle: the RayBundle, ImplicitronRayBundle or HeterogeneousRayBundle
            to render. It can be batched.
        trace_name: name to label the trace with.
        subplot_idx: identifies the subplot, with 0 being the top left.
        ncols: the number of subplots per row.
        max_rays: maximum number of plotted rays in total. Randomly subsamples
            without replacement in case the number of rays is bigger than max_rays.
        max_points_per_ray: maximum number of points plotted per ray.
        marker_size: the size of the ray point markers.
        line_width: the width of the ray lines.
    """

    n_pts_per_ray = ray_bundle.lengths.shape[-1]
    n_rays = ray_bundle.lengths.shape[:-1].numel()

    # flatten all batches of rays into a single big bundle
    ray_bundle_flat = RayBundle(
        **{
            attr: torch.flatten(getattr(ray_bundle, attr), start_dim=0, end_dim=-2)
            for attr in ["origins", "directions", "lengths", "xys"]
        }
    )

    # subsample the rays (if needed)
    if n_rays > max_rays:
        indices_rays = torch.randperm(n_rays)[:max_rays]
        ray_bundle_flat = RayBundle(
            **{
                attr: getattr(ray_bundle_flat, attr)[indices_rays]
                for attr in ["origins", "directions", "lengths", "xys"]
            }
        )

    # make ray line endpoints: each ray is drawn from its nearest to its
    # farthest sampled depth
    min_max_ray_depth = torch.stack(
        [
            ray_bundle_flat.lengths.min(dim=1).values,
            ray_bundle_flat.lengths.max(dim=1).values,
        ],
        dim=-1,
    )
    ray_lines_endpoints = ray_bundle_to_ray_points(
        ray_bundle_flat._replace(lengths=min_max_ray_depth)
    )

    # make the ray lines for plotly plotting
    nan_tensor = torch.tensor(
        [[float("NaN")] * 3],
        device=ray_lines_endpoints.device,
        dtype=ray_lines_endpoints.dtype,
    )
    # NOTE(review): torch.empty((1, 3)) seeds the polyline with one
    # *uninitialized* row, which is emitted into the trace (isolated by the
    # following NaN separator). torch.empty(size=(0, 3)) would avoid the
    # spurious point - confirm before changing.
    ray_lines = torch.empty(size=(1, 3), device=ray_lines_endpoints.device)
    for ray_line in ray_lines_endpoints:
        # We combine the ray lines into a single tensor to plot them in a
        # single trace. The NaNs are inserted between sets of ray lines
        # so that the lines drawn by Plotly are not drawn between
        # lines that belong to different rays.
        ray_lines = torch.cat((ray_lines, nan_tensor, ray_line))
    x, y, z = ray_lines.detach().cpu().numpy().T.astype(float)
    row, col = subplot_idx // ncols + 1, subplot_idx % ncols + 1
    fig.add_trace(
        go.Scatter3d(
            x=x,
            y=y,
            z=z,
            marker={"size": 0.1},
            line={"width": line_width},
            name=trace_name,
        ),
        row=row,
        col=col,
    )

    # subsample the ray points (if needed)
    if n_pts_per_ray > max_points_per_ray:
        indices_ray_pts = torch.cat(
            [
                torch.randperm(n_pts_per_ray)[:max_points_per_ray] + ri * n_pts_per_ray
                for ri in range(ray_bundle_flat.lengths.shape[0])
            ]
        )
        ray_bundle_flat = ray_bundle_flat._replace(
            lengths=ray_bundle_flat.lengths.reshape(-1)[indices_ray_pts].reshape(
                ray_bundle_flat.lengths.shape[0], -1
            )
        )

    # plot the ray points as a second trace
    ray_points = (
        ray_bundle_to_ray_points(ray_bundle_flat)
        .view(-1, 3)
        .detach()
        .cpu()
        .numpy()
        .astype(float)
    )
    fig.add_trace(
        go.Scatter3d(
            x=ray_points[:, 0],
            y=ray_points[:, 1],
            z=ray_points[:, 2],
            mode="markers",
            name=trace_name + "_points",
            marker={"size": marker_size},
        ),
        row=row,
        col=col,
    )

    # Access the current subplot's scene configuration
    plot_scene = "scene" + str(subplot_idx + 1)
    current_layout = fig["layout"][plot_scene]

    # update the bounds of the axes for the current trace
    all_ray_points = ray_bundle_to_ray_points(ray_bundle).reshape(-1, 3)
    ray_points_center = all_ray_points.mean(dim=0)
    max_expand = (all_ray_points.max(0)[0] - all_ray_points.min(0)[0]).max().item()
    _update_axes_bounds(ray_points_center, float(max_expand), current_layout)


def _gen_fig_with_subplots(
    batch_size: int, ncols: int, subplot_titles: List[str]
):  # pragma: no cover
    """
    Takes in the number of objects to be plotted and generate a plotly figure
    with the appropriate number and orientation of
titled subplots.
    Args:
        batch_size: the number of elements in the batch of objects to be visualized.
        ncols: number of subplots in the same row.
        subplot_titles: titles for the subplot(s). list of strings of length batch_size.

    Returns:
        Plotly figure with ncols subplots per row, and batch_size subplots.
    """
    fig_rows = batch_size // ncols
    if batch_size % ncols != 0:
        fig_rows += 1  # allow for non-uniform rows
    fig_cols = ncols
    fig_type = [{"type": "scene"}]
    specs = [fig_type * fig_cols] * fig_rows
    # subplot_titles must have one title per subplot
    fig = make_subplots(
        rows=fig_rows,
        cols=fig_cols,
        specs=specs,
        subplot_titles=subplot_titles,
        column_widths=[1.0] * fig_cols,
    )
    return fig


def _update_axes_bounds(
    verts_center: torch.Tensor,
    max_expand: float,
    current_layout: go.Scene,
) -> None:  # pragma: no cover
    """
    Takes in the vertices' center point and max spread, and the current plotly figure
    layout and updates the layout to have bounds that include all traces for that subplot.
    Args:
        verts_center: tensor of size (3) corresponding to a trace's vertices' center point.
        max_expand: the maximum spread in any dimension of the trace's vertices.
        current_layout: the plotly figure layout scene corresponding to the referenced trace.
    """
    verts_center = verts_center.detach().cpu()
    verts_min = verts_center - max_expand
    verts_max = verts_center + max_expand
    # transpose so each row of `bounds` is one axis' (min, max) pair
    bounds = torch.t(torch.stack((verts_min, verts_max)))

    # Ensure that within a subplot, the bounds capture all traces
    old_xrange, old_yrange, old_zrange = (
        current_layout["xaxis"]["range"],
        current_layout["yaxis"]["range"],
        current_layout["zaxis"]["range"],
    )
    x_range, y_range, z_range = bounds
    if old_xrange is not None:
        x_range[0] = min(x_range[0], old_xrange[0])
        x_range[1] = max(x_range[1], old_xrange[1])
    if old_yrange is not None:
        y_range[0] = min(y_range[0], old_yrange[0])
        y_range[1] = max(y_range[1], old_yrange[1])
    if old_zrange is not None:
        z_range[0] = min(z_range[0], old_zrange[0])
        z_range[1] = max(z_range[1], old_zrange[1])

    xaxis = {"range": x_range}
    yaxis = {"range": y_range}
    zaxis = {"range": z_range}
    current_layout.update({"xaxis": xaxis, "yaxis": yaxis, "zaxis": zaxis})


def _scale_camera_to_bounds(
    coordinate: float, axis_bounds: Tuple[float, float], is_position: bool
) -> float:  # pragma: no cover
    """
    We set our plotly plot's axes' bounding box to [-1,1]x[-1,1]x[-1,1]. As such,
    the plotly camera location has to be scaled accordingly to have its world coordinates
    correspond to its relative plotted coordinates for viewing the plotly plot.
    This function does the scaling and offset to transform the coordinates.

    Args:
        coordinate: the float value to be transformed
        axis_bounds: the bounds of the plotly plot for the axis which
            the coordinate argument refers to
        is_position: If true, the float value is the coordinate of a position, and so must
            be moved in to [-1,1]. Otherwise it is a component of a direction, and so needs only
            to be scaled.
    """
    scale = (axis_bounds[1] - axis_bounds[0]) / 2
    if not is_position:
        return coordinate / scale
    offset = (axis_bounds[1] / scale) - 1
    return coordinate / scale - offset
diff --git a/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/vis/texture_vis.py b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/vis/texture_vis.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f044b2c00110e6ce70928cc0adac51e127ecb65
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill2_benchmark/cfdp_experiment/cfdp/pytorch3d/pytorch3d/vis/texture_vis.py
@@ -0,0 +1,114 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

# pyre-unsafe

from typing import Any, Optional

import numpy as np
from PIL import Image, ImageDraw
from pytorch3d.renderer.mesh import TexturesUV


def texturesuv_image_matplotlib(
    texture: TexturesUV,
    *,
    texture_index: int = 0,
    radius: float = 1,
    color=(1.0, 0.0, 0.0),
    subsample: Optional[int] = 10000,
    origin: str = "upper",
) -> None:  # pragma: no cover
    """
    Plot the texture image for one element of a TexturesUV with
    matplotlib together with verts_uvs positions circled.
    In particular a value in verts_uvs which is never referenced
    in faces_uvs will still be plotted.
    This is for debugging purposes, e.g. to align the map with
    the uv coordinates. In particular, matplotlib
    is used which is not an official dependency of PyTorch3D.

    Args:
        texture: a TexturesUV object with one mesh
        texture_index: index in the batch to plot
        radius: plotted circle radius in pixels
        color: any matplotlib-understood color for the circles.
        subsample: if not None, number of points to plot.
            Otherwise all points are plotted.
        origin: "upper" or "lower" like matplotlib.imshow .
        upper (the default) matches texturesuv_image_PIL.
    """

    # matplotlib is imported lazily so that it is only required when this
    # debugging helper is actually called.
    import matplotlib.pyplot as plt
    from matplotlib.patches import Circle

    texture_image = texture.maps_padded()
    centers = texture.centers_for_image(index=texture_index).numpy()

    ax = plt.gca()
    ax.imshow(texture_image[texture_index].detach().cpu().numpy(), origin=origin)

    n_points = centers.shape[0]
    if subsample is None or n_points <= subsample:
        indices = range(n_points)
    else:
        indices = np.random.choice(n_points, subsample, replace=False)
    for i in indices:
        # setting clip_on=False makes it obvious when
        # we have UV coordinates outside the correct range
        # pyre-fixme[6]: For 1st argument expected `Tuple[float, float]` but got
        #  `ndarray[Any, Any]`.
        ax.add_patch(Circle(centers[i], radius, color=color, clip_on=False))


def texturesuv_image_PIL(
    texture: TexturesUV,
    *,
    texture_index: int = 0,
    radius: float = 1,
    color: Any = "red",
    subsample: Optional[int] = 10000,
):  # pragma: no cover
    """
    Return a PIL image of the texture image of one element of the batch
    from a TexturesUV, together with the verts_uvs positions circled.
    In particular a value in verts_uvs which is never referenced
    in faces_uvs will still be plotted.
    This is for debugging purposes, e.g. to align the map with
    the uv coordinates. Unlike texturesuv_image_matplotlib, this function
    uses only PIL (the original docstring's mention of matplotlib was a
    copy-paste error - matplotlib is not used here).

    Args:
        texture: a TexturesUV object with one mesh
        texture_index: index in the batch to plot
        radius: plotted circle radius in pixels
        color: any PIL-understood color for the circles.
        subsample: if not None, number of points to plot.
            Otherwise all points are plotted.

    Returns:
        PIL Image object.
    """

    centers = texture.centers_for_image(index=texture_index).numpy()
    texture_image = texture.maps_padded()
    # scale [0, 1] float texture values to uint8 pixels
    texture_array = (texture_image[texture_index] * 255).cpu().numpy().astype(np.uint8)

    image = Image.fromarray(texture_array)
    draw = ImageDraw.Draw(image)

    n_points = centers.shape[0]
    if subsample is None or n_points <= subsample:
        indices = range(n_points)
    else:
        indices = np.random.choice(n_points, subsample, replace=False)

    for i in indices:
        # draw one circle per (possibly subsampled) verts_uvs center
        x = centers[i][0]
        y = centers[i][1]
        draw.ellipse([(x - radius, y - radius), (x + radius, y + radius)], fill=color)

    return image