_target_: sam3d_objects.pipeline.inference_pipeline_pointmap.InferencePipelinePointMap
ss_generator_config_path: ss_generator.yaml
ss_generator_ckpt_path: ss_generator.ckpt
slat_generator_config_path: slat_generator.yaml
slat_generator_ckpt_path: slat_generator.ckpt
ss_decoder_config_path: ss_decoder.yaml
ss_decoder_ckpt_path: ss_decoder.ckpt
slat_decoder_gs_config_path: slat_decoder_gs.yaml
slat_decoder_gs_ckpt_path: slat_decoder_gs.ckpt
slat_decoder_gs_4_config_path: slat_decoder_gs_4.yaml
slat_decoder_gs_4_ckpt_path: slat_decoder_gs_4.ckpt
slat_decoder_mesh_config_path: slat_decoder_mesh.yaml
slat_decoder_mesh_ckpt_path: slat_decoder_mesh.ckpt
pad_size: 1.0
dtype: float16
version: 3dfy_v9
slat_cfg_strength: 1
slat_rescale_t: 1
downsample_ss_dist: 1
compile_model: true
ss_condition_input_mapping: []
ss_preprocessor:
  _target_: sam3d_objects.data.dataset.tdfy.preprocessor.PreProcessor
  img_mask_joint_transform: []
  img_mask_pointmap_joint_transform:
    - _partial_: true
      _target_: sam3d_objects.data.dataset.tdfy.img_and_mask_transforms.resize_all_to_same_size
    - _partial_: true
      _target_: sam3d_objects.data.dataset.tdfy.img_and_mask_transforms.crop_around_mask_with_padding
      box_size_factor: 1.2
      padding_factor: 0.0
  img_transform:
    _target_: torchvision.transforms.Compose
    transforms:
      - _partial_: true
        _target_: sam3d_objects.data.dataset.tdfy.img_processing.pad_to_square_centered
      - _target_: torchvision.transforms.Resize
        size: 518
  mask_transform:
    _target_: torchvision.transforms.Compose
    transforms:
      - _partial_: true
        _target_: sam3d_objects.data.dataset.tdfy.img_processing.pad_to_square_centered
      - _target_: torchvision.transforms.Resize
        interpolation: 0
        size: 518
  normalize_pointmap: true
  pointmap_normalizer:
    _target_: sam3d_objects.data.dataset.tdfy.img_and_mask_transforms.ObjectCentricSSI
    allow_scale_and_shift_override: true
    use_scene_scale: true
  pointmap_transform:
    _target_: torchvision.transforms.Compose
    transforms:
      - _partial_: true
        _target_: sam3d_objects.data.dataset.tdfy.img_processing.pad_to_square_centered
      - _target_: torchvision.transforms.Resize
        interpolation: 0
        size: 518
pose_decoder_name: ScaleShiftInvariant
depth_model:
  _target_: sam3d_objects.pipeline.depth_models.moge.MoGe
  model:
    _target_: moge.model.v1.MoGeModel.from_pretrained
    pretrained_model_name_or_path: Ruicheng/moge-vitl
slat_condition_input_mapping: []
slat_preprocessor:
  _target_: sam3d_objects.data.dataset.tdfy.preprocessor.PreProcessor
  img_transform:
    _target_: torchvision.transforms.Compose
    transforms:
      - _target_: sam3d_objects.data.dataset.tdfy.img_processing.pad_to_square_centered
        _partial_: true
      - _target_: torchvision.transforms.Resize
        size: 518
  mask_transform:
    _target_: torchvision.transforms.Compose
    transforms:
      - _target_: sam3d_objects.data.dataset.tdfy.img_processing.pad_to_square_centered
        _partial_: true
      - _target_: torchvision.transforms.Resize
        size: 518
        interpolation: 0
  img_mask_joint_transform:
    - _target_: sam3d_objects.data.dataset.tdfy.img_and_mask_transforms.crop_around_mask_with_padding
      _partial_: true
      box_size_factor: 1.2
      padding_factor: 0.0
slat_mean:
  - 0.12211431
  - 0.37204156
  - -1.26521907
  - -2.05276058
  - -3.10432536
  - -0.11294304
  - -0.85146744
  - 0.45506954
slat_std:
  - 2.37326008
  - 2.13174402
  - 2.2413953
  - 2.30589401
  - 2.1191894
  - 1.8969511
  - 2.41684989
  - 2.08374642