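# Hydra/OmegaConf config for the sam3d_objects pointmap-conditioned inference pipeline
# (InferencePipelinePointMap). The *_config_path / *_ckpt_path entries point to the
# sub-model configs and checkpoints for the ss (sparse-structure) and slat
# (structured-latent) generators and decoders; the relative paths are presumably
# resolved against the pipeline's checkpoint directory at load time.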
_target_: sam3d_objects.pipeline.inference_pipeline_pointmap.InferencePipelinePointMap
ss_generator_config_path: ss_generator.yaml
ss_generator_ckpt_path: ss_generator.ckpt
slat_generator_config_path: slat_generator.yaml
slat_generator_ckpt_path: slat_generator.ckpt
ss_decoder_config_path: ss_decoder.yaml
ss_decoder_ckpt_path: ss_decoder.ckpt
slat_decoder_gs_config_path: slat_decoder_gs.yaml
slat_decoder_gs_ckpt_path: slat_decoder_gs.ckpt
slat_decoder_gs_4_config_path: slat_decoder_gs_4.yaml
slat_decoder_gs_4_ckpt_path: slat_decoder_gs_4.ckpt
slat_decoder_mesh_config_path: slat_decoder_mesh.yaml
slat_decoder_mesh_ckpt_path: slat_decoder_mesh.ckpt
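# Runtime settings. dtype: float16 selects half-precision inference, and compile_model: true
# presumably wraps the models in torch.compile. slat_cfg_strength and slat_rescale_t are
# assumed to be the classifier-free-guidance scale and timestep rescaling used when
# sampling the slat generator.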
pad_size: 1.0
dtype: float16
version: 3dfy_v9
slat_cfg_strength: 1
slat_rescale_t: 1
downsample_ss_dist: 1
compile_model: true
ss_condition_input_mapping: []
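# Preprocessing for the ss stage. The joint transforms bring image, mask and pointmap to a
# common size and crop around the object mask (box_size_factor: 1.2 presumably enlarges the
# crop box by 20%); each branch is then padded to a square and resized to 518 px, which
# matches the DINOv2 ViT-L/14 input resolution (an assumption based on the value alone).
# interpolation: 0 is torchvision's legacy code for nearest-neighbor, so mask and pointmap
# values are not blended during resizing. The pointmap is normalized by ObjectCentricSSI,
# presumably an object-centric scale-and-shift-invariant normalization.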
ss_preprocessor:
  _target_: sam3d_objects.data.dataset.tdfy.preprocessor.PreProcessor
  img_mask_joint_transform: []
  img_mask_pointmap_joint_transform:
  - _partial_: true
    _target_: sam3d_objects.data.dataset.tdfy.img_and_mask_transforms.resize_all_to_same_size
  - _partial_: true
    _target_: sam3d_objects.data.dataset.tdfy.img_and_mask_transforms.crop_around_mask_with_padding
    box_size_factor: 1.2
    padding_factor: 0.0
  img_transform:
    _target_: torchvision.transforms.Compose
    transforms:
    - _partial_: true
      _target_: sam3d_objects.data.dataset.tdfy.img_processing.pad_to_square_centered
    - _target_: torchvision.transforms.Resize
      size: 518
  mask_transform:
    _target_: torchvision.transforms.Compose
    transforms:
    - _partial_: true
      _target_: sam3d_objects.data.dataset.tdfy.img_processing.pad_to_square_centered
    - _target_: torchvision.transforms.Resize
      interpolation: 0
      size: 518
  normalize_pointmap: true
  pointmap_normalizer:
    _target_: sam3d_objects.data.dataset.tdfy.img_and_mask_transforms.ObjectCentricSSI
    allow_scale_and_shift_override: true
    use_scene_scale: true
  pointmap_transform:
    _target_: torchvision.transforms.Compose
    transforms:
    - _partial_: true
      _target_: sam3d_objects.data.dataset.tdfy.img_processing.pad_to_square_centered
    - _target_: torchvision.transforms.Resize
      interpolation: 0
      size: 518
pose_decoder_name: ScaleShiftInvariant
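# Monocular geometry model: MoGe ViT-L, loaded via from_pretrained from the
# Ruicheng/moge-vitl model id, presumably used to predict the pointmap that
# conditions the pipeline when no measured depth is supplied.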
depth_model:
  _target_: sam3d_objects.pipeline.depth_models.moge.MoGe
  model:
    _target_: moge.model.v1.MoGeModel.from_pretrained
    pretrained_model_name_or_path: Ruicheng/moge-vitl
slat_condition_input_mapping: []
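# Preprocessing for the slat stage: the same crop-around-mask, pad-to-square and 518 px
# resize as above, but without the pointmap branch.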
slat_preprocessor:
  _target_: sam3d_objects.data.dataset.tdfy.preprocessor.PreProcessor
  img_transform:
    _target_: torchvision.transforms.Compose
    transforms:
    - _target_: sam3d_objects.data.dataset.tdfy.img_processing.pad_to_square_centered
      _partial_: true
    - _target_: torchvision.transforms.Resize
      size: 518
  mask_transform:
    _target_: torchvision.transforms.Compose
    transforms:
    - _target_: sam3d_objects.data.dataset.tdfy.img_processing.pad_to_square_centered
      _partial_: true
    - _target_: torchvision.transforms.Resize
      size: 518
      interpolation: 0
  img_mask_joint_transform:
  - _target_: sam3d_objects.data.dataset.tdfy.img_and_mask_transforms.crop_around_mask_with_padding
    _partial_: true
    box_size_factor: 1.2
    padding_factor: 0.0
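# Per-channel mean/std over the 8 latent channels, presumably used to normalize the
# structured latents for the generator and de-normalize its outputs.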
slat_mean:
- 0.12211431
- 0.37204156
- -1.26521907
- -2.05276058
- -3.10432536
- -0.11294304
- -0.85146744
- 0.45506954
slat_std:
- 2.37326008
- 2.13174402
- 2.2413953
- 2.30589401
- 2.1191894
- 1.8969511
- 2.41684989
- 2.08374642