---
# ============================================================
# Pretrain on all data with all losses
# ============================================================
# NOTE(review): this file was recovered from a whitespace-mangled copy;
# nesting below is reconstructed from key order and convention. Spots
# where the nesting could not be proven are marked NOTE(review).

# Experiment general info
name: "FinalOVPretrain"
rng_seed: 42
num_gpu: 1
mode: "warmup"
note: ""
num_views: 32
# Choose keywords to feature your saving directory
naming_keywords: ["dataloader.batchsize", "task", "note", "time"]
base_dir: "results"
exp_dir: ""
save_frequency: 10
resume: false
stage: ['warmup', 'pretrain']
is_pretrain: true

debug:
  flag: false
  debug_size: 20
  hard_debug: false

logger:
  name: "wandb"
  entity: "yem"
  project: "3DReason"

# dataset details
data:
  note: "all"
  # train: ['ScanNetSpatialRefer', 'ARKitSceneSpatialRefer', 'MultiScanSpatialRefer', 'HMSpatialRefer', 'RScanSpatialRefer']
  warmup: ['LSUNSpatialRefer']
  train: ['ScanNetSpatialRefer']
  val: ['ScanNetSpatialRefer']
  test: ['ScanNetSpatialRefer']
  args:
    max_obj_len: 80
    max_seq_len: 50
    num_points: 1024
    pc_type: 'pred'
    # quoted on purpose: '607' is a category-count tag, not an integer
    sem_type: '607'
    filter_lang: false
    txt_mask_ratio: 0.15
    pc_mask_ratio: 0.1
    rot_aug: true
    mask_strategy: 'random'
    use_scene_cap: false
    max_scene_cap_len: 600
    ScanNetSpatialRefer:
      train:
        # sources: ['scanrefer', 'referit3d', 'sgrefer', 'sgcaption']
        sources: ['scannet_view_cap']
        referit3d:
          anno_type: ['sr3d', 'nr3d']
          sr3d_plus_aug: true
        sgrefer:
          anno_type: ['rel2_gpt', 'rel2_template', 'relm_gpt', 'relm_template', 'star_gpt', 'star_template']
        # sgcaption:
        #   anno_type: ['gpt', 'template']
      val:
        sources: ['scannet_view_cap']
        referit3d:
          anno_type: ['sr3d']  # 'nr3d', 'sr3d'
          sr3d_plus_aug: false
        sgrefer:
          anno_type: ['template']  # 'template', 'gpt_chain'
        sgcaption:
          anno_type: ['gpt']
      test:
        sources: ['scannet_view_cap']
        referit3d:
          anno_type: ['sr3d']  # 'nr3d', 'sr3d'
          sr3d_plus_aug: false
        sgrefer:
          anno_type: ['template']  # 'template', 'gpt', 'gpt_chain'
        sgcaption:
          anno_type: ['gpt']
    LSUNSpatialRefer:
      warmup:
        sources: ['lsun_view_cap']
    RScanSpatialRefer:
      train:
        sources: ['3rscan_view_cap']
    MultiScanSpatialRefer:
      train:
        sources: ['anno', 'rel2_template', 'rel2_gpt', 'relm_template', 'relm_gpt', 'star_template', 'star_gpt']
      val:
        sources: ['anno', 'rel2_template', 'relm_gpt', 'relm_template', 'star_template', 'star_gpt']
      test:
        sources: ['anno', 'rel2_template', 'relm_gpt', 'relm_template', 'star_template', 'star_gpt']
    ARKitSceneSpatialRefer:
      train:
        sources: ['arkitscenes_view_cap']
    HMSpatialRefer:
      train:
        sources: ['anno', 'rel2_template', 'rel2_gpt', 'relm_template', 'relm_gpt', 'star_template', 'star_gpt']
      val:
        sources: ['anno', 'rel2_template', 'relm_gpt', 'relm_template', 'star_template']
      test:
        sources: ['anno', 'rel2_template', 'relm_gpt', 'relm_template', 'star_template']
    # NOTE(review): placed under data.args based on key order — confirm against the dataset loader
    use_voxel: false
  # dataset root paths
  # NOTE(review): placed under data (sibling of args) — confirm against the dataset loader
  scan_family_base: "../PointMapVerse/existing_datasets/ScanNet"
  lsun_base: "../PointMapVerse/existing_datasets/lsun"
  rscan_base: "../PointMapVerse/existing_datasets/3RScan"
  arkitscene_base: '../PointMapVerse/existing_datasets/Arkitscenes'
  multiscan_base: '../PointMapVerse/existing_datasets/MultiScan'
  hm_base: '../PointMapVerse/existing_datasets/HM3D'
  procthor_base: '../PointMapVerse/existing_datasets/ProcThor'
  s3d_base: '../PointMapVerse/existing_datasets/Structured3D'
  # NOTE(review): data_aug nested under data based on key order — confirm it is not top-level
  data_aug:
    aug_list: ['scene_aug']
    scene_aug:
      translation:
        enabled: false
        value: [1.0, 1.0, 1.0]
        p: 1.0
      scaling:
        enabled: false
        p: 1.0
        value: [0.9, 1.1]
      flip:
        enabled: false
        p: 0.5
      rotation:
        enabled: true
        p: 1.0
        axis_align: true
        value: [0.0, 0.0, 1.0]
      shuffle: true
      color_jitter: false
      order_shuffle: false
    obj_aug:
      translation:
        enabled: false
        value: [0.1, 0.1, 0.1]
        p: 1.0
      rotation:
        enabled: false
        p: 1.0
        axis_align: false
        value: [0.0, 0.0, 0.1]
      shuffle: true
      random_jitter:
        enabled: false
        value: 0.01
        accord_to_size: false
        p: 1.0
      pts_shuffle: true

# task details: 'Pretrain', 'scanqa', 'spatialrefer'
task: 'Pretrain'

# 'MaskDatasetWrapper', 'ScanFamilyDatasetWrapper', 'MaskMVDatasetWrapper'
data_wrapper:
  warmup: 'SceneDatasetWrapper'
  train: 'SceneDatasetWrapper'
  val: 'SceneDatasetWrapper'
  test: 'SceneDatasetWrapper'

# Training details
trainer: "OpenVocabTrainer"
ckpt_path: ""
pretrain_ckpt_path: ""

# dataloader details
dataloader:
  batchsize: 256
  num_workers: 2
  balance_dataset: false
  filter_empty_annotations: false

solver:
  gradient_accumulation_steps: 1
  epochs_per_save: 20
  epochs_per_eval: 1
  # NOTE(review): PyYAML parses bare `1e-4` as a *string*, not a float (its
  # float resolver requires a dot, e.g. 1.0e-4). Kept as-is in case the
  # consumer relies on that — confirm and switch to 1.0e-4 if a float is wanted.
  lr: 1e-4
  grad_norm: 5.0
  warmup_epochs: 100
  epochs: 100
  optim:
    name: 'AdamW'
    args:
      betas: [0.9, 0.98]
      weight_decay: 0.05
  sched:
    name: 'warmup_cosine'
    args:
      warmup_steps: 500
      minimum_ratio: 0.1

eval:
  train:
    name: 'PretrainEval'
  val:
    name: 'ScanReferEval'
    # NOTE(review): `save` grouped under eval.val based on key order — confirm
    save: false

# Model details
model:
  name: 'OpenVocab'
  language:
    # This part could be further optimized to be using
    # huggingface yaml config files
    name: 'SigLIPLanguageEncoder'
    args:
      weights: 'fg-clip-base'
      # hidden_size: 768
      # num_hidden_layers: 4
      # num_attention_heads: 12
      # type_vocab_size: 2
    lr: 1e-4
  vision:
    name: 'fg-clip-base'
    args:
      backbone: 'pointnet++'
      hidden_size: 768
      freeze: true
      path: 'pretrained_weights/pointnetpp-open-bert'
      num_attention_heads: 12
      spatial_dim: 5
      num_layers: 4
      dim_loc: 6
      dim_feedforward: 2048
      attn_type: 'spatial'
      pairwise_rel_type: 'center'
      use_matmul_label: false
      lang_type: 'bert'
      lang_path: 'pretrained_weights/607_text_embeddings'
    lr: 1e-4
  grounding:
    name: 'UnifiedSpatialCrossEncoderV2'
    args:
      hidden_size: 768
      num_attention_heads: 12
      num_layers: 4
      dim_feedforward: 2048
      dim_loc: 6
    lr: 1e-4
  inter: 'before'
  heads:
    head_list: []
    pretrain_head:
      name: 'OVPretrainHead'
      args:
        hidden_size: 768
        vocab_size: 30522
  # NOTE(review): loss keys grouped under model based on key order — confirm
  loss_type: 'ListLoss'
  loss_list: ['WarmUpPM_loss']
  vis_loss_list: ['WarmUpPM_loss']