---
# NOTE(review): this file arrived with all indentation collapsed; the block
# structure below was reconstructed from key order and naming. The nesting of
# `use_voxel`/`scan_family_base`/`rscan_base` (under `data`) and of the
# `loss_*` keys (under `model`) is inferred — verify against the config loader.

# Experiment general info
name: "OV_ScanQA"
rng_seed: 1234
num_gpu: 1
mode: "train"
note: ""
# Choose keywords to feature your saving directory
naming_keywords: ["dataloader.batchsize", "task", "note", "time"]
base_dir: "results"
exp_dir: ""
save_frequency: 100
resume: false
debug:
  flag: false
  hard_debug: false
  debug_size: 20
logger:
  name: "wandb"
  entity: "yem"

# dataset details
data:
  train: ['ScanNetScanQAOld']
  val: ['ScanNetScanQAOld']
  test: ['ScanNetScanQAOld']
  args:
    max_obj_len: 80
    max_seq_len: 50
    num_points: 1024
    pc_type: 'pred'
    sem_type: '607'
    filter_lang: false
    rot_aug: true
  ScanNetScanQAOld:
    train:
      use_unanswer: true
    val:
      use_unanswer: true
    test:
      use_unanswer: true
      test_file: "test_w_obj"  # or "test_wo_obj"
  use_voxel: false
  scan_family_base: "PointMapVerse/existing_datasets/ScanNet"
  rscan_base: "PointMapVerse/existing_datasets/3RScan"

# task details: 'pretrain', 'scanrefer', 'referit3d', 'scanqa', 'default'
task: 'ScanQA'
data_wrapper:
  train: 'ScanFamilyDatasetWrapperQA'
  val: 'ScanFamilyDatasetWrapperQA'
  test: 'ScanFamilyDatasetWrapperQA'

# Training details
trainer: "DefaultTrainer"
ckpt_path: ""
pretrain_ckpt_path: "/home/m50048399/transfered/ye_project/UniPointMap/results/scanqa_sft_run1_b64_ScanQA_ScanNetScanQAOld_scanqa_sft_run1/2025-10-08-14:59:55.120964/ckpt/best.pth"

# dataloader details
dataloader:
  # This is a per-gpu batchsize
  batchsize: 64
  num_workers: 2
  balance_dataset: false
  filter_empty_annotations: false

solver:
  gradient_accumulation_steps: 1
  epochs_per_save: 100
  epochs_per_eval: 1
  # NOTE(review): YAML 1.1 loaders (e.g. PyYAML) read bare `1e-4` as a STRING,
  # not a float (the 1.1 float tag needs a digit-dot form such as `1.0e-4`).
  # Confirm the consumer casts learning rates, or rewrite them as `1.0e-4`.
  lr: 1e-4
  grad_norm: 5.0
  epochs: 100
  optim:
    name: "AdamW"
    args:
      betas: [0.9, 0.98]
  sched:
    name: "warmup_cosine"
    args:
      warmup_steps: 5000

eval:
  name: "ScanQAEval"
  save: false

# Model details
model:
  name: OpenVocab
  language:
    # This part could be further optimized to be using
    # huggingface yaml config files
    name: "BERTLanguageEncoder"
    args:
      weights: "fg-clip-base"
      hidden_size: 768
      num_hidden_layers: 4
      num_attention_heads: 12
      type_vocab_size: 2
    lr: 1e-4
  vision:
    # name: "pointnet_point_encoder"
    # args:
    #   path: None
    #   freeze: False
    name: 'fg-clip-base'
    args:
      backbone: "pointnet++"
      hidden_size: 768
      freeze: true
      path: 'fg-clip-base'
      num_attention_heads: 12
      spatial_dim: 5
      num_layers: 4
      dim_loc: 6
      dim_feedforward: 2048
      attn_type: spatial
      pairwise_rel_type: 'center'
      use_matmul_label: false
      lang_type: 'bert'
      lang_path: 'pretrained_weights/607_text_embeddings'
    lr: 1e-4
  grounding:
    name: 'UnifiedSpatialCrossEncoderV2'
    args:
      hidden_size: 1024
      num_attention_heads: 16
      num_layers: 4
      dim_feedforward: 2048
      dim_loc: 6
    lr: 1e-4
    inter: before
  heads:
    head_list: ["qa_head"]
    qa_head:
      name: "QAHeadV1"
      args:
        hidden_size: 768
        mlp_size: 256
        glimpse: 1
        flat_out_size: 512
        num_answers: 8864
  loss_type: "ListLoss"
  loss_list:
    - "answer_loss"
    # - 'TextObjWithinBatch'
  vis_loss_list:
    - "answer_loss"
    # - 'TextObjWithinBatch'