---
# backup/configs/final/finetune/msqa_finetune.yaml
# Uploaded by MatchLab via huggingface_hub (commit c94c8c9, verified)
# Experiment general info
name: "OV_MSQA"
rng_seed: 42
num_gpu: 2
mode: "train"
note: ""
# Choose keywords to feature your saving directory
naming_keywords: ["dataloader.batchsize", "task", "note", "time"]
base_dir: "results"
exp_dir: ""
# NOTE(review): units (steps vs. epochs) not stated here — confirm against the trainer
save_frequency: 100
resume: false  # canonical lowercase boolean (was `False`, a YAML 1.1-only form)
debug:
  flag: false
  hard_debug: false
  debug_size: 20  # sample count used when debug mode is on
logger:
  name: "wandb"
  entity: "yem"  # wandb entity (user/team)
# dataset details
data:
  train: ['ScanNetMSQA']
  val: ['ScanNetMSQA']
  test: ['ScanNetMSQA']
  args:
    max_obj_len: 80
    max_seq_len: 50
    num_points: 1024
    pc_type: 'pred'
    sem_type: '607'
    filter_lang: false
    rot_aug: true  # random rotation augmentation
    # Per-dataset, per-split options
    ScanNetMSQA:
      train:
        use_unanswer: true
      val:
        use_unanswer: true
      test:
        use_unanswer: true
    use_voxel: false
    # NOTE(review): dataset roots assumed to live at data.args level
    # (original indentation was lost) — confirm against the dataset loader
    scan_family_base: "PointMapVerse/existing_datasets/ScanNet"
    rscan_base: "PointMapVerse/existing_datasets/3RScan"
# task details: 'pretrain', 'scanrefer', 'referit3d', 'scanqa', 'default'
task: 'MSQA'
data_wrapper:
  train: 'ScanFamilyDatasetWrapperQA'
  val: 'ScanFamilyDatasetWrapperQA'
  test: 'ScanFamilyDatasetWrapperQA'
# Training details
trainer: "DefaultTrainer"
ckpt_path: ""
pretrain_ckpt_path: ""
# dataloader details
dataloader:
  # This is a per-gpu batchsize
  batchsize: 32
  num_workers: 2
  balance_dataset: false
  filter_empty_annotations: false
solver:
  gradient_accumulation_steps: 1
  epochs_per_save: 20
  epochs_per_eval: 1
  # was `1e-4`: YAML 1.1 parsers (e.g. PyYAML) resolve exponent-only forms
  # as strings, not floats — `1.0e-4` is read as a float everywhere
  lr: 1.0e-4
  grad_norm: 5.0  # gradient-clipping max norm
  epochs: 100
  optim:
    name: "AdamW"
    args:
      betas: [0.9, 0.98]
  sched:
    name: "warmup_cosine"
    args:
      warmup_steps: 5000
eval:
  name: "MSQAEval"
  save: false  # NOTE(review): presumably toggles dumping eval outputs — confirm in MSQAEval
# Model details
model:
  name: "OpenVocab"
  language:
    # This part could be further optimized to be using
    # huggingface yaml config files
    name: "BERTLanguageEncoder"
    args:
      weights: "fg-clip-base"
      hidden_size: 768
      num_hidden_layers: 4
      num_attention_heads: 12
      type_vocab_size: 2
      lr: 1.0e-5  # was `1e-5`; dotted form so YAML 1.1 parsers read a float, not a string
  vision:
    # Alternative encoder, kept for reference:
    # name: "pointnet_point_encoder"
    # args:
    #   path: null
    #   freeze: false
    name: 'fg-clip-base'
    args:
      backbone: "SigLIPLanguageEncoder"
      hidden_size: 768
      freeze: true  # backbone weights are frozen during fine-tuning
      path: 'fg-clip-base'
      num_attention_heads: 12
      spatial_dim: 5
      num_layers: 4
      dim_loc: 6
      dim_feedforward: 2048
      attn_type: 'spatial'
      pairwise_rel_type: 'center'
      use_matmul_label: false
      lang_type: 'bert'
      lang_path: 'pretrained_weights/607_text_embeddings'
      lr: 1.0e-4
  grounding:
    name: 'UnifiedSpatialCrossEncoderV2'
    args:
      hidden_size: 768
      num_attention_heads: 12
      num_layers: 4
      dim_feedforward: 2048
      dim_loc: 6
      lr: 1.0e-4
  inter: 'before'
  heads:
    head_list: ["qa_head"]
    qa_head:
      name: "QAHeadV1"
      args:
        hidden_size: 768
        mlp_size: 256
        glimpse: 1
        flat_out_size: 512
        num_answers: 42654  # size of the closed answer vocabulary
  loss_type: "ListLoss"
  loss_list: ["answer_loss"]
  vis_loss_list: ["answer_loss"]