---
# Experiment general info
name: "OV_MSQA"
rng_seed: 42
num_gpu: 2
mode: "train"
note: ""
# Choose keywords to feature your saving directory
naming_keywords: ["dataloader.batchsize", "task", "note", "time"]
base_dir: "results"
exp_dir: ""
save_frequency: 100
resume: false
debug:
  flag: false
  hard_debug: false
  debug_size: 20
logger:
  name: "wandb"
  entity: "yem"
# dataset details
data:
  train: ['ScanNetMSQA']
  val: ['ScanNetMSQA']
  test: ['ScanNetMSQA']
  args:
    max_obj_len: 80
    max_seq_len: 50
    num_points: 1024
    pc_type: 'pred'
    sem_type: '607'
    filter_lang: false
    rot_aug: true
    ScanNetMSQA:
      train:
        use_unanswer: true
      val:
        use_unanswer: true
      test:
        use_unanswer: true
    # NOTE(review): indentation was lost in extraction; use_voxel is assumed to be a
    # sibling of ScanNetMSQA under args (not a per-split key) — confirm against the loader.
    use_voxel: false
    scan_family_base: "PointMapVerse/existing_datasets/ScanNet"
    rscan_base: "PointMapVerse/existing_datasets/3RScan"
# task details: 'pretrain', 'scanrefer', 'referit3d', 'scanqa', 'default'
task: 'MSQA'
data_wrapper:
  train: 'ScanFamilyDatasetWrapperQA'
  val: 'ScanFamilyDatasetWrapperQA'
  test: 'ScanFamilyDatasetWrapperQA'
# Training details
trainer: "DefaultTrainer"
ckpt_path: ""
pretrain_ckpt_path: ""
# dataloader details
dataloader:
  # This is a per-gpu batchsize
  batchsize: 32
  num_workers: 2
  balance_dataset: false
  filter_empty_annotations: false
solver:
  gradient_accumulation_steps: 1
  epochs_per_save: 20
  epochs_per_eval: 1
  # NOTE: PyYAML (YAML 1.1) loads a bare "1e-4" as the STRING '1e-4', not a float
  # (the 1.1 float pattern requires a decimal point). If the consumer expects a
  # float here, write 1.0e-4 instead — verify how the trainer parses this field.
  lr: 1e-4
  grad_norm: 5.0
  epochs: 100
  optim:
    name: "AdamW"
    args:
      betas: [0.9, 0.98]
  sched:
    name: "warmup_cosine"
    args:
      warmup_steps: 5000
eval:
  name: "MSQAEval"
  save: false
# Model details
model:
  name: OpenVocab
  language:
    # This part could be further optimized to be using
    # huggingface yaml config files
    name: "BERTLanguageEncoder"
    args:
      weights: "fg-clip-base"
      hidden_size: 768
      num_hidden_layers: 4
      num_attention_heads: 12
      type_vocab_size: 2
    lr: 1e-5
  vision:
    # name: "pointnet_point_encoder"
    # args:
    #   path: None
    #   freeze: False
    name: 'fg-clip-base'
    args:
      backbone: "SigLIPLanguageEncoder"
      hidden_size: 768
      freeze: true
      path: 'fg-clip-base'
      num_attention_heads: 12
      spatial_dim: 5
      num_layers: 4
      dim_loc: 6
      dim_feedforward: 2048
      attn_type: spatial
      pairwise_rel_type: 'center'
      use_matmul_label: false
      lang_type: 'bert'
      lang_path: 'pretrained_weights/607_text_embeddings'
    lr: 1e-4
  grounding:
    name: 'UnifiedSpatialCrossEncoderV2'
    args:
      hidden_size: 768
      num_attention_heads: 12
      num_layers: 4
      dim_feedforward: 2048
      dim_loc: 6
    lr: 1e-4
  # NOTE(review): original indentation was lost; 'inter' is assumed to be a
  # model-level key (it follows grounding.lr) — confirm against the model code.
  inter: before
  heads:
    head_list: ["qa_head"]
    qa_head:
      name: "QAHeadV1"
      args:
        hidden_size: 768
        mlp_size: 256
        glimpse: 1
        flat_out_size: 512
        num_answers: 42654
  # NOTE(review): loss_type / loss_list / vis_loss_list are assumed to be
  # model-level keys rather than nested under qa_head.args — confirm.
  loss_type: "ListLoss"
  loss_list:
    - "answer_loss"
  vis_loss_list:
    - "answer_loss"