File size: 2,385 Bytes
c94c8c9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
###
# Pretrain on all data with all losses
###

# Experiment general info
name: "FinalOVPretrain"
rng_seed: 42
num_gpu: 1
mode: "pretrain"
note: ""
num_views: 32
# Choose keywords to feature your saving directory
naming_keywords: ["dataloader.batchsize", "task", "note", "time"]
base_dir: "results"
exp_dir: ""
save_frequency: 10

# Resume training from an existing checkpoint (see ckpt_path below)
resume: false

debug:
  flag: false
  # Number of samples kept per dataset when debug.flag is on
  debug_size: 20
  hard_debug: false

# Experiment tracking backend (Weights & Biases)
logger:
  name: "wandb"
  entity: "yem"
  project: "3DReason"

# dataset details
data:
  note: "all"
  # warmup: ['ScanNetSpatialRefer']
  # pretrain: ['RScanSpatialRefer', 'ARKitSceneSpatialRefer']
  pretrain: ['ScanNetSpatialRefer']
  args:
    use_scene_cap: true
    max_scene_cap_len: 600
  ScanNetSpatialRefer:
    pretrain:
      sources: ['scannet_view_cap', 'ssg_ref_total_by_view_full']
      # sgcaption:
      #   anno_type: ['gpt', 'template']
  lsunSpatialRefer:
    warmup:
      sources: ['lsun_view_cap']
  RScanSpatialRefer:
    pretrain:
      sources: ['3rscan_view_cap']
  ARKitSceneSpatialRefer:
    pretrain:
      sources: ['arkitscenes_view_cap']
  # Dataset root directories (relative to the working directory)
  scan_family_base: "../PointMapVerse/existing_datasets/ScanNet"
  lsun_base: "../PointMapVerse/existing_datasets/lsun"
  rscan_base: "../PointMapVerse/existing_datasets/3RScan"
  arkitscene_base: '../PointMapVerse/existing_datasets/Arkitscenes'

# task details: 'Pretrain', 'scanqa', 'spatialrefer'
task: 'Pretrain'
data_wrapper:
  pretrain: 'SceneDatasetWrapper'

# Training details
trainer: "OpenVocabTrainer"
ckpt_path: ""
pretrain_ckpt_path: ""
dataloader:
  batchsize: 16
  num_workers: 2
  balance_dataset: false
  filter_empty_annotations: false

solver:
  gradient_accumulation_steps: 1
  epochs_per_save: 5
  epochs_per_eval: 5
  # NOTE(review): PyYAML (YAML 1.1) parses `1e-4` as the STRING "1e-4", not a
  # float (the 1.1 float form requires a dot, e.g. 1.0e-4) — confirm the
  # trainer/optimizer converts this value, or it will crash or misbehave.
  lr: 1e-4
  grad_norm: 5.0
  # NOTE(review): warmup_epochs (300) exceeds total epochs (20); the scheduler
  # below also defines warmup_steps — confirm which warmup setting is live.
  warmup_epochs: 300
  epochs: 20
  optim:
    name: 'AdamW'
    args:
      betas: [0.9, 0.98]
      weight_decay: 0.05
  sched:
    name: 'warmup_cosine'
    args:
      warmup_steps: 500
      minimum_ratio: 0.1
eval:
  train:
    name: 'PretrainEval'
  val:
    name: 'ScanReferEval'
  # Whether to dump evaluation predictions to disk
  save: false


# Model details
model:
  name: OpenVocab
  vision:
    name: 'fg-clip-base'
    # NOTE(review): same 1e-4 string-vs-float caveat as solver.lr above.
    lr: 1e-4
  heads:
    head_list: []
    pretrain_head:
      name: 'OVPretrainHead'
      args:
        hidden_size: 768
        # 30522 is the BERT-base uncased vocabulary size — presumably the text
        # tokenizer is BERT-family; verify against the model code.
        vocab_size: 30522
  loss_type: 'ListLoss'
  loss_list: [
    'SceneViewPM_loss'
  ]
  vis_loss_list: [
    'SceneViewPM_loss'
  ]