Upload checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins/checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins
- checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins/checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins/wandb/offline-run-20260125_205640-checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins-run0/files/config.yaml +185 -0
- checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins/checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins/wandb/offline-run-20260125_205640-checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins-run0/files/output.log +184 -0
- checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins/checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins/wandb/offline-run-20260125_205640-checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins-run0/files/wandb-metadata.json +1 -0
checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins/checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins/wandb/offline-run-20260125_205640-checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins-run0/files/config.yaml
ADDED
@@ -0,0 +1,185 @@
+wandb_version: 1
+
+_wandb:
+  desc: null
+  value:
+    python_version: 3.11.10
+    cli_version: 0.23.1
+    framework: huggingface
+    huggingface_version: 4.49.0
+    is_jupyter_run: false
+    is_kaggle_kernel: false
+    start_time: 1769374601
+    t:
+      1:
+      - 1
+      - 5
+      - 11
+      - 41
+      - 49
+      - 53
+      - 71
+      - 105
+      2:
+      - 1
+      - 5
+      - 11
+      - 41
+      - 49
+      - 53
+      - 71
+      - 105
+      3:
+      - 4
+      - 13
+      - 14
+      - 37
+      - 42
+      4: 3.11.10
+      5: 0.23.1
+      6: 4.49.0
+      13: linux-x86_64
+    e:
+      qivwap4wkbv1rrpyy3tc8ok85jcfa4gf:
+        os: Linux-6.6.93+-x86_64-with-glibc2.35
+        python: CPython 3.11.10
+        started_at: '2026-01-25T20:56:40.641636Z'
+        args:
+        - --dataset_config_file
+        - ./data/configs/vlm_gym_counting_mark_all_train_celoss.yaml
+        - --eval_dataset_config_file
+        - ./data/configs/vlm_gym_counting_mark_all_eval_celoss.yaml
+        - --viz_dataset_config_file
+        - ./data/configs/vlm_gym_counting_mark_all_eval_celoss.yaml
+        - --inference_hash_file
+        - /home/clouduser/Code/Github/launch_new/hashes_test_set_v10.json
+        - --task_name
+        - counting-mark_all_v5
+        - --instructions_dir
+        - ./data/instructions
+        - --train_data_dir
+        - /home/clouduser/Code/data/gym/counting-mark_all_v5/train/
+        - --train_jsonl_path
+        - /home/clouduser/Code/data/gym/counting-mark_all_v5/train/
+        - --eval_data_dir
+        - /home/clouduser/Code/data/gym/counting-mark_all_v5/val/
+        - --eval_jsonl_path
+        - /home/clouduser/Code/data/gym/counting-mark_all_v5/val/
+        - --model_path
+        - /home/clouduser/Code/Models/BAGEL-7B-MoT
+        - --layer_module
+        - Qwen2MoTDecoderLayer
+        - --max_latent_size
+        - '64'
+        - --resume-from
+        - /home/clouduser/Code/Models/BAGEL-7B-MoT
+        - --finetune_from_hf
+        - 'True'
+        - --auto_resume
+        - 'False'
+        - --resume-model-only
+        - 'True'
+        - --finetune-from-ema
+        - 'True'
+        - --log_every
+        - '1'
+        - --lr
+        - 2e-5
+        - --warmup_steps
+        - '300'
+        - --lr_scheduler
+        - cosine
+        - --num_worker
+        - '1'
+        - --expected_num_tokens
+        - '30000'
+        - --max_num_tokens
+        - '30000'
+        - --max_num_tokens_per_sample
+        - '30000'
+        - --visual_und
+        - 'True'
+        - --save_every
+        - '2500'
+        - --total_steps
+        - '5000'
+        - --text_cond_dropout_prob
+        - '0.0'
+        - --vae_cond_dropout_prob
+        - '0.3'
+        - --vit_cond_dropout_prob
+        - '0.0'
+        - --ema
+        - '0.993'
+        - --checkpoint_dir
+        - /dev/shm/models/checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins
+        - --wandb_project
+        - bagel
+        - --wandb_name
+        - checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins
+        - --wandb_dir
+        - /dev/shm/models/checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins
+        - --wandb_offline
+        - 'True'
+        program: /home/clouduser/Code/Github/unified_world_model/train/pretrain_unified_navit.py
+        code_path: train/pretrain_unified_navit.py
+        code_path_local: train/pretrain_unified_navit.py
+        git:
+          remote_url: https://github.com/para-lost/unified_world_model
+          commit: 45495bf06d28509bc54cbbda532f4b97404a7d66
+        root: /dev/shm/models/checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins
+        host: junyizhang-launch-new-225900672-1-0
+        executable: /opt/conda/bin/python3.11
+        cpu_count: 48
+        cpu_count_logical: 96
+        gpu_type: NVIDIA A100-SXM4-80GB
+        gpu_count: 8
+        disk:
+          /:
+            total: '1052461830144'
+            used: '354507616256'
+        memory:
+          total: '1437332611072'
+        gpu_nvidia:
+        - name: NVIDIA A100-SXM4-80GB
+          memory_total: '85899345920'
+          cuda_cores: 6912
+          architecture: Ampere
+          uuid: GPU-27013fed-9784-d445-a1eb-01629cf403cc
+        - name: NVIDIA A100-SXM4-80GB
+          memory_total: '85899345920'
+          cuda_cores: 6912
+          architecture: Ampere
+          uuid: GPU-c4922cf6-bc87-9458-c12f-23210cb43686
+        - name: NVIDIA A100-SXM4-80GB
+          memory_total: '85899345920'
+          cuda_cores: 6912
+          architecture: Ampere
+          uuid: GPU-1af9405a-c062-486e-383f-7ea6c6ef5158
+        - name: NVIDIA A100-SXM4-80GB
+          memory_total: '85899345920'
+          cuda_cores: 6912
+          architecture: Ampere
+          uuid: GPU-793b7211-7436-7429-8bd7-cc05be70cc75
+        - name: NVIDIA A100-SXM4-80GB
+          memory_total: '85899345920'
+          cuda_cores: 6912
+          architecture: Ampere
+          uuid: GPU-5eb44009-8d7d-911d-0730-f219cb50498c
+        - name: NVIDIA A100-SXM4-80GB
+          memory_total: '85899345920'
+          cuda_cores: 6912
+          architecture: Ampere
+          uuid: GPU-62c85054-47c8-b915-18e9-e4433fc0f9bb
+        - name: NVIDIA A100-SXM4-80GB
+          memory_total: '85899345920'
+          cuda_cores: 6912
+          architecture: Ampere
+          uuid: GPU-c3b59f2c-b6b6-7730-54ff-8cf5fee4ea9c
+        - name: NVIDIA A100-SXM4-80GB
+          memory_total: '85899345920'
+          cuda_cores: 6912
+          architecture: Ampere
+          uuid: GPU-e988baaf-6bc5-3bb9-91fb-ab2cb214233d
+        cuda_version: '12.2'
+        writer_id: qivwap4wkbv1rrpyy3tc8ok85jcfa4gf
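This run was logged with `--wandb_offline True`, so metrics stayed on the local machine. A minimal sketch of how an offline run directory like this one can be uploaded afterwards, assuming a logged-in `wandb` CLI; the destination project comes from the recorded `--wandb_project bagel` argument:

import subprocess

# Offline run directory contained in this commit (relative to the repo root).
run_dir = (
    "checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins/"
    "checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins/wandb/"
    "offline-run-20260125_205640-checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins-run0"
)

# `wandb sync` replays an offline run to the W&B backend; --project selects the
# destination project. Requires a prior `wandb login`.
subprocess.run(["wandb", "sync", "--project", "bagel", run_dir], check=True)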
checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins/checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins/wandb/offline-run-20260125_205640-checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins-run0/files/output.log
ADDED
@@ -0,0 +1,184 @@
+wandb: Detected [huggingface_hub.inference] in use.
+wandb: Use W&B Weave for improved LLM call tracing. Install Weave with `pip install weave` then add `import weave` to the top of your script.
+wandb: For more information, check out the docs at: https://weave-docs.wandb.ai/
+[2026-01-25 20:56:47] Training arguments TrainingArguments(visual_gen=True, visual_und=True, results_dir='results', checkpoint_dir='/dev/shm/models/checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins', wandb_project='bagel', wandb_name='checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins', wandb_runid='0', wandb_resume='allow', wandb_offline=True, wandb_dir='/dev/shm/models/checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins', global_seed=4396, auto_resume=False, resume_from='/home/clouduser/Code/Models/BAGEL-7B-MoT', resume_model_only=True, finetune_from_ema=True, finetune_from_hf=True, log_every=1, save_every=2500, total_steps=5000, warmup_steps=300, lr_scheduler='cosine', lr=2e-05, min_lr=1e-07, beta1=0.9, beta2=0.95, eps=1e-15, ema=0.993, max_grad_norm=1.0, timestep_shift=1.0, mse_weight=1.0, ce_weight=1.0, ce_loss_reweighting=False, expected_num_tokens=30000, num_replicate=1, num_shard=8, sharding_strategy='HYBRID_SHARD', backward_prefetch='BACKWARD_PRE', cpu_offload=False, freeze_llm=False, freeze_vit=False, freeze_vae=True, freeze_und=False, copy_init_moe=True, use_flex=False, eval_every=500, num_eval_batches=20, use_ema_for_eval=True, eval_log_dir=None, eval_run_tag='', viz_every=500, viz_n=8, viz_outdir='results/viz', eval_dataset_config_file='./data/configs/vlm_gym_counting_mark_all_eval_celoss.yaml', viz_dataset_config_file='./data/configs/vlm_gym_counting_mark_all_eval_celoss.yaml', eval_print_n=3, save_ema_only=True, save_optimizer=False)
+[2026-01-25 20:56:47] Model arguments ModelArguments(model_path='/home/clouduser/Code/Models/BAGEL-7B-MoT', llm_path='hf/Qwen2.5-0.5B-Instruct/', llm_qk_norm=True, tie_word_embeddings=False, layer_module='Qwen2MoTDecoderLayer', vae_path='flux/vae/ae.safetensors', vit_path='hf/siglip-so400m-14-980-flash-attn2-navit/', max_latent_size=64, latent_patch_size=2, vit_patch_size=14, vit_max_num_patch_per_side=70, connector_act='gelu_pytorch_tanh', interpolate_pos=False, vit_select_layer=-2, vit_rope=False, text_cond_dropout_prob=0.0, vae_cond_dropout_prob=0.3, vit_cond_dropout_prob=0.0)
+[2026-01-25 20:56:47] Data arguments DataArguments(dataset_config_file='./data/configs/vlm_gym_counting_mark_all_train_celoss.yaml', train_data_dir='/home/clouduser/Code/data/gym/counting-mark_all_v5/train/', train_jsonl_path='/home/clouduser/Code/data/gym/counting-mark_all_v5/train/', eval_data_dir='/home/clouduser/Code/data/gym/counting-mark_all_v5/val/', eval_jsonl_path='/home/clouduser/Code/data/gym/counting-mark_all_v5/val/', inference_hash_file='/home/clouduser/Code/Github/launch_new/hashes_test_set_v10.json', task_name='counting-mark_all_v5', instructions_dir='./data/instructions', prefetch_factor=2, num_workers=1, max_num_tokens_per_sample=30000, max_num_tokens=30000, prefer_buffer_before=16384, max_buffer_size=50, data_seed=42)
+[2026-01-25 21:01:13] Loading checkpoint from /home/clouduser/Code/Models/BAGEL-7B-MoT.
+[2026-01-25 21:01:25] _IncompatibleKeys(missing_keys=['latent_pos_embed.pos_embed'], unexpected_keys=[])
+[2026-01-25 21:01:38] _IncompatibleKeys(missing_keys=['latent_pos_embed.pos_embed'], unexpected_keys=[])
+[2026-01-25 21:02:29] Training for 5000 steps, starting at 0...
+FullyShardedDataParallel(
+  (_fsdp_wrapped_module): Bagel(
+    (language_model): Qwen2ForCausalLM(
+      (model): Qwen2Model(
+        (embed_tokens): Embedding(152064, 3584)
+        (layers): ModuleList(
+          (0-27): 28 x FullyShardedDataParallel(
+            (_fsdp_wrapped_module): CheckpointWrapper(
+              (_checkpoint_wrapped_module): Qwen2MoTDecoderLayer(
+                (self_attn): PackedAttentionMoT(
+                  (q_proj): Linear(in_features=3584, out_features=3584, bias=True)
+                  (k_proj): Linear(in_features=3584, out_features=512, bias=True)
+                  (v_proj): Linear(in_features=3584, out_features=512, bias=True)
+                  (o_proj): Linear(in_features=3584, out_features=3584, bias=False)
+                  (q_norm): Qwen2RMSNorm((128,), eps=1e-06)
+                  (k_norm): Qwen2RMSNorm((128,), eps=1e-06)
+                  (q_norm_moe_gen): Qwen2RMSNorm((128,), eps=1e-06)
+                  (k_norm_moe_gen): Qwen2RMSNorm((128,), eps=1e-06)
+                  (q_proj_moe_gen): Linear(in_features=3584, out_features=3584, bias=True)
+                  (k_proj_moe_gen): Linear(in_features=3584, out_features=512, bias=True)
+                  (v_proj_moe_gen): Linear(in_features=3584, out_features=512, bias=True)
+                  (o_proj_moe_gen): Linear(in_features=3584, out_features=3584, bias=False)
+                )
+                (mlp): Qwen2MLP(
+                  (gate_proj): Linear(in_features=3584, out_features=18944, bias=False)
+                  (up_proj): Linear(in_features=3584, out_features=18944, bias=False)
+                  (down_proj): Linear(in_features=18944, out_features=3584, bias=False)
+                  (act_fn): SiLU()
+                )
+                (mlp_moe_gen): Qwen2MLP(
+                  (gate_proj): Linear(in_features=3584, out_features=18944, bias=False)
+                  (up_proj): Linear(in_features=3584, out_features=18944, bias=False)
+                  (down_proj): Linear(in_features=18944, out_features=3584, bias=False)
+                  (act_fn): SiLU()
+                )
+                (input_layernorm): Qwen2RMSNorm((3584,), eps=1e-06)
+                (input_layernorm_moe_gen): Qwen2RMSNorm((3584,), eps=1e-06)
+                (post_attention_layernorm): Qwen2RMSNorm((3584,), eps=1e-06)
+                (post_attention_layernorm_moe_gen): Qwen2RMSNorm((3584,), eps=1e-06)
+              )
+            )
+          )
+        )
+        (norm): Qwen2RMSNorm((3584,), eps=1e-06)
+        (norm_moe_gen): Qwen2RMSNorm((3584,), eps=1e-06)
+        (rotary_emb): Qwen2RotaryEmbedding()
+      )
+      (lm_head): Linear(in_features=3584, out_features=152064, bias=False)
+    )
+    (time_embedder): FullyShardedDataParallel(
+      (_fsdp_wrapped_module): TimestepEmbedder(
+        (mlp): Sequential(
+          (0): Linear(in_features=256, out_features=3584, bias=True)
+          (1): SiLU()
+          (2): Linear(in_features=3584, out_features=3584, bias=True)
+        )
+      )
+    )
+    (vae2llm): Linear(in_features=64, out_features=3584, bias=True)
+    (llm2vae): Linear(in_features=3584, out_features=64, bias=True)
+    (latent_pos_embed): FullyShardedDataParallel(
+      (_fsdp_wrapped_module): PositionEmbedding()
+    )
+    (vit_model): SiglipVisionModel(
+      (vision_model): FullyShardedDataParallel(
+        (_fsdp_wrapped_module): SiglipVisionTransformer(
+          (embeddings): SiglipVisionEmbeddings(
+            (position_embedding): Embedding(4900, 1152)
+            (patch_embedding): Linear(in_features=588, out_features=1152, bias=True)
+          )
+          (encoder): SiglipEncoder(
+            (layers): ModuleList(
+              (0-25): 26 x FullyShardedDataParallel(
+                (_fsdp_wrapped_module): CheckpointWrapper(
+                  (_checkpoint_wrapped_module): SiglipEncoderLayer(
+                    (self_attn): SiglipFlashAttention2(
+                      (k_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                      (v_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                      (q_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                      (out_proj): Linear(in_features=1152, out_features=1152, bias=True)
+                    )
+                    (layer_norm1): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+                    (mlp): SiglipMLP(
+                      (activation_fn): PytorchGELUTanh()
+                      (fc1): Linear(in_features=1152, out_features=4304, bias=True)
+                      (fc2): Linear(in_features=4304, out_features=1152, bias=True)
+                    )
+                    (layer_norm2): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+                  )
+                )
+              )
+            )
+          )
+          (post_layernorm): LayerNorm((1152,), eps=1e-06, elementwise_affine=True)
+        )
+      )
+    )
+    (connector): FullyShardedDataParallel(
+      (_fsdp_wrapped_module): CheckpointWrapper(
+        (_checkpoint_wrapped_module): MLPconnector(
+          (activation_fn): PytorchGELUTanh()
+          (fc1): Linear(in_features=1152, out_features=3584, bias=True)
+          (fc2): Linear(in_features=3584, out_features=3584, bias=True)
+        )
+      )
+    )
+    (vit_pos_embed): FullyShardedDataParallel(
+      (_fsdp_wrapped_module): PositionEmbedding()
+    )
+  )
+)
+_flat_param True
+language_model.model.layers.0._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.1._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.2._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.3._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.4._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.5._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.6._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.7._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.8._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.9._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.10._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.11._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.12._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.13._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.14._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.15._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.16._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.17._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.18._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.19._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.20._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.21._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.22._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.23._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.24._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.25._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.26._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+language_model.model.layers.27._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+time_embedder._fsdp_wrapped_module._flat_param True
+latent_pos_embed._fsdp_wrapped_module._flat_param False
+vit_model.vision_model._fsdp_wrapped_module._flat_param True
+vit_model.vision_model._fsdp_wrapped_module.encoder.layers.0._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+vit_model.vision_model._fsdp_wrapped_module.encoder.layers.1._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+vit_model.vision_model._fsdp_wrapped_module.encoder.layers.2._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+vit_model.vision_model._fsdp_wrapped_module.encoder.layers.3._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+vit_model.vision_model._fsdp_wrapped_module.encoder.layers.4._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+vit_model.vision_model._fsdp_wrapped_module.encoder.layers.5._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+vit_model.vision_model._fsdp_wrapped_module.encoder.layers.6._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+vit_model.vision_model._fsdp_wrapped_module.encoder.layers.7._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+vit_model.vision_model._fsdp_wrapped_module.encoder.layers.8._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+vit_model.vision_model._fsdp_wrapped_module.encoder.layers.9._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+vit_model.vision_model._fsdp_wrapped_module.encoder.layers.10._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+vit_model.vision_model._fsdp_wrapped_module.encoder.layers.11._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+vit_model.vision_model._fsdp_wrapped_module.encoder.layers.12._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+vit_model.vision_model._fsdp_wrapped_module.encoder.layers.13._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+vit_model.vision_model._fsdp_wrapped_module.encoder.layers.14._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+vit_model.vision_model._fsdp_wrapped_module.encoder.layers.15._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+vit_model.vision_model._fsdp_wrapped_module.encoder.layers.16._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+vit_model.vision_model._fsdp_wrapped_module.encoder.layers.17._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+vit_model.vision_model._fsdp_wrapped_module.encoder.layers.18._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+vit_model.vision_model._fsdp_wrapped_module.encoder.layers.19._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+vit_model.vision_model._fsdp_wrapped_module.encoder.layers.20._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+vit_model.vision_model._fsdp_wrapped_module.encoder.layers.21._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+vit_model.vision_model._fsdp_wrapped_module.encoder.layers.22._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+vit_model.vision_model._fsdp_wrapped_module.encoder.layers.23._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+vit_model.vision_model._fsdp_wrapped_module.encoder.layers.24._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+vit_model.vision_model._fsdp_wrapped_module.encoder.layers.25._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+connector._fsdp_wrapped_module._checkpoint_wrapped_module._flat_param True
+vit_pos_embed._fsdp_wrapped_module._flat_param False
+Preparing Dataset vlm_gym_counting_mark_all_celoss/vlm_gym_counting_mark_all_train
+base_dir is /dev/shm/models/checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins/eval_used_rows, step_tag is checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins_step0
+Preparing Dataset vlm_gym_counting_mark_all_celoss_evalonce/vlm_gym_counting_mark_all_val
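The module printout above shows the sharding layout: each of the 28 `Qwen2MoTDecoderLayer` blocks and 26 SigLIP encoder layers is its own FSDP unit wrapping a `CheckpointWrapper`, and the TrainingArguments record `sharding_strategy='HYBRID_SHARD'`. A minimal sketch of how stock PyTorch APIs produce this FSDP(CheckpointWrapper(layer)) nesting; the model and layer classes here are stand-ins, not the repo's actual wrapping code:

import functools

import torch.nn as nn
from torch.distributed.algorithms._checkpoint.checkpoint_wrapper import (
    apply_activation_checkpointing,
)
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, ShardingStrategy
from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy


def shard_model(model: nn.Module, layer_cls: type) -> FSDP:
    """Shard `model`, giving each transformer block its own FSDP unit.

    Assumes torch.distributed is already initialized (8 ranks for this run,
    per num_shard=8 in the TrainingArguments).
    """
    # Wrap every instance of `layer_cls` in its own FSDP unit, matching the
    # "28 x FullyShardedDataParallel(...)" entries in the printed tree.
    wrap_policy = functools.partial(
        transformer_auto_wrap_policy, transformer_layer_cls={layer_cls}
    )
    # HYBRID_SHARD: full sharding within a node, replication across nodes.
    fsdp_model = FSDP(
        model,
        auto_wrap_policy=wrap_policy,
        sharding_strategy=ShardingStrategy.HYBRID_SHARD,
    )
    # Recompute each block's activations during backward. Applied after FSDP
    # wrapping, this yields the FSDP(CheckpointWrapper(layer)) nesting and the
    # `_checkpoint_wrapped_module` names seen in the _flat_param listing.
    apply_activation_checkpointing(
        fsdp_model, check_fn=lambda m: isinstance(m, layer_cls)
    )
    return fsdp_model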
checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins/checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins/wandb/offline-run-20260125_205640-checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins-run0/files/wandb-metadata.json
ADDED
@@ -0,0 +1 @@
+{"os": "Linux-6.6.93+-x86_64-with-glibc2.35", "python": "CPython 3.11.10", "started_at": "2026-01-25T20:56:40.641636Z", "args": ["--dataset_config_file", "./data/configs/vlm_gym_counting_mark_all_train_celoss.yaml", "--eval_dataset_config_file", "./data/configs/vlm_gym_counting_mark_all_eval_celoss.yaml", "--viz_dataset_config_file", "./data/configs/vlm_gym_counting_mark_all_eval_celoss.yaml", "--inference_hash_file", "/home/clouduser/Code/Github/launch_new/hashes_test_set_v10.json", "--task_name", "counting-mark_all_v5", "--instructions_dir", "./data/instructions", "--train_data_dir", "/home/clouduser/Code/data/gym/counting-mark_all_v5/train/", "--train_jsonl_path", "/home/clouduser/Code/data/gym/counting-mark_all_v5/train/", "--eval_data_dir", "/home/clouduser/Code/data/gym/counting-mark_all_v5/val/", "--eval_jsonl_path", "/home/clouduser/Code/data/gym/counting-mark_all_v5/val/", "--model_path", "/home/clouduser/Code/Models/BAGEL-7B-MoT", "--layer_module", "Qwen2MoTDecoderLayer", "--max_latent_size", "64", "--resume-from", "/home/clouduser/Code/Models/BAGEL-7B-MoT", "--finetune_from_hf", "True", "--auto_resume", "False", "--resume-model-only", "True", "--finetune-from-ema", "True", "--log_every", "1", "--lr", "2e-5", "--warmup_steps", "300", "--lr_scheduler", "cosine", "--num_worker", "1", "--expected_num_tokens", "30000", "--max_num_tokens", "30000", "--max_num_tokens_per_sample", "30000", "--visual_und", "True", "--save_every", "2500", "--total_steps", "5000", "--text_cond_dropout_prob", "0.0", "--vae_cond_dropout_prob", "0.3", "--vit_cond_dropout_prob", "0.0", "--ema", "0.993", "--checkpoint_dir", "/dev/shm/models/checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins", "--wandb_project", "bagel", "--wandb_name", "checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins", "--wandb_dir", "/dev/shm/models/checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins", "--wandb_offline", "True"], "program": "/home/clouduser/Code/Github/unified_world_model/train/pretrain_unified_navit.py", "code_path": "train/pretrain_unified_navit.py", "code_path_local": "train/pretrain_unified_navit.py", "git": {"remote_url": "https://github.com/para-lost/unified_world_model", "commit": "45495bf06d28509bc54cbbda532f4b97404a7d66"}, "root": "/dev/shm/models/checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins", "host": "junyizhang-launch-new-225900672-1-0", "executable": "/opt/conda/bin/python3.11", "cpu_count": 48, "cpu_count_logical": 96, "gpu_type": "NVIDIA A100-SXM4-80GB", "gpu_count": 8, "disk": {"/": {"total": "1052461830144", "used": "354507616256"}}, "memory": {"total": "1437332611072"}, "gpu_nvidia": [{"name": "NVIDIA A100-SXM4-80GB", "memory_total": "85899345920", "cuda_cores": 6912, "architecture": "Ampere", "uuid": "GPU-27013fed-9784-d445-a1eb-01629cf403cc"}, {"name": "NVIDIA A100-SXM4-80GB", "memory_total": "85899345920", "cuda_cores": 6912, "architecture": "Ampere", "uuid": "GPU-c4922cf6-bc87-9458-c12f-23210cb43686"}, {"name": "NVIDIA A100-SXM4-80GB", "memory_total": "85899345920", "cuda_cores": 6912, "architecture": "Ampere", "uuid": "GPU-1af9405a-c062-486e-383f-7ea6c6ef5158"}, {"name": "NVIDIA A100-SXM4-80GB", "memory_total": "85899345920", "cuda_cores": 6912, "architecture": "Ampere", "uuid": "GPU-793b7211-7436-7429-8bd7-cc05be70cc75"}, {"name": "NVIDIA A100-SXM4-80GB", "memory_total": "85899345920", "cuda_cores": 6912, "architecture": "Ampere", "uuid": "GPU-5eb44009-8d7d-911d-0730-f219cb50498c"}, {"name": "NVIDIA A100-SXM4-80GB", "memory_total": "85899345920", "cuda_cores": 6912, "architecture": "Ampere", "uuid": "GPU-62c85054-47c8-b915-18e9-e4433fc0f9bb"}, {"name": "NVIDIA A100-SXM4-80GB", "memory_total": "85899345920", "cuda_cores": 6912, "architecture": "Ampere", "uuid": "GPU-c3b59f2c-b6b6-7730-54ff-8cf5fee4ea9c"}, {"name": "NVIDIA A100-SXM4-80GB", "memory_total": "85899345920", "cuda_cores": 6912, "architecture": "Ampere", "uuid": "GPU-e988baaf-6bc5-3bb9-91fb-ab2cb214233d"}], "cuda_version": "12.2", "writer_id": "qivwap4wkbv1rrpyy3tc8ok85jcfa4gf"}
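Since `wandb-metadata.json` is a single JSON object, the run environment can be inspected programmatically. A small sketch (path relative to this repo) that prints the recorded hardware and rebuilds the launch command from the `executable`, `program`, and `args` fields:

import json
import shlex
from pathlib import Path

meta_path = Path(
    "checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins/"
    "checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins/wandb/"
    "offline-run-20260125_205640-checkpoints_vlm_gym_counting_mark_all_one_image_lr2e_5_ce_ins-run0/"
    "files/wandb-metadata.json"
)
meta = json.loads(meta_path.read_text())

# Hardware summary; per-GPU memory totals are stored as strings of bytes.
print(meta["host"], meta["gpu_count"], "x", meta["gpu_type"])
for gpu in meta["gpu_nvidia"]:
    print(gpu["uuid"], int(gpu["memory_total"]) // 2**30, "GiB")

# Reconstruct the exact training invocation that produced this run.
print(shlex.join([meta["executable"], meta["program"], *meta["args"]]))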