Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes. See raw diff.
- .gitattributes +71 -72
- 2025.07.11/03.58.46_train_mlp_projector_metaworld/.hydra/config.yaml +124 -0
- 2025.07.11/03.58.46_train_mlp_projector_metaworld/.hydra/hydra.yaml +159 -0
- 2025.07.11/03.58.46_train_mlp_projector_metaworld/.hydra/overrides.yaml +3 -0
- 2025.07.11/03.58.46_train_mlp_projector_metaworld/checkpoints/epoch=0525-val_loss=2.274.ckpt +3 -0
- 2025.07.11/03.58.46_train_mlp_projector_metaworld/checkpoints/epoch=0618-val_loss=2.264.ckpt +3 -0
- 2025.07.11/03.58.46_train_mlp_projector_metaworld/checkpoints/epoch=0795-val_loss=2.277.ckpt +3 -0
- 2025.07.11/03.58.46_train_mlp_projector_metaworld/checkpoints/epoch=0805-val_loss=2.265.ckpt +3 -0
- 2025.07.11/03.58.46_train_mlp_projector_metaworld/checkpoints/epoch=0989-val_loss=2.290.ckpt +3 -0
- 2025.07.11/03.58.46_train_mlp_projector_metaworld/checkpoints/latest.ckpt +3 -0
- 2025.07.11/03.58.46_train_mlp_projector_metaworld/logs.json.txt +3 -0
- 2025.07.11/03.58.46_train_mlp_projector_metaworld/train_mlp_projector_workspace.log +0 -0
- 2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/debug-internal.log +96 -0
- 2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/debug.log +27 -0
- 2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/run-20250711_035849-zfajcmpj/files/config.yaml +228 -0
- 2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/run-20250711_035849-zfajcmpj/files/output.log +1 -0
- 2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/run-20250711_035849-zfajcmpj/files/wandb-metadata.json +54 -0
- 2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/run-20250711_035849-zfajcmpj/files/wandb-summary.json +1 -0
- 2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/run-20250711_035849-zfajcmpj/logs/debug-core.log +14 -0
- 2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/run-20250711_035849-zfajcmpj/logs/debug-internal.log +96 -0
- 2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/run-20250711_035849-zfajcmpj/logs/debug.log +27 -0
- 2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/run-20250711_035849-zfajcmpj/run-zfajcmpj.wandb +3 -0
- 2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/wandb-resume.json +1 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/.hydra/config.yaml +116 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/.hydra/hydra.yaml +156 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/.hydra/overrides.yaml +1 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/README.md +202 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/adapter_config.json +37 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/adapter_model.safetensors +3 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/README.md +202 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/adapter_config.json +37 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/adapter_model.safetensors +3 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/config.json +43 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/merges.txt +0 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/mlp_projector.bin +3 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/optimizer.pt +3 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/rng_state.pth +3 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/scheduler.pt +3 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/special_tokens_map.json +34 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/tokenizer.json +0 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/tokenizer_config.json +155 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/trainer_state.json +0 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/training_args.bin +3 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/vocab.json +0 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/normalizer.pt +3 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/train_llm_workspace.log +5 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/wandb/debug-internal.log +19 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/wandb/debug.log +35 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/wandb/run-20250814_004704-1q0db7hd/files/config.yaml +748 -0
- 2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/wandb/run-20250814_004704-1q0db7hd/files/output.log +0 -0
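The commit title names the upload path. A minimal sketch of how such a folder upload is typically issued with huggingface_hub is shown below; the local folder path and repository id are placeholders, not values recorded in this commit.

```python
# Illustrative sketch of an upload like this commit; folder_path and repo_id are
# placeholders, and repo_type/commit_message are assumptions about how it was run.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="data/outputs",             # local experiment outputs (assumed)
    repo_id="user/llmbc-training-outputs",  # hypothetical target repository
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```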
.gitattributes
CHANGED
@@ -318,75 +318,74 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 2025.07.09/16.45.24_train_llm_lowdim_sweep-v2/wandb/run-20250709_164525-t40cau46/run-t40cau46.wandb filter=lfs diff=lfs merge=lfs -text
 2025.08.14/00.39.25_train_llm_lowdim_sweep-v2/wandb/run-20250814_003926-gdya2cav/run-gdya2cav.wandb filter=lfs diff=lfs merge=lfs -text
 2025.07.17/03.07.22_train_llm_lowdim_sweep-v2/wandb/run-20250717_030723-n8ug3x9r/run-n8ug3x9r.wandb filter=lfs diff=lfs merge=lfs -text
-2025.09.15/03.36.55_train_llmbc_lowdim_sweep-v2/wandb/run-20250915_033727-lrwdqu8w/run-lrwdqu8w.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.09/12.33.01_train_llm_lowdim_box-close-v2/wandb/run-20250909_123305-udgl2cjk/run-udgl2cjk.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.09/12.35.35_train_llm_lowdim_box-close-v2/wandb/run-20250909_123539-r3bg5rf9/run-r3bg5rf9.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.10/00.14.14_train_llm_lowdim_box-close-v2/wandb/run-20250910_001419-w8yrd23w/run-w8yrd23w.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.09/12.55.21_train_llm_lowdim_box-close-v2/wandb/run-20250909_125526-4s25rjh2/run-4s25rjh2.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.09/12.33.10_train_llm_lowdim_sweep-v2/wandb/run-20250909_123314-p9zpqw0o/run-p9zpqw0o.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.09/12.36.27_train_llm_lowdim_sweep-v2/wandb/run-20250909_123631-kabg2vv6/run-kabg2vv6.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.09/12.56.10_train_llm_lowdim_sweep-v2/wandb/run-20250909_125614-0cqjt5lk/run-0cqjt5lk.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.10/00.14.16_train_llm_lowdim_sweep-v2/wandb/run-20250910_001421-q1gpajvi/run-q1gpajvi.wandb filter=lfs diff=lfs merge=lfs -text
+2025.07.11/03.58.46_train_mlp_projector_metaworld/logs.json.txt filter=lfs diff=lfs merge=lfs -text
+2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/run-20250711_035849-zfajcmpj/run-zfajcmpj.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.13/12.31.56_train_llm_lowdim_maze2d-medium-v0/wandb/run-20250913_123157-gq40yeik/run-gq40yeik.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.13/12.33.55_train_llm_lowdim_parking-v0/wandb/run-20250913_123356-bvhs8o85/run-bvhs8o85.wandb filter=lfs diff=lfs merge=lfs -text
+2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/wandb/run-20250814_004704-1q0db7hd/run-1q0db7hd.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.14/09.34.49_train_llm_lowdim_push-v2/wandb/run-20250914_093450-1795j0wo/run-1795j0wo.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.14/09.34.57_train_llm_lowdim_push-back-v2/wandb/run-20250914_093458-pbv2m1gy/run-pbv2m1gy.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.04/02.18.32_train_llmbc_lowdim_sweep-v2/wandb/run-20250904_021848-222pf63x/run-222pf63x.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.04/02.20.59_train_llmbc_lowdim_sweep-v2/wandb/run-20250904_022116-xvflegt3/run-xvflegt3.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.04/07.23.34_train_llmbc_lowdim_sweep-v2/wandb/run-20250904_072350-3fb1dte7/run-3fb1dte7.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.04/08.34.15_train_llmbc_lowdim_sweep-v2/logs.json.txt filter=lfs diff=lfs merge=lfs -text
+2025.09.04/08.34.15_train_llmbc_lowdim_sweep-v2/wandb/run-20250904_083433-vebgsoia/run-vebgsoia.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.04/09.07.48_train_llmbc_lowdim_sweep-v2/logs.json.txt filter=lfs diff=lfs merge=lfs -text
+2025.09.04/09.07.48_train_llmbc_lowdim_sweep-v2/wandb/run-20250904_090804-a1xsgnzn/run-a1xsgnzn.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.06/12.40.51_train_llmbc_lowdim_sweep-v2/logs.json.txt filter=lfs diff=lfs merge=lfs -text
+2025.09.06/12.40.51_train_llmbc_lowdim_sweep-v2/wandb/run-20250906_124108-v3xp8466/run-v3xp8466.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.07/04.40.39_train_llmbc_lowdim_sweep-v2/logs.json.txt filter=lfs diff=lfs merge=lfs -text
+2025.09.07/04.40.39_train_llmbc_lowdim_sweep-v2/wandb/run-20250907_044057-o6d33td1/run-o6d33td1.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.07/16.18.58_train_llmbc_lowdim_sweep-v2/logs.json.txt filter=lfs diff=lfs merge=lfs -text
+2025.09.07/16.18.58_train_llmbc_lowdim_sweep-v2/wandb/run-20250907_161914-rybnv8oq/run-rybnv8oq.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.08/16.33.32_train_llmbc_lowdim_sweep-v2/logs.json.txt filter=lfs diff=lfs merge=lfs -text
+2025.09.08/16.33.32_train_llmbc_lowdim_sweep-v2/wandb/run-20250908_163348-yc8mbt0r/run-yc8mbt0r.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.10/00.53.36_train_llmbc_lowdim_sweep-v2/logs.json.txt filter=lfs diff=lfs merge=lfs -text
+2025.09.10/00.53.36_train_llmbc_lowdim_sweep-v2/wandb/run-20250910_005350-ue0yl2uw/run-ue0yl2uw.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.10/03.30.47_train_llmbc_lowdim_sweep-v2/logs.json.txt filter=lfs diff=lfs merge=lfs -text
+2025.09.10/03.30.47_train_llmbc_lowdim_sweep-v2/wandb/run-20250910_033104-ev2fr4vl/run-ev2fr4vl.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.10/19.55.03_train_llmbc_lowdim_sweep-v2/logs.json.txt filter=lfs diff=lfs merge=lfs -text
+2025.09.10/19.55.03_train_llmbc_lowdim_sweep-v2/wandb/run-20250910_195519-sqfr6eyt/run-sqfr6eyt.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.12/11.37.29_train_llmbc_lowdim_sweep-v2/logs.json.txt filter=lfs diff=lfs merge=lfs -text
+2025.09.12/11.37.29_train_llmbc_lowdim_sweep-v2/wandb/run-20250912_113745-boytd5y5/run-boytd5y5.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.12/18.51.54_train_llmbc_lowdim_sweep-v2/logs.json.txt filter=lfs diff=lfs merge=lfs -text
+2025.09.12/18.51.54_train_llmbc_lowdim_sweep-v2/wandb/run-20250912_185210-2fp9dc4k/run-2fp9dc4k.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.13/00.03.53_train_llmbc_lowdim_sweep-v2/logs.json.txt filter=lfs diff=lfs merge=lfs -text
+2025.09.13/00.03.53_train_llmbc_lowdim_sweep-v2/wandb/run-20250913_000410-kq3tp70u/run-kq3tp70u.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.14/02.07.30_train_llmbc_lowdim_sweep-v2/wandb/run-20250914_020746-d741bnzk/run-d741bnzk.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.14/02.23.47_train_llmbc_lowdim_sweep-v2/logs.json.txt filter=lfs diff=lfs merge=lfs -text
+2025.09.14/02.23.47_train_llmbc_lowdim_sweep-v2/wandb/run-20250914_022404-r6erapwd/run-r6erapwd.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.14/02.37.02_train_llmbc_unet_lowdim_blockpush_lowdim_seed/logs.json.txt filter=lfs diff=lfs merge=lfs -text
+2025.09.14/02.37.02_train_llmbc_unet_lowdim_blockpush_lowdim_seed/wandb/run-20250914_023730-pvhtccw3/run-pvhtccw3.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.14/10.37.22_train_llmbc_lowdim_sweep-v2/wandb/run-20250914_103740-hfxojmy8/run-hfxojmy8.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.14/10.38.44_train_llmbc_lowdim_sweep-v2/logs.json.txt filter=lfs diff=lfs merge=lfs -text
+2025.09.14/10.38.44_train_llmbc_lowdim_sweep-v2/wandb/run-20250914_103901-daw9wrye/run-daw9wrye.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.15/02.49.32_train_llmbc_lowdim_push-v2/wandb/run-20250915_024948-6k5dxyez/run-6k5dxyez.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.15/02.51.01_train_llmbc_lowdim_push-v2/logs.json.txt filter=lfs diff=lfs merge=lfs -text
+2025.09.15/02.51.01_train_llmbc_lowdim_push-v2/wandb/run-20250915_025118-oupposv4/run-oupposv4.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.15/02.51.09_train_llmbc_lowdim_push-v2/logs.json.txt filter=lfs diff=lfs merge=lfs -text
+2025.09.15/02.51.09_train_llmbc_lowdim_push-v2/wandb/run-20250915_025129-spxxx9n6/run-spxxx9n6.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.15/02.51.23_train_llmbc_lowdim_push-v2/logs.json.txt filter=lfs diff=lfs merge=lfs -text
+2025.09.15/02.51.23_train_llmbc_lowdim_push-v2/wandb/run-20250915_025142-4uwwcju2/run-4uwwcju2.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.15/02.54.17_train_llmbc_lowdim_push-v2/logs.json.txt filter=lfs diff=lfs merge=lfs -text
+2025.09.15/02.54.17_train_llmbc_lowdim_push-v2/wandb/run-20250915_025434-08amewlb/run-08amewlb.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.15/02.54.36_train_llmbc_lowdim_push-v2/logs.json.txt filter=lfs diff=lfs merge=lfs -text
+2025.09.15/02.54.36_train_llmbc_lowdim_push-v2/wandb/run-20250915_025453-1hopusp9/run-1hopusp9.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.16/03.42.45_train_llmbc_lowdim_push-v2/logs.json.txt filter=lfs diff=lfs merge=lfs -text
+2025.09.16/03.42.45_train_llmbc_lowdim_push-v2/wandb/run-20250916_034303-sy6wc65f/run-sy6wc65f.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.16/03.43.22_train_llmbc_lowdim_push-v2/logs.json.txt filter=lfs diff=lfs merge=lfs -text
+2025.09.16/03.43.22_train_llmbc_lowdim_push-v2/wandb/run-20250916_034339-lucv67m3/run-lucv67m3.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.16/03.44.26_train_llmbc_lowdim_push-back-v2/logs.json.txt filter=lfs diff=lfs merge=lfs -text
+2025.09.16/03.44.26_train_llmbc_lowdim_push-back-v2/wandb/run-20250916_034440-owa5rai2/run-owa5rai2.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.16/03.45.04_train_llmbc_lowdim_push-back-v2/logs.json.txt filter=lfs diff=lfs merge=lfs -text
+2025.09.16/03.45.04_train_llmbc_lowdim_push-back-v2/wandb/run-20250916_034521-6ldn7but/run-6ldn7but.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.17/00.09.10_train_llmbc_lowdim_push-v2/wandb/run-20250917_000928-1vr5o6h9/run-1vr5o6h9.wandb filter=lfs diff=lfs merge=lfs -text
+2025.09.17/00.09.54_train_llmbc_lowdim_push-v2/wandb/run-20250917_001010-uz3qg79n/run-uz3qg79n.wandb filter=lfs diff=lfs merge=lfs -text
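Every added rule above uses the same git-lfs attribute form: a path pattern followed by `filter=lfs diff=lfs merge=lfs -text`. The helper below is purely illustrative (it is not code from this repository) and shows how one such rule could be appended for a new run artifact.

```python
# Illustrative only: append a git-lfs tracking rule of the form shown in the diff above.
# The function name and its use are assumptions, not code from this repository.
from pathlib import Path

def track_with_lfs(artifact_path: str, gitattributes: str = ".gitattributes") -> None:
    rule = f"{artifact_path} filter=lfs diff=lfs merge=lfs -text\n"
    attrs = Path(gitattributes)
    existing = attrs.read_text() if attrs.exists() else ""
    if rule not in existing:  # avoid writing a duplicate rule
        with attrs.open("a") as f:
            f.write(rule)

track_with_lfs("2025.09.17/00.09.54_train_llmbc_lowdim_push-v2/wandb/run-20250917_001010-uz3qg79n/run-uz3qg79n.wandb")
```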
2025.07.11/03.58.46_train_mlp_projector_metaworld/.hydra/config.yaml
ADDED
@@ -0,0 +1,124 @@
+name: train_mlp_projector
+_target_: llmbc.workspace.train_mlp_projector_workspace.TrainMlpProjectorWorkspace
+obs_dim: ${task.obs_dim}
+action_dim: ${task.action_dim}
+task_name: ${task.name}
+exp_name: default
+model_name: ${llm.name}
+horizon: 1
+n_obs_steps: 1
+n_action_steps: 1
+n_latency_steps: 0
+past_action_visible: false
+llm_translator:
+  _target_: llmbc.translator.llm_translator.LLMTranslator
+  cfg: ${llm}
+  obs_dim: ${task.obs_dim}
+  action_dim: ${task.action_dim}
+  horizon: ${horizon}
+  n_obs_steps: ${n_obs_steps}
+  n_action_steps: ${n_action_steps}
+dataloader:
+  batch_size: 8
+  num_workers: 0
+  shuffle: true
+  pin_memory: false
+  persistent_workers: false
+val_dataloader:
+  batch_size: 8
+  num_workers: 0
+  shuffle: true
+  pin_memory: false
+  persistent_workers: false
+optimizer:
+  _target_: torch.optim.Adam
+  lr: 0.0001
+  betas:
+  - 0.95
+  - 0.999
+  eps: 1.0e-08
+  weight_decay: 1.0e-06
+training:
+  device: cuda
+  seed: 42
+  debug: false
+  resume: true
+  lr_scheduler: cosine
+  lr_warmup_steps: 10
+  num_epochs: 1000
+  gradient_accumulate_every: 32
+  grad_norm_clip: 1
+  enable_normalizer: true
+  checkpoint_every: 1
+  val_every: 1
+  sample_every: 1
+  sample_max_batch: 128
+  max_train_steps: null
+  max_val_steps: null
+  tqdm_interval_sec: 1.0
+logging:
+  project: llm_module_training
+  resume: true
+  mode: online
+  name: ${now:%Y.%m.%d-%H.%M.%S}_${name}_${task_name}
+  tags:
+  - ${name}
+  - ${task_name}
+  - ${exp_name}
+  id: null
+  group: null
+checkpoint:
+  topk:
+    monitor_key: val_loss
+    mode: min
+    k: 5
+    format_str: epoch={epoch:04d}-val_loss={val_loss:.3f}.ckpt
+  save_last_ckpt: true
+  save_last_snapshot: false
+multi_run:
+  run_dir: data/outputs/${now:%Y.%m.%d}/${now:%H.%M.%S}_${name}_${task_name}
+  wandb_name_base: ${now:%Y.%m.%d-%H.%M.%S}_${name}_${task_name}
+task:
+  name: metaworld
+  obs_dim: 9
+  action_dim: 4
+  dataset:
+    _target_: llmbc.dataset.metaworld_lowdim_dataset.MetaworldLowdimDataset
+    data_path: datasets/metaworld-general-split0.02.pt
+    data_path2: datasets/metaworld-split0.02.pt
+    horizon: ${horizon}
+    pad_before: ${eval:'${n_obs_steps}-1'}
+    pad_after: ${eval:'${n_action_steps}-1'}
+    obs_eef_target: true
+    use_manual_normalizer: false
+    val_ratio: 0.05
+    dummy_normalizer: false
+llm:
+  name: HuggingFaceTB/SmolLM2-135M-Instruct
+  model_name: SmolLM2-135M-Instruct
+  use_quantization: false
+  load_from_checkpoint: false
+  adaptor_path: data/outputs/2025.07.05/10.04.26_train_llm_lowdim_PegInsertionSide-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-PegInsertionSide-v1/checkpoint-9744
+  use_orig_model: false
+  use_joint_mlp_projector: true
+  load_from_mlp_projector_checkpoint: false
+  mlp_projector_checkpoint_path: data/outputs/2025.07.05/10.04.26_train_llm_lowdim_PegInsertionSide-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-PegInsertionSide-v1/checkpoint-9744/mlp_projector.bin
+  use_linear_normalizer: false
+  normalizer_checkpoint_path: ''
+  max_length: 100
+  config_target: llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig
+  causal_lm_target: llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM
+  lora_config:
+    r: 32
+    lora_alpha: 64
+    lora_dropout: 0.05
+    bias: none
+    task_type: CAUSAL_LM
+  prompter:
+    _target_: llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter
+    use_joint_mlp_projector: true
+  hydra:
+    job:
+      override_dirname: ${model_name}
+    run:
+      dir: data/outputs/${now:%Y.%m.%d}/${now:%H.%M.%S}_${model_name}
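This .hydra/config.yaml is the resolved training configuration for the MLP-projector run. The snippet below is a rough sketch of how a workspace is commonly launched from a config like this one; the `eval` resolver registration and the `run()` call are assumptions about the training code, which is not part of this upload.

```python
# Hypothetical launcher for a config like the one above. The ${eval:...} resolver
# registration and workspace.run() are assumptions about the training code, which
# is not included in this upload.
import hydra
from omegaconf import OmegaConf

OmegaConf.register_new_resolver("eval", eval, replace=True)  # for ${eval:'${n_obs_steps}-1'}

@hydra.main(version_base="1.2", config_path="llmbc/config",
            config_name="train_mlp_projector_workspace")
def main(cfg):
    OmegaConf.resolve(cfg)                      # materialize ${...} interpolations
    cls = hydra.utils.get_class(cfg._target_)   # llmbc.workspace...TrainMlpProjectorWorkspace
    workspace = cls(cfg)
    workspace.run()

if __name__ == "__main__":
    main()
```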
2025.07.11/03.58.46_train_mlp_projector_metaworld/.hydra/hydra.yaml
ADDED
@@ -0,0 +1,159 @@
+hydra:
+  run:
+    dir: data/outputs/${now:%Y.%m.%d}/${now:%H.%M.%S}_${name}_${task_name}
+  sweep:
+    dir: data/outputs/${now:%Y.%m.%d}/${now:%H.%M.%S}_${name}_${task_name}
+    subdir: ${hydra.job.num}
+  launcher:
+    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params: null
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][HYDRA] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    loggers:
+      logging_example:
+        level: DEBUG
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=RUN
+    task:
+    - llm.load_from_checkpoint=False
+    - llm.load_from_mlp_projector_checkpoint=False
+    - task=metaworld
+  job:
+    name: train_mlp_projector_workspace
+    chdir: null
+    override_dirname: llm.load_from_checkpoint=False,llm.load_from_mlp_projector_checkpoint=False,task=metaworld
+    id: ???
+    num: ???
+    config_name: train_mlp_projector_workspace
+    env_set: {}
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.2.0
+    version_base: '1.2'
+    cwd: /home/chyang/workspace/LLM-BC
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: /home/chyang/workspace/LLM-BC/llmbc/config
+      schema: file
+      provider: main
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /home/chyang/workspace/LLM-BC/data/outputs/2025.07.11/03.58.46_train_mlp_projector_metaworld
+    choices:
+      llm: smollm2-135m-instruct
+      task: metaworld
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: default
+      hydra/hydra_logging: default
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: basic
+      hydra/output: default
+  verbose: false
2025.07.11/03.58.46_train_mlp_projector_metaworld/.hydra/overrides.yaml
ADDED
@@ -0,0 +1,3 @@
+- llm.load_from_checkpoint=False
+- llm.load_from_mlp_projector_checkpoint=False
+- task=metaworld
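These are the command-line overrides Hydra recorded for the run. The sketch below re-creates the same composition with Hydra's compose API; the config path and config name come from the hydra.yaml above, while using `compose()` instead of the original CLI invocation is an assumption made for illustration.

```python
# Sketch: re-compose the run's config with the recorded overrides.
# The actual run went through @hydra.main; the compose API is used here for illustration.
from hydra import compose, initialize

with initialize(version_base="1.2", config_path="llmbc/config"):
    cfg = compose(
        config_name="train_mlp_projector_workspace",  # from hydra.job.config_name above
        overrides=[
            "llm.load_from_checkpoint=False",
            "llm.load_from_mlp_projector_checkpoint=False",
            "task=metaworld",
        ],
    )
```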
2025.07.11/03.58.46_train_mlp_projector_metaworld/checkpoints/epoch=0525-val_loss=2.274.ckpt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5c49149d743e74a37c8d483dad92404e8d0362d5e84494ed13ca83460d5c015
+size 4111106
2025.07.11/03.58.46_train_mlp_projector_metaworld/checkpoints/epoch=0618-val_loss=2.264.ckpt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33faa021148acd8bf97d3191c2a1e55c8eb01e2b7c64a127067702fffd48e090
+size 4111106
2025.07.11/03.58.46_train_mlp_projector_metaworld/checkpoints/epoch=0795-val_loss=2.277.ckpt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e41ab9b951aeca31cd36d88391024db653970cce742209b769fb4e156d178b4f
+size 4111106
2025.07.11/03.58.46_train_mlp_projector_metaworld/checkpoints/epoch=0805-val_loss=2.265.ckpt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4afa8f1719d94b3b8ffef494c9bb89612c8c30f3aad3d23424defd803934387c
+size 4111106
2025.07.11/03.58.46_train_mlp_projector_metaworld/checkpoints/epoch=0989-val_loss=2.290.ckpt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3999ab98a6ef744178c866f28773e600cbc5026b4f8e94ab2bc26c15b6770d9e
+size 4111106
2025.07.11/03.58.46_train_mlp_projector_metaworld/checkpoints/latest.ckpt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e7daa9b5c6eefbfc37aa8902e01406aa654f2d099855715afe079dd6ac68a4b8
+size 4111106
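The checkpoint entries above are stored as three-line git-lfs pointer stubs (version, oid, size) rather than the binaries themselves. The following helper is an illustrative parser for that pointer format, not code from this repository.

```python
# Illustrative helper (not from this repo): read the version/oid/size fields of a
# git-lfs pointer file such as checkpoints/latest.ckpt above.
def read_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields

# e.g. read_lfs_pointer(".../checkpoints/latest.ckpt")
# -> {'version': 'https://git-lfs.github.com/spec/v1', 'oid': 'sha256:e7daa9...', 'size': '4111106'}
```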
2025.07.11/03.58.46_train_mlp_projector_metaworld/logs.json.txt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b3a1d2df0d4ea0ccfc17e5c86334a053db25357b19def845a120153f7f26836a
+size 58019393
2025.07.11/03.58.46_train_mlp_projector_metaworld/train_mlp_projector_workspace.log
ADDED
File without changes
2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/debug-internal.log
ADDED
@@ -0,0 +1,96 @@
| 1 |
+
{"time":"2025-07-11T03:58:49.407500315+08:00","level":"INFO","msg":"using version","core version":"0.18.6"}
|
| 2 |
+
{"time":"2025-07-11T03:58:49.4075215+08:00","level":"INFO","msg":"created symlink","path":"/home/chyang/workspace/LLM-BC/data/outputs/2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/run-20250711_035849-zfajcmpj/logs/debug-core.log"}
|
| 3 |
+
{"time":"2025-07-11T03:58:49.515242597+08:00","level":"INFO","msg":"created new stream","id":"zfajcmpj"}
|
| 4 |
+
{"time":"2025-07-11T03:58:49.515306896+08:00","level":"INFO","msg":"stream: started","id":"zfajcmpj"}
|
| 5 |
+
{"time":"2025-07-11T03:58:49.515372436+08:00","level":"INFO","msg":"sender: started","stream_id":"zfajcmpj"}
|
| 6 |
+
{"time":"2025-07-11T03:58:49.515343096+08:00","level":"INFO","msg":"writer: Do: started","stream_id":{"value":"zfajcmpj"}}
|
| 7 |
+
{"time":"2025-07-11T03:58:49.515378427+08:00","level":"INFO","msg":"handler: started","stream_id":{"value":"zfajcmpj"}}
|
| 8 |
+
{"time":"2025-07-11T03:58:50.111785423+08:00","level":"INFO","msg":"Starting system monitor"}
|
| 9 |
+
{"time":"2025-07-11T10:19:30.662765079+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:34981->127.0.0.53:53: i/o timeout"}
|
| 10 |
+
{"time":"2025-07-11T10:19:43.096709986+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:34013->127.0.0.53:53: i/o timeout"}
|
| 11 |
+
{"time":"2025-07-11T10:19:50.683315198+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"}
|
| 12 |
+
{"time":"2025-07-11T10:19:57.798368435+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:58052->127.0.0.53:53: i/o timeout"}
|
| 13 |
+
{"time":"2025-07-11T10:20:15.828743949+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:52578->127.0.0.53:53: i/o timeout"}
|
| 14 |
+
{"time":"2025-07-11T10:20:22.719192496+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded"}
|
| 15 |
+
{"time":"2025-07-11T10:20:43.16402417+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:60074->127.0.0.53:53: i/o timeout"}
|
| 16 |
+
{"time":"2025-07-11T10:20:57.651634305+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
|
| 17 |
+
{"time":"2025-07-11T10:21:25.739292077+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:52830->127.0.0.53:53: i/o timeout"}
|
| 18 |
+
{"time":"2025-07-11T10:21:36.639208833+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
|
| 19 |
+
{"time":"2025-07-11T10:22:26.344907838+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
|
| 20 |
+
{"time":"2025-07-11T10:22:35.741140727+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:58829->127.0.0.53:53: i/o timeout"}
|
| 21 |
+
{"time":"2025-07-11T10:23:31.491730215+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
|
| 22 |
+
{"time":"2025-07-11T10:23:45.744178376+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:39036->127.0.0.53:53: i/o timeout"}
|
| 23 |
+
{"time":"2025-07-11T10:24:55.746260548+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:57274->127.0.0.53:53: i/o timeout"}
|
| 24 |
+
{"time":"2025-07-11T10:25:01.493387422+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded"}
|
| 25 |
+
{"time":"2025-07-11T10:26:05.748029554+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:43891->127.0.0.53:53: i/o timeout"}
|
| 26 |
+
{"time":"2025-07-11T10:26:31.494518293+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"}
|
| 27 |
+
{"time":"2025-07-11T10:27:15.088761686+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: server misbehaving"}
|
| 28 |
+
{"time":"2025-07-11T10:28:01.495712857+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded"}
|
| 29 |
+
{"time":"2025-07-11T10:28:25.090345213+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:54163->127.0.0.53:53: i/o timeout"}
|
| 30 |
+
{"time":"2025-07-11T10:29:20.683049743+08:00","level":"WARN","msg":"sender: taking a long time","seconds":600.000348051,"work":"WorkRecord(*service_go_proto.Request_StopStatus); Control(local:true mailbox_slot:\"ykmb1zbab3au\" connection_id:\"127.0.0.1:56358\")"}
|
| 31 |
+
{"time":"2025-07-11T10:29:27.082442683+08:00","level":"WARN","msg":"runwork: taking a long time","seconds":600.00003073,"work":"WorkRecord(*service_go_proto.Request_PartialHistory); Control(local:true connection_id:\"127.0.0.1:56358\")"}
|
| 32 |
+
{"time":"2025-07-11T10:29:30.113553628+08:00","level":"WARN","msg":"runwork: taking a long time","seconds":600.000175569,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 33 |
+
{"time":"2025-07-11T10:29:30.113706301+08:00","level":"WARN","msg":"runwork: taking a long time","seconds":600.000345821,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 34 |
+
{"time":"2025-07-11T10:29:30.115848756+08:00","level":"WARN","msg":"runwork: taking a long time","seconds":600.000949262,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 35 |
+
{"time":"2025-07-11T10:29:30.119005923+08:00","level":"WARN","msg":"runwork: taking a long time","seconds":600.00050725,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 36 |
+
{"time":"2025-07-11T10:29:30.134183378+08:00","level":"WARN","msg":"runwork: taking a long time","seconds":600.00003329,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 37 |
+
{"time":"2025-07-11T10:29:31.496836166+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
|
| 38 |
+
{"time":"2025-07-11T10:29:34.136294928+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:47212->127.0.0.53:53: i/o timeout"}
|
| 39 |
+
{"time":"2025-07-11T10:30:39.503948407+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: server misbehaving"}
|
| 40 |
+
{"time":"2025-07-11T10:31:01.498883211+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
|
| 41 |
+
{"time":"2025-07-11T10:31:47.151814458+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: server misbehaving"}
|
| 42 |
+
{"time":"2025-07-11T10:32:31.500519508+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded"}
|
| 43 |
+
{"time":"2025-07-11T10:32:53.295630902+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: server misbehaving"}
|
| 44 |
+
{"time":"2025-07-11T10:34:01.502180799+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
|
| 45 |
+
{"time":"2025-07-11T10:34:03.224735373+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:35463->127.0.0.53:53: i/o timeout"}
|
| 46 |
+
{"time":"2025-07-11T10:35:13.048211757+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:37851->127.0.0.53:53: i/o timeout"}
|
| 47 |
+
{"time":"2025-07-11T10:35:31.50265147+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded"}
|
| 48 |
+
{"time":"2025-07-11T10:36:18.703729665+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: server misbehaving"}
|
| 49 |
+
{"time":"2025-07-11T10:36:37.135832426+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: server misbehaving"}
|
| 50 |
+
{"time":"2025-07-11T10:37:26.776550454+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:48638->127.0.0.53:53: i/o timeout"}
|
| 51 |
+
{"time":"2025-07-11T10:37:43.279470452+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: server misbehaving"}
|
| 52 |
+
{"time":"2025-07-11T10:38:43.586319317+08:00","level":"INFO","msg":"sender: succeeded after taking longer than expected","seconds":1162.90365191,"work":"WorkRecord(*service_go_proto.Request_StopStatus); Control(local:true mailbox_slot:\"ykmb1zbab3au\" connection_id:\"127.0.0.1:56358\")"}
|
| 53 |
+
{"time":"2025-07-11T10:38:43.586581237+08:00","level":"INFO","msg":"runwork: succeeded after taking longer than expected","seconds":1156.50415708,"work":"WorkRecord(*service_go_proto.Request_PartialHistory); Control(local:true connection_id:\"127.0.0.1:56358\")"}
|
| 54 |
+
{"time":"2025-07-11T10:38:43.586718446+08:00","level":"INFO","msg":"runwork: succeeded after taking longer than expected","seconds":1153.473421569,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 55 |
+
{"time":"2025-07-11T10:38:43.586797174+08:00","level":"INFO","msg":"runwork: succeeded after taking longer than expected","seconds":1153.471875371,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 56 |
+
{"time":"2025-07-11T10:38:43.586784719+08:00","level":"INFO","msg":"runwork: succeeded after taking longer than expected","seconds":1153.473381097,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 57 |
+
{"time":"2025-07-11T10:38:43.586960887+08:00","level":"INFO","msg":"runwork: succeeded after taking longer than expected","seconds":1153.468301722,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 58 |
+
{"time":"2025-07-11T10:38:43.587004666+08:00","level":"INFO","msg":"runwork: succeeded after taking longer than expected","seconds":1153.452813238,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 59 |
+
{"time":"2025-07-11T10:39:43.592291332+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
|
| 60 |
+
{"time":"2025-07-11T10:40:15.763452594+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded"}
|
| 61 |
+
{"time":"2025-07-11T10:40:49.844091828+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
|
| 62 |
+
{"time":"2025-07-11T10:41:29.285662282+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded"}
|
| 63 |
+
{"time":"2025-07-11T10:41:34.255134979+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp 35.186.228.49:443: connect: connection timed out"}
|
| 64 |
+
{"time":"2025-07-11T10:42:16.052045567+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
|
| 65 |
+
{"time":"2025-07-11T10:43:22.42672178+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded"}
|
| 66 |
+
{"time":"2025-07-11T10:44:52.428073385+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded"}
|
| 67 |
+
{"time":"2025-07-11T10:46:22.429729908+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
|
| 68 |
+
{"time":"2025-07-11T10:47:52.431455398+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded"}
|
| 69 |
+
{"time":"2025-07-11T10:49:13.591546149+08:00","level":"WARN","msg":"sender: taking a long time","seconds":600.000310986,"work":"WorkRecord(*service_go_proto.Request_StopStatus); Control(local:true mailbox_slot:\"39u0euc3gdqo\" connection_id:\"127.0.0.1:56358\")"}
|
| 70 |
+
{"time":"2025-07-11T10:49:19.991448173+08:00","level":"WARN","msg":"runwork: taking a long time","seconds":600.000419627,"work":"WorkRecord(*service_go_proto.Request_PartialHistory); Control(local:true connection_id:\"127.0.0.1:56358\")"}
|
| 71 |
+
{"time":"2025-07-11T10:49:20.113476668+08:00","level":"WARN","msg":"runwork: taking a long time","seconds":600.000669704,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 72 |
+
{"time":"2025-07-11T10:49:20.113666718+08:00","level":"WARN","msg":"runwork: taking a long time","seconds":600.000556595,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 73 |
+
{"time":"2025-07-11T10:49:20.115777548+08:00","level":"WARN","msg":"runwork: taking a long time","seconds":600.000262324,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 74 |
+
{"time":"2025-07-11T10:49:20.119884323+08:00","level":"WARN","msg":"runwork: taking a long time","seconds":600.000767915,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 75 |
+
{"time":"2025-07-11T10:49:20.134071176+08:00","level":"WARN","msg":"runwork: taking a long time","seconds":600.001047034,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 76 |
+
{"time":"2025-07-11T10:49:22.432201148+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
|
| 77 |
+
{"time":"2025-07-11T10:50:52.434457185+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
|
| 78 |
+
{"time":"2025-07-11T10:52:22.436283572+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"}
|
| 79 |
+
{"time":"2025-07-11T10:53:52.43745377+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"}
|
| 80 |
+
{"time":"2025-07-11T10:54:52.979170008+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": read tcp 192.168.11.41:32904->35.186.228.49:443: read: connection timed out"}
|
| 81 |
+
{"time":"2025-07-11T10:55:53.226144695+08:00","level":"INFO","msg":"sender: succeeded after taking longer than expected","seconds":999.634939408,"work":"WorkRecord(*service_go_proto.Request_StopStatus); Control(local:true mailbox_slot:\"39u0euc3gdqo\" connection_id:\"127.0.0.1:56358\")"}
|
| 82 |
+
{"time":"2025-07-11T10:55:53.226264085+08:00","level":"INFO","msg":"runwork: succeeded after taking longer than expected","seconds":993.235293513,"work":"WorkRecord(*service_go_proto.Request_PartialHistory); Control(local:true connection_id:\"127.0.0.1:56358\")"}
|
| 83 |
+
{"time":"2025-07-11T10:55:53.226387716+08:00","level":"INFO","msg":"runwork: succeeded after taking longer than expected","seconds":993.113322681,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 84 |
+
{"time":"2025-07-11T10:55:53.226431815+08:00","level":"INFO","msg":"runwork: succeeded after taking longer than expected","seconds":993.110883112,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 85 |
+
{"time":"2025-07-11T10:55:53.226471261+08:00","level":"INFO","msg":"runwork: succeeded after taking longer than expected","seconds":993.113644133,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 86 |
+
{"time":"2025-07-11T10:55:53.226468934+08:00","level":"INFO","msg":"runwork: succeeded after taking longer than expected","seconds":993.107317404,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 87 |
+
{"time":"2025-07-11T10:55:53.226674708+08:00","level":"INFO","msg":"runwork: succeeded after taking longer than expected","seconds":993.093604107,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 88 |
+
{"time":"2025-07-11T19:27:48.228136062+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:58754->127.0.0.53:53: i/o timeout"}
|
| 89 |
+
{"time":"2025-07-11T20:11:16.628499237+08:00","level":"INFO","msg":"stream: closing","id":"zfajcmpj"}
|
| 90 |
+
{"time":"2025-07-11T20:11:16.628550137+08:00","level":"INFO","msg":"Stopping system monitor"}
|
| 91 |
+
{"time":"2025-07-11T20:11:16.62954204+08:00","level":"INFO","msg":"Stopped system monitor"}
|
| 92 |
+
{"time":"2025-07-11T20:11:17.552805733+08:00","level":"INFO","msg":"fileTransfer: Close: file transfer manager closed"}
|
| 93 |
+
{"time":"2025-07-11T20:11:17.843551844+08:00","level":"INFO","msg":"handler: closed","stream_id":{"value":"zfajcmpj"}}
|
| 94 |
+
{"time":"2025-07-11T20:11:17.843631936+08:00","level":"INFO","msg":"sender: closed","stream_id":"zfajcmpj"}
|
| 95 |
+
{"time":"2025-07-11T20:11:17.843630871+08:00","level":"INFO","msg":"writer: Close: closed","stream_id":{"value":"zfajcmpj"}}
|
| 96 |
+
{"time":"2025-07-11T20:11:17.843761743+08:00","level":"INFO","msg":"stream: closed","id":"zfajcmpj"}
|
2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/debug.log
ADDED
@@ -0,0 +1,27 @@
| 1 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_setup.py:_flush():79] Current SDK version is 0.18.6
|
| 2 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_setup.py:_flush():79] Configure stats pid to 3404262
|
| 3 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_setup.py:_flush():79] Loading settings from /home/chyang/.config/wandb/settings
|
| 4 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_setup.py:_flush():79] Loading settings from /home/chyang/workspace/LLM-BC/wandb/settings
|
| 5 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_setup.py:_flush():79] Loading settings from environment variables: {}
|
| 6 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_setup.py:_flush():79] Applying setup settings: {'mode': 'online', '_disable_service': None}
|
| 7 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_setup.py:_flush():79] Inferring run settings from compute environment: {'program_relpath': 'llmbc/workspace/train_mlp_projector_workspace.py', 'program_abspath': '/home/chyang/workspace/LLM-BC/llmbc/workspace/train_mlp_projector_workspace.py', 'program': '/home/chyang/workspace/LLM-BC/./llmbc/workspace/train_mlp_projector_workspace.py'}
|
| 8 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_setup.py:_flush():79] Applying login settings: {}
|
| 9 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_init.py:_log_setup():533] Logging user logs to /home/chyang/workspace/LLM-BC/data/outputs/2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/run-20250711_035849-zfajcmpj/logs/debug.log
|
| 10 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_init.py:_log_setup():534] Logging internal logs to /home/chyang/workspace/LLM-BC/data/outputs/2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/run-20250711_035849-zfajcmpj/logs/debug-internal.log
|
| 11 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_init.py:init():619] calling init triggers
|
| 12 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_init.py:init():626] wandb.init called with sweep_config: {}
|
| 13 |
+
config: {'name': 'train_mlp_projector', '_target_': 'llmbc.workspace.train_mlp_projector_workspace.TrainMlpProjectorWorkspace', 'obs_dim': 9, 'action_dim': 4, 'task_name': 'metaworld', 'exp_name': 'default', 'model_name': 'HuggingFaceTB/SmolLM2-135M-Instruct', 'horizon': 1, 'n_obs_steps': 1, 'n_action_steps': 1, 'n_latency_steps': 0, 'past_action_visible': False, 'llm_translator': {'_target_': 'llmbc.translator.llm_translator.LLMTranslator', 'cfg': {'name': 'HuggingFaceTB/SmolLM2-135M-Instruct', 'model_name': 'SmolLM2-135M-Instruct', 'use_quantization': False, 'load_from_checkpoint': False, 'adaptor_path': 'data/outputs/2025.07.05/10.04.26_train_llm_lowdim_PegInsertionSide-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-PegInsertionSide-v1/checkpoint-9744', 'use_orig_model': False, 'use_joint_mlp_projector': True, 'load_from_mlp_projector_checkpoint': False, 'mlp_projector_checkpoint_path': 'data/outputs/2025.07.05/10.04.26_train_llm_lowdim_PegInsertionSide-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-PegInsertionSide-v1/checkpoint-9744/mlp_projector.bin', 'use_linear_normalizer': False, 'normalizer_checkpoint_path': '', 'max_length': 100, 'config_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig', 'causal_lm_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM', 'lora_config': {'r': 32, 'lora_alpha': 64, 'lora_dropout': 0.05, 'bias': 'none', 'task_type': 'CAUSAL_LM'}, 'prompter': {'_target_': 'llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter', 'use_joint_mlp_projector': True}, 'hydra': {'job': {'override_dirname': 'HuggingFaceTB/SmolLM2-135M-Instruct'}, 'run': {'dir': 'data/outputs/2025.07.11/03.58.46_HuggingFaceTB/SmolLM2-135M-Instruct'}}}, 'obs_dim': 9, 'action_dim': 4, 'horizon': 1, 'n_obs_steps': 1, 'n_action_steps': 1}, 'dataloader': {'batch_size': 8, 'num_workers': 0, 'shuffle': True, 'pin_memory': False, 'persistent_workers': False}, 'val_dataloader': {'batch_size': 8, 'num_workers': 0, 'shuffle': True, 'pin_memory': False, 'persistent_workers': False}, 'optimizer': {'_target_': 'torch.optim.Adam', 'lr': 0.0001, 'betas': [0.95, 0.999], 'eps': 1e-08, 'weight_decay': 1e-06}, 'training': {'device': 'cuda', 'seed': 42, 'debug': False, 'resume': True, 'lr_scheduler': 'cosine', 'lr_warmup_steps': 10, 'num_epochs': 1000, 'gradient_accumulate_every': 32, 'grad_norm_clip': 1, 'enable_normalizer': True, 'checkpoint_every': 1, 'val_every': 1, 'sample_every': 1, 'sample_max_batch': 128, 'max_train_steps': None, 'max_val_steps': None, 'tqdm_interval_sec': 1.0}, 'logging': {'project': 'llm_module_training', 'resume': True, 'mode': 'online', 'name': '2025.07.11-03.58.48_train_mlp_projector_metaworld', 'tags': ['train_mlp_projector', 'metaworld', 'default'], 'id': None, 'group': None}, 'checkpoint': {'topk': {'monitor_key': 'val_loss', 'mode': 'min', 'k': 5, 'format_str': 'epoch={epoch:04d}-val_loss={val_loss:.3f}.ckpt'}, 'save_last_ckpt': True, 'save_last_snapshot': False}, 'multi_run': {'run_dir': 'data/outputs/2025.07.11/03.58.46_train_mlp_projector_metaworld', 'wandb_name_base': '2025.07.11-03.58.48_train_mlp_projector_metaworld'}, 'task': {'name': 'metaworld', 'obs_dim': 9, 'action_dim': 4, 'dataset': {'_target_': 'llmbc.dataset.metaworld_lowdim_dataset.MetaworldLowdimDataset', 'data_path': 'datasets/metaworld-general-split0.02.pt', 'data_path2': 'datasets/metaworld-split0.02.pt', 'horizon': 1, 'pad_before': 0, 'pad_after': 0, 'obs_eef_target': True, 'use_manual_normalizer': False, 'val_ratio': 0.05, 'dummy_normalizer': False}}, 'llm': {'name': 
'HuggingFaceTB/SmolLM2-135M-Instruct', 'model_name': 'SmolLM2-135M-Instruct', 'use_quantization': False, 'load_from_checkpoint': False, 'adaptor_path': 'data/outputs/2025.07.05/10.04.26_train_llm_lowdim_PegInsertionSide-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-PegInsertionSide-v1/checkpoint-9744', 'use_orig_model': False, 'use_joint_mlp_projector': True, 'load_from_mlp_projector_checkpoint': False, 'mlp_projector_checkpoint_path': 'data/outputs/2025.07.05/10.04.26_train_llm_lowdim_PegInsertionSide-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-PegInsertionSide-v1/checkpoint-9744/mlp_projector.bin', 'use_linear_normalizer': False, 'normalizer_checkpoint_path': '', 'max_length': 100, 'config_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig', 'causal_lm_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM', 'lora_config': {'r': 32, 'lora_alpha': 64, 'lora_dropout': 0.05, 'bias': 'none', 'task_type': 'CAUSAL_LM'}, 'prompter': {'_target_': 'llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter', 'use_joint_mlp_projector': True}, 'hydra': {'job': {'override_dirname': 'HuggingFaceTB/SmolLM2-135M-Instruct'}, 'run': {'dir': 'data/outputs/2025.07.11/03.58.46_HuggingFaceTB/SmolLM2-135M-Instruct'}}}}
|
| 14 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_init.py:init():669] starting backend
|
| 15 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_init.py:init():673] sending inform_init request
|
| 16 |
+
2025-07-11 03:58:49,404 INFO MainThread:3404262 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
|
| 17 |
+
2025-07-11 03:58:49,404 INFO MainThread:3404262 [wandb_init.py:init():686] backend started and connected
|
| 18 |
+
2025-07-11 03:58:49,409 INFO MainThread:3404262 [wandb_init.py:init():781] updated telemetry
|
| 19 |
+
2025-07-11 03:58:49,436 INFO MainThread:3404262 [wandb_init.py:init():814] communicating run to backend with 90.0 second timeout
|
| 20 |
+
2025-07-11 03:58:50,108 INFO MainThread:3404262 [wandb_init.py:init():867] starting run threads in backend
|
| 21 |
+
2025-07-11 03:58:50,205 INFO MainThread:3404262 [wandb_run.py:_console_start():2451] atexit reg
|
| 22 |
+
2025-07-11 03:58:50,205 INFO MainThread:3404262 [wandb_run.py:_redirect():2299] redirect: wrap_raw
|
| 23 |
+
2025-07-11 03:58:50,205 INFO MainThread:3404262 [wandb_run.py:_redirect():2364] Wrapping output streams.
|
| 24 |
+
2025-07-11 03:58:50,206 INFO MainThread:3404262 [wandb_run.py:_redirect():2389] Redirects installed.
|
| 25 |
+
2025-07-11 03:58:50,207 INFO MainThread:3404262 [wandb_init.py:init():911] run started, returning control to user process
|
| 26 |
+
2025-07-11 03:58:50,207 INFO MainThread:3404262 [wandb_run.py:_config_callback():1389] config_cb None None {'output_dir': '/home/chyang/workspace/LLM-BC/data/outputs/2025.07.11/03.58.46_train_mlp_projector_metaworld'}
|
| 27 |
+
2025-07-11 20:11:16,628 WARNING MsgRouterThr:3404262 [router.py:message_loop():75] message_loop has been closed
|
2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/run-20250711_035849-zfajcmpj/files/config.yaml
ADDED
|
@@ -0,0 +1,228 @@
| 1 |
+
_target_:
|
| 2 |
+
value: llmbc.workspace.train_mlp_projector_workspace.TrainMlpProjectorWorkspace
|
| 3 |
+
_wandb:
|
| 4 |
+
value:
|
| 5 |
+
cli_version: 0.18.6
|
| 6 |
+
m: []
|
| 7 |
+
python_version: 3.9.21
|
| 8 |
+
t:
|
| 9 |
+
"1":
|
| 10 |
+
- 1
|
| 11 |
+
- 2
|
| 12 |
+
- 3
|
| 13 |
+
- 5
|
| 14 |
+
- 11
|
| 15 |
+
- 12
|
| 16 |
+
- 41
|
| 17 |
+
- 49
|
| 18 |
+
- 50
|
| 19 |
+
- 51
|
| 20 |
+
- 53
|
| 21 |
+
- 55
|
| 22 |
+
- 71
|
| 23 |
+
- 83
|
| 24 |
+
- 98
|
| 25 |
+
"2":
|
| 26 |
+
- 1
|
| 27 |
+
- 2
|
| 28 |
+
- 3
|
| 29 |
+
- 5
|
| 30 |
+
- 11
|
| 31 |
+
- 12
|
| 32 |
+
- 41
|
| 33 |
+
- 49
|
| 34 |
+
- 50
|
| 35 |
+
- 51
|
| 36 |
+
- 53
|
| 37 |
+
- 55
|
| 38 |
+
- 71
|
| 39 |
+
- 83
|
| 40 |
+
- 98
|
| 41 |
+
"3":
|
| 42 |
+
- 13
|
| 43 |
+
- 15
|
| 44 |
+
- 16
|
| 45 |
+
- 23
|
| 46 |
+
- 55
|
| 47 |
+
- 61
|
| 48 |
+
"4": 3.9.21
|
| 49 |
+
"5": 0.18.6
|
| 50 |
+
"6": 4.47.1
|
| 51 |
+
"8":
|
| 52 |
+
- 5
|
| 53 |
+
"12": 0.18.6
|
| 54 |
+
"13": linux-x86_64
|
| 55 |
+
action_dim:
|
| 56 |
+
value: 4
|
| 57 |
+
checkpoint:
|
| 58 |
+
value:
|
| 59 |
+
save_last_ckpt: true
|
| 60 |
+
save_last_snapshot: false
|
| 61 |
+
topk:
|
| 62 |
+
format_str: epoch={epoch:04d}-val_loss={val_loss:.3f}.ckpt
|
| 63 |
+
k: 5
|
| 64 |
+
mode: min
|
| 65 |
+
monitor_key: val_loss
|
| 66 |
+
dataloader:
|
| 67 |
+
value:
|
| 68 |
+
batch_size: 8
|
| 69 |
+
num_workers: 0
|
| 70 |
+
persistent_workers: false
|
| 71 |
+
pin_memory: false
|
| 72 |
+
shuffle: true
|
| 73 |
+
exp_name:
|
| 74 |
+
value: default
|
| 75 |
+
horizon:
|
| 76 |
+
value: 1
|
| 77 |
+
llm:
|
| 78 |
+
value:
|
| 79 |
+
adaptor_path: data/outputs/2025.07.05/10.04.26_train_llm_lowdim_PegInsertionSide-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-PegInsertionSide-v1/checkpoint-9744
|
| 80 |
+
causal_lm_target: llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM
|
| 81 |
+
config_target: llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig
|
| 82 |
+
hydra:
|
| 83 |
+
job:
|
| 84 |
+
override_dirname: HuggingFaceTB/SmolLM2-135M-Instruct
|
| 85 |
+
run:
|
| 86 |
+
dir: data/outputs/2025.07.11/03.58.46_HuggingFaceTB/SmolLM2-135M-Instruct
|
| 87 |
+
load_from_checkpoint: false
|
| 88 |
+
load_from_mlp_projector_checkpoint: false
|
| 89 |
+
lora_config:
|
| 90 |
+
bias: none
|
| 91 |
+
lora_alpha: 64
|
| 92 |
+
lora_dropout: 0.05
|
| 93 |
+
r: 32
|
| 94 |
+
task_type: CAUSAL_LM
|
| 95 |
+
max_length: 100
|
| 96 |
+
mlp_projector_checkpoint_path: data/outputs/2025.07.05/10.04.26_train_llm_lowdim_PegInsertionSide-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-PegInsertionSide-v1/checkpoint-9744/mlp_projector.bin
|
| 97 |
+
model_name: SmolLM2-135M-Instruct
|
| 98 |
+
name: HuggingFaceTB/SmolLM2-135M-Instruct
|
| 99 |
+
normalizer_checkpoint_path: ""
|
| 100 |
+
prompter:
|
| 101 |
+
_target_: llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter
|
| 102 |
+
use_joint_mlp_projector: true
|
| 103 |
+
use_joint_mlp_projector: true
|
| 104 |
+
use_linear_normalizer: false
|
| 105 |
+
use_orig_model: false
|
| 106 |
+
use_quantization: false
|
| 107 |
+
llm_translator:
|
| 108 |
+
value:
|
| 109 |
+
_target_: llmbc.translator.llm_translator.LLMTranslator
|
| 110 |
+
action_dim: 4
|
| 111 |
+
cfg:
|
| 112 |
+
adaptor_path: data/outputs/2025.07.05/10.04.26_train_llm_lowdim_PegInsertionSide-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-PegInsertionSide-v1/checkpoint-9744
|
| 113 |
+
causal_lm_target: llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM
|
| 114 |
+
config_target: llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig
|
| 115 |
+
hydra:
|
| 116 |
+
job:
|
| 117 |
+
override_dirname: HuggingFaceTB/SmolLM2-135M-Instruct
|
| 118 |
+
run:
|
| 119 |
+
dir: data/outputs/2025.07.11/03.58.46_HuggingFaceTB/SmolLM2-135M-Instruct
|
| 120 |
+
load_from_checkpoint: false
|
| 121 |
+
load_from_mlp_projector_checkpoint: false
|
| 122 |
+
lora_config:
|
| 123 |
+
bias: none
|
| 124 |
+
lora_alpha: 64
|
| 125 |
+
lora_dropout: 0.05
|
| 126 |
+
r: 32
|
| 127 |
+
task_type: CAUSAL_LM
|
| 128 |
+
max_length: 100
|
| 129 |
+
mlp_projector_checkpoint_path: data/outputs/2025.07.05/10.04.26_train_llm_lowdim_PegInsertionSide-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-PegInsertionSide-v1/checkpoint-9744/mlp_projector.bin
|
| 130 |
+
model_name: SmolLM2-135M-Instruct
|
| 131 |
+
name: HuggingFaceTB/SmolLM2-135M-Instruct
|
| 132 |
+
normalizer_checkpoint_path: ""
|
| 133 |
+
prompter:
|
| 134 |
+
_target_: llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter
|
| 135 |
+
use_joint_mlp_projector: true
|
| 136 |
+
use_joint_mlp_projector: true
|
| 137 |
+
use_linear_normalizer: false
|
| 138 |
+
use_orig_model: false
|
| 139 |
+
use_quantization: false
|
| 140 |
+
horizon: 1
|
| 141 |
+
n_action_steps: 1
|
| 142 |
+
n_obs_steps: 1
|
| 143 |
+
obs_dim: 9
|
| 144 |
+
logging:
|
| 145 |
+
value:
|
| 146 |
+
group: null
|
| 147 |
+
id: null
|
| 148 |
+
mode: online
|
| 149 |
+
name: 2025.07.11-03.58.48_train_mlp_projector_metaworld
|
| 150 |
+
project: llm_module_training
|
| 151 |
+
resume: true
|
| 152 |
+
tags:
|
| 153 |
+
- train_mlp_projector
|
| 154 |
+
- metaworld
|
| 155 |
+
- default
|
| 156 |
+
model_name:
|
| 157 |
+
value: HuggingFaceTB/SmolLM2-135M-Instruct
|
| 158 |
+
multi_run:
|
| 159 |
+
value:
|
| 160 |
+
run_dir: data/outputs/2025.07.11/03.58.46_train_mlp_projector_metaworld
|
| 161 |
+
wandb_name_base: 2025.07.11-03.58.48_train_mlp_projector_metaworld
|
| 162 |
+
n_action_steps:
|
| 163 |
+
value: 1
|
| 164 |
+
n_latency_steps:
|
| 165 |
+
value: 0
|
| 166 |
+
n_obs_steps:
|
| 167 |
+
value: 1
|
| 168 |
+
name:
|
| 169 |
+
value: train_mlp_projector
|
| 170 |
+
obs_dim:
|
| 171 |
+
value: 9
|
| 172 |
+
optimizer:
|
| 173 |
+
value:
|
| 174 |
+
_target_: torch.optim.Adam
|
| 175 |
+
betas:
|
| 176 |
+
- 0.95
|
| 177 |
+
- 0.999
|
| 178 |
+
eps: 1e-08
|
| 179 |
+
lr: 0.0001
|
| 180 |
+
weight_decay: 1e-06
|
| 181 |
+
output_dir:
|
| 182 |
+
value: /home/chyang/workspace/LLM-BC/data/outputs/2025.07.11/03.58.46_train_mlp_projector_metaworld
|
| 183 |
+
past_action_visible:
|
| 184 |
+
value: false
|
| 185 |
+
task:
|
| 186 |
+
value:
|
| 187 |
+
action_dim: 4
|
| 188 |
+
dataset:
|
| 189 |
+
_target_: llmbc.dataset.metaworld_lowdim_dataset.MetaworldLowdimDataset
|
| 190 |
+
data_path: datasets/metaworld-general-split0.02.pt
|
| 191 |
+
data_path2: datasets/metaworld-split0.02.pt
|
| 192 |
+
dummy_normalizer: false
|
| 193 |
+
horizon: 1
|
| 194 |
+
obs_eef_target: true
|
| 195 |
+
pad_after: 0
|
| 196 |
+
pad_before: 0
|
| 197 |
+
use_manual_normalizer: false
|
| 198 |
+
val_ratio: 0.05
|
| 199 |
+
name: metaworld
|
| 200 |
+
obs_dim: 9
|
| 201 |
+
task_name:
|
| 202 |
+
value: metaworld
|
| 203 |
+
training:
|
| 204 |
+
value:
|
| 205 |
+
checkpoint_every: 1
|
| 206 |
+
debug: false
|
| 207 |
+
device: cuda
|
| 208 |
+
enable_normalizer: true
|
| 209 |
+
grad_norm_clip: 1
|
| 210 |
+
gradient_accumulate_every: 32
|
| 211 |
+
lr_scheduler: cosine
|
| 212 |
+
lr_warmup_steps: 10
|
| 213 |
+
max_train_steps: null
|
| 214 |
+
max_val_steps: null
|
| 215 |
+
num_epochs: 1000
|
| 216 |
+
resume: true
|
| 217 |
+
sample_every: 1
|
| 218 |
+
sample_max_batch: 128
|
| 219 |
+
seed: 42
|
| 220 |
+
tqdm_interval_sec: 1
|
| 221 |
+
val_every: 1
|
| 222 |
+
val_dataloader:
|
| 223 |
+
value:
|
| 224 |
+
batch_size: 8
|
| 225 |
+
num_workers: 0
|
| 226 |
+
persistent_workers: false
|
| 227 |
+
pin_memory: false
|
| 228 |
+
shuffle: true
|
2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/run-20250711_035849-zfajcmpj/files/output.log
ADDED
|
@@ -0,0 +1 @@
| 1 |
+
|
2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/run-20250711_035849-zfajcmpj/files/wandb-metadata.json
ADDED
|
@@ -0,0 +1,54 @@
| 1 |
+
{
|
| 2 |
+
"os": "Linux-5.15.0-101-generic-x86_64-with-glibc2.31",
|
| 3 |
+
"python": "3.9.21",
|
| 4 |
+
"startedAt": "2025-07-10T19:58:49.404819Z",
|
| 5 |
+
"args": [
|
| 6 |
+
"llm.load_from_checkpoint=False",
|
| 7 |
+
"llm.load_from_mlp_projector_checkpoint=False",
|
| 8 |
+
"task=metaworld"
|
| 9 |
+
],
|
| 10 |
+
"program": "/home/chyang/workspace/LLM-BC/./llmbc/workspace/train_mlp_projector_workspace.py",
|
| 11 |
+
"codePath": "llmbc/workspace/train_mlp_projector_workspace.py",
|
| 12 |
+
"git": {
|
| 13 |
+
"remote": "https://github.com/CHYang25/LLM-BC.git",
|
| 14 |
+
"commit": "527828c43adf72e7a6f3f2857ded760d6aaafe6f"
|
| 15 |
+
},
|
| 16 |
+
"email": "chris920325@gmail.com",
|
| 17 |
+
"root": "/home/chyang/workspace/LLM-BC/data/outputs/2025.07.11/03.58.46_train_mlp_projector_metaworld",
|
| 18 |
+
"host": "rllab1015",
|
| 19 |
+
"username": "chyang",
|
| 20 |
+
"executable": "/home/chyang/miniconda3/envs/llm-bc/bin/python3",
|
| 21 |
+
"codePathLocal": "llmbc/workspace/train_mlp_projector_workspace.py",
|
| 22 |
+
"cpu_count": 20,
|
| 23 |
+
"cpu_count_logical": 40,
|
| 24 |
+
"gpu": "NVIDIA GeForce RTX 4090",
|
| 25 |
+
"gpu_count": 2,
|
| 26 |
+
"disk": {
|
| 27 |
+
"/": {
|
| 28 |
+
"total": "1967317549056",
|
| 29 |
+
"used": "1595091705856"
|
| 30 |
+
}
|
| 31 |
+
},
|
| 32 |
+
"memory": {
|
| 33 |
+
"total": "134535372800"
|
| 34 |
+
},
|
| 35 |
+
"cpu": {
|
| 36 |
+
"count": 20,
|
| 37 |
+
"countLogical": 40
|
| 38 |
+
},
|
| 39 |
+
"gpu_nvidia": [
|
| 40 |
+
{
|
| 41 |
+
"name": "NVIDIA GeForce RTX 4090",
|
| 42 |
+
"memoryTotal": "25757220864",
|
| 43 |
+
"cudaCores": 16384,
|
| 44 |
+
"architecture": "Ada"
|
| 45 |
+
},
|
| 46 |
+
{
|
| 47 |
+
"name": "NVIDIA GeForce RTX 4090",
|
| 48 |
+
"memoryTotal": "25757220864",
|
| 49 |
+
"cudaCores": 16384,
|
| 50 |
+
"architecture": "Ada"
|
| 51 |
+
}
|
| 52 |
+
],
|
| 53 |
+
"cudaVersion": "12.2"
|
| 54 |
+
}
|
2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/run-20250711_035849-zfajcmpj/files/wandb-summary.json
ADDED
|
@@ -0,0 +1 @@
| 1 |
+
{"_step":436999,"epoch":999,"_runtime":58347.223713649,"_timestamp":1.7522358766111119e+09,"val_loss":2.6537208557128906,"_wandb":{"runtime":58347},"grad_norm":2.6950809955596924,"train_loss":2.5322251341708464,"lr":1.3250400887621084e-12,"global_step":436999}
|
2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/run-20250711_035849-zfajcmpj/logs/debug-core.log
ADDED
|
@@ -0,0 +1,14 @@
| 1 |
+
{"time":"2025-07-11T03:58:48.802094952+08:00","level":"INFO","msg":"started logging, with flags","port-filename":"/tmp/tmpsa69b4ku/port-3404262.txt","pid":3404262,"debug":false,"disable-analytics":false}
|
| 2 |
+
{"time":"2025-07-11T03:58:48.802117877+08:00","level":"INFO","msg":"FeatureState","shutdownOnParentExitEnabled":false}
|
| 3 |
+
{"time":"2025-07-11T03:58:48.803078444+08:00","level":"INFO","msg":"server is running","addr":{"IP":"127.0.0.1","Port":38279,"Zone":""}}
|
| 4 |
+
{"time":"2025-07-11T03:58:48.803169518+08:00","level":"INFO","msg":"Will exit if parent process dies.","ppid":3404262}
|
| 5 |
+
{"time":"2025-07-11T03:58:48.999953763+08:00","level":"INFO","msg":"connection: ManageConnectionData: new connection created","id":"127.0.0.1:56358"}
|
| 6 |
+
{"time":"2025-07-11T03:58:49.407252096+08:00","level":"INFO","msg":"handleInformInit: received","streamId":"zfajcmpj","id":"127.0.0.1:56358"}
|
| 7 |
+
{"time":"2025-07-11T03:58:49.515318283+08:00","level":"INFO","msg":"handleInformInit: stream started","streamId":"zfajcmpj","id":"127.0.0.1:56358"}
|
| 8 |
+
{"time":"2025-07-11T20:11:16.62836004+08:00","level":"INFO","msg":"handleInformTeardown: server teardown initiated","id":"127.0.0.1:56358"}
|
| 9 |
+
{"time":"2025-07-11T20:11:16.628569115+08:00","level":"INFO","msg":"connection: Close: initiating connection closure","id":"127.0.0.1:56358"}
|
| 10 |
+
{"time":"2025-07-11T20:11:16.62860047+08:00","level":"INFO","msg":"server is shutting down"}
|
| 11 |
+
{"time":"2025-07-11T20:11:16.628673237+08:00","level":"INFO","msg":"connection: Close: connection successfully closed","id":"127.0.0.1:56358"}
|
| 12 |
+
{"time":"2025-07-11T20:11:17.843841508+08:00","level":"INFO","msg":"handleInformTeardown: server shutdown complete","id":"127.0.0.1:56358"}
|
| 13 |
+
{"time":"2025-07-11T20:11:17.843865971+08:00","level":"INFO","msg":"connection: ManageConnectionData: connection closed","id":"127.0.0.1:56358"}
|
| 14 |
+
{"time":"2025-07-11T20:11:17.843879231+08:00","level":"INFO","msg":"server is closed"}
|
2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/run-20250711_035849-zfajcmpj/logs/debug-internal.log
ADDED
|
@@ -0,0 +1,96 @@
| 1 |
+
{"time":"2025-07-11T03:58:49.407500315+08:00","level":"INFO","msg":"using version","core version":"0.18.6"}
|
| 2 |
+
{"time":"2025-07-11T03:58:49.4075215+08:00","level":"INFO","msg":"created symlink","path":"/home/chyang/workspace/LLM-BC/data/outputs/2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/run-20250711_035849-zfajcmpj/logs/debug-core.log"}
|
| 3 |
+
{"time":"2025-07-11T03:58:49.515242597+08:00","level":"INFO","msg":"created new stream","id":"zfajcmpj"}
|
| 4 |
+
{"time":"2025-07-11T03:58:49.515306896+08:00","level":"INFO","msg":"stream: started","id":"zfajcmpj"}
|
| 5 |
+
{"time":"2025-07-11T03:58:49.515372436+08:00","level":"INFO","msg":"sender: started","stream_id":"zfajcmpj"}
|
| 6 |
+
{"time":"2025-07-11T03:58:49.515343096+08:00","level":"INFO","msg":"writer: Do: started","stream_id":{"value":"zfajcmpj"}}
|
| 7 |
+
{"time":"2025-07-11T03:58:49.515378427+08:00","level":"INFO","msg":"handler: started","stream_id":{"value":"zfajcmpj"}}
|
| 8 |
+
{"time":"2025-07-11T03:58:50.111785423+08:00","level":"INFO","msg":"Starting system monitor"}
|
| 9 |
+
{"time":"2025-07-11T10:19:30.662765079+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:34981->127.0.0.53:53: i/o timeout"}
|
| 10 |
+
{"time":"2025-07-11T10:19:43.096709986+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:34013->127.0.0.53:53: i/o timeout"}
|
| 11 |
+
{"time":"2025-07-11T10:19:50.683315198+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"}
|
| 12 |
+
{"time":"2025-07-11T10:19:57.798368435+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:58052->127.0.0.53:53: i/o timeout"}
|
| 13 |
+
{"time":"2025-07-11T10:20:15.828743949+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:52578->127.0.0.53:53: i/o timeout"}
|
| 14 |
+
{"time":"2025-07-11T10:20:22.719192496+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded"}
|
| 15 |
+
{"time":"2025-07-11T10:20:43.16402417+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:60074->127.0.0.53:53: i/o timeout"}
|
| 16 |
+
{"time":"2025-07-11T10:20:57.651634305+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
|
| 17 |
+
{"time":"2025-07-11T10:21:25.739292077+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:52830->127.0.0.53:53: i/o timeout"}
|
| 18 |
+
{"time":"2025-07-11T10:21:36.639208833+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
|
| 19 |
+
{"time":"2025-07-11T10:22:26.344907838+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
|
| 20 |
+
{"time":"2025-07-11T10:22:35.741140727+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:58829->127.0.0.53:53: i/o timeout"}
|
| 21 |
+
{"time":"2025-07-11T10:23:31.491730215+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
|
| 22 |
+
{"time":"2025-07-11T10:23:45.744178376+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:39036->127.0.0.53:53: i/o timeout"}
|
| 23 |
+
{"time":"2025-07-11T10:24:55.746260548+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:57274->127.0.0.53:53: i/o timeout"}
|
| 24 |
+
{"time":"2025-07-11T10:25:01.493387422+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded"}
|
| 25 |
+
{"time":"2025-07-11T10:26:05.748029554+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:43891->127.0.0.53:53: i/o timeout"}
|
| 26 |
+
{"time":"2025-07-11T10:26:31.494518293+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"}
|
| 27 |
+
{"time":"2025-07-11T10:27:15.088761686+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: server misbehaving"}
|
| 28 |
+
{"time":"2025-07-11T10:28:01.495712857+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded"}
|
| 29 |
+
{"time":"2025-07-11T10:28:25.090345213+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:54163->127.0.0.53:53: i/o timeout"}
|
| 30 |
+
{"time":"2025-07-11T10:29:20.683049743+08:00","level":"WARN","msg":"sender: taking a long time","seconds":600.000348051,"work":"WorkRecord(*service_go_proto.Request_StopStatus); Control(local:true mailbox_slot:\"ykmb1zbab3au\" connection_id:\"127.0.0.1:56358\")"}
|
| 31 |
+
{"time":"2025-07-11T10:29:27.082442683+08:00","level":"WARN","msg":"runwork: taking a long time","seconds":600.00003073,"work":"WorkRecord(*service_go_proto.Request_PartialHistory); Control(local:true connection_id:\"127.0.0.1:56358\")"}
|
| 32 |
+
{"time":"2025-07-11T10:29:30.113553628+08:00","level":"WARN","msg":"runwork: taking a long time","seconds":600.000175569,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 33 |
+
{"time":"2025-07-11T10:29:30.113706301+08:00","level":"WARN","msg":"runwork: taking a long time","seconds":600.000345821,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 34 |
+
{"time":"2025-07-11T10:29:30.115848756+08:00","level":"WARN","msg":"runwork: taking a long time","seconds":600.000949262,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 35 |
+
{"time":"2025-07-11T10:29:30.119005923+08:00","level":"WARN","msg":"runwork: taking a long time","seconds":600.00050725,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 36 |
+
{"time":"2025-07-11T10:29:30.134183378+08:00","level":"WARN","msg":"runwork: taking a long time","seconds":600.00003329,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 37 |
+
{"time":"2025-07-11T10:29:31.496836166+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
|
| 38 |
+
{"time":"2025-07-11T10:29:34.136294928+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:47212->127.0.0.53:53: i/o timeout"}
|
| 39 |
+
{"time":"2025-07-11T10:30:39.503948407+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: server misbehaving"}
|
| 40 |
+
{"time":"2025-07-11T10:31:01.498883211+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
|
| 41 |
+
{"time":"2025-07-11T10:31:47.151814458+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: server misbehaving"}
|
| 42 |
+
{"time":"2025-07-11T10:32:31.500519508+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded"}
|
| 43 |
+
{"time":"2025-07-11T10:32:53.295630902+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: server misbehaving"}
|
| 44 |
+
{"time":"2025-07-11T10:34:01.502180799+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
|
| 45 |
+
{"time":"2025-07-11T10:34:03.224735373+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:35463->127.0.0.53:53: i/o timeout"}
|
| 46 |
+
{"time":"2025-07-11T10:35:13.048211757+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:37851->127.0.0.53:53: i/o timeout"}
|
| 47 |
+
{"time":"2025-07-11T10:35:31.50265147+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded"}
|
| 48 |
+
{"time":"2025-07-11T10:36:18.703729665+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: server misbehaving"}
|
| 49 |
+
{"time":"2025-07-11T10:36:37.135832426+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: server misbehaving"}
|
| 50 |
+
{"time":"2025-07-11T10:37:26.776550454+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:48638->127.0.0.53:53: i/o timeout"}
|
| 51 |
+
{"time":"2025-07-11T10:37:43.279470452+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: server misbehaving"}
|
| 52 |
+
{"time":"2025-07-11T10:38:43.586319317+08:00","level":"INFO","msg":"sender: succeeded after taking longer than expected","seconds":1162.90365191,"work":"WorkRecord(*service_go_proto.Request_StopStatus); Control(local:true mailbox_slot:\"ykmb1zbab3au\" connection_id:\"127.0.0.1:56358\")"}
|
| 53 |
+
{"time":"2025-07-11T10:38:43.586581237+08:00","level":"INFO","msg":"runwork: succeeded after taking longer than expected","seconds":1156.50415708,"work":"WorkRecord(*service_go_proto.Request_PartialHistory); Control(local:true connection_id:\"127.0.0.1:56358\")"}
|
| 54 |
+
{"time":"2025-07-11T10:38:43.586718446+08:00","level":"INFO","msg":"runwork: succeeded after taking longer than expected","seconds":1153.473421569,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 55 |
+
{"time":"2025-07-11T10:38:43.586797174+08:00","level":"INFO","msg":"runwork: succeeded after taking longer than expected","seconds":1153.471875371,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 56 |
+
{"time":"2025-07-11T10:38:43.586784719+08:00","level":"INFO","msg":"runwork: succeeded after taking longer than expected","seconds":1153.473381097,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 57 |
+
{"time":"2025-07-11T10:38:43.586960887+08:00","level":"INFO","msg":"runwork: succeeded after taking longer than expected","seconds":1153.468301722,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 58 |
+
{"time":"2025-07-11T10:38:43.587004666+08:00","level":"INFO","msg":"runwork: succeeded after taking longer than expected","seconds":1153.452813238,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 59 |
+
{"time":"2025-07-11T10:39:43.592291332+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
|
| 60 |
+
{"time":"2025-07-11T10:40:15.763452594+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded"}
|
| 61 |
+
{"time":"2025-07-11T10:40:49.844091828+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
|
| 62 |
+
{"time":"2025-07-11T10:41:29.285662282+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded"}
|
| 63 |
+
{"time":"2025-07-11T10:41:34.255134979+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp 35.186.228.49:443: connect: connection timed out"}
|
| 64 |
+
{"time":"2025-07-11T10:42:16.052045567+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
|
| 65 |
+
{"time":"2025-07-11T10:43:22.42672178+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded"}
|
| 66 |
+
{"time":"2025-07-11T10:44:52.428073385+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded"}
|
| 67 |
+
{"time":"2025-07-11T10:46:22.429729908+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
|
| 68 |
+
{"time":"2025-07-11T10:47:52.431455398+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded"}
|
| 69 |
+
{"time":"2025-07-11T10:49:13.591546149+08:00","level":"WARN","msg":"sender: taking a long time","seconds":600.000310986,"work":"WorkRecord(*service_go_proto.Request_StopStatus); Control(local:true mailbox_slot:\"39u0euc3gdqo\" connection_id:\"127.0.0.1:56358\")"}
|
| 70 |
+
{"time":"2025-07-11T10:49:19.991448173+08:00","level":"WARN","msg":"runwork: taking a long time","seconds":600.000419627,"work":"WorkRecord(*service_go_proto.Request_PartialHistory); Control(local:true connection_id:\"127.0.0.1:56358\")"}
|
| 71 |
+
{"time":"2025-07-11T10:49:20.113476668+08:00","level":"WARN","msg":"runwork: taking a long time","seconds":600.000669704,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 72 |
+
{"time":"2025-07-11T10:49:20.113666718+08:00","level":"WARN","msg":"runwork: taking a long time","seconds":600.000556595,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 73 |
+
{"time":"2025-07-11T10:49:20.115777548+08:00","level":"WARN","msg":"runwork: taking a long time","seconds":600.000262324,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 74 |
+
{"time":"2025-07-11T10:49:20.119884323+08:00","level":"WARN","msg":"runwork: taking a long time","seconds":600.000767915,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 75 |
+
{"time":"2025-07-11T10:49:20.134071176+08:00","level":"WARN","msg":"runwork: taking a long time","seconds":600.001047034,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 76 |
+
{"time":"2025-07-11T10:49:22.432201148+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
|
| 77 |
+
{"time":"2025-07-11T10:50:52.434457185+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
|
| 78 |
+
{"time":"2025-07-11T10:52:22.436283572+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"}
|
| 79 |
+
{"time":"2025-07-11T10:53:52.43745377+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"}
|
| 80 |
+
{"time":"2025-07-11T10:54:52.979170008+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": read tcp 192.168.11.41:32904->35.186.228.49:443: read: connection timed out"}
|
| 81 |
+
{"time":"2025-07-11T10:55:53.226144695+08:00","level":"INFO","msg":"sender: succeeded after taking longer than expected","seconds":999.634939408,"work":"WorkRecord(*service_go_proto.Request_StopStatus); Control(local:true mailbox_slot:\"39u0euc3gdqo\" connection_id:\"127.0.0.1:56358\")"}
|
| 82 |
+
{"time":"2025-07-11T10:55:53.226264085+08:00","level":"INFO","msg":"runwork: succeeded after taking longer than expected","seconds":993.235293513,"work":"WorkRecord(*service_go_proto.Request_PartialHistory); Control(local:true connection_id:\"127.0.0.1:56358\")"}
|
| 83 |
+
{"time":"2025-07-11T10:55:53.226387716+08:00","level":"INFO","msg":"runwork: succeeded after taking longer than expected","seconds":993.113322681,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 84 |
+
{"time":"2025-07-11T10:55:53.226431815+08:00","level":"INFO","msg":"runwork: succeeded after taking longer than expected","seconds":993.110883112,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 85 |
+
{"time":"2025-07-11T10:55:53.226471261+08:00","level":"INFO","msg":"runwork: succeeded after taking longer than expected","seconds":993.113644133,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 86 |
+
{"time":"2025-07-11T10:55:53.226468934+08:00","level":"INFO","msg":"runwork: succeeded after taking longer than expected","seconds":993.107317404,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 87 |
+
{"time":"2025-07-11T10:55:53.226674708+08:00","level":"INFO","msg":"runwork: succeeded after taking longer than expected","seconds":993.093604107,"work":"WorkRecord(*service_go_proto.Record_Stats); Control(always_send:true)"}
|
| 88 |
+
{"time":"2025-07-11T19:27:48.228136062+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/zfajcmpj/file_stream\": dial tcp: lookup api.wandb.ai on 127.0.0.53:53: read udp 127.0.0.1:58754->127.0.0.53:53: i/o timeout"}
|
| 89 |
+
{"time":"2025-07-11T20:11:16.628499237+08:00","level":"INFO","msg":"stream: closing","id":"zfajcmpj"}
|
| 90 |
+
{"time":"2025-07-11T20:11:16.628550137+08:00","level":"INFO","msg":"Stopping system monitor"}
|
| 91 |
+
{"time":"2025-07-11T20:11:16.62954204+08:00","level":"INFO","msg":"Stopped system monitor"}
|
| 92 |
+
{"time":"2025-07-11T20:11:17.552805733+08:00","level":"INFO","msg":"fileTransfer: Close: file transfer manager closed"}
|
| 93 |
+
{"time":"2025-07-11T20:11:17.843551844+08:00","level":"INFO","msg":"handler: closed","stream_id":{"value":"zfajcmpj"}}
|
| 94 |
+
{"time":"2025-07-11T20:11:17.843631936+08:00","level":"INFO","msg":"sender: closed","stream_id":"zfajcmpj"}
|
| 95 |
+
{"time":"2025-07-11T20:11:17.843630871+08:00","level":"INFO","msg":"writer: Close: closed","stream_id":{"value":"zfajcmpj"}}
|
| 96 |
+
{"time":"2025-07-11T20:11:17.843761743+08:00","level":"INFO","msg":"stream: closed","id":"zfajcmpj"}
|
2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/run-20250711_035849-zfajcmpj/logs/debug.log
ADDED
|
@@ -0,0 +1,27 @@
| 1 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_setup.py:_flush():79] Current SDK version is 0.18.6
|
| 2 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_setup.py:_flush():79] Configure stats pid to 3404262
|
| 3 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_setup.py:_flush():79] Loading settings from /home/chyang/.config/wandb/settings
|
| 4 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_setup.py:_flush():79] Loading settings from /home/chyang/workspace/LLM-BC/wandb/settings
|
| 5 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_setup.py:_flush():79] Loading settings from environment variables: {}
|
| 6 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_setup.py:_flush():79] Applying setup settings: {'mode': 'online', '_disable_service': None}
|
| 7 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_setup.py:_flush():79] Inferring run settings from compute environment: {'program_relpath': 'llmbc/workspace/train_mlp_projector_workspace.py', 'program_abspath': '/home/chyang/workspace/LLM-BC/llmbc/workspace/train_mlp_projector_workspace.py', 'program': '/home/chyang/workspace/LLM-BC/./llmbc/workspace/train_mlp_projector_workspace.py'}
|
| 8 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_setup.py:_flush():79] Applying login settings: {}
|
| 9 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_init.py:_log_setup():533] Logging user logs to /home/chyang/workspace/LLM-BC/data/outputs/2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/run-20250711_035849-zfajcmpj/logs/debug.log
|
| 10 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_init.py:_log_setup():534] Logging internal logs to /home/chyang/workspace/LLM-BC/data/outputs/2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/run-20250711_035849-zfajcmpj/logs/debug-internal.log
|
| 11 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_init.py:init():619] calling init triggers
|
| 12 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_init.py:init():626] wandb.init called with sweep_config: {}
|
| 13 |
+
config: {'name': 'train_mlp_projector', '_target_': 'llmbc.workspace.train_mlp_projector_workspace.TrainMlpProjectorWorkspace', 'obs_dim': 9, 'action_dim': 4, 'task_name': 'metaworld', 'exp_name': 'default', 'model_name': 'HuggingFaceTB/SmolLM2-135M-Instruct', 'horizon': 1, 'n_obs_steps': 1, 'n_action_steps': 1, 'n_latency_steps': 0, 'past_action_visible': False, 'llm_translator': {'_target_': 'llmbc.translator.llm_translator.LLMTranslator', 'cfg': {'name': 'HuggingFaceTB/SmolLM2-135M-Instruct', 'model_name': 'SmolLM2-135M-Instruct', 'use_quantization': False, 'load_from_checkpoint': False, 'adaptor_path': 'data/outputs/2025.07.05/10.04.26_train_llm_lowdim_PegInsertionSide-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-PegInsertionSide-v1/checkpoint-9744', 'use_orig_model': False, 'use_joint_mlp_projector': True, 'load_from_mlp_projector_checkpoint': False, 'mlp_projector_checkpoint_path': 'data/outputs/2025.07.05/10.04.26_train_llm_lowdim_PegInsertionSide-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-PegInsertionSide-v1/checkpoint-9744/mlp_projector.bin', 'use_linear_normalizer': False, 'normalizer_checkpoint_path': '', 'max_length': 100, 'config_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig', 'causal_lm_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM', 'lora_config': {'r': 32, 'lora_alpha': 64, 'lora_dropout': 0.05, 'bias': 'none', 'task_type': 'CAUSAL_LM'}, 'prompter': {'_target_': 'llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter', 'use_joint_mlp_projector': True}, 'hydra': {'job': {'override_dirname': 'HuggingFaceTB/SmolLM2-135M-Instruct'}, 'run': {'dir': 'data/outputs/2025.07.11/03.58.46_HuggingFaceTB/SmolLM2-135M-Instruct'}}}, 'obs_dim': 9, 'action_dim': 4, 'horizon': 1, 'n_obs_steps': 1, 'n_action_steps': 1}, 'dataloader': {'batch_size': 8, 'num_workers': 0, 'shuffle': True, 'pin_memory': False, 'persistent_workers': False}, 'val_dataloader': {'batch_size': 8, 'num_workers': 0, 'shuffle': True, 'pin_memory': False, 'persistent_workers': False}, 'optimizer': {'_target_': 'torch.optim.Adam', 'lr': 0.0001, 'betas': [0.95, 0.999], 'eps': 1e-08, 'weight_decay': 1e-06}, 'training': {'device': 'cuda', 'seed': 42, 'debug': False, 'resume': True, 'lr_scheduler': 'cosine', 'lr_warmup_steps': 10, 'num_epochs': 1000, 'gradient_accumulate_every': 32, 'grad_norm_clip': 1, 'enable_normalizer': True, 'checkpoint_every': 1, 'val_every': 1, 'sample_every': 1, 'sample_max_batch': 128, 'max_train_steps': None, 'max_val_steps': None, 'tqdm_interval_sec': 1.0}, 'logging': {'project': 'llm_module_training', 'resume': True, 'mode': 'online', 'name': '2025.07.11-03.58.48_train_mlp_projector_metaworld', 'tags': ['train_mlp_projector', 'metaworld', 'default'], 'id': None, 'group': None}, 'checkpoint': {'topk': {'monitor_key': 'val_loss', 'mode': 'min', 'k': 5, 'format_str': 'epoch={epoch:04d}-val_loss={val_loss:.3f}.ckpt'}, 'save_last_ckpt': True, 'save_last_snapshot': False}, 'multi_run': {'run_dir': 'data/outputs/2025.07.11/03.58.46_train_mlp_projector_metaworld', 'wandb_name_base': '2025.07.11-03.58.48_train_mlp_projector_metaworld'}, 'task': {'name': 'metaworld', 'obs_dim': 9, 'action_dim': 4, 'dataset': {'_target_': 'llmbc.dataset.metaworld_lowdim_dataset.MetaworldLowdimDataset', 'data_path': 'datasets/metaworld-general-split0.02.pt', 'data_path2': 'datasets/metaworld-split0.02.pt', 'horizon': 1, 'pad_before': 0, 'pad_after': 0, 'obs_eef_target': True, 'use_manual_normalizer': False, 'val_ratio': 0.05, 'dummy_normalizer': False}}, 'llm': {'name': 
'HuggingFaceTB/SmolLM2-135M-Instruct', 'model_name': 'SmolLM2-135M-Instruct', 'use_quantization': False, 'load_from_checkpoint': False, 'adaptor_path': 'data/outputs/2025.07.05/10.04.26_train_llm_lowdim_PegInsertionSide-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-PegInsertionSide-v1/checkpoint-9744', 'use_orig_model': False, 'use_joint_mlp_projector': True, 'load_from_mlp_projector_checkpoint': False, 'mlp_projector_checkpoint_path': 'data/outputs/2025.07.05/10.04.26_train_llm_lowdim_PegInsertionSide-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-PegInsertionSide-v1/checkpoint-9744/mlp_projector.bin', 'use_linear_normalizer': False, 'normalizer_checkpoint_path': '', 'max_length': 100, 'config_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig', 'causal_lm_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM', 'lora_config': {'r': 32, 'lora_alpha': 64, 'lora_dropout': 0.05, 'bias': 'none', 'task_type': 'CAUSAL_LM'}, 'prompter': {'_target_': 'llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter', 'use_joint_mlp_projector': True}, 'hydra': {'job': {'override_dirname': 'HuggingFaceTB/SmolLM2-135M-Instruct'}, 'run': {'dir': 'data/outputs/2025.07.11/03.58.46_HuggingFaceTB/SmolLM2-135M-Instruct'}}}}
|
| 14 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_init.py:init():669] starting backend
|
| 15 |
+
2025-07-11 03:58:49,403 INFO MainThread:3404262 [wandb_init.py:init():673] sending inform_init request
|
| 16 |
+
2025-07-11 03:58:49,404 INFO MainThread:3404262 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
|
| 17 |
+
2025-07-11 03:58:49,404 INFO MainThread:3404262 [wandb_init.py:init():686] backend started and connected
|
| 18 |
+
2025-07-11 03:58:49,409 INFO MainThread:3404262 [wandb_init.py:init():781] updated telemetry
|
| 19 |
+
2025-07-11 03:58:49,436 INFO MainThread:3404262 [wandb_init.py:init():814] communicating run to backend with 90.0 second timeout
|
| 20 |
+
2025-07-11 03:58:50,108 INFO MainThread:3404262 [wandb_init.py:init():867] starting run threads in backend
|
| 21 |
+
2025-07-11 03:58:50,205 INFO MainThread:3404262 [wandb_run.py:_console_start():2451] atexit reg
|
| 22 |
+
2025-07-11 03:58:50,205 INFO MainThread:3404262 [wandb_run.py:_redirect():2299] redirect: wrap_raw
|
| 23 |
+
2025-07-11 03:58:50,205 INFO MainThread:3404262 [wandb_run.py:_redirect():2364] Wrapping output streams.
|
| 24 |
+
2025-07-11 03:58:50,206 INFO MainThread:3404262 [wandb_run.py:_redirect():2389] Redirects installed.
|
| 25 |
+
2025-07-11 03:58:50,207 INFO MainThread:3404262 [wandb_init.py:init():911] run started, returning control to user process
|
| 26 |
+
2025-07-11 03:58:50,207 INFO MainThread:3404262 [wandb_run.py:_config_callback():1389] config_cb None None {'output_dir': '/home/chyang/workspace/LLM-BC/data/outputs/2025.07.11/03.58.46_train_mlp_projector_metaworld'}
|
| 27 |
+
2025-07-11 20:11:16,628 WARNING MsgRouterThr:3404262 [router.py:message_loop():75] message_loop has been closed
|
2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/run-20250711_035849-zfajcmpj/run-zfajcmpj.wandb
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:30a9a209cef39ed4cb49ce6577b50ce8a8e8e7291e09d5e11947465c6ca5c719
|
| 3 |
+
size 254644341
|
2025.07.11/03.58.46_train_mlp_projector_metaworld/wandb/wandb-resume.json
ADDED
|
@@ -0,0 +1 @@
| 1 |
+
{"run_id": "zfajcmpj"}
|
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/.hydra/config.yaml
ADDED
|
@@ -0,0 +1,116 @@
| 1 |
+
name: train_llm_lowdim
|
| 2 |
+
_target_: llmbc.workspace.train_llm_workspace.TrainLLMWorkspace
|
| 3 |
+
obs_dim: ${task.obs_dim}
|
| 4 |
+
action_dim: ${task.action_dim}
|
| 5 |
+
horizon: 1
|
| 6 |
+
n_obs_steps: 1
|
| 7 |
+
n_action_steps: 1
|
| 8 |
+
task_name: ${task.name}
|
| 9 |
+
exp_name: train llm
|
| 10 |
+
model_name: ${llm.name}
|
| 11 |
+
use_quantization: ${llm.use_quantization}
|
| 12 |
+
lora_config: ${llm.lora_config}
|
| 13 |
+
dataset:
|
| 14 |
+
test_data_ratio: 0.01
|
| 15 |
+
debug: false
|
| 16 |
+
training:
|
| 17 |
+
seed: 42
|
| 18 |
+
per_device_train_batch_size: 1
|
| 19 |
+
per_device_eval_batch_size: 1
|
| 20 |
+
gradient_accumulation_steps: 32
|
| 21 |
+
optim: paged_adamw_32bit
|
| 22 |
+
num_train_epochs: 3
|
| 23 |
+
eval_strategy: steps
|
| 24 |
+
logging_steps: 1
|
| 25 |
+
warmup_steps: 10
|
| 26 |
+
logging_strategy: steps
|
| 27 |
+
learning_rate: 5.0e-05
|
| 28 |
+
fp16: false
|
| 29 |
+
bf16: true
|
| 30 |
+
tf32: true
|
| 31 |
+
group_by_length: true
|
| 32 |
+
report_to: wandb
|
| 33 |
+
save_steps: 5000
|
| 34 |
+
eval_steps: 10
|
| 35 |
+
use_joint_mlp_projector: ${llm.use_joint_mlp_projector}
|
| 36 |
+
joint_obs_action_mlp_lr: 1.0e-06
|
| 37 |
+
trainer:
|
| 38 |
+
obs_dim: ${obs_dim}
|
| 39 |
+
action_dim: ${action_dim}
|
| 40 |
+
use_joint_mlp_projector: ${llm.use_joint_mlp_projector}
|
| 41 |
+
max_seq_length: ${llm.max_length}
|
| 42 |
+
dataset_text_field: text
|
| 43 |
+
packing: false
|
| 44 |
+
logging:
|
| 45 |
+
project: llm_module_finetuning
|
| 46 |
+
resume: true
|
| 47 |
+
mode: online
|
| 48 |
+
name: ${now:%Y.%m.%d-%H.%M.%S}_${name}_${task_name}
|
| 49 |
+
tags:
|
| 50 |
+
- ${name}
|
| 51 |
+
- ${task_name}
|
| 52 |
+
- ${exp_name}
|
| 53 |
+
id: null
|
| 54 |
+
group: null
|
| 55 |
+
multi_run:
|
| 56 |
+
run_dir: data/outputs/${now:%Y.%m.%d}/${now:%H.%M.%S}_${name}_${task_name}
|
| 57 |
+
wandb_name_base: ${now:%Y.%m.%d-%H.%M.%S}_${name}_${task_name}
|
| 58 |
+
task:
|
| 59 |
+
name: box-close-v2
|
| 60 |
+
obs_dim: 9
|
| 61 |
+
action_dim: 4
|
| 62 |
+
env_runner:
|
| 63 |
+
_target_: llmbc.env_runner.metaworld_lowdim_runner.MetaworldLowdimRunner
|
| 64 |
+
env_name: llf-metaworld-box-close-v2
|
| 65 |
+
max_steps: 30
|
| 66 |
+
n_obs_steps: ${n_obs_steps}
|
| 67 |
+
n_action_steps: ${n_action_steps}
|
| 68 |
+
instruction_type: b
|
| 69 |
+
feedback_type:
|
| 70 |
+
- hp
|
| 71 |
+
- hn
|
| 72 |
+
- fp
|
| 73 |
+
visual: false
|
| 74 |
+
dataset:
|
| 75 |
+
_target_: llmbc.dataset.metaworld_lowdim_dataset.MetaworldLowdimDataset
|
| 76 |
+
data_path: datasets/box-close-v2-general-shuf.pt
|
| 77 |
+
data_path2: datasets/box-close-v2-shuf.pt
|
| 78 |
+
horizon: ${horizon}
|
| 79 |
+
pad_before: ${eval:'${n_obs_steps}-1'}
|
| 80 |
+
pad_after: ${eval:'${n_action_steps}-1'}
|
| 81 |
+
obs_eef_target: true
|
| 82 |
+
use_manual_normalizer: false
|
| 83 |
+
val_ratio: 0.05
|
| 84 |
+
dummy_normalizer: false
|
| 85 |
+
instructor:
|
| 86 |
+
_target_: llmbc.translator.instructor.metaworld_instructor.box_close_v2_instructor.BoxCloseV2Instructor
|
| 87 |
+
llm:
|
| 88 |
+
name: HuggingFaceTB/SmolLM2-135M-Instruct
|
| 89 |
+
model_name: SmolLM2-135M-Instruct
|
| 90 |
+
use_raw_llm: false
|
| 91 |
+
use_quantization: false
|
| 92 |
+
load_from_checkpoint: false
|
| 93 |
+
adaptor_path: data/outputs/2025.06.02/17.59.17_train_llm_lowdim_sweep-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-sweep-v2/checkpoint-4572
|
| 94 |
+
use_orig_model: false
|
| 95 |
+
use_joint_mlp_projector: true
|
| 96 |
+
load_from_mlp_projector_checkpoint: true
|
| 97 |
+
mlp_projector_checkpoint_path: data/outputs/2025.08.11/10.45.37_train_mlp_projector_metaworld/checkpoints/latest.ckpt
|
| 98 |
+
use_linear_normalizer: false
|
| 99 |
+
normalizer_checkpoint_path: ''
|
| 100 |
+
max_length: 100
|
| 101 |
+
config_target: llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig
|
| 102 |
+
causal_lm_target: llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM
|
| 103 |
+
lora_config:
|
| 104 |
+
r: 32
|
| 105 |
+
lora_alpha: 64
|
| 106 |
+
lora_dropout: 0.05
|
| 107 |
+
bias: none
|
| 108 |
+
task_type: CAUSAL_LM
|
| 109 |
+
prompter:
|
| 110 |
+
_target_: llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter
|
| 111 |
+
use_joint_mlp_projector: true
|
| 112 |
+
hydra:
|
| 113 |
+
job:
|
| 114 |
+
override_dirname: ${model_name}
|
| 115 |
+
run:
|
| 116 |
+
dir: data/outputs/${now:%Y.%m.%d}/${now:%H.%M.%S}_${model_name}
|
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/.hydra/hydra.yaml
ADDED
|
@@ -0,0 +1,156 @@
| 1 |
+
hydra:
|
| 2 |
+
run:
|
| 3 |
+
dir: data/outputs/${now:%Y.%m.%d}/${now:%H.%M.%S}_${name}_${task_name}
|
| 4 |
+
sweep:
|
| 5 |
+
dir: data/outputs/${now:%Y.%m.%d}/${now:%H.%M.%S}_${name}_${task_name}
|
| 6 |
+
subdir: ${hydra.job.num}
|
| 7 |
+
launcher:
|
| 8 |
+
_target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
|
| 9 |
+
sweeper:
|
| 10 |
+
_target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
|
| 11 |
+
max_batch_size: null
|
| 12 |
+
params: null
|
| 13 |
+
help:
|
| 14 |
+
app_name: ${hydra.job.name}
|
| 15 |
+
header: '${hydra.help.app_name} is powered by Hydra.
|
| 16 |
+
|
| 17 |
+
'
|
| 18 |
+
footer: 'Powered by Hydra (https://hydra.cc)
|
| 19 |
+
|
| 20 |
+
Use --hydra-help to view Hydra specific help
|
| 21 |
+
|
| 22 |
+
'
|
| 23 |
+
template: '${hydra.help.header}
|
| 24 |
+
|
| 25 |
+
== Configuration groups ==
|
| 26 |
+
|
| 27 |
+
Compose your configuration from those groups (group=option)
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
$APP_CONFIG_GROUPS
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
== Config ==
|
| 34 |
+
|
| 35 |
+
Override anything in the config (foo.bar=value)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
$CONFIG
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
${hydra.help.footer}
|
| 42 |
+
|
| 43 |
+
'
|
| 44 |
+
hydra_help:
|
| 45 |
+
template: 'Hydra (${hydra.runtime.version})
|
| 46 |
+
|
| 47 |
+
See https://hydra.cc for more info.
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
== Flags ==
|
| 51 |
+
|
| 52 |
+
$FLAGS_HELP
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
== Configuration groups ==
|
| 56 |
+
|
| 57 |
+
Compose your configuration from those groups (For example, append hydra/job_logging=disabled
|
| 58 |
+
to command line)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
$HYDRA_CONFIG_GROUPS
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
Use ''--cfg hydra'' to Show the Hydra config.
|
| 65 |
+
|
| 66 |
+
'
|
| 67 |
+
hydra_help: ???
|
| 68 |
+
hydra_logging:
|
| 69 |
+
version: 1
|
| 70 |
+
formatters:
|
| 71 |
+
simple:
|
| 72 |
+
format: '[%(asctime)s][HYDRA] %(message)s'
|
| 73 |
+
handlers:
|
| 74 |
+
console:
|
| 75 |
+
class: logging.StreamHandler
|
| 76 |
+
formatter: simple
|
| 77 |
+
stream: ext://sys.stdout
|
| 78 |
+
root:
|
| 79 |
+
level: INFO
|
| 80 |
+
handlers:
|
| 81 |
+
- console
|
| 82 |
+
loggers:
|
| 83 |
+
logging_example:
|
| 84 |
+
level: DEBUG
|
| 85 |
+
disable_existing_loggers: false
|
| 86 |
+
job_logging:
|
| 87 |
+
version: 1
|
| 88 |
+
formatters:
|
| 89 |
+
simple:
|
| 90 |
+
format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
|
| 91 |
+
handlers:
|
| 92 |
+
console:
|
| 93 |
+
class: logging.StreamHandler
|
| 94 |
+
formatter: simple
|
| 95 |
+
stream: ext://sys.stdout
|
| 96 |
+
file:
|
| 97 |
+
class: logging.FileHandler
|
| 98 |
+
formatter: simple
|
| 99 |
+
filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
|
| 100 |
+
root:
|
| 101 |
+
level: INFO
|
| 102 |
+
handlers:
|
| 103 |
+
- console
|
| 104 |
+
- file
|
| 105 |
+
disable_existing_loggers: false
|
| 106 |
+
env: {}
|
| 107 |
+
mode: RUN
|
| 108 |
+
searchpath: []
|
| 109 |
+
callbacks: {}
|
| 110 |
+
output_subdir: .hydra
|
| 111 |
+
overrides:
|
| 112 |
+
hydra:
|
| 113 |
+
- hydra.mode=RUN
|
| 114 |
+
task: []
|
| 115 |
+
job:
|
| 116 |
+
name: train_llm_workspace
|
| 117 |
+
chdir: null
|
| 118 |
+
override_dirname: ''
|
| 119 |
+
id: ???
|
| 120 |
+
num: ???
|
| 121 |
+
config_name: train_llm_workspace
|
| 122 |
+
env_set: {}
|
| 123 |
+
env_copy: []
|
| 124 |
+
config:
|
| 125 |
+
override_dirname:
|
| 126 |
+
kv_sep: '='
|
| 127 |
+
item_sep: ','
|
| 128 |
+
exclude_keys: []
|
| 129 |
+
runtime:
|
| 130 |
+
version: 1.2.0
|
| 131 |
+
version_base: '1.2'
|
| 132 |
+
cwd: /home/chyang/workspace/LLM-BC
|
| 133 |
+
config_sources:
|
| 134 |
+
- path: hydra.conf
|
| 135 |
+
schema: pkg
|
| 136 |
+
provider: hydra
|
| 137 |
+
- path: /home/chyang/workspace/LLM-BC/llmbc/config
|
| 138 |
+
schema: file
|
| 139 |
+
provider: main
|
| 140 |
+
- path: ''
|
| 141 |
+
schema: structured
|
| 142 |
+
provider: schema
|
| 143 |
+
output_dir: /home/chyang/workspace/LLM-BC/data/outputs/2025.08.14/00.47.03_train_llm_lowdim_box-close-v2
|
| 144 |
+
choices:
|
| 145 |
+
llm: smollm2-135m-instruct
|
| 146 |
+
task: box-close-v2
|
| 147 |
+
hydra/env: default
|
| 148 |
+
hydra/callbacks: null
|
| 149 |
+
hydra/job_logging: default
|
| 150 |
+
hydra/hydra_logging: default
|
| 151 |
+
hydra/hydra_help: default
|
| 152 |
+
hydra/help: default
|
| 153 |
+
hydra/sweeper: basic
|
| 154 |
+
hydra/launcher: basic
|
| 155 |
+
hydra/output: default
|
| 156 |
+
verbose: false
|
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/.hydra/overrides.yaml
ADDED
@@ -0,0 +1 @@
[]
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/README.md
ADDED
@@ -0,0 +1,202 @@
---
base_model: HuggingFaceTB/SmolLM2-135M-Instruct
library_name: peft
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->



## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->



- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]

## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]


#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary



## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]
### Framework versions

- PEFT 0.14.0
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/adapter_config.json
ADDED
@@ -0,0 +1,37 @@
{
  "alpha_pattern": {},
  "auto_mapping": null,
  "base_model_name_or_path": "HuggingFaceTB/SmolLM2-135M-Instruct",
  "bias": "none",
  "eva_config": null,
  "exclude_modules": null,
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layer_replication": null,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 64,
  "lora_bias": false,
  "lora_dropout": 0.05,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 32,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "o_proj",
    "v_proj",
    "q_proj",
    "k_proj",
    "up_proj",
    "down_proj",
    "gate_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
  "use_rslora": false
}
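adapter_config.json above is a standard PEFT LoRA configuration (r=32, alpha=64, all attention and MLP projections targeted). As a minimal sketch of attaching the adapter to its base model with the `peft` API (the local folder path is illustrative, and loading the base model through the stock Auto class ignores the project's custom LowdimLlama wrapper):

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "HuggingFaceTB/SmolLM2-135M-Instruct"
# Local path to the uploaded adapter folder (illustrative).
adapter_dir = "2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(base_id)

# Wraps the base model with the LoRA weights described by adapter_config.json.
model = PeftModel.from_pretrained(base_model, adapter_dir)
model = model.merge_and_unload()  # optional: fold the LoRA deltas into the base weights
```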
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:98cd6d3faca143d2accf8d665333d487de6b5935f500192453df73ca9ac9edbe
size 39131224
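adapter_model.safetensors is stored as a Git LFS pointer, so the three lines above are metadata rather than the ~39 MB of weights. A minimal sketch of fetching the real file with huggingface_hub; the repo_id and repo_type are placeholders, not taken from this upload:

```python
from huggingface_hub import hf_hub_download

# repo_id is hypothetical; substitute the repo this outputs folder was actually uploaded to.
local_path = hf_hub_download(
    repo_id="your-username/your-upload-repo",
    filename="2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/adapter_model.safetensors",
    repo_type="dataset",  # assumption: the outputs folder was pushed as a dataset repo
)
print(local_path)
```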
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/README.md
ADDED
@@ -0,0 +1,202 @@
---
base_model: HuggingFaceTB/SmolLM2-135M-Instruct
library_name: peft
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->



## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->



- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]

## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]


#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary



## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]
### Framework versions

- PEFT 0.14.0
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/adapter_config.json
ADDED
@@ -0,0 +1,37 @@
{
  "alpha_pattern": {},
  "auto_mapping": null,
  "base_model_name_or_path": "HuggingFaceTB/SmolLM2-135M-Instruct",
  "bias": "none",
  "eva_config": null,
  "exclude_modules": null,
  "fan_in_fan_out": false,
  "inference_mode": true,
  "init_lora_weights": true,
  "layer_replication": null,
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
  "lora_alpha": 64,
  "lora_bias": false,
  "lora_dropout": 0.05,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
  "r": 32,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
    "o_proj",
    "v_proj",
    "q_proj",
    "k_proj",
    "up_proj",
    "down_proj",
    "gate_proj"
  ],
  "task_type": "CAUSAL_LM",
  "use_dora": false,
  "use_rslora": false
}
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:98cd6d3faca143d2accf8d665333d487de6b5935f500192453df73ca9ac9edbe
size 39131224
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/config.json
ADDED
@@ -0,0 +1,43 @@
{
  "_attn_implementation_autoset": true,
  "_name_or_path": "HuggingFaceTB/SmolLM2-135M-Instruct",
  "action_dim": 4,
  "architectures": [
    "LlamaForCausalLM"
  ],
  "attention_bias": false,
  "attention_dropout": 0.0,
  "bos_token_id": 1,
  "eos_token_id": 2,
  "head_dim": 64,
  "hidden_act": "silu",
  "hidden_size": 576,
  "initializer_range": 0.041666666666666664,
  "intermediate_size": 1536,
  "is_llama_config": true,
  "max_position_embeddings": 8192,
  "mlp_bias": false,
  "model_type": "llama_lowdim",
  "num_attention_heads": 9,
  "num_hidden_layers": 30,
  "num_key_value_heads": 3,
  "obs_dim": 9,
  "pad_token_id": 2,
  "pretraining_tp": 1,
  "rms_norm_eps": 1e-05,
  "rope_interleaved": false,
  "rope_scaling": null,
  "rope_theta": 100000,
  "tie_word_embeddings": true,
  "torch_dtype": "bfloat16",
  "transformers.js_config": {
    "kv_cache_dtype": {
      "fp16": "float16",
      "q4f16": "float16"
    }
  },
  "transformers_version": "4.47.1",
  "use_cache": false,
  "use_joint_mlp_projector": true,
  "vocab_size": 49152
}
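config.json above declares the custom model_type "llama_lowdim" (with the extra obs_dim/action_dim fields), so it will not resolve through the stock Auto classes until the project's config and model classes are registered. A minimal sketch, assuming the LowdimLlamaConfig and LowdimLlamaForCausalLM classes referenced by config_target/causal_lm_target in the training config are importable from llmbc:

```python
from transformers import AutoConfig, AutoModelForCausalLM
from llmbc.model.llm.llama_lowdim_model import LowdimLlamaConfig, LowdimLlamaForCausalLM

# Register the custom "llama_lowdim" architecture with the Auto classes so that
# checkpoint-4116/config.json resolves to the right Python classes.
AutoConfig.register("llama_lowdim", LowdimLlamaConfig)
AutoModelForCausalLM.register(LowdimLlamaConfig, LowdimLlamaForCausalLM)

checkpoint_dir = "2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116"
config = AutoConfig.from_pretrained(checkpoint_dir)
print(config.obs_dim, config.action_dim)  # 9, 4 per the file above
```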
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/merges.txt
ADDED
The diff for this file is too large to render.
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/mlp_projector.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9d558f1cb0bd8cea4060c58a14cee65842964fce083d2036d806b3671bc93037
size 1363904
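mlp_projector.bin holds the joint observation/action MLP projector that the config enables via use_joint_mlp_projector. A minimal sketch of inspecting it, assuming the file was written with torch.save as a plain state dict; the exact serialization format is not documented in this upload:

```python
import torch

projector_path = "2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/mlp_projector.bin"

# Assumption: the .bin file is a torch-serialized state dict (~1.4 MB per the LFS pointer above).
state_dict = torch.load(projector_path, map_location="cpu")
for name, tensor in state_dict.items():
    print(name, tuple(tensor.shape))
```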
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9a467def6b34ea080f4fed81d3c60479914c41d0fba0607a02498531ba4bd676
size 81126202
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c3863444d190337b6d93e64a927a8f4ab343661adc8a292dd3c1275a824b6127
size 14244
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b20e4e7ef5d17f96fcf786acfa68c77beeca89e90512d77f8fe8789f0cbfa27f
size 1064
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/special_tokens_map.json
ADDED
@@ -0,0 +1,34 @@
{
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>"
  ],
  "bos_token": {
    "content": "<|im_start|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "eos_token": {
    "content": "<|im_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "pad_token": {
    "content": "<|im_end|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  },
  "unk_token": {
    "content": "<|endoftext|>",
    "lstrip": false,
    "normalized": false,
    "rstrip": false,
    "single_word": false
  }
}
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/tokenizer.json
ADDED
The diff for this file is too large to render.
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/tokenizer_config.json
ADDED
@@ -0,0 +1,155 @@
{
  "add_prefix_space": false,
  "added_tokens_decoder": {
    "0": {
      "content": "<|endoftext|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "1": {
      "content": "<|im_start|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "2": {
      "content": "<|im_end|>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "3": {
      "content": "<repo_name>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "4": {
      "content": "<reponame>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "5": {
      "content": "<file_sep>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "6": {
      "content": "<filename>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "7": {
      "content": "<gh_stars>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "8": {
      "content": "<issue_start>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "9": {
      "content": "<issue_comment>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "10": {
      "content": "<issue_closed>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "11": {
      "content": "<jupyter_start>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "12": {
      "content": "<jupyter_text>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "13": {
      "content": "<jupyter_code>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "14": {
      "content": "<jupyter_output>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "15": {
      "content": "<jupyter_script>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    },
    "16": {
      "content": "<empty_output>",
      "lstrip": false,
      "normalized": false,
      "rstrip": false,
      "single_word": false,
      "special": true
    }
  },
  "additional_special_tokens": [
    "<|im_start|>",
    "<|im_end|>"
  ],
  "bos_token": "<|im_start|>",
  "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful AI assistant named SmolLM, trained by Hugging Face<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
  "clean_up_tokenization_spaces": false,
  "eos_token": "<|im_end|>",
  "extra_special_tokens": {},
  "model_max_length": 8192,
  "pad_token": "<|im_end|>",
  "tokenizer_class": "GPT2Tokenizer",
  "unk_token": "<|endoftext|>",
  "vocab_size": 49152
}
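Since tokenizer_config.json ships the SmolLM2 ChatML template above, prompts for the fine-tuned model can be built with the standard apply_chat_template call. A minimal sketch; loading the tokenizer from the base model id rather than this checkpoint directory, and the instruction text itself, are assumptions of convenience:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("HuggingFaceTB/SmolLM2-135M-Instruct")

messages = [
    {"role": "user", "content": "Close the box."},  # illustrative instruction
]

# Renders the <|im_start|>/<|im_end|> ChatML format defined by chat_template above.
prompt = tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)
print(prompt)
```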
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/trainer_state.json
ADDED
The diff for this file is too large to render.
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:68dd06759f6c865a485b284241282a608ec0ed1bf5acdfffb221641adc03ca3f
size 5944
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/checkpoint-4116/vocab.json
ADDED
The diff for this file is too large to render.
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/normalizer.pt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:02a2548541cdb7d3e3c2682c3a79ce762ff6b8f29ae9c59c9337a2f59259b844
size 4514
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/train_llm_workspace.log
ADDED
@@ -0,0 +1,5 @@
[2025-08-14 00:51:53,821][datasets.arrow_dataset][WARNING] - Setting TOKENIZERS_PARALLELISM=false for forked processes.
[2025-08-14 00:52:03,225][root][INFO] - gcc -pthread -B /home/chyang/miniconda3/envs/llm-bc/compiler_compat -Wno-unused-result -Wsign-compare -DNDEBUG -O2 -Wall -fPIC -O2 -isystem /home/chyang/miniconda3/envs/llm-bc/include -I/home/chyang/miniconda3/envs/llm-bc/include -fPIC -O2 -isystem /home/chyang/miniconda3/envs/llm-bc/include -fPIC -c /tmp/tmp_q7fesb1/test.c -o /tmp/tmp_q7fesb1/test.o
[2025-08-14 00:52:03,284][root][INFO] - gcc -pthread -B /home/chyang/miniconda3/envs/llm-bc/compiler_compat /tmp/tmp_q7fesb1/test.o -laio -o /tmp/tmp_q7fesb1/a.out
[2025-08-14 00:52:03,963][root][INFO] - gcc -pthread -B /home/chyang/miniconda3/envs/llm-bc/compiler_compat -Wno-unused-result -Wsign-compare -DNDEBUG -O2 -Wall -fPIC -O2 -isystem /home/chyang/miniconda3/envs/llm-bc/include -I/home/chyang/miniconda3/envs/llm-bc/include -fPIC -O2 -isystem /home/chyang/miniconda3/envs/llm-bc/include -fPIC -c /tmp/tmpzb6l_1ze/test.c -o /tmp/tmpzb6l_1ze/test.o
[2025-08-14 00:52:04,022][root][INFO] - gcc -pthread -B /home/chyang/miniconda3/envs/llm-bc/compiler_compat /tmp/tmpzb6l_1ze/test.o -L/usr/local/cuda -L/usr/local/cuda/lib64 -lcufile -o /tmp/tmpzb6l_1ze/a.out
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/wandb/debug-internal.log
ADDED
@@ -0,0 +1,19 @@
{"time":"2025-08-14T00:47:04.401718487+08:00","level":"INFO","msg":"using version","core version":"0.18.6"}
{"time":"2025-08-14T00:47:04.401726265+08:00","level":"INFO","msg":"created symlink","path":"/home/chyang/workspace/LLM-BC/data/outputs/2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/wandb/run-20250814_004704-1q0db7hd/logs/debug-core.log"}
{"time":"2025-08-14T00:47:04.505618343+08:00","level":"INFO","msg":"created new stream","id":"1q0db7hd"}
{"time":"2025-08-14T00:47:04.505645659+08:00","level":"INFO","msg":"stream: started","id":"1q0db7hd"}
{"time":"2025-08-14T00:47:04.505666637+08:00","level":"INFO","msg":"writer: Do: started","stream_id":{"value":"1q0db7hd"}}
{"time":"2025-08-14T00:47:04.50566955+08:00","level":"INFO","msg":"handler: started","stream_id":{"value":"1q0db7hd"}}
{"time":"2025-08-14T00:47:04.505688208+08:00","level":"INFO","msg":"sender: started","stream_id":"1q0db7hd"}
{"time":"2025-08-14T00:47:05.270983992+08:00","level":"INFO","msg":"Starting system monitor"}
{"time":"2025-08-14T05:27:50.683069117+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)"}
{"time":"2025-08-14T05:59:20.737637353+08:00","level":"INFO","msg":"api: retrying error","error":"Post \"https://api.wandb.ai/graphql\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"}
{"time":"2025-08-14T19:19:00.407201107+08:00","level":"INFO","msg":"Stopping system monitor"}
{"time":"2025-08-14T19:19:00.407729048+08:00","level":"INFO","msg":"Stopped system monitor"}
{"time":"2025-08-14T19:19:01.407607502+08:00","level":"INFO","msg":"handler: operation stats","stats":{"operations":[{"desc":"saving job artifact","runtime_seconds":0.334437898},{"desc":"uploading output.log","runtime_seconds":0.083104417,"progress":"685.4KB/685.4KB"},{"desc":"uploading config.yaml","runtime_seconds":0.083092238,"progress":"15.8KB/15.8KB"}],"total_operations":3}}
{"time":"2025-08-14T19:19:02.502050556+08:00","level":"INFO","msg":"fileTransfer: Close: file transfer manager closed"}
{"time":"2025-08-14T19:19:04.022331101+08:00","level":"INFO","msg":"stream: closing","id":"1q0db7hd"}
{"time":"2025-08-14T19:19:04.022347454+08:00","level":"INFO","msg":"handler: closed","stream_id":{"value":"1q0db7hd"}}
{"time":"2025-08-14T19:19:04.022362876+08:00","level":"INFO","msg":"writer: Close: closed","stream_id":{"value":"1q0db7hd"}}
{"time":"2025-08-14T19:19:04.022371272+08:00","level":"INFO","msg":"sender: closed","stream_id":"1q0db7hd"}
{"time":"2025-08-14T19:19:04.022412635+08:00","level":"INFO","msg":"stream: closed","id":"1q0db7hd"}
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/wandb/debug.log
ADDED
@@ -0,0 +1,35 @@
2025-08-14 00:47:04,399 INFO MainThread:1338151 [wandb_setup.py:_flush():79] Current SDK version is 0.18.6
2025-08-14 00:47:04,399 INFO MainThread:1338151 [wandb_setup.py:_flush():79] Configure stats pid to 1338151
2025-08-14 00:47:04,399 INFO MainThread:1338151 [wandb_setup.py:_flush():79] Loading settings from /home/chyang/.config/wandb/settings
2025-08-14 00:47:04,399 INFO MainThread:1338151 [wandb_setup.py:_flush():79] Loading settings from /home/chyang/workspace/LLM-BC/wandb/settings
2025-08-14 00:47:04,399 INFO MainThread:1338151 [wandb_setup.py:_flush():79] Loading settings from environment variables: {}
2025-08-14 00:47:04,399 INFO MainThread:1338151 [wandb_setup.py:_flush():79] Applying setup settings: {'mode': 'online', '_disable_service': None}
2025-08-14 00:47:04,399 INFO MainThread:1338151 [wandb_setup.py:_flush():79] Inferring run settings from compute environment: {'program_relpath': 'llmbc/workspace/train_llm_workspace.py', 'program_abspath': '/home/chyang/workspace/LLM-BC/llmbc/workspace/train_llm_workspace.py', 'program': '/home/chyang/workspace/LLM-BC/./llmbc/workspace/train_llm_workspace.py'}
2025-08-14 00:47:04,399 INFO MainThread:1338151 [wandb_setup.py:_flush():79] Applying login settings: {}
2025-08-14 00:47:04,399 INFO MainThread:1338151 [wandb_init.py:_log_setup():533] Logging user logs to /home/chyang/workspace/LLM-BC/data/outputs/2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/wandb/run-20250814_004704-1q0db7hd/logs/debug.log
2025-08-14 00:47:04,399 INFO MainThread:1338151 [wandb_init.py:_log_setup():534] Logging internal logs to /home/chyang/workspace/LLM-BC/data/outputs/2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/wandb/run-20250814_004704-1q0db7hd/logs/debug-internal.log
2025-08-14 00:47:04,399 INFO MainThread:1338151 [wandb_init.py:init():619] calling init triggers
2025-08-14 00:47:04,399 INFO MainThread:1338151 [wandb_init.py:init():626] wandb.init called with sweep_config: {}
config: {'name': 'train_llm_lowdim', '_target_': 'llmbc.workspace.train_llm_workspace.TrainLLMWorkspace', 'obs_dim': 9, 'action_dim': 4, 'horizon': 1, 'n_obs_steps': 1, 'n_action_steps': 1, 'task_name': 'box-close-v2', 'exp_name': 'train llm', 'model_name': 'HuggingFaceTB/SmolLM2-135M-Instruct', 'use_quantization': False, 'lora_config': {'r': 32, 'lora_alpha': 64, 'lora_dropout': 0.05, 'bias': 'none', 'task_type': 'CAUSAL_LM'}, 'dataset': {'test_data_ratio': 0.01}, 'debug': False, 'training': {'seed': 42, 'per_device_train_batch_size': 1, 'per_device_eval_batch_size': 1, 'gradient_accumulation_steps': 32, 'optim': 'paged_adamw_32bit', 'num_train_epochs': 3, 'eval_strategy': 'steps', 'logging_steps': 1, 'warmup_steps': 10, 'logging_strategy': 'steps', 'learning_rate': 5e-05, 'fp16': False, 'bf16': True, 'tf32': True, 'group_by_length': True, 'report_to': 'wandb', 'save_steps': 5000, 'eval_steps': 10, 'use_joint_mlp_projector': True, 'joint_obs_action_mlp_lr': 1e-06}, 'trainer': {'obs_dim': 9, 'action_dim': 4, 'use_joint_mlp_projector': True, 'max_seq_length': 100, 'dataset_text_field': 'text', 'packing': False}, 'logging': {'project': 'llm_module_finetuning', 'resume': True, 'mode': 'online', 'name': '2025.08.14-00.47.03_train_llm_lowdim_box-close-v2', 'tags': ['train_llm_lowdim', 'box-close-v2', 'train llm'], 'id': None, 'group': None}, 'multi_run': {'run_dir': 'data/outputs/2025.08.14/00.47.03_train_llm_lowdim_box-close-v2', 'wandb_name_base': '2025.08.14-00.47.03_train_llm_lowdim_box-close-v2'}, 'task': {'name': 'box-close-v2', 'obs_dim': 9, 'action_dim': 4, 'env_runner': {'_target_': 'llmbc.env_runner.metaworld_lowdim_runner.MetaworldLowdimRunner', 'env_name': 'llf-metaworld-box-close-v2', 'max_steps': 30, 'n_obs_steps': 1, 'n_action_steps': 1, 'instruction_type': 'b', 'feedback_type': ['hp', 'hn', 'fp'], 'visual': False}, 'dataset': {'_target_': 'llmbc.dataset.metaworld_lowdim_dataset.MetaworldLowdimDataset', 'data_path': 'datasets/box-close-v2-general-shuf.pt', 'data_path2': 'datasets/box-close-v2-shuf.pt', 'horizon': 1, 'pad_before': 0, 'pad_after': 0, 'obs_eef_target': True, 'use_manual_normalizer': False, 'val_ratio': 0.05, 'dummy_normalizer': False}, 'instructor': {'_target_': 'llmbc.translator.instructor.metaworld_instructor.box_close_v2_instructor.BoxCloseV2Instructor'}}, 'llm': {'name': 'HuggingFaceTB/SmolLM2-135M-Instruct', 'model_name': 'SmolLM2-135M-Instruct', 'use_raw_llm': False, 'use_quantization': False, 'load_from_checkpoint': False, 'adaptor_path': 'data/outputs/2025.06.02/17.59.17_train_llm_lowdim_sweep-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-sweep-v2/checkpoint-4572', 'use_orig_model': False, 'use_joint_mlp_projector': True, 'load_from_mlp_projector_checkpoint': True, 'mlp_projector_checkpoint_path': 'data/outputs/2025.08.11/10.45.37_train_mlp_projector_metaworld/checkpoints/latest.ckpt', 'use_linear_normalizer': False, 'normalizer_checkpoint_path': '', 'max_length': 100, 'config_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig', 'causal_lm_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM', 'lora_config': {'r': 32, 'lora_alpha': 64, 'lora_dropout': 0.05, 'bias': 'none', 'task_type': 'CAUSAL_LM'}, 'prompter': {'_target_': 'llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter', 'use_joint_mlp_projector': True}, 'hydra': {'job': {'override_dirname': 'HuggingFaceTB/SmolLM2-135M-Instruct'}, 'run': {'dir': 'data/outputs/2025.08.14/00.47.03_HuggingFaceTB/SmolLM2-135M-Instruct'}}}}
2025-08-14 00:47:04,399 INFO MainThread:1338151 [wandb_init.py:init():669] starting backend
2025-08-14 00:47:04,399 INFO MainThread:1338151 [wandb_init.py:init():673] sending inform_init request
2025-08-14 00:47:04,400 INFO MainThread:1338151 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
2025-08-14 00:47:04,400 INFO MainThread:1338151 [wandb_init.py:init():686] backend started and connected
2025-08-14 00:47:04,406 INFO MainThread:1338151 [wandb_init.py:init():781] updated telemetry
2025-08-14 00:47:04,429 INFO MainThread:1338151 [wandb_init.py:init():814] communicating run to backend with 90.0 second timeout
2025-08-14 00:47:05,267 INFO MainThread:1338151 [wandb_init.py:init():867] starting run threads in backend
2025-08-14 00:47:05,380 INFO MainThread:1338151 [wandb_run.py:_console_start():2451] atexit reg
2025-08-14 00:47:05,380 INFO MainThread:1338151 [wandb_run.py:_redirect():2299] redirect: wrap_raw
2025-08-14 00:47:05,380 INFO MainThread:1338151 [wandb_run.py:_redirect():2364] Wrapping output streams.
2025-08-14 00:47:05,380 INFO MainThread:1338151 [wandb_run.py:_redirect():2389] Redirects installed.
2025-08-14 00:47:05,381 INFO MainThread:1338151 [wandb_init.py:init():911] run started, returning control to user process
2025-08-14 00:52:11,394 INFO MainThread:1338151 [wandb_run.py:_config_callback():1389] config_cb None None {'peft_config': {'default': {'task_type': 'CAUSAL_LM', 'peft_type': <PeftType.LORA: 'LORA'>, 'auto_mapping': None, 'base_model_name_or_path': 'HuggingFaceTB/SmolLM2-135M-Instruct', 'revision': None, 'inference_mode': False, 'r': 32, 'target_modules': {'o_proj', 'v_proj', 'q_proj', 'k_proj', 'up_proj', 'down_proj', 'gate_proj'}, 'exclude_modules': None, 'lora_alpha': 64, 'lora_dropout': 0.05, 'fan_in_fan_out': False, 'bias': 'none', 'use_rslora': False, 'modules_to_save': None, 'init_lora_weights': True, 'layers_to_transform': None, 'layers_pattern': None, 'rank_pattern': {}, 'alpha_pattern': {}, 'megatron_config': None, 'megatron_core': 'megatron.core', 'loftq_config': {}, 'eva_config': None, 'use_dora': False, 'layer_replication': None, 'runtime_config': {'ephemeral_gpu_offload': False}, 'lora_bias': False}}, 'obs_dim': 9, 'action_dim': 4, 'use_joint_mlp_projector': True, 'vocab_size': 49152, 'max_position_embeddings': 8192, 'hidden_size': 576, 'intermediate_size': 1536, 'num_hidden_layers': 30, 'num_attention_heads': 9, 'num_key_value_heads': 3, 'hidden_act': 'silu', 'initializer_range': 0.041666666666666664, 'rms_norm_eps': 1e-05, 'pretraining_tp': 1, 'use_cache': False, 'rope_theta': 100000, 'rope_scaling': None, 'attention_bias': False, 'attention_dropout': 0.0, 'mlp_bias': False, 'head_dim': 64, 'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'bfloat16', 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['LlamaForCausalLM'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': 2, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'HuggingFaceTB/SmolLM2-135M-Instruct', '_attn_implementation_autoset': True, 'transformers_version': '4.47.1', 'is_llama_config': True, 'model_type': 'llama_lowdim', 'rope_interleaved': False, 'transformers.js_config': {'kv_cache_dtype': {'q4f16': 'float16', 'fp16': 'float16'}}, 'output_dir': '/home/chyang/workspace/LLM-BC/data/outputs/2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'eval_strategy': 'steps', 'prediction_loss_only': False, 'per_device_train_batch_size': 1, 'per_device_eval_batch_size': 1, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 
'gradient_accumulation_steps': 32, 'eval_accumulation_steps': None, 'eval_delay': 0, 'torch_empty_cache_steps': None, 'learning_rate': 5e-05, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 3, 'max_steps': -1, 'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 10, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/home/chyang/workspace/LLM-BC/data/outputs/2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/runs/Aug14_00-52-02_A6000-2', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 1, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 5000, 'save_total_limit': None, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'restore_callback_states_from_checkpoint': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': True, 'fp16': False, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': True, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': 10, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/home/chyang/workspace/LLM-BC/data/outputs/2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'paged_adamw_32bit', 'optim_args': None, 'adafactor': False, 'group_by_length': True, 'length_column_name': 'length', 'report_to': ['wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': False, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': None, 'hub_always_push': False, 'gradient_checkpointing': False, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'include_for_metrics': [], 'eval_do_concat_batches': True, 'fp16_backend': 'auto', 'evaluation_strategy': None, 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': 
None, 'optim_target_modules': None, 'batch_eval_metrics': False, 'eval_on_start': False, 'use_liger_kernel': False, 'eval_use_gather_object': False, 'average_tokens_across_devices': False, 'dataset_text_field': 'text', 'packing': False, 'max_seq_length': 100, 'dataset_num_proc': None, 'dataset_batch_size': 1000, 'model_init_kwargs': None, 'dataset_kwargs': {}, 'eval_packing': None, 'num_of_sequences': 1024, 'chars_per_token': '<CHARS_PER_TOKEN>', 'use_liger': False, 'joint_obs_action_mlp_lr': 1e-06, 'obs_mlp_lr': None, 'action_mlp_lr': None}
|
| 27 |
+
2025-08-14 00:52:11,398 INFO MainThread:1338151 [wandb_config.py:__setitem__():154] config set model/num_parameters = 144624384 - <bound method Run._config_callback of <wandb.sdk.wandb_run.Run object at 0x76a657977a30>>
|
| 28 |
+
2025-08-14 00:52:11,399 INFO MainThread:1338151 [wandb_run.py:_config_callback():1389] config_cb model/num_parameters 144624384 None
|
| 29 |
+
2025-08-14 19:19:00,406 INFO MainThread:1338151 [wandb_run.py:_finish():2146] finishing run chyang25-national-taiwan-university/llm_module_finetuning/1q0db7hd
|
| 30 |
+
2025-08-14 19:19:00,406 INFO MainThread:1338151 [wandb_run.py:_atexit_cleanup():2414] got exitcode: 0
|
| 31 |
+
2025-08-14 19:19:00,406 INFO MainThread:1338151 [wandb_run.py:_restore():2396] restore
|
| 32 |
+
2025-08-14 19:19:00,406 INFO MainThread:1338151 [wandb_run.py:_restore():2402] restore done
|
| 33 |
+
2025-08-14 19:19:04,016 INFO MainThread:1338151 [wandb_run.py:_footer_history_summary_info():3963] rendering history
|
| 34 |
+
2025-08-14 19:19:04,017 INFO MainThread:1338151 [wandb_run.py:_footer_history_summary_info():3995] rendering summary
|
| 35 |
+
2025-08-14 19:19:04,021 INFO MainThread:1338151 [wandb_run.py:_footer_sync_info():3922] logging synced files
|
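Note: the flattened dictionary logged above is the run's effective Trainer configuration (transformers TrainingArguments plus the trl SFT-specific fields such as dataset_text_field, max_seq_length, and packing). As a minimal sketch only, assuming transformers 4.47 as recorded in the log, the core hyperparameters could be reconstructed as follows; this is not the project's actual training script, the output_dir is shortened, and the SFT fields would go into trl's SFTConfig/SFTTrainer rather than TrainingArguments:

from transformers import TrainingArguments

# Sketch of the hyperparameters recorded in the debug log above.
args = TrainingArguments(
    output_dir="SmolLM2-135M-Instruct-finetuned-box-close-v2",  # shortened path
    learning_rate=5e-5,
    num_train_epochs=3,
    per_device_train_batch_size=1,
    per_device_eval_batch_size=1,
    gradient_accumulation_steps=32,   # effective batch size of 32
    warmup_steps=10,
    lr_scheduler_type="linear",
    optim="paged_adamw_32bit",
    bf16=True,
    tf32=True,
    eval_strategy="steps",
    eval_steps=10,
    logging_steps=1,
    save_steps=5000,
    group_by_length=True,
    seed=42,
    report_to=["wandb"],
)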
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/wandb/run-20250814_004704-1q0db7hd/files/config.yaml
ADDED
|
@@ -0,0 +1,748 @@
|
| 1 |
+
_attn_implementation_autoset:
|
| 2 |
+
value: true
|
| 3 |
+
_name_or_path:
|
| 4 |
+
value: HuggingFaceTB/SmolLM2-135M-Instruct
|
| 5 |
+
_target_:
|
| 6 |
+
value: llmbc.workspace.train_llm_workspace.TrainLLMWorkspace
|
| 7 |
+
_wandb:
|
| 8 |
+
value:
|
| 9 |
+
cli_version: 0.18.6
|
| 10 |
+
m:
|
| 11 |
+
- "1": eval/runtime
|
| 12 |
+
"5": 2
|
| 13 |
+
"6":
|
| 14 |
+
- 1
|
| 15 |
+
- 3
|
| 16 |
+
"7": []
|
| 17 |
+
- "1": train/global_step
|
| 18 |
+
"6":
|
| 19 |
+
- 3
|
| 20 |
+
"7": []
|
| 21 |
+
- "1": eval/samples_per_second
|
| 22 |
+
"5": 2
|
| 23 |
+
"6":
|
| 24 |
+
- 1
|
| 25 |
+
- 3
|
| 26 |
+
"7": []
|
| 27 |
+
- "1": eval/steps_per_second
|
| 28 |
+
"5": 2
|
| 29 |
+
"6":
|
| 30 |
+
- 1
|
| 31 |
+
- 3
|
| 32 |
+
"7": []
|
| 33 |
+
- "1": train/grad_norm
|
| 34 |
+
"5": 2
|
| 35 |
+
"6":
|
| 36 |
+
- 1
|
| 37 |
+
- 3
|
| 38 |
+
"7": []
|
| 39 |
+
- "1": train/learning_rate
|
| 40 |
+
"5": 2
|
| 41 |
+
"6":
|
| 42 |
+
- 1
|
| 43 |
+
- 3
|
| 44 |
+
"7": []
|
| 45 |
+
- "1": eval/loss
|
| 46 |
+
"5": 2
|
| 47 |
+
"6":
|
| 48 |
+
- 1
|
| 49 |
+
- 3
|
| 50 |
+
"7": []
|
| 51 |
+
- "1": train/loss
|
| 52 |
+
"5": 2
|
| 53 |
+
"6":
|
| 54 |
+
- 1
|
| 55 |
+
- 3
|
| 56 |
+
"7": []
|
| 57 |
+
- "1": train/epoch
|
| 58 |
+
"5": 2
|
| 59 |
+
"6":
|
| 60 |
+
- 1
|
| 61 |
+
- 3
|
| 62 |
+
"7": []
|
| 63 |
+
python_version: 3.9.20
|
| 64 |
+
t:
|
| 65 |
+
"1":
|
| 66 |
+
- 1
|
| 67 |
+
- 2
|
| 68 |
+
- 3
|
| 69 |
+
- 5
|
| 70 |
+
- 11
|
| 71 |
+
- 12
|
| 72 |
+
- 41
|
| 73 |
+
- 49
|
| 74 |
+
- 50
|
| 75 |
+
- 51
|
| 76 |
+
- 53
|
| 77 |
+
- 55
|
| 78 |
+
- 71
|
| 79 |
+
- 84
|
| 80 |
+
- 98
|
| 81 |
+
"2":
|
| 82 |
+
- 1
|
| 83 |
+
- 2
|
| 84 |
+
- 3
|
| 85 |
+
- 5
|
| 86 |
+
- 11
|
| 87 |
+
- 12
|
| 88 |
+
- 41
|
| 89 |
+
- 49
|
| 90 |
+
- 50
|
| 91 |
+
- 51
|
| 92 |
+
- 53
|
| 93 |
+
- 55
|
| 94 |
+
- 71
|
| 95 |
+
- 84
|
| 96 |
+
- 98
|
| 97 |
+
"3":
|
| 98 |
+
- 2
|
| 99 |
+
- 7
|
| 100 |
+
- 13
|
| 101 |
+
- 15
|
| 102 |
+
- 16
|
| 103 |
+
- 19
|
| 104 |
+
- 23
|
| 105 |
+
- 55
|
| 106 |
+
- 62
|
| 107 |
+
- 66
|
| 108 |
+
"4": 3.9.20
|
| 109 |
+
"5": 0.18.6
|
| 110 |
+
"6": 4.47.1
|
| 111 |
+
"8":
|
| 112 |
+
- 5
|
| 113 |
+
"9":
|
| 114 |
+
"1": transformers_trainer
|
| 115 |
+
"12": 0.18.6
|
| 116 |
+
"13": linux-x86_64
|
| 117 |
+
accelerator_config:
|
| 118 |
+
value:
|
| 119 |
+
dispatch_batches: null
|
| 120 |
+
even_batches: true
|
| 121 |
+
gradient_accumulation_kwargs: null
|
| 122 |
+
non_blocking: false
|
| 123 |
+
split_batches: false
|
| 124 |
+
use_seedable_sampler: true
|
| 125 |
+
action_dim:
|
| 126 |
+
value: 4
|
| 127 |
+
action_mlp_lr:
|
| 128 |
+
value: null
|
| 129 |
+
adafactor:
|
| 130 |
+
value: false
|
| 131 |
+
adam_beta1:
|
| 132 |
+
value: 0.9
|
| 133 |
+
adam_beta2:
|
| 134 |
+
value: 0.999
|
| 135 |
+
adam_epsilon:
|
| 136 |
+
value: 1e-08
|
| 137 |
+
add_cross_attention:
|
| 138 |
+
value: false
|
| 139 |
+
architectures:
|
| 140 |
+
value:
|
| 141 |
+
- LlamaForCausalLM
|
| 142 |
+
attention_bias:
|
| 143 |
+
value: false
|
| 144 |
+
attention_dropout:
|
| 145 |
+
value: 0
|
| 146 |
+
auto_find_batch_size:
|
| 147 |
+
value: false
|
| 148 |
+
average_tokens_across_devices:
|
| 149 |
+
value: false
|
| 150 |
+
bad_words_ids:
|
| 151 |
+
value: null
|
| 152 |
+
batch_eval_metrics:
|
| 153 |
+
value: false
|
| 154 |
+
begin_suppress_tokens:
|
| 155 |
+
value: null
|
| 156 |
+
bf16:
|
| 157 |
+
value: true
|
| 158 |
+
bf16_full_eval:
|
| 159 |
+
value: false
|
| 160 |
+
bos_token_id:
|
| 161 |
+
value: 1
|
| 162 |
+
chars_per_token:
|
| 163 |
+
value: <CHARS_PER_TOKEN>
|
| 164 |
+
chunk_size_feed_forward:
|
| 165 |
+
value: 0
|
| 166 |
+
cross_attention_hidden_size:
|
| 167 |
+
value: null
|
| 168 |
+
data_seed:
|
| 169 |
+
value: null
|
| 170 |
+
dataloader_drop_last:
|
| 171 |
+
value: false
|
| 172 |
+
dataloader_num_workers:
|
| 173 |
+
value: 0
|
| 174 |
+
dataloader_persistent_workers:
|
| 175 |
+
value: false
|
| 176 |
+
dataloader_pin_memory:
|
| 177 |
+
value: true
|
| 178 |
+
dataloader_prefetch_factor:
|
| 179 |
+
value: null
|
| 180 |
+
dataset:
|
| 181 |
+
value:
|
| 182 |
+
test_data_ratio: 0.01
|
| 183 |
+
dataset_batch_size:
|
| 184 |
+
value: 1000
|
| 185 |
+
dataset_num_proc:
|
| 186 |
+
value: null
|
| 187 |
+
dataset_text_field:
|
| 188 |
+
value: text
|
| 189 |
+
ddp_backend:
|
| 190 |
+
value: null
|
| 191 |
+
ddp_broadcast_buffers:
|
| 192 |
+
value: null
|
| 193 |
+
ddp_bucket_cap_mb:
|
| 194 |
+
value: null
|
| 195 |
+
ddp_find_unused_parameters:
|
| 196 |
+
value: null
|
| 197 |
+
ddp_timeout:
|
| 198 |
+
value: 1800
|
| 199 |
+
debug:
|
| 200 |
+
value: []
|
| 201 |
+
decoder_start_token_id:
|
| 202 |
+
value: null
|
| 203 |
+
deepspeed:
|
| 204 |
+
value: null
|
| 205 |
+
disable_tqdm:
|
| 206 |
+
value: false
|
| 207 |
+
dispatch_batches:
|
| 208 |
+
value: null
|
| 209 |
+
diversity_penalty:
|
| 210 |
+
value: 0
|
| 211 |
+
do_eval:
|
| 212 |
+
value: true
|
| 213 |
+
do_predict:
|
| 214 |
+
value: false
|
| 215 |
+
do_sample:
|
| 216 |
+
value: false
|
| 217 |
+
do_train:
|
| 218 |
+
value: false
|
| 219 |
+
early_stopping:
|
| 220 |
+
value: false
|
| 221 |
+
encoder_no_repeat_ngram_size:
|
| 222 |
+
value: 0
|
| 223 |
+
eos_token_id:
|
| 224 |
+
value: 2
|
| 225 |
+
eval_accumulation_steps:
|
| 226 |
+
value: null
|
| 227 |
+
eval_delay:
|
| 228 |
+
value: 0
|
| 229 |
+
eval_do_concat_batches:
|
| 230 |
+
value: true
|
| 231 |
+
eval_on_start:
|
| 232 |
+
value: false
|
| 233 |
+
eval_packing:
|
| 234 |
+
value: null
|
| 235 |
+
eval_steps:
|
| 236 |
+
value: 10
|
| 237 |
+
eval_strategy:
|
| 238 |
+
value: steps
|
| 239 |
+
eval_use_gather_object:
|
| 240 |
+
value: false
|
| 241 |
+
evaluation_strategy:
|
| 242 |
+
value: null
|
| 243 |
+
exp_name:
|
| 244 |
+
value: train llm
|
| 245 |
+
exponential_decay_length_penalty:
|
| 246 |
+
value: null
|
| 247 |
+
finetuning_task:
|
| 248 |
+
value: null
|
| 249 |
+
forced_bos_token_id:
|
| 250 |
+
value: null
|
| 251 |
+
forced_eos_token_id:
|
| 252 |
+
value: null
|
| 253 |
+
fp16:
|
| 254 |
+
value: false
|
| 255 |
+
fp16_backend:
|
| 256 |
+
value: auto
|
| 257 |
+
fp16_full_eval:
|
| 258 |
+
value: false
|
| 259 |
+
fp16_opt_level:
|
| 260 |
+
value: O1
|
| 261 |
+
fsdp:
|
| 262 |
+
value: []
|
| 263 |
+
fsdp_config:
|
| 264 |
+
value:
|
| 265 |
+
min_num_params: 0
|
| 266 |
+
xla: false
|
| 267 |
+
xla_fsdp_grad_ckpt: false
|
| 268 |
+
xla_fsdp_v2: false
|
| 269 |
+
fsdp_min_num_params:
|
| 270 |
+
value: 0
|
| 271 |
+
fsdp_transformer_layer_cls_to_wrap:
|
| 272 |
+
value: null
|
| 273 |
+
full_determinism:
|
| 274 |
+
value: false
|
| 275 |
+
gradient_accumulation_steps:
|
| 276 |
+
value: 32
|
| 277 |
+
gradient_checkpointing:
|
| 278 |
+
value: false
|
| 279 |
+
gradient_checkpointing_kwargs:
|
| 280 |
+
value: null
|
| 281 |
+
greater_is_better:
|
| 282 |
+
value: null
|
| 283 |
+
group_by_length:
|
| 284 |
+
value: true
|
| 285 |
+
half_precision_backend:
|
| 286 |
+
value: auto
|
| 287 |
+
head_dim:
|
| 288 |
+
value: 64
|
| 289 |
+
hidden_act:
|
| 290 |
+
value: silu
|
| 291 |
+
hidden_size:
|
| 292 |
+
value: 576
|
| 293 |
+
horizon:
|
| 294 |
+
value: 1
|
| 295 |
+
hub_always_push:
|
| 296 |
+
value: false
|
| 297 |
+
hub_model_id:
|
| 298 |
+
value: null
|
| 299 |
+
hub_private_repo:
|
| 300 |
+
value: null
|
| 301 |
+
hub_strategy:
|
| 302 |
+
value: every_save
|
| 303 |
+
hub_token:
|
| 304 |
+
value: <HUB_TOKEN>
|
| 305 |
+
id2label:
|
| 306 |
+
value:
|
| 307 |
+
"0": LABEL_0
|
| 308 |
+
"1": LABEL_1
|
| 309 |
+
ignore_data_skip:
|
| 310 |
+
value: false
|
| 311 |
+
include_for_metrics:
|
| 312 |
+
value: []
|
| 313 |
+
include_inputs_for_metrics:
|
| 314 |
+
value: false
|
| 315 |
+
include_num_input_tokens_seen:
|
| 316 |
+
value: false
|
| 317 |
+
include_tokens_per_second:
|
| 318 |
+
value: false
|
| 319 |
+
initializer_range:
|
| 320 |
+
value: 0.041666666666666664
|
| 321 |
+
intermediate_size:
|
| 322 |
+
value: 1536
|
| 323 |
+
is_decoder:
|
| 324 |
+
value: false
|
| 325 |
+
is_encoder_decoder:
|
| 326 |
+
value: false
|
| 327 |
+
is_llama_config:
|
| 328 |
+
value: true
|
| 329 |
+
jit_mode_eval:
|
| 330 |
+
value: false
|
| 331 |
+
joint_obs_action_mlp_lr:
|
| 332 |
+
value: 1e-06
|
| 333 |
+
label_names:
|
| 334 |
+
value: null
|
| 335 |
+
label_smoothing_factor:
|
| 336 |
+
value: 0
|
| 337 |
+
label2id:
|
| 338 |
+
value:
|
| 339 |
+
LABEL_0: 0
|
| 340 |
+
LABEL_1: 1
|
| 341 |
+
learning_rate:
|
| 342 |
+
value: 5e-05
|
| 343 |
+
length_column_name:
|
| 344 |
+
value: length
|
| 345 |
+
length_penalty:
|
| 346 |
+
value: 1
|
| 347 |
+
llm:
|
| 348 |
+
value:
|
| 349 |
+
adaptor_path: data/outputs/2025.06.02/17.59.17_train_llm_lowdim_sweep-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-sweep-v2/checkpoint-4572
|
| 350 |
+
causal_lm_target: llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM
|
| 351 |
+
config_target: llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig
|
| 352 |
+
hydra:
|
| 353 |
+
job:
|
| 354 |
+
override_dirname: HuggingFaceTB/SmolLM2-135M-Instruct
|
| 355 |
+
run:
|
| 356 |
+
dir: data/outputs/2025.08.14/00.47.03_HuggingFaceTB/SmolLM2-135M-Instruct
|
| 357 |
+
load_from_checkpoint: false
|
| 358 |
+
load_from_mlp_projector_checkpoint: true
|
| 359 |
+
lora_config:
|
| 360 |
+
bias: none
|
| 361 |
+
lora_alpha: 64
|
| 362 |
+
lora_dropout: 0.05
|
| 363 |
+
r: 32
|
| 364 |
+
task_type: CAUSAL_LM
|
| 365 |
+
max_length: 100
|
| 366 |
+
mlp_projector_checkpoint_path: data/outputs/2025.08.11/10.45.37_train_mlp_projector_metaworld/checkpoints/latest.ckpt
|
| 367 |
+
model_name: SmolLM2-135M-Instruct
|
| 368 |
+
name: HuggingFaceTB/SmolLM2-135M-Instruct
|
| 369 |
+
normalizer_checkpoint_path: ""
|
| 370 |
+
prompter:
|
| 371 |
+
_target_: llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter
|
| 372 |
+
use_joint_mlp_projector: true
|
| 373 |
+
use_joint_mlp_projector: true
|
| 374 |
+
use_linear_normalizer: false
|
| 375 |
+
use_orig_model: false
|
| 376 |
+
use_quantization: false
|
| 377 |
+
use_raw_llm: false
|
| 378 |
+
load_best_model_at_end:
|
| 379 |
+
value: false
|
| 380 |
+
local_rank:
|
| 381 |
+
value: 0
|
| 382 |
+
log_level:
|
| 383 |
+
value: passive
|
| 384 |
+
log_level_replica:
|
| 385 |
+
value: warning
|
| 386 |
+
log_on_each_node:
|
| 387 |
+
value: true
|
| 388 |
+
logging:
|
| 389 |
+
value:
|
| 390 |
+
group: null
|
| 391 |
+
id: null
|
| 392 |
+
mode: online
|
| 393 |
+
name: 2025.08.14-00.47.03_train_llm_lowdim_box-close-v2
|
| 394 |
+
project: llm_module_finetuning
|
| 395 |
+
resume: true
|
| 396 |
+
tags:
|
| 397 |
+
- train_llm_lowdim
|
| 398 |
+
- box-close-v2
|
| 399 |
+
- train llm
|
| 400 |
+
logging_dir:
|
| 401 |
+
value: /home/chyang/workspace/LLM-BC/data/outputs/2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2/runs/Aug14_00-52-02_A6000-2
|
| 402 |
+
logging_first_step:
|
| 403 |
+
value: false
|
| 404 |
+
logging_nan_inf_filter:
|
| 405 |
+
value: true
|
| 406 |
+
logging_steps:
|
| 407 |
+
value: 1
|
| 408 |
+
logging_strategy:
|
| 409 |
+
value: steps
|
| 410 |
+
lora_config:
|
| 411 |
+
value:
|
| 412 |
+
bias: none
|
| 413 |
+
lora_alpha: 64
|
| 414 |
+
lora_dropout: 0.05
|
| 415 |
+
r: 32
|
| 416 |
+
task_type: CAUSAL_LM
|
| 417 |
+
lr_scheduler_type:
|
| 418 |
+
value: linear
|
| 419 |
+
max_grad_norm:
|
| 420 |
+
value: 1
|
| 421 |
+
max_length:
|
| 422 |
+
value: 20
|
| 423 |
+
max_position_embeddings:
|
| 424 |
+
value: 8192
|
| 425 |
+
max_seq_length:
|
| 426 |
+
value: 100
|
| 427 |
+
max_steps:
|
| 428 |
+
value: -1
|
| 429 |
+
metric_for_best_model:
|
| 430 |
+
value: null
|
| 431 |
+
min_length:
|
| 432 |
+
value: 0
|
| 433 |
+
mlp_bias:
|
| 434 |
+
value: false
|
| 435 |
+
model/num_parameters:
|
| 436 |
+
value: 144624384
|
| 437 |
+
model_init_kwargs:
|
| 438 |
+
value: null
|
| 439 |
+
model_name:
|
| 440 |
+
value: HuggingFaceTB/SmolLM2-135M-Instruct
|
| 441 |
+
model_type:
|
| 442 |
+
value: llama_lowdim
|
| 443 |
+
mp_parameters:
|
| 444 |
+
value: ""
|
| 445 |
+
multi_run:
|
| 446 |
+
value:
|
| 447 |
+
run_dir: data/outputs/2025.08.14/00.47.03_train_llm_lowdim_box-close-v2
|
| 448 |
+
wandb_name_base: 2025.08.14-00.47.03_train_llm_lowdim_box-close-v2
|
| 449 |
+
n_action_steps:
|
| 450 |
+
value: 1
|
| 451 |
+
n_obs_steps:
|
| 452 |
+
value: 1
|
| 453 |
+
name:
|
| 454 |
+
value: train_llm_lowdim
|
| 455 |
+
neftune_noise_alpha:
|
| 456 |
+
value: null
|
| 457 |
+
no_cuda:
|
| 458 |
+
value: false
|
| 459 |
+
no_repeat_ngram_size:
|
| 460 |
+
value: 0
|
| 461 |
+
num_attention_heads:
|
| 462 |
+
value: 9
|
| 463 |
+
num_beam_groups:
|
| 464 |
+
value: 1
|
| 465 |
+
num_beams:
|
| 466 |
+
value: 1
|
| 467 |
+
num_hidden_layers:
|
| 468 |
+
value: 30
|
| 469 |
+
num_key_value_heads:
|
| 470 |
+
value: 3
|
| 471 |
+
num_of_sequences:
|
| 472 |
+
value: 1024
|
| 473 |
+
num_return_sequences:
|
| 474 |
+
value: 1
|
| 475 |
+
num_train_epochs:
|
| 476 |
+
value: 3
|
| 477 |
+
obs_dim:
|
| 478 |
+
value: 9
|
| 479 |
+
obs_mlp_lr:
|
| 480 |
+
value: null
|
| 481 |
+
optim:
|
| 482 |
+
value: paged_adamw_32bit
|
| 483 |
+
optim_args:
|
| 484 |
+
value: null
|
| 485 |
+
optim_target_modules:
|
| 486 |
+
value: null
|
| 487 |
+
output_attentions:
|
| 488 |
+
value: false
|
| 489 |
+
output_dir:
|
| 490 |
+
value: /home/chyang/workspace/LLM-BC/data/outputs/2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2
|
| 491 |
+
output_hidden_states:
|
| 492 |
+
value: false
|
| 493 |
+
output_scores:
|
| 494 |
+
value: false
|
| 495 |
+
overwrite_output_dir:
|
| 496 |
+
value: false
|
| 497 |
+
packing:
|
| 498 |
+
value: false
|
| 499 |
+
pad_token_id:
|
| 500 |
+
value: 2
|
| 501 |
+
past_index:
|
| 502 |
+
value: -1
|
| 503 |
+
peft_config:
|
| 504 |
+
value:
|
| 505 |
+
default:
|
| 506 |
+
auto_mapping: null
|
| 507 |
+
base_model_name_or_path: HuggingFaceTB/SmolLM2-135M-Instruct
|
| 508 |
+
bias: none
|
| 509 |
+
eva_config: null
|
| 510 |
+
exclude_modules: null
|
| 511 |
+
fan_in_fan_out: false
|
| 512 |
+
inference_mode: false
|
| 513 |
+
init_lora_weights: true
|
| 514 |
+
layer_replication: null
|
| 515 |
+
layers_pattern: null
|
| 516 |
+
layers_to_transform: null
|
| 517 |
+
lora_alpha: 64
|
| 518 |
+
lora_bias: false
|
| 519 |
+
lora_dropout: 0.05
|
| 520 |
+
megatron_config: null
|
| 521 |
+
megatron_core: megatron.core
|
| 522 |
+
modules_to_save: null
|
| 523 |
+
peft_type: LORA
|
| 524 |
+
r: 32
|
| 525 |
+
revision: null
|
| 526 |
+
runtime_config:
|
| 527 |
+
ephemeral_gpu_offload: false
|
| 528 |
+
target_modules:
|
| 529 |
+
- o_proj
|
| 530 |
+
- v_proj
|
| 531 |
+
- q_proj
|
| 532 |
+
- k_proj
|
| 533 |
+
- up_proj
|
| 534 |
+
- down_proj
|
| 535 |
+
- gate_proj
|
| 536 |
+
task_type: CAUSAL_LM
|
| 537 |
+
use_dora: false
|
| 538 |
+
use_rslora: false
|
| 539 |
+
per_device_eval_batch_size:
|
| 540 |
+
value: 1
|
| 541 |
+
per_device_train_batch_size:
|
| 542 |
+
value: 1
|
| 543 |
+
per_gpu_eval_batch_size:
|
| 544 |
+
value: null
|
| 545 |
+
per_gpu_train_batch_size:
|
| 546 |
+
value: null
|
| 547 |
+
prediction_loss_only:
|
| 548 |
+
value: false
|
| 549 |
+
prefix:
|
| 550 |
+
value: null
|
| 551 |
+
pretraining_tp:
|
| 552 |
+
value: 1
|
| 553 |
+
problem_type:
|
| 554 |
+
value: null
|
| 555 |
+
push_to_hub:
|
| 556 |
+
value: false
|
| 557 |
+
push_to_hub_model_id:
|
| 558 |
+
value: null
|
| 559 |
+
push_to_hub_organization:
|
| 560 |
+
value: null
|
| 561 |
+
push_to_hub_token:
|
| 562 |
+
value: <PUSH_TO_HUB_TOKEN>
|
| 563 |
+
ray_scope:
|
| 564 |
+
value: last
|
| 565 |
+
remove_invalid_values:
|
| 566 |
+
value: false
|
| 567 |
+
remove_unused_columns:
|
| 568 |
+
value: true
|
| 569 |
+
repetition_penalty:
|
| 570 |
+
value: 1
|
| 571 |
+
report_to:
|
| 572 |
+
value:
|
| 573 |
+
- wandb
|
| 574 |
+
restore_callback_states_from_checkpoint:
|
| 575 |
+
value: false
|
| 576 |
+
resume_from_checkpoint:
|
| 577 |
+
value: null
|
| 578 |
+
return_dict:
|
| 579 |
+
value: true
|
| 580 |
+
return_dict_in_generate:
|
| 581 |
+
value: false
|
| 582 |
+
rms_norm_eps:
|
| 583 |
+
value: 1e-05
|
| 584 |
+
rope_interleaved:
|
| 585 |
+
value: false
|
| 586 |
+
rope_scaling:
|
| 587 |
+
value: null
|
| 588 |
+
rope_theta:
|
| 589 |
+
value: 100000
|
| 590 |
+
run_name:
|
| 591 |
+
value: /home/chyang/workspace/LLM-BC/data/outputs/2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-box-close-v2
|
| 592 |
+
save_on_each_node:
|
| 593 |
+
value: false
|
| 594 |
+
save_only_model:
|
| 595 |
+
value: false
|
| 596 |
+
save_safetensors:
|
| 597 |
+
value: true
|
| 598 |
+
save_steps:
|
| 599 |
+
value: 5000
|
| 600 |
+
save_strategy:
|
| 601 |
+
value: steps
|
| 602 |
+
save_total_limit:
|
| 603 |
+
value: null
|
| 604 |
+
seed:
|
| 605 |
+
value: 42
|
| 606 |
+
sep_token_id:
|
| 607 |
+
value: null
|
| 608 |
+
skip_memory_metrics:
|
| 609 |
+
value: true
|
| 610 |
+
split_batches:
|
| 611 |
+
value: null
|
| 612 |
+
suppress_tokens:
|
| 613 |
+
value: null
|
| 614 |
+
task:
|
| 615 |
+
value:
|
| 616 |
+
action_dim: 4
|
| 617 |
+
dataset:
|
| 618 |
+
_target_: llmbc.dataset.metaworld_lowdim_dataset.MetaworldLowdimDataset
|
| 619 |
+
data_path: datasets/box-close-v2-general-shuf.pt
|
| 620 |
+
data_path2: datasets/box-close-v2-shuf.pt
|
| 621 |
+
dummy_normalizer: false
|
| 622 |
+
horizon: 1
|
| 623 |
+
obs_eef_target: true
|
| 624 |
+
pad_after: 0
|
| 625 |
+
pad_before: 0
|
| 626 |
+
use_manual_normalizer: false
|
| 627 |
+
val_ratio: 0.05
|
| 628 |
+
env_runner:
|
| 629 |
+
_target_: llmbc.env_runner.metaworld_lowdim_runner.MetaworldLowdimRunner
|
| 630 |
+
env_name: llf-metaworld-box-close-v2
|
| 631 |
+
feedback_type:
|
| 632 |
+
- hp
|
| 633 |
+
- hn
|
| 634 |
+
- fp
|
| 635 |
+
instruction_type: b
|
| 636 |
+
max_steps: 30
|
| 637 |
+
n_action_steps: 1
|
| 638 |
+
n_obs_steps: 1
|
| 639 |
+
visual: false
|
| 640 |
+
instructor:
|
| 641 |
+
_target_: llmbc.translator.instructor.metaworld_instructor.box_close_v2_instructor.BoxCloseV2Instructor
|
| 642 |
+
name: box-close-v2
|
| 643 |
+
obs_dim: 9
|
| 644 |
+
task_name:
|
| 645 |
+
value: box-close-v2
|
| 646 |
+
task_specific_params:
|
| 647 |
+
value: null
|
| 648 |
+
temperature:
|
| 649 |
+
value: 1
|
| 650 |
+
tf_legacy_loss:
|
| 651 |
+
value: false
|
| 652 |
+
tf32:
|
| 653 |
+
value: true
|
| 654 |
+
tie_encoder_decoder:
|
| 655 |
+
value: false
|
| 656 |
+
tie_word_embeddings:
|
| 657 |
+
value: true
|
| 658 |
+
tokenizer_class:
|
| 659 |
+
value: null
|
| 660 |
+
top_k:
|
| 661 |
+
value: 50
|
| 662 |
+
top_p:
|
| 663 |
+
value: 1
|
| 664 |
+
torch_compile:
|
| 665 |
+
value: false
|
| 666 |
+
torch_compile_backend:
|
| 667 |
+
value: null
|
| 668 |
+
torch_compile_mode:
|
| 669 |
+
value: null
|
| 670 |
+
torch_dtype:
|
| 671 |
+
value: bfloat16
|
| 672 |
+
torch_empty_cache_steps:
|
| 673 |
+
value: null
|
| 674 |
+
torchdynamo:
|
| 675 |
+
value: null
|
| 676 |
+
torchscript:
|
| 677 |
+
value: false
|
| 678 |
+
tpu_metrics_debug:
|
| 679 |
+
value: false
|
| 680 |
+
tpu_num_cores:
|
| 681 |
+
value: null
|
| 682 |
+
trainer:
|
| 683 |
+
value:
|
| 684 |
+
action_dim: 4
|
| 685 |
+
dataset_text_field: text
|
| 686 |
+
max_seq_length: 100
|
| 687 |
+
obs_dim: 9
|
| 688 |
+
packing: false
|
| 689 |
+
use_joint_mlp_projector: true
|
| 690 |
+
training:
|
| 691 |
+
value:
|
| 692 |
+
bf16: true
|
| 693 |
+
eval_steps: 10
|
| 694 |
+
eval_strategy: steps
|
| 695 |
+
fp16: false
|
| 696 |
+
gradient_accumulation_steps: 32
|
| 697 |
+
group_by_length: true
|
| 698 |
+
joint_obs_action_mlp_lr: 1e-06
|
| 699 |
+
learning_rate: 5e-05
|
| 700 |
+
logging_steps: 1
|
| 701 |
+
logging_strategy: steps
|
| 702 |
+
num_train_epochs: 3
|
| 703 |
+
optim: paged_adamw_32bit
|
| 704 |
+
per_device_eval_batch_size: 1
|
| 705 |
+
per_device_train_batch_size: 1
|
| 706 |
+
report_to: wandb
|
| 707 |
+
save_steps: 5000
|
| 708 |
+
seed: 42
|
| 709 |
+
tf32: true
|
| 710 |
+
use_joint_mlp_projector: true
|
| 711 |
+
warmup_steps: 10
|
| 712 |
+
transformers.js_config:
|
| 713 |
+
value:
|
| 714 |
+
kv_cache_dtype:
|
| 715 |
+
fp16: float16
|
| 716 |
+
q4f16: float16
|
| 717 |
+
transformers_version:
|
| 718 |
+
value: 4.47.1
|
| 719 |
+
typical_p:
|
| 720 |
+
value: 1
|
| 721 |
+
use_bfloat16:
|
| 722 |
+
value: false
|
| 723 |
+
use_cache:
|
| 724 |
+
value: false
|
| 725 |
+
use_cpu:
|
| 726 |
+
value: false
|
| 727 |
+
use_ipex:
|
| 728 |
+
value: false
|
| 729 |
+
use_joint_mlp_projector:
|
| 730 |
+
value: true
|
| 731 |
+
use_legacy_prediction_loop:
|
| 732 |
+
value: false
|
| 733 |
+
use_liger:
|
| 734 |
+
value: false
|
| 735 |
+
use_liger_kernel:
|
| 736 |
+
value: false
|
| 737 |
+
use_mps_device:
|
| 738 |
+
value: false
|
| 739 |
+
use_quantization:
|
| 740 |
+
value: false
|
| 741 |
+
vocab_size:
|
| 742 |
+
value: 49152
|
| 743 |
+
warmup_ratio:
|
| 744 |
+
value: 0
|
| 745 |
+
warmup_steps:
|
| 746 |
+
value: 10
|
| 747 |
+
weight_decay:
|
| 748 |
+
value: 0
|
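Note: the 748-line block above is the W&B-logged copy of the run configuration, with each key nested under a "value" entry. The same values can be read back programmatically; a minimal sketch, assuming the relative path from this commit's layout:

import yaml

with open("wandb/run-20250814_004704-1q0db7hd/files/config.yaml") as f:
    cfg = yaml.safe_load(f)

# W&B stores each config key under a nested "value" entry.
print(cfg["learning_rate"]["value"])                 # 5e-05
print(cfg["gradient_accumulation_steps"]["value"])   # 32
print(cfg["peft_config"]["value"]["default"]["r"])   # LoRA rank 32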
2025.08.14/00.47.03_train_llm_lowdim_box-close-v2/wandb/run-20250814_004704-1q0db7hd/files/output.log
ADDED
|
The diff for this file is too large to render.
See raw diff
|