CHYang25 committed on
Commit 7a2f102 · verified · 1 Parent(s): 37fad31

Upload folder using huggingface_hub

Files changed (23)
  1. .gitattributes +1 -0
  2. 2026.01.28/00.20.31_train_mlp_projector_box-close-v2/.hydra/config.yaml +138 -0
  3. 2026.01.28/00.20.31_train_mlp_projector_box-close-v2/.hydra/hydra.yaml +154 -0
  4. 2026.01.28/00.20.31_train_mlp_projector_box-close-v2/.hydra/overrides.yaml +1 -0
  5. 2026.01.28/00.20.31_train_mlp_projector_box-close-v2/checkpoints/epoch=0093-val_loss=2.191.ckpt +3 -0
  6. 2026.01.28/00.20.31_train_mlp_projector_box-close-v2/checkpoints/epoch=0096-val_loss=2.189.ckpt +3 -0
  7. 2026.01.28/00.20.31_train_mlp_projector_box-close-v2/checkpoints/epoch=0097-val_loss=2.189.ckpt +3 -0
  8. 2026.01.28/00.20.31_train_mlp_projector_box-close-v2/checkpoints/epoch=0098-val_loss=2.186.ckpt +3 -0
  9. 2026.01.28/00.20.31_train_mlp_projector_box-close-v2/checkpoints/epoch=0099-val_loss=2.190.ckpt +3 -0
  10. 2026.01.28/00.20.31_train_mlp_projector_box-close-v2/checkpoints/latest.ckpt +3 -0
  11. 2026.01.28/00.20.31_train_mlp_projector_box-close-v2/logs.json.txt +0 -0
  12. 2026.01.28/00.20.31_train_mlp_projector_box-close-v2/train.log +6 -0
  13. 2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/debug-internal.log +20 -0
  14. 2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/debug.log +27 -0
  15. 2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/run-20260128_002042-d9kt1a6y/files/config.yaml +242 -0
  16. 2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/run-20260128_002042-d9kt1a6y/files/output.log +1 -0
  17. 2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/run-20260128_002042-d9kt1a6y/files/wandb-metadata.json +106 -0
  18. 2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/run-20260128_002042-d9kt1a6y/files/wandb-summary.json +1 -0
  19. 2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/run-20260128_002042-d9kt1a6y/logs/debug-core.log +14 -0
  20. 2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/run-20260128_002042-d9kt1a6y/logs/debug-internal.log +20 -0
  21. 2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/run-20260128_002042-d9kt1a6y/logs/debug.log +27 -0
  22. 2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/run-20260128_002042-d9kt1a6y/run-d9kt1a6y.wandb +3 -0
  23. 2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/wandb-resume.json +1 -0
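The commit message above says this folder was uploaded with huggingface_hub. For reference, a minimal sketch of such an upload, assuming a hypothetical repo_id (the target repository is not named in this diff):

    from huggingface_hub import HfApi

    api = HfApi()
    # Uploads the whole Hydra output directory in one commit; large binaries
    # (checkpoints, the .wandb stream) go through Git LFS, which is why
    # .gitattributes gains a new rule in this commit.
    api.upload_folder(
        folder_path="data/outputs/2026.01.28/00.20.31_train_mlp_projector_box-close-v2",
        path_in_repo="2026.01.28/00.20.31_train_mlp_projector_box-close-v2",
        repo_id="CHYang25/llm-bc-outputs",  # hypothetical; not shown in the diff
        commit_message="Upload folder using huggingface_hub",
    )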
.gitattributes CHANGED
@@ -125,3 +125,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  2026.01.26/10.28.22_train_llm_lowdim_box-close-v2/wandb/run-20260126_102831-c6kg9o9b/run-c6kg9o9b.wandb filter=lfs diff=lfs merge=lfs -text
  2026.01.28/11.02.01_train_llm_lowdim_box-close-v2/wandb/run-20260128_110208-3y5kjuul/run-3y5kjuul.wandb filter=lfs diff=lfs merge=lfs -text
  2026.01.28/11.51.52_train_llm_lowdim_box-close-v2/wandb/run-20260128_115158-aqxmr1ik/run-aqxmr1ik.wandb filter=lfs diff=lfs merge=lfs -text
+ 2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/run-20260128_002042-d9kt1a6y/run-d9kt1a6y.wandb filter=lfs diff=lfs merge=lfs -text
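The added rule routes the new run's run-d9kt1a6y.wandb stream through Git LFS, so the repository itself stores only a small pointer (version, oid, size; the same three-line format as the checkpoint files below) while the roughly 55 MB binary lives in LFS storage.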
2026.01.28/00.20.31_train_mlp_projector_box-close-v2/.hydra/config.yaml ADDED
@@ -0,0 +1,138 @@
+ name: train_mlp_projector
+ _target_: llmbc.workspace.train_mlp_projector_workspace.TrainMlpProjectorWorkspace
+ obs_dim: ${task.obs_dim}
+ action_dim: ${task.action_dim}
+ task_name: ${task.name}
+ exp_name: default
+ model_name: ${llm.name}
+ horizon: 1
+ n_obs_steps: 1
+ n_action_steps: 1
+ n_latency_steps: 0
+ past_action_visible: false
+ llm_translator:
+   _target_: llmbc.translator.llm_translator.LLMTranslator
+   cfg: ${llm}
+   obs_dim: ${task.obs_dim}
+   action_dim: ${task.action_dim}
+   horizon: ${horizon}
+   n_obs_steps: ${n_obs_steps}
+   n_action_steps: ${n_action_steps}
+ dataloader:
+   batch_size: 128
+   num_workers: 0
+   shuffle: true
+   pin_memory: false
+   persistent_workers: false
+ val_dataloader:
+   batch_size: 128
+   num_workers: 0
+   shuffle: true
+   pin_memory: false
+   persistent_workers: false
+ optimizer:
+   _target_: torch.optim.Adam
+   lr: 0.0001
+   betas:
+   - 0.95
+   - 0.999
+   eps: 1.0e-08
+   weight_decay: 1.0e-06
+ training:
+   device: cuda
+   seed: 42
+   debug: false
+   resume: true
+   lr_scheduler: cosine
+   lr_warmup_steps: 10
+   num_epochs: 100
+   gradient_accumulate_every: 2
+   grad_norm_clip: 1
+   enable_normalizer: true
+   checkpoint_every: 1
+   val_every: 1
+   sample_every: 1
+   sample_max_batch: 128
+   max_train_steps: null
+   max_val_steps: null
+   tqdm_interval_sec: 1.0
+ logging:
+   project: llm_module_training
+   resume: true
+   mode: online
+   name: ${now:%Y.%m.%d-%H.%M.%S}_${name}_${task_name}
+   tags:
+   - ${name}
+   - ${task_name}
+   - ${exp_name}
+   id: null
+   group: null
+ checkpoint:
+   topk:
+     monitor_key: val_loss
+     mode: min
+     k: 5
+     format_str: epoch={epoch:04d}-val_loss={val_loss:.3f}.ckpt
+   save_last_ckpt: true
+   save_last_snapshot: false
+ multi_run:
+   run_dir: data/outputs/${now:%Y.%m.%d}/${now:%H.%M.%S}_${name}_${task_name}
+   wandb_name_base: ${now:%Y.%m.%d-%H.%M.%S}_${name}_${task_name}
+ task:
+   name: box-close-v2
+   obs_dim: 9
+   action_dim: 4
+   env_runner:
+     _target_: llmbc.env_runner.metaworld_lowdim_runner.MetaworldLowdimRunner
+     env_name: llf-metaworld-box-close-v2
+     n_train: 10
+     n_test: 50
+     n_envs: 10
+     max_steps: 30
+     n_obs_steps: ${n_obs_steps}
+     n_action_steps: ${n_action_steps}
+     instruction_type: b
+     feedback_type:
+     - hp
+     - hn
+     - fp
+     visual: false
+     discount: 0.9
+   dataset:
+     _target_: llmbc.dataset.metaworld_lowdim_dataset.MetaworldLowdimDataset
+     data_path: datasets/box-close-v2-general-o4-mini.pt
+     data_path2: datasets/box-close-v2-o4-mini.pt
+     horizon: ${horizon}
+     pad_before: ${eval:'${n_obs_steps}-1'}
+     pad_after: ${eval:'${n_action_steps}-1'}
+     obs_eef_target: true
+     use_manual_normalizer: false
+     val_ratio: 0.05
+     dummy_normalizer: true
+   instructor:
+     _target_: llmbc.translator.instructor.metaworld_instructor.box_close_v2_instructor.BoxCloseV2Instructor
+ llm:
+   name: HuggingFaceTB/SmolLM2-135M-Instruct
+   model_name: SmolLM2-135M-Instruct
+   config_target: llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig
+   causal_lm_target: llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM
+   use_quantization: false
+   use_joint_mlp_projector: true
+   llm_mode: pretrained
+   finetune_mode: orig
+   checkpoint: ''
+   max_length: 100
+   lora_config:
+     r: 32
+     lora_alpha: 64
+     lora_dropout: 0.05
+     bias: none
+     task_type: CAUSAL_LM
+   prompter:
+     _target_: llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter
+     use_joint_mlp_projector: true
+ hydra:
+   job:
+     override_dirname: ${model_name}
+   run:
+     dir: data/outputs/${now:%Y.%m.%d}/${now:%H.%M.%S}_${model_name}
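Hydra consumes this config through its _target_ convention: each _target_ names an importable class, and the sibling keys become constructor arguments. A minimal sketch of how a train.py entry point could resolve the top-level workspace; only the config path and name are taken from this run's metadata, and the constructor call is an assumption about llmbc, not confirmed by this diff:

    import hydra

    @hydra.main(version_base="1.2", config_path="config/lang_source",
                config_name="llmbc_mlp_projector_box-close-v2_o4-mini.yaml")
    def main(cfg):
        # get_class() imports TrainMlpProjectorWorkspace from the _target_
        # string; passing the whole cfg to its constructor is assumed.
        workspace_cls = hydra.utils.get_class(cfg["_target_"])
        workspace = workspace_cls(cfg)
        workspace.run()  # hypothetical entry-point method

    if __name__ == "__main__":
        main()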
2026.01.28/00.20.31_train_mlp_projector_box-close-v2/.hydra/hydra.yaml ADDED
@@ -0,0 +1,154 @@
+ hydra:
+   run:
+     dir: data/outputs/${now:%Y.%m.%d}/${now:%H.%M.%S}_${name}_${task_name}
+   sweep:
+     dir: data/outputs/${now:%Y.%m.%d}/${now:%H.%M.%S}_${name}_${task_name}
+     subdir: ${hydra.job.num}
+   launcher:
+     _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+   sweeper:
+     _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+     max_batch_size: null
+     params: null
+   help:
+     app_name: ${hydra.job.name}
+     header: '${hydra.help.app_name} is powered by Hydra.
+
+       '
+     footer: 'Powered by Hydra (https://hydra.cc)
+
+       Use --hydra-help to view Hydra specific help
+
+       '
+     template: '${hydra.help.header}
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (group=option)
+
+
+       $APP_CONFIG_GROUPS
+
+
+       == Config ==
+
+       Override anything in the config (foo.bar=value)
+
+
+       $CONFIG
+
+
+       ${hydra.help.footer}
+
+       '
+   hydra_help:
+     template: 'Hydra (${hydra.runtime.version})
+
+       See https://hydra.cc for more info.
+
+
+       == Flags ==
+
+       $FLAGS_HELP
+
+
+       == Configuration groups ==
+
+       Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+       to command line)
+
+
+       $HYDRA_CONFIG_GROUPS
+
+
+       Use ''--cfg hydra'' to Show the Hydra config.
+
+       '
+     hydra_help: ???
+   hydra_logging:
+     version: 1
+     formatters:
+       simple:
+         format: '[%(asctime)s][HYDRA] %(message)s'
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: simple
+         stream: ext://sys.stdout
+     root:
+       level: INFO
+       handlers:
+       - console
+     loggers:
+       logging_example:
+         level: DEBUG
+     disable_existing_loggers: false
+   job_logging:
+     version: 1
+     formatters:
+       simple:
+         format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+     handlers:
+       console:
+         class: logging.StreamHandler
+         formatter: simple
+         stream: ext://sys.stdout
+       file:
+         class: logging.FileHandler
+         formatter: simple
+         filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
+     root:
+       level: INFO
+       handlers:
+       - console
+       - file
+     disable_existing_loggers: false
+   env: {}
+   mode: RUN
+   searchpath: []
+   callbacks: {}
+   output_subdir: .hydra
+   overrides:
+     hydra:
+     - hydra.mode=RUN
+     task: []
+   job:
+     name: train
+     chdir: null
+     override_dirname: ''
+     id: ???
+     num: ???
+     config_name: llmbc_mlp_projector_box-close-v2_o4-mini.yaml
+     env_set: {}
+     env_copy: []
+     config:
+       override_dirname:
+         kv_sep: '='
+         item_sep: ','
+         exclude_keys: []
+   runtime:
+     version: 1.2.0
+     version_base: '1.2'
+     cwd: /work/u1131674/LLM-BC
+     config_sources:
+     - path: hydra.conf
+       schema: pkg
+       provider: hydra
+     - path: /work/u1131674/LLM-BC/config/lang_source
+       schema: file
+       provider: main
+     - path: ''
+       schema: structured
+       provider: schema
+     output_dir: /work/u1131674/LLM-BC/data/outputs/2026.01.28/00.20.31_train_mlp_projector_box-close-v2
+     choices:
+       hydra/env: default
+       hydra/callbacks: null
+       hydra/job_logging: default
+       hydra/hydra_logging: default
+       hydra/hydra_help: default
+       hydra/help: default
+       hydra/sweeper: basic
+       hydra/launcher: basic
+       hydra/output: default
+   verbose: false
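hydra.yaml records everything needed to recompose the run's configuration offline: the working directory /work/u1131674/LLM-BC, the config source config/lang_source, the config name, and (per the overrides.yaml below) no command-line overrides. A sketch using Hydra's compose API, assuming it is run from the LLM-BC checkout:

    from hydra import compose, initialize
    from omegaconf import OmegaConf

    # config_path is relative to this script; overrides=[] mirrors the
    # empty .hydra/overrides.yaml recorded for this run.
    with initialize(version_base="1.2", config_path="config/lang_source"):
        cfg = compose(config_name="llmbc_mlp_projector_box-close-v2_o4-mini",
                      overrides=[])
    print(OmegaConf.to_yaml(cfg))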
2026.01.28/00.20.31_train_mlp_projector_box-close-v2/.hydra/overrides.yaml ADDED
@@ -0,0 +1 @@
+ []
2026.01.28/00.20.31_train_mlp_projector_box-close-v2/checkpoints/epoch=0093-val_loss=2.191.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c2e0a98bb77bf92c36e628ab66933ebb6a0f5c409800890b9dc261541581766
+ size 4110822
2026.01.28/00.20.31_train_mlp_projector_box-close-v2/checkpoints/epoch=0096-val_loss=2.189.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1d30704ee3eae4bbb3d76f86e632315fc2c74f62a7fd3488744bb1cd5bd5a83b
+ size 4110822
2026.01.28/00.20.31_train_mlp_projector_box-close-v2/checkpoints/epoch=0097-val_loss=2.189.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a269c54f1cb469981f7d47c05e5c94945d5bea4c549e2463b9cbf4af9c8382bf
+ size 4110822
2026.01.28/00.20.31_train_mlp_projector_box-close-v2/checkpoints/epoch=0098-val_loss=2.186.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d87b6064f6b6d606df21820412b92e036664f0e8b8497d93480cf02776340180
+ size 4110822
2026.01.28/00.20.31_train_mlp_projector_box-close-v2/checkpoints/epoch=0099-val_loss=2.190.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f020bfaf26ab3446ecac695500c3c256cbfdf28541876f8858216f7ed58a083c
+ size 4110822
2026.01.28/00.20.31_train_mlp_projector_box-close-v2/checkpoints/latest.ckpt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f020bfaf26ab3446ecac695500c3c256cbfdf28541876f8858216f7ed58a083c
+ size 4110822
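Each checkpoint is an LFS pointer to a ~4.1 MB payload, far smaller than the 135M-parameter LLM, consistent with this workspace checkpointing only the MLP projector. (latest.ckpt shares the oid of epoch 99's checkpoint, so it is byte-identical to the final epoch.) A sketch of fetching and opening one, assuming a hypothetical repo_id and a standard torch-serialized payload:

    import torch
    from huggingface_hub import hf_hub_download

    path = hf_hub_download(
        repo_id="CHYang25/llm-bc-outputs",  # hypothetical; not shown in the diff
        filename="2026.01.28/00.20.31_train_mlp_projector_box-close-v2/checkpoints/latest.ckpt",
    )
    ckpt = torch.load(path, map_location="cpu")
    print(type(ckpt))  # payload layout is defined by TrainMlpProjectorWorkspace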
2026.01.28/00.20.31_train_mlp_projector_box-close-v2/logs.json.txt ADDED
The diff for this file is too large to render. See raw diff
 
2026.01.28/00.20.31_train_mlp_projector_box-close-v2/train.log ADDED
@@ -0,0 +1,6 @@
+ [2026-01-28 00:20:33,669][numexpr.utils][INFO] - Note: detected 112 virtual cores but NumExpr set to maximum of 64, check "NUMEXPR_MAX_THREADS" environment variable.
+ [2026-01-28 00:20:33,669][numexpr.utils][INFO] - Note: NumExpr detected 112 cores but "NUMEXPR_MAX_THREADS" not set, so enforcing safe limit of 16.
+ [2026-01-28 00:20:33,669][numexpr.utils][INFO] - NumExpr defaulting to 16 threads.
+ [2026-01-28 00:20:38,172][datasets][INFO] - PyTorch version 2.2.2 available.
+ [2026-01-28 00:20:38,173][datasets][INFO] - TensorFlow version 2.15.1 available.
+ [2026-01-28 00:20:38,174][datasets][INFO] - JAX version 0.4.30 available.
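The NumExpr notes are benign: with 112 visible cores and NUMEXPR_MAX_THREADS unset, NumExpr enforces its safe default of 16 threads (out of an internal maximum of 64). To size it explicitly, the variable must be set before numexpr is first imported; a sketch sized to this job's Slurm allocation of cpus_per_task=8:

    import os

    # Must be set before numexpr (or anything that imports it, such as
    # pandas) is loaded for the first time in the process.
    os.environ["NUMEXPR_MAX_THREADS"] = "8"

    import numexpr  # now initializes with the requested thread cap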
2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/debug-internal.log ADDED
@@ -0,0 +1,20 @@
+ {"time":"2026-01-28T00:20:42.287152799+08:00","level":"INFO","msg":"using version","core version":"0.18.6"}
+ {"time":"2026-01-28T00:20:42.287163685+08:00","level":"INFO","msg":"created symlink","path":"/work/u1131674/LLM-BC/data/outputs/2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/run-20260128_002042-d9kt1a6y/logs/debug-core.log"}
+ {"time":"2026-01-28T00:20:42.40159487+08:00","level":"INFO","msg":"created new stream","id":"d9kt1a6y"}
+ {"time":"2026-01-28T00:20:42.401631969+08:00","level":"INFO","msg":"stream: started","id":"d9kt1a6y"}
+ {"time":"2026-01-28T00:20:42.40165535+08:00","level":"INFO","msg":"sender: started","stream_id":"d9kt1a6y"}
+ {"time":"2026-01-28T00:20:42.401645639+08:00","level":"INFO","msg":"writer: Do: started","stream_id":{"value":"d9kt1a6y"}}
+ {"time":"2026-01-28T00:20:42.401655093+08:00","level":"INFO","msg":"handler: started","stream_id":{"value":"d9kt1a6y"}}
+ {"time":"2026-01-28T00:20:43.122350505+08:00","level":"INFO","msg":"Starting system monitor"}
+ {"time":"2026-01-28T03:18:28.843132355+08:00","level":"INFO","msg":"api: retrying HTTP error","status":502,"url":"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/d9kt1a6y/file_stream"}
+ {"time":"2026-01-28T04:22:17.072983497+08:00","level":"INFO","msg":"api: retrying HTTP error","status":502,"url":"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/d9kt1a6y/file_stream"}
+ {"time":"2026-01-28T05:23:28.602833473+08:00","level":"INFO","msg":"api: retrying HTTP error","status":502,"url":"https://api.wandb.ai/graphql"}
+ {"time":"2026-01-28T09:24:07.706274513+08:00","level":"INFO","msg":"api: retrying HTTP error","status":502,"url":"https://api.wandb.ai/graphql"}
+ {"time":"2026-01-28T10:52:44.707921101+08:00","level":"INFO","msg":"stream: closing","id":"d9kt1a6y"}
+ {"time":"2026-01-28T10:52:44.707946286+08:00","level":"INFO","msg":"Stopping system monitor"}
+ {"time":"2026-01-28T10:52:44.708452053+08:00","level":"INFO","msg":"Stopped system monitor"}
+ {"time":"2026-01-28T10:52:48.257346098+08:00","level":"INFO","msg":"fileTransfer: Close: file transfer manager closed"}
+ {"time":"2026-01-28T10:52:48.506294252+08:00","level":"INFO","msg":"handler: closed","stream_id":{"value":"d9kt1a6y"}}
+ {"time":"2026-01-28T10:52:48.506322189+08:00","level":"INFO","msg":"writer: Close: closed","stream_id":{"value":"d9kt1a6y"}}
+ {"time":"2026-01-28T10:52:48.506353262+08:00","level":"INFO","msg":"sender: closed","stream_id":"d9kt1a6y"}
+ {"time":"2026-01-28T10:52:48.506389957+08:00","level":"INFO","msg":"stream: closed","id":"d9kt1a6y"}
2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/debug.log ADDED
@@ -0,0 +1,27 @@
+ 2026-01-28 00:20:42,282 INFO MainThread:1129901 [wandb_setup.py:_flush():79] Current SDK version is 0.18.6
+ 2026-01-28 00:20:42,282 INFO MainThread:1129901 [wandb_setup.py:_flush():79] Configure stats pid to 1129901
+ 2026-01-28 00:20:42,282 INFO MainThread:1129901 [wandb_setup.py:_flush():79] Loading settings from /home/u1131674/.config/wandb/settings
+ 2026-01-28 00:20:42,282 INFO MainThread:1129901 [wandb_setup.py:_flush():79] Loading settings from /work/u1131674/LLM-BC/wandb/settings
+ 2026-01-28 00:20:42,283 INFO MainThread:1129901 [wandb_setup.py:_flush():79] Loading settings from environment variables: {}
+ 2026-01-28 00:20:42,283 INFO MainThread:1129901 [wandb_setup.py:_flush():79] Applying setup settings: {'mode': 'online', '_disable_service': None}
+ 2026-01-28 00:20:42,283 INFO MainThread:1129901 [wandb_setup.py:_flush():79] Inferring run settings from compute environment: {'program_relpath': 'train.py', 'program_abspath': '/work/u1131674/LLM-BC/train.py', 'program': '/work/u1131674/LLM-BC/./train.py'}
+ 2026-01-28 00:20:42,283 INFO MainThread:1129901 [wandb_setup.py:_flush():79] Applying login settings: {}
+ 2026-01-28 00:20:42,283 INFO MainThread:1129901 [wandb_init.py:_log_setup():533] Logging user logs to /work/u1131674/LLM-BC/data/outputs/2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/run-20260128_002042-d9kt1a6y/logs/debug.log
+ 2026-01-28 00:20:42,283 INFO MainThread:1129901 [wandb_init.py:_log_setup():534] Logging internal logs to /work/u1131674/LLM-BC/data/outputs/2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/run-20260128_002042-d9kt1a6y/logs/debug-internal.log
+ 2026-01-28 00:20:42,283 INFO MainThread:1129901 [wandb_init.py:init():619] calling init triggers
+ 2026-01-28 00:20:42,283 INFO MainThread:1129901 [wandb_init.py:init():626] wandb.init called with sweep_config: {}
+ config: {'name': 'train_mlp_projector', '_target_': 'llmbc.workspace.train_mlp_projector_workspace.TrainMlpProjectorWorkspace', 'obs_dim': 9, 'action_dim': 4, 'task_name': 'box-close-v2', 'exp_name': 'default', 'model_name': 'HuggingFaceTB/SmolLM2-135M-Instruct', 'horizon': 1, 'n_obs_steps': 1, 'n_action_steps': 1, 'n_latency_steps': 0, 'past_action_visible': False, 'llm_translator': {'_target_': 'llmbc.translator.llm_translator.LLMTranslator', 'cfg': {'name': 'HuggingFaceTB/SmolLM2-135M-Instruct', 'model_name': 'SmolLM2-135M-Instruct', 'config_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig', 'causal_lm_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM', 'use_quantization': False, 'use_joint_mlp_projector': True, 'llm_mode': 'pretrained', 'finetune_mode': 'orig', 'checkpoint': '', 'max_length': 100, 'lora_config': {'r': 32, 'lora_alpha': 64, 'lora_dropout': 0.05, 'bias': 'none', 'task_type': 'CAUSAL_LM'}, 'prompter': {'_target_': 'llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter', 'use_joint_mlp_projector': True}, 'hydra': {'job': {'override_dirname': 'HuggingFaceTB/SmolLM2-135M-Instruct'}, 'run': {'dir': 'data/outputs/2026.01.28/00.20.31_HuggingFaceTB/SmolLM2-135M-Instruct'}}}, 'obs_dim': 9, 'action_dim': 4, 'horizon': 1, 'n_obs_steps': 1, 'n_action_steps': 1}, 'dataloader': {'batch_size': 128, 'num_workers': 0, 'shuffle': True, 'pin_memory': False, 'persistent_workers': False}, 'val_dataloader': {'batch_size': 128, 'num_workers': 0, 'shuffle': True, 'pin_memory': False, 'persistent_workers': False}, 'optimizer': {'_target_': 'torch.optim.Adam', 'lr': 0.0001, 'betas': [0.95, 0.999], 'eps': 1e-08, 'weight_decay': 1e-06}, 'training': {'device': 'cuda', 'seed': 42, 'debug': False, 'resume': True, 'lr_scheduler': 'cosine', 'lr_warmup_steps': 10, 'num_epochs': 100, 'gradient_accumulate_every': 2, 'grad_norm_clip': 1, 'enable_normalizer': True, 'checkpoint_every': 1, 'val_every': 1, 'sample_every': 1, 'sample_max_batch': 128, 'max_train_steps': None, 'max_val_steps': None, 'tqdm_interval_sec': 1.0}, 'logging': {'project': 'llm_module_training', 'resume': True, 'mode': 'online', 'name': '2026.01.28-00.20.31_train_mlp_projector_box-close-v2', 'tags': ['train_mlp_projector', 'box-close-v2', 'default'], 'id': None, 'group': None}, 'checkpoint': {'topk': {'monitor_key': 'val_loss', 'mode': 'min', 'k': 5, 'format_str': 'epoch={epoch:04d}-val_loss={val_loss:.3f}.ckpt'}, 'save_last_ckpt': True, 'save_last_snapshot': False}, 'multi_run': {'run_dir': 'data/outputs/2026.01.28/00.20.31_train_mlp_projector_box-close-v2', 'wandb_name_base': '2026.01.28-00.20.31_train_mlp_projector_box-close-v2'}, 'task': {'name': 'box-close-v2', 'obs_dim': 9, 'action_dim': 4, 'env_runner': {'_target_': 'llmbc.env_runner.metaworld_lowdim_runner.MetaworldLowdimRunner', 'env_name': 'llf-metaworld-box-close-v2', 'n_train': 10, 'n_test': 50, 'n_envs': 10, 'max_steps': 30, 'n_obs_steps': 1, 'n_action_steps': 1, 'instruction_type': 'b', 'feedback_type': ['hp', 'hn', 'fp'], 'visual': False, 'discount': 0.9}, 'dataset': {'_target_': 'llmbc.dataset.metaworld_lowdim_dataset.MetaworldLowdimDataset', 'data_path': 'datasets/box-close-v2-general-o4-mini.pt', 'data_path2': 'datasets/box-close-v2-o4-mini.pt', 'horizon': 1, 'pad_before': 0, 'pad_after': 0, 'obs_eef_target': True, 'use_manual_normalizer': False, 'val_ratio': 0.05, 'dummy_normalizer': True}, 'instructor': {'_target_': 'llmbc.translator.instructor.metaworld_instructor.box_close_v2_instructor.BoxCloseV2Instructor'}}, 'llm': {'name': 'HuggingFaceTB/SmolLM2-135M-Instruct', 'model_name': 'SmolLM2-135M-Instruct', 'config_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig', 'causal_lm_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM', 'use_quantization': False, 'use_joint_mlp_projector': True, 'llm_mode': 'pretrained', 'finetune_mode': 'orig', 'checkpoint': '', 'max_length': 100, 'lora_config': {'r': 32, 'lora_alpha': 64, 'lora_dropout': 0.05, 'bias': 'none', 'task_type': 'CAUSAL_LM'}, 'prompter': {'_target_': 'llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter', 'use_joint_mlp_projector': True}, 'hydra': {'job': {'override_dirname': 'HuggingFaceTB/SmolLM2-135M-Instruct'}, 'run': {'dir': 'data/outputs/2026.01.28/00.20.31_HuggingFaceTB/SmolLM2-135M-Instruct'}}}}
+ 2026-01-28 00:20:42,283 INFO MainThread:1129901 [wandb_init.py:init():669] starting backend
+ 2026-01-28 00:20:42,283 INFO MainThread:1129901 [wandb_init.py:init():673] sending inform_init request
+ 2026-01-28 00:20:42,284 INFO MainThread:1129901 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+ 2026-01-28 00:20:42,285 INFO MainThread:1129901 [wandb_init.py:init():686] backend started and connected
+ 2026-01-28 00:20:42,292 INFO MainThread:1129901 [wandb_init.py:init():781] updated telemetry
+ 2026-01-28 00:20:42,346 INFO MainThread:1129901 [wandb_init.py:init():814] communicating run to backend with 90.0 second timeout
+ 2026-01-28 00:20:43,117 INFO MainThread:1129901 [wandb_init.py:init():867] starting run threads in backend
+ 2026-01-28 00:20:43,315 INFO MainThread:1129901 [wandb_run.py:_console_start():2451] atexit reg
+ 2026-01-28 00:20:43,316 INFO MainThread:1129901 [wandb_run.py:_redirect():2299] redirect: wrap_raw
+ 2026-01-28 00:20:43,316 INFO MainThread:1129901 [wandb_run.py:_redirect():2364] Wrapping output streams.
+ 2026-01-28 00:20:43,316 INFO MainThread:1129901 [wandb_run.py:_redirect():2389] Redirects installed.
+ 2026-01-28 00:20:43,319 INFO MainThread:1129901 [wandb_init.py:init():911] run started, returning control to user process
+ 2026-01-28 00:20:43,319 INFO MainThread:1129901 [wandb_run.py:_config_callback():1389] config_cb None None {'output_dir': '/work/u1131674/LLM-BC/data/outputs/2026.01.28/00.20.31_train_mlp_projector_box-close-v2'}
+ 2026-01-28 10:52:44,708 WARNING MsgRouterThr:1129901 [router.py:message_loop():75] message_loop has been closed
2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/run-20260128_002042-d9kt1a6y/files/config.yaml ADDED
@@ -0,0 +1,242 @@
+ _target_:
+   value: llmbc.workspace.train_mlp_projector_workspace.TrainMlpProjectorWorkspace
+ _wandb:
+   value:
+     cli_version: 0.18.6
+     m: []
+     python_version: 3.9.25
+     t:
+       "1":
+       - 1
+       - 2
+       - 3
+       - 5
+       - 11
+       - 12
+       - 41
+       - 49
+       - 50
+       - 51
+       - 53
+       - 55
+       - 71
+       - 83
+       - 95
+       - 98
+       - 105
+       "2":
+       - 1
+       - 2
+       - 3
+       - 5
+       - 11
+       - 12
+       - 41
+       - 49
+       - 50
+       - 51
+       - 53
+       - 55
+       - 71
+       - 83
+       - 95
+       - 98
+       - 105
+       "3":
+       - 13
+       - 15
+       - 16
+       - 23
+       - 55
+       - 61
+       "4": 3.9.25
+       "5": 0.18.6
+       "6": 4.47.1
+       "8":
+       - 5
+       "12": 0.18.6
+       "13": linux-x86_64
+ action_dim:
+   value: 4
+ checkpoint:
+   value:
+     save_last_ckpt: true
+     save_last_snapshot: false
+     topk:
+       format_str: epoch={epoch:04d}-val_loss={val_loss:.3f}.ckpt
+       k: 5
+       mode: min
+       monitor_key: val_loss
+ dataloader:
+   value:
+     batch_size: 128
+     num_workers: 0
+     persistent_workers: false
+     pin_memory: false
+     shuffle: true
+ exp_name:
+   value: default
+ horizon:
+   value: 1
+ llm:
+   value:
+     causal_lm_target: llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM
+     checkpoint: ""
+     config_target: llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig
+     finetune_mode: orig
+     hydra:
+       job:
+         override_dirname: HuggingFaceTB/SmolLM2-135M-Instruct
+       run:
+         dir: data/outputs/2026.01.28/00.20.31_HuggingFaceTB/SmolLM2-135M-Instruct
+     llm_mode: pretrained
+     lora_config:
+       bias: none
+       lora_alpha: 64
+       lora_dropout: 0.05
+       r: 32
+       task_type: CAUSAL_LM
+     max_length: 100
+     model_name: SmolLM2-135M-Instruct
+     name: HuggingFaceTB/SmolLM2-135M-Instruct
+     prompter:
+       _target_: llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter
+       use_joint_mlp_projector: true
+     use_joint_mlp_projector: true
+     use_quantization: false
+ llm_translator:
+   value:
+     _target_: llmbc.translator.llm_translator.LLMTranslator
+     action_dim: 4
+     cfg:
+       causal_lm_target: llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM
+       checkpoint: ""
+       config_target: llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig
+       finetune_mode: orig
+       hydra:
+         job:
+           override_dirname: HuggingFaceTB/SmolLM2-135M-Instruct
+         run:
+           dir: data/outputs/2026.01.28/00.20.31_HuggingFaceTB/SmolLM2-135M-Instruct
+       llm_mode: pretrained
+       lora_config:
+         bias: none
+         lora_alpha: 64
+         lora_dropout: 0.05
+         r: 32
+         task_type: CAUSAL_LM
+       max_length: 100
+       model_name: SmolLM2-135M-Instruct
+       name: HuggingFaceTB/SmolLM2-135M-Instruct
+       prompter:
+         _target_: llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter
+         use_joint_mlp_projector: true
+       use_joint_mlp_projector: true
+       use_quantization: false
+     horizon: 1
+     n_action_steps: 1
+     n_obs_steps: 1
+     obs_dim: 9
+ logging:
+   value:
+     group: null
+     id: null
+     mode: online
+     name: 2026.01.28-00.20.31_train_mlp_projector_box-close-v2
+     project: llm_module_training
+     resume: true
+     tags:
+     - train_mlp_projector
+     - box-close-v2
+     - default
+ model_name:
+   value: HuggingFaceTB/SmolLM2-135M-Instruct
+ multi_run:
+   value:
+     run_dir: data/outputs/2026.01.28/00.20.31_train_mlp_projector_box-close-v2
+     wandb_name_base: 2026.01.28-00.20.31_train_mlp_projector_box-close-v2
+ n_action_steps:
+   value: 1
+ n_latency_steps:
+   value: 0
+ n_obs_steps:
+   value: 1
+ name:
+   value: train_mlp_projector
+ obs_dim:
+   value: 9
+ optimizer:
+   value:
+     _target_: torch.optim.Adam
+     betas:
+     - 0.95
+     - 0.999
+     eps: 1e-08
+     lr: 0.0001
+     weight_decay: 1e-06
+ output_dir:
+   value: /work/u1131674/LLM-BC/data/outputs/2026.01.28/00.20.31_train_mlp_projector_box-close-v2
+ past_action_visible:
+   value: false
+ task:
+   value:
+     action_dim: 4
+     dataset:
+       _target_: llmbc.dataset.metaworld_lowdim_dataset.MetaworldLowdimDataset
+       data_path: datasets/box-close-v2-general-o4-mini.pt
+       data_path2: datasets/box-close-v2-o4-mini.pt
+       dummy_normalizer: true
+       horizon: 1
+       obs_eef_target: true
+       pad_after: 0
+       pad_before: 0
+       use_manual_normalizer: false
+       val_ratio: 0.05
+     env_runner:
+       _target_: llmbc.env_runner.metaworld_lowdim_runner.MetaworldLowdimRunner
+       discount: 0.9
+       env_name: llf-metaworld-box-close-v2
+       feedback_type:
+       - hp
+       - hn
+       - fp
+       instruction_type: b
+       max_steps: 30
+       n_action_steps: 1
+       n_envs: 10
+       n_obs_steps: 1
+       n_test: 50
+       n_train: 10
+       visual: false
+     instructor:
+       _target_: llmbc.translator.instructor.metaworld_instructor.box_close_v2_instructor.BoxCloseV2Instructor
+     name: box-close-v2
+     obs_dim: 9
+ task_name:
+   value: box-close-v2
+ training:
+   value:
+     checkpoint_every: 1
+     debug: false
+     device: cuda
+     enable_normalizer: true
+     grad_norm_clip: 1
+     gradient_accumulate_every: 2
+     lr_scheduler: cosine
+     lr_warmup_steps: 10
+     max_train_steps: null
+     max_val_steps: null
+     num_epochs: 100
+     resume: true
+     sample_every: 1
+     sample_max_batch: 128
+     seed: 42
+     tqdm_interval_sec: 1
+     val_every: 1
+ val_dataloader:
+   value:
+     batch_size: 128
+     num_workers: 0
+     persistent_workers: false
+     pin_memory: false
+     shuffle: true
2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/run-20260128_002042-d9kt1a6y/files/output.log ADDED
@@ -0,0 +1 @@
+
2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/run-20260128_002042-d9kt1a6y/files/wandb-metadata.json ADDED
@@ -0,0 +1,106 @@
+ {
+   "os": "Linux-4.18.0-513.24.1.el8_9.x86_64-x86_64-with-glibc2.28",
+   "python": "3.9.25",
+   "startedAt": "2026-01-27T16:20:42.285279Z",
+   "args": [
+     "--config-path",
+     "config/lang_source",
+     "--config-name",
+     "llmbc_mlp_projector_box-close-v2_o4-mini.yaml"
+   ],
+   "program": "/work/u1131674/LLM-BC/./train.py",
+   "codePath": "train.py",
+   "git": {
+     "remote": "https://github.com/CHYang25/LLM-BC.git",
+     "commit": "2cf60c4cdae3a2ac623eb2aebb7d12dc2beb2b66"
+   },
+   "email": "chris920325@gmail.com",
+   "root": "/work/u1131674/LLM-BC/data/outputs/2026.01.28/00.20.31_train_mlp_projector_box-close-v2",
+   "host": "hgpn21",
+   "username": "u1131674",
+   "executable": "/home/u1131674/.conda/envs/llm-bc/bin/python3",
+   "codePathLocal": "train.py",
+   "cpu_count": 112,
+   "cpu_count_logical": 112,
+   "gpu": "NVIDIA H100 80GB HBM3",
+   "gpu_count": 1,
+   "disk": {
+     "/": {
+       "total": "1918024196096",
+       "used": "217170878464"
+     }
+   },
+   "memory": {
+     "total": "2163677220864"
+   },
+   "cpu": {
+     "count": 112,
+     "countLogical": 112
+   },
+   "gpu_nvidia": [
+     {
+       "name": "NVIDIA H100 80GB HBM3",
+       "memoryTotal": "85520809984",
+       "cudaCores": 16896,
+       "architecture": "Hopper"
+     }
+   ],
+   "slurm": {
+     "cluster_name": "hpc",
+     "conf": "/etc/slurm/slurm.conf",
+     "cpu_bind": "quiet,mask_cpu:0x000000F000000F00000000000000",
+     "cpu_bind_list": "0x000000F000000F00000000000000",
+     "cpu_bind_type": "mask_cpu:",
+     "cpu_bind_verbose": "quiet",
+     "cpus_on_node": "8",
+     "cpus_per_task": "8",
+     "distribution": "cyclic,pack",
+     "gpus_on_node": "1",
+     "gpus_per_node": "1",
+     "gtids": "0",
+     "job_account": "mst114558",
+     "job_cpus_per_node": "8",
+     "job_end_time": "1769703630",
+     "job_gid": "106773",
+     "job_group": "MST114558",
+     "job_id": "105048",
+     "job_name": "python3",
+     "job_nodelist": "hgpn21",
+     "job_num_nodes": "1",
+     "job_partition": "normal",
+     "job_qos": "normal",
+     "job_start_time": "1769530830",
+     "job_uid": "41408",
+     "job_user": "u1131674",
+     "jobid": "105048",
+     "launch_node_ipaddr": "172.21.101.1",
+     "localid": "0",
+     "mem_per_node": "204800",
+     "nnodes": "1",
+     "nodeid": "0",
+     "nodelist": "hgpn21",
+     "nprocs": "1",
+     "ntasks": "1",
+     "prio_process": "0",
+     "procid": "0",
+     "srun_comm_host": "172.21.101.1",
+     "srun_comm_port": "41331",
+     "step_gpus": "4",
+     "step_id": "0",
+     "step_launcher_port": "41331",
+     "step_nodelist": "hgpn21",
+     "step_num_nodes": "1",
+     "step_num_tasks": "1",
+     "step_tasks_per_node": "1",
+     "stepid": "0",
+     "submit_dir": "/work/u1131674/LLM-BC",
+     "submit_host": "cbi-lgn01",
+     "task_pid": "1129901",
+     "tasks_per_node": "1",
+     "topology_addr": "ibsw1.hgpn21",
+     "topology_addr_pattern": "switch.node",
+     "tres_per_task": "cpu:8",
+     "umask": "0022"
+   },
+   "cudaVersion": "12.4"
+ }
2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/run-20260128_002042-d9kt1a6y/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
+ {"train_loss":2.2015625126593936,"_wandb":{"runtime":37922},"_step":56499,"_timestamp":1.7695687646872494e+09,"grad_norm":5.658727169036865,"global_step":56499,"epoch":99,"val_loss":2.1897988319396973,"lr":0,"_runtime":37922.42265669}
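For scale: _runtime 37922 s is about 10 h 32 min, matching the gap between stream start (00:20:43) and teardown (10:52:44) in the debug logs; _step 56499 at epoch 99 works out to roughly 565 logged steps per epoch, and lr reaching 0 is expected at the end of a cosine schedule.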
2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/run-20260128_002042-d9kt1a6y/logs/debug-core.log ADDED
@@ -0,0 +1,14 @@
+ {"time":"2026-01-28T00:20:41.678122741+08:00","level":"INFO","msg":"started logging, with flags","port-filename":"/tmp/tmpf51s48j1/port-1129901.txt","pid":1129901,"debug":false,"disable-analytics":false}
+ {"time":"2026-01-28T00:20:41.678144133+08:00","level":"INFO","msg":"FeatureState","shutdownOnParentExitEnabled":false}
+ {"time":"2026-01-28T00:20:41.678525998+08:00","level":"INFO","msg":"Will exit if parent process dies.","ppid":1129901}
+ {"time":"2026-01-28T00:20:41.678522018+08:00","level":"INFO","msg":"server is running","addr":{"IP":"127.0.0.1","Port":41121,"Zone":""}}
+ {"time":"2026-01-28T00:20:41.872810103+08:00","level":"INFO","msg":"connection: ManageConnectionData: new connection created","id":"127.0.0.1:33780"}
+ {"time":"2026-01-28T00:20:42.285792202+08:00","level":"INFO","msg":"handleInformInit: received","streamId":"d9kt1a6y","id":"127.0.0.1:33780"}
+ {"time":"2026-01-28T00:20:42.401637789+08:00","level":"INFO","msg":"handleInformInit: stream started","streamId":"d9kt1a6y","id":"127.0.0.1:33780"}
+ {"time":"2026-01-28T10:52:44.707861963+08:00","level":"INFO","msg":"handleInformTeardown: server teardown initiated","id":"127.0.0.1:33780"}
+ {"time":"2026-01-28T10:52:44.707928482+08:00","level":"INFO","msg":"server is shutting down"}
+ {"time":"2026-01-28T10:52:44.707920412+08:00","level":"INFO","msg":"connection: Close: initiating connection closure","id":"127.0.0.1:33780"}
+ {"time":"2026-01-28T10:52:44.708005087+08:00","level":"INFO","msg":"connection: Close: connection successfully closed","id":"127.0.0.1:33780"}
+ {"time":"2026-01-28T10:52:48.506700484+08:00","level":"INFO","msg":"handleInformTeardown: server shutdown complete","id":"127.0.0.1:33780"}
+ {"time":"2026-01-28T10:52:48.50671573+08:00","level":"INFO","msg":"connection: ManageConnectionData: connection closed","id":"127.0.0.1:33780"}
+ {"time":"2026-01-28T10:52:48.506722098+08:00","level":"INFO","msg":"server is closed"}
2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/run-20260128_002042-d9kt1a6y/logs/debug-internal.log ADDED
@@ -0,0 +1,20 @@
+ {"time":"2026-01-28T00:20:42.287152799+08:00","level":"INFO","msg":"using version","core version":"0.18.6"}
+ {"time":"2026-01-28T00:20:42.287163685+08:00","level":"INFO","msg":"created symlink","path":"/work/u1131674/LLM-BC/data/outputs/2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/run-20260128_002042-d9kt1a6y/logs/debug-core.log"}
+ {"time":"2026-01-28T00:20:42.40159487+08:00","level":"INFO","msg":"created new stream","id":"d9kt1a6y"}
+ {"time":"2026-01-28T00:20:42.401631969+08:00","level":"INFO","msg":"stream: started","id":"d9kt1a6y"}
+ {"time":"2026-01-28T00:20:42.40165535+08:00","level":"INFO","msg":"sender: started","stream_id":"d9kt1a6y"}
+ {"time":"2026-01-28T00:20:42.401645639+08:00","level":"INFO","msg":"writer: Do: started","stream_id":{"value":"d9kt1a6y"}}
+ {"time":"2026-01-28T00:20:42.401655093+08:00","level":"INFO","msg":"handler: started","stream_id":{"value":"d9kt1a6y"}}
+ {"time":"2026-01-28T00:20:43.122350505+08:00","level":"INFO","msg":"Starting system monitor"}
+ {"time":"2026-01-28T03:18:28.843132355+08:00","level":"INFO","msg":"api: retrying HTTP error","status":502,"url":"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/d9kt1a6y/file_stream"}
+ {"time":"2026-01-28T04:22:17.072983497+08:00","level":"INFO","msg":"api: retrying HTTP error","status":502,"url":"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/d9kt1a6y/file_stream"}
+ {"time":"2026-01-28T05:23:28.602833473+08:00","level":"INFO","msg":"api: retrying HTTP error","status":502,"url":"https://api.wandb.ai/graphql"}
+ {"time":"2026-01-28T09:24:07.706274513+08:00","level":"INFO","msg":"api: retrying HTTP error","status":502,"url":"https://api.wandb.ai/graphql"}
+ {"time":"2026-01-28T10:52:44.707921101+08:00","level":"INFO","msg":"stream: closing","id":"d9kt1a6y"}
+ {"time":"2026-01-28T10:52:44.707946286+08:00","level":"INFO","msg":"Stopping system monitor"}
+ {"time":"2026-01-28T10:52:44.708452053+08:00","level":"INFO","msg":"Stopped system monitor"}
+ {"time":"2026-01-28T10:52:48.257346098+08:00","level":"INFO","msg":"fileTransfer: Close: file transfer manager closed"}
+ {"time":"2026-01-28T10:52:48.506294252+08:00","level":"INFO","msg":"handler: closed","stream_id":{"value":"d9kt1a6y"}}
+ {"time":"2026-01-28T10:52:48.506322189+08:00","level":"INFO","msg":"writer: Close: closed","stream_id":{"value":"d9kt1a6y"}}
+ {"time":"2026-01-28T10:52:48.506353262+08:00","level":"INFO","msg":"sender: closed","stream_id":"d9kt1a6y"}
+ {"time":"2026-01-28T10:52:48.506389957+08:00","level":"INFO","msg":"stream: closed","id":"d9kt1a6y"}
2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/run-20260128_002042-d9kt1a6y/logs/debug.log ADDED
@@ -0,0 +1,27 @@
+ 2026-01-28 00:20:42,282 INFO MainThread:1129901 [wandb_setup.py:_flush():79] Current SDK version is 0.18.6
+ 2026-01-28 00:20:42,282 INFO MainThread:1129901 [wandb_setup.py:_flush():79] Configure stats pid to 1129901
+ 2026-01-28 00:20:42,282 INFO MainThread:1129901 [wandb_setup.py:_flush():79] Loading settings from /home/u1131674/.config/wandb/settings
+ 2026-01-28 00:20:42,282 INFO MainThread:1129901 [wandb_setup.py:_flush():79] Loading settings from /work/u1131674/LLM-BC/wandb/settings
+ 2026-01-28 00:20:42,283 INFO MainThread:1129901 [wandb_setup.py:_flush():79] Loading settings from environment variables: {}
+ 2026-01-28 00:20:42,283 INFO MainThread:1129901 [wandb_setup.py:_flush():79] Applying setup settings: {'mode': 'online', '_disable_service': None}
+ 2026-01-28 00:20:42,283 INFO MainThread:1129901 [wandb_setup.py:_flush():79] Inferring run settings from compute environment: {'program_relpath': 'train.py', 'program_abspath': '/work/u1131674/LLM-BC/train.py', 'program': '/work/u1131674/LLM-BC/./train.py'}
+ 2026-01-28 00:20:42,283 INFO MainThread:1129901 [wandb_setup.py:_flush():79] Applying login settings: {}
+ 2026-01-28 00:20:42,283 INFO MainThread:1129901 [wandb_init.py:_log_setup():533] Logging user logs to /work/u1131674/LLM-BC/data/outputs/2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/run-20260128_002042-d9kt1a6y/logs/debug.log
+ 2026-01-28 00:20:42,283 INFO MainThread:1129901 [wandb_init.py:_log_setup():534] Logging internal logs to /work/u1131674/LLM-BC/data/outputs/2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/run-20260128_002042-d9kt1a6y/logs/debug-internal.log
+ 2026-01-28 00:20:42,283 INFO MainThread:1129901 [wandb_init.py:init():619] calling init triggers
+ 2026-01-28 00:20:42,283 INFO MainThread:1129901 [wandb_init.py:init():626] wandb.init called with sweep_config: {}
+ config: {'name': 'train_mlp_projector', '_target_': 'llmbc.workspace.train_mlp_projector_workspace.TrainMlpProjectorWorkspace', 'obs_dim': 9, 'action_dim': 4, 'task_name': 'box-close-v2', 'exp_name': 'default', 'model_name': 'HuggingFaceTB/SmolLM2-135M-Instruct', 'horizon': 1, 'n_obs_steps': 1, 'n_action_steps': 1, 'n_latency_steps': 0, 'past_action_visible': False, 'llm_translator': {'_target_': 'llmbc.translator.llm_translator.LLMTranslator', 'cfg': {'name': 'HuggingFaceTB/SmolLM2-135M-Instruct', 'model_name': 'SmolLM2-135M-Instruct', 'config_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig', 'causal_lm_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM', 'use_quantization': False, 'use_joint_mlp_projector': True, 'llm_mode': 'pretrained', 'finetune_mode': 'orig', 'checkpoint': '', 'max_length': 100, 'lora_config': {'r': 32, 'lora_alpha': 64, 'lora_dropout': 0.05, 'bias': 'none', 'task_type': 'CAUSAL_LM'}, 'prompter': {'_target_': 'llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter', 'use_joint_mlp_projector': True}, 'hydra': {'job': {'override_dirname': 'HuggingFaceTB/SmolLM2-135M-Instruct'}, 'run': {'dir': 'data/outputs/2026.01.28/00.20.31_HuggingFaceTB/SmolLM2-135M-Instruct'}}}, 'obs_dim': 9, 'action_dim': 4, 'horizon': 1, 'n_obs_steps': 1, 'n_action_steps': 1}, 'dataloader': {'batch_size': 128, 'num_workers': 0, 'shuffle': True, 'pin_memory': False, 'persistent_workers': False}, 'val_dataloader': {'batch_size': 128, 'num_workers': 0, 'shuffle': True, 'pin_memory': False, 'persistent_workers': False}, 'optimizer': {'_target_': 'torch.optim.Adam', 'lr': 0.0001, 'betas': [0.95, 0.999], 'eps': 1e-08, 'weight_decay': 1e-06}, 'training': {'device': 'cuda', 'seed': 42, 'debug': False, 'resume': True, 'lr_scheduler': 'cosine', 'lr_warmup_steps': 10, 'num_epochs': 100, 'gradient_accumulate_every': 2, 'grad_norm_clip': 1, 'enable_normalizer': True, 'checkpoint_every': 1, 'val_every': 1, 'sample_every': 1, 'sample_max_batch': 128, 'max_train_steps': None, 'max_val_steps': None, 'tqdm_interval_sec': 1.0}, 'logging': {'project': 'llm_module_training', 'resume': True, 'mode': 'online', 'name': '2026.01.28-00.20.31_train_mlp_projector_box-close-v2', 'tags': ['train_mlp_projector', 'box-close-v2', 'default'], 'id': None, 'group': None}, 'checkpoint': {'topk': {'monitor_key': 'val_loss', 'mode': 'min', 'k': 5, 'format_str': 'epoch={epoch:04d}-val_loss={val_loss:.3f}.ckpt'}, 'save_last_ckpt': True, 'save_last_snapshot': False}, 'multi_run': {'run_dir': 'data/outputs/2026.01.28/00.20.31_train_mlp_projector_box-close-v2', 'wandb_name_base': '2026.01.28-00.20.31_train_mlp_projector_box-close-v2'}, 'task': {'name': 'box-close-v2', 'obs_dim': 9, 'action_dim': 4, 'env_runner': {'_target_': 'llmbc.env_runner.metaworld_lowdim_runner.MetaworldLowdimRunner', 'env_name': 'llf-metaworld-box-close-v2', 'n_train': 10, 'n_test': 50, 'n_envs': 10, 'max_steps': 30, 'n_obs_steps': 1, 'n_action_steps': 1, 'instruction_type': 'b', 'feedback_type': ['hp', 'hn', 'fp'], 'visual': False, 'discount': 0.9}, 'dataset': {'_target_': 'llmbc.dataset.metaworld_lowdim_dataset.MetaworldLowdimDataset', 'data_path': 'datasets/box-close-v2-general-o4-mini.pt', 'data_path2': 'datasets/box-close-v2-o4-mini.pt', 'horizon': 1, 'pad_before': 0, 'pad_after': 0, 'obs_eef_target': True, 'use_manual_normalizer': False, 'val_ratio': 0.05, 'dummy_normalizer': True}, 'instructor': {'_target_': 'llmbc.translator.instructor.metaworld_instructor.box_close_v2_instructor.BoxCloseV2Instructor'}}, 'llm': {'name': 'HuggingFaceTB/SmolLM2-135M-Instruct', 'model_name': 'SmolLM2-135M-Instruct', 'config_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig', 'causal_lm_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM', 'use_quantization': False, 'use_joint_mlp_projector': True, 'llm_mode': 'pretrained', 'finetune_mode': 'orig', 'checkpoint': '', 'max_length': 100, 'lora_config': {'r': 32, 'lora_alpha': 64, 'lora_dropout': 0.05, 'bias': 'none', 'task_type': 'CAUSAL_LM'}, 'prompter': {'_target_': 'llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter', 'use_joint_mlp_projector': True}, 'hydra': {'job': {'override_dirname': 'HuggingFaceTB/SmolLM2-135M-Instruct'}, 'run': {'dir': 'data/outputs/2026.01.28/00.20.31_HuggingFaceTB/SmolLM2-135M-Instruct'}}}}
+ 2026-01-28 00:20:42,283 INFO MainThread:1129901 [wandb_init.py:init():669] starting backend
+ 2026-01-28 00:20:42,283 INFO MainThread:1129901 [wandb_init.py:init():673] sending inform_init request
+ 2026-01-28 00:20:42,284 INFO MainThread:1129901 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+ 2026-01-28 00:20:42,285 INFO MainThread:1129901 [wandb_init.py:init():686] backend started and connected
+ 2026-01-28 00:20:42,292 INFO MainThread:1129901 [wandb_init.py:init():781] updated telemetry
+ 2026-01-28 00:20:42,346 INFO MainThread:1129901 [wandb_init.py:init():814] communicating run to backend with 90.0 second timeout
+ 2026-01-28 00:20:43,117 INFO MainThread:1129901 [wandb_init.py:init():867] starting run threads in backend
+ 2026-01-28 00:20:43,315 INFO MainThread:1129901 [wandb_run.py:_console_start():2451] atexit reg
+ 2026-01-28 00:20:43,316 INFO MainThread:1129901 [wandb_run.py:_redirect():2299] redirect: wrap_raw
+ 2026-01-28 00:20:43,316 INFO MainThread:1129901 [wandb_run.py:_redirect():2364] Wrapping output streams.
+ 2026-01-28 00:20:43,316 INFO MainThread:1129901 [wandb_run.py:_redirect():2389] Redirects installed.
+ 2026-01-28 00:20:43,319 INFO MainThread:1129901 [wandb_init.py:init():911] run started, returning control to user process
+ 2026-01-28 00:20:43,319 INFO MainThread:1129901 [wandb_run.py:_config_callback():1389] config_cb None None {'output_dir': '/work/u1131674/LLM-BC/data/outputs/2026.01.28/00.20.31_train_mlp_projector_box-close-v2'}
+ 2026-01-28 10:52:44,708 WARNING MsgRouterThr:1129901 [router.py:message_loop():75] message_loop has been closed
2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/run-20260128_002042-d9kt1a6y/run-d9kt1a6y.wandb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:de7f3a5e581eb8e1ef74351546fe00ccfbe437c9826d4574fddbca5ecf91d3df
+ size 55111506
2026.01.28/00.20.31_train_mlp_projector_box-close-v2/wandb/wandb-resume.json ADDED
@@ -0,0 +1 @@
+ {"run_id": "d9kt1a6y"}
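This file pins the run id so a restarted job (the config sets both training.resume: true and logging.resume: true) reattaches to the same W&B run instead of creating a new one. A sketch of the equivalent explicit resume with the public wandb API:

    import wandb

    # Reattach to the recorded run rather than starting a fresh one.
    run = wandb.init(
        project="llm_module_training",
        id="d9kt1a6y",
        resume="allow",  # "must" would instead fail if the id did not exist
    )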