CHYang25 committed
Commit fede27e · verified · 1 Parent(s): d848eca

Upload folder using huggingface_hub

Files changed (23):
  1. .gitattributes +1 -0
  2. 2025.10.22/00.25.59_train_mlp_projector_box-close-v2/.hydra/config.yaml +138 -0
  3. 2025.10.22/00.25.59_train_mlp_projector_box-close-v2/.hydra/hydra.yaml +156 -0
  4. 2025.10.22/00.25.59_train_mlp_projector_box-close-v2/.hydra/overrides.yaml +1 -0
  5. 2025.10.22/00.25.59_train_mlp_projector_box-close-v2/checkpoints/epoch=0090-val_loss=1.364.ckpt +3 -0
  6. 2025.10.22/00.25.59_train_mlp_projector_box-close-v2/checkpoints/epoch=0093-val_loss=1.364.ckpt +3 -0
  7. 2025.10.22/00.25.59_train_mlp_projector_box-close-v2/checkpoints/epoch=0097-val_loss=1.366.ckpt +3 -0
  8. 2025.10.22/00.25.59_train_mlp_projector_box-close-v2/checkpoints/epoch=0098-val_loss=1.360.ckpt +3 -0
  9. 2025.10.22/00.25.59_train_mlp_projector_box-close-v2/checkpoints/epoch=0099-val_loss=1.364.ckpt +3 -0
  10. 2025.10.22/00.25.59_train_mlp_projector_box-close-v2/checkpoints/latest.ckpt +3 -0
  11. 2025.10.22/00.25.59_train_mlp_projector_box-close-v2/logs.json.txt +0 -0
  12. 2025.10.22/00.25.59_train_mlp_projector_box-close-v2/train_mlp_projector_workspace.log +0 -0
  13. 2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/debug-internal.log +18 -0
  14. 2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/debug.log +27 -0
  15. 2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/run-20251022_002602-rvk7mjhx/files/config.yaml +238 -0
  16. 2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/run-20251022_002602-rvk7mjhx/files/output.log +1 -0
  17. 2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/run-20251022_002602-rvk7mjhx/files/wandb-metadata.json +49 -0
  18. 2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/run-20251022_002602-rvk7mjhx/files/wandb-summary.json +1 -0
  19. 2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/run-20251022_002602-rvk7mjhx/logs/debug-core.log +14 -0
  20. 2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/run-20251022_002602-rvk7mjhx/logs/debug-internal.log +18 -0
  21. 2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/run-20251022_002602-rvk7mjhx/logs/debug.log +27 -0
  22. 2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/run-20251022_002602-rvk7mjhx/run-rvk7mjhx.wandb +3 -0
  23. 2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/wandb-resume.json +1 -0
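
The commit message says this run folder was pushed with `huggingface_hub`. A minimal sketch of pulling just this run back down; `repo_id` and `repo_type` are placeholders for wherever this commit actually lives:

```python
from huggingface_hub import snapshot_download

# Hypothetical repo coordinates; substitute the repo this commit belongs to.
local_dir = snapshot_download(
    repo_id="CHYang25/llm-bc-runs",
    repo_type="model",
    allow_patterns=["2025.10.22/00.25.59_train_mlp_projector_box-close-v2/*"],
)
print(local_dir)
```
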
.gitattributes CHANGED
@@ -50,3 +50,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 2025.11.03/22.51.21_train_llm_lowdim_parking-v0/wandb/run-20251103_225122-zsfvjecr/run-zsfvjecr.wandb filter=lfs diff=lfs merge=lfs -text
 2025.11.04/14.51.54_train_llm_lowdim_maze2d-medium-v0/wandb/run-20251104_145155-3236tljj/run-3236tljj.wandb filter=lfs diff=lfs merge=lfs -text
 2025.09.24/22.08.08_train_mlp_projector_box-close-v2/wandb/run-20250924_220811-pqvx1ime/run-pqvx1ime.wandb filter=lfs diff=lfs merge=lfs -text
+2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/run-20251022_002602-rvk7mjhx/run-rvk7mjhx.wandb filter=lfs diff=lfs merge=lfs -text
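
The added rule routes the new run's 66 MB `run-rvk7mjhx.wandb` transaction log through Git LFS, so the repository itself only stores a small pointer file (the `version`/`oid`/`size` stanzas visible in the checkpoint diffs below). A minimal sketch, assuming a plain `git clone` without `git lfs pull`, for checking whether a file is still an unhydrated pointer:

```python
# Git LFS pointer files begin with this exact line (see the .ckpt diffs below).
LFS_MAGIC = b"version https://git-lfs.github.com/spec/v1"

def is_lfs_pointer(path: str) -> bool:
    """Return True if `path` holds an LFS pointer rather than the real blob."""
    with open(path, "rb") as f:
        return f.read(len(LFS_MAGIC)) == LFS_MAGIC

# e.g. is_lfs_pointer(".../checkpoints/latest.ckpt") is True until `git lfs pull`.
```
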
2025.10.22/00.25.59_train_mlp_projector_box-close-v2/.hydra/config.yaml ADDED
@@ -0,0 +1,138 @@
+name: train_mlp_projector
+_target_: llmbc.workspace.train_mlp_projector_workspace.TrainMlpProjectorWorkspace
+obs_dim: ${task.obs_dim}
+action_dim: ${task.action_dim}
+task_name: ${task.name}
+exp_name: default
+model_name: ${llm.name}
+horizon: 1
+n_obs_steps: 1
+n_action_steps: 1
+n_latency_steps: 0
+past_action_visible: false
+llm_translator:
+  _target_: llmbc.translator.llm_translator.LLMTranslator
+  cfg: ${llm}
+  obs_dim: ${task.obs_dim}
+  action_dim: ${task.action_dim}
+  horizon: ${horizon}
+  n_obs_steps: ${n_obs_steps}
+  n_action_steps: ${n_action_steps}
+dataloader:
+  batch_size: 128
+  num_workers: 0
+  shuffle: true
+  pin_memory: false
+  persistent_workers: false
+val_dataloader:
+  batch_size: 128
+  num_workers: 0
+  shuffle: true
+  pin_memory: false
+  persistent_workers: false
+optimizer:
+  _target_: torch.optim.Adam
+  lr: 0.0001
+  betas:
+  - 0.95
+  - 0.999
+  eps: 1.0e-08
+  weight_decay: 1.0e-06
+training:
+  device: cuda
+  seed: 42
+  debug: false
+  resume: true
+  lr_scheduler: cosine
+  lr_warmup_steps: 10
+  num_epochs: 100
+  gradient_accumulate_every: 2
+  grad_norm_clip: 1
+  enable_normalizer: true
+  checkpoint_every: 1
+  val_every: 1
+  sample_every: 1
+  sample_max_batch: 128
+  max_train_steps: null
+  max_val_steps: null
+  tqdm_interval_sec: 1.0
+logging:
+  project: llm_module_training
+  resume: true
+  mode: online
+  name: ${now:%Y.%m.%d-%H.%M.%S}_${name}_${task_name}
+  tags:
+  - ${name}
+  - ${task_name}
+  - ${exp_name}
+  id: null
+  group: null
+checkpoint:
+  topk:
+    monitor_key: val_loss
+    mode: min
+    k: 5
+    format_str: epoch={epoch:04d}-val_loss={val_loss:.3f}.ckpt
+  save_last_ckpt: true
+  save_last_snapshot: false
+multi_run:
+  run_dir: data/outputs/${now:%Y.%m.%d}/${now:%H.%M.%S}_${name}_${task_name}
+  wandb_name_base: ${now:%Y.%m.%d-%H.%M.%S}_${name}_${task_name}
+task:
+  name: box-close-v2
+  obs_dim: 9
+  action_dim: 4
+  env_runner:
+    _target_: llmbc.env_runner.metaworld_lowdim_runner.MetaworldLowdimRunner
+    env_name: llf-metaworld-box-close-v2
+    n_train: 10
+    n_test: 50
+    n_envs: 10
+    max_steps: 30
+    n_obs_steps: ${n_obs_steps}
+    n_action_steps: ${n_action_steps}
+    instruction_type: b
+    feedback_type:
+    - hp
+    - hn
+    - fp
+    visual: false
+    discount: 0.9
+  dataset:
+    _target_: llmbc.dataset.metaworld_lowdim_dataset.MetaworldLowdimDataset
+    data_path: datasets/box-close-v2-general.pt
+    data_path2: datasets/box-close-v2.pt
+    horizon: ${horizon}
+    pad_before: ${eval:'${n_obs_steps}-1'}
+    pad_after: ${eval:'${n_action_steps}-1'}
+    obs_eef_target: true
+    use_manual_normalizer: false
+    val_ratio: 0.05
+    dummy_normalizer: false
+  instructor:
+    _target_: llmbc.translator.instructor.metaworld_instructor.box_close_v2_instructor.BoxCloseV2Instructor
+llm:
+  name: HuggingFaceTB/SmolLM2-135M-Instruct
+  model_name: SmolLM2-135M-Instruct
+  config_target: llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig
+  causal_lm_target: llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM
+  use_quantization: false
+  use_joint_mlp_projector: true
+  llm_mode: pretrained
+  finetune_mode: orig
+  checkpoint: ''
+  max_length: 100
+  lora_config:
+    r: 32
+    lora_alpha: 64
+    lora_dropout: 0.05
+    bias: none
+    task_type: CAUSAL_LM
+  prompter:
+    _target_: llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter
+    use_joint_mlp_projector: true
+hydra:
+  job:
+    override_dirname: ${model_name}
+  run:
+    dir: data/outputs/${now:%Y.%m.%d}/${now:%H.%M.%S}_${model_name}
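
This `.hydra/config.yaml` is the snapshot Hydra writes for the run, so it can be reloaded directly with OmegaConf. Plain interpolations such as `${task.obs_dim}` resolve against the same file; `${now:...}` and `${eval:...}` are resolvers that only exist once Hydra or the training code registers them, so those keys raise if accessed cold. A rough sketch, path relative to this repo:

```python
from omegaconf import OmegaConf

cfg = OmegaConf.load(
    "2025.10.22/00.25.59_train_mlp_projector_box-close-v2/.hydra/config.yaml"
)

print(cfg.task.obs_dim)    # 9
print(cfg.obs_dim)         # 9 as well, via ${task.obs_dim}
print(cfg.llm.model_name)  # SmolLM2-135M-Instruct
# cfg.logging.name would raise: ${now:...} is registered by Hydra at run time.
```
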
2025.10.22/00.25.59_train_mlp_projector_box-close-v2/.hydra/hydra.yaml ADDED
@@ -0,0 +1,156 @@
+hydra:
+  run:
+    dir: data/outputs/${now:%Y.%m.%d}/${now:%H.%M.%S}_${name}_${task_name}
+  sweep:
+    dir: data/outputs/${now:%Y.%m.%d}/${now:%H.%M.%S}_${name}_${task_name}
+    subdir: ${hydra.job.num}
+  launcher:
+    _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
+  sweeper:
+    _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
+    max_batch_size: null
+    params: null
+  help:
+    app_name: ${hydra.job.name}
+    header: '${hydra.help.app_name} is powered by Hydra.
+
+      '
+    footer: 'Powered by Hydra (https://hydra.cc)
+
+      Use --hydra-help to view Hydra specific help
+
+      '
+    template: '${hydra.help.header}
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (group=option)
+
+
+      $APP_CONFIG_GROUPS
+
+
+      == Config ==
+
+      Override anything in the config (foo.bar=value)
+
+
+      $CONFIG
+
+
+      ${hydra.help.footer}
+
+      '
+  hydra_help:
+    template: 'Hydra (${hydra.runtime.version})
+
+      See https://hydra.cc for more info.
+
+
+      == Flags ==
+
+      $FLAGS_HELP
+
+
+      == Configuration groups ==
+
+      Compose your configuration from those groups (For example, append hydra/job_logging=disabled
+      to command line)
+
+
+      $HYDRA_CONFIG_GROUPS
+
+
+      Use ''--cfg hydra'' to Show the Hydra config.
+
+      '
+    hydra_help: ???
+  hydra_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][HYDRA] %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+    root:
+      level: INFO
+      handlers:
+      - console
+    loggers:
+      logging_example:
+        level: DEBUG
+    disable_existing_loggers: false
+  job_logging:
+    version: 1
+    formatters:
+      simple:
+        format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
+    handlers:
+      console:
+        class: logging.StreamHandler
+        formatter: simple
+        stream: ext://sys.stdout
+      file:
+        class: logging.FileHandler
+        formatter: simple
+        filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
+    root:
+      level: INFO
+      handlers:
+      - console
+      - file
+    disable_existing_loggers: false
+  env: {}
+  mode: RUN
+  searchpath: []
+  callbacks: {}
+  output_subdir: .hydra
+  overrides:
+    hydra:
+    - hydra.mode=RUN
+    task: []
+  job:
+    name: train_mlp_projector_workspace
+    chdir: null
+    override_dirname: ''
+    id: ???
+    num: ???
+    config_name: train_mlp_projector_workspace
+    env_set: {}
+    env_copy: []
+    config:
+      override_dirname:
+        kv_sep: '='
+        item_sep: ','
+        exclude_keys: []
+  runtime:
+    version: 1.2.0
+    version_base: '1.2'
+    cwd: /home/chyang/workspace/LLM-BC
+    config_sources:
+    - path: hydra.conf
+      schema: pkg
+      provider: hydra
+    - path: /home/chyang/workspace/LLM-BC/llmbc/config
+      schema: file
+      provider: main
+    - path: ''
+      schema: structured
+      provider: schema
+    output_dir: /home/chyang/workspace/LLM-BC/data/outputs/2025.10.22/00.25.59_train_mlp_projector_box-close-v2
+    choices:
+      llm: smollm2-135m-instruct
+      task: box-close-v2
+      hydra/env: default
+      hydra/callbacks: null
+      hydra/job_logging: default
+      hydra/hydra_logging: default
+      hydra/hydra_help: default
+      hydra/help: default
+      hydra/sweeper: basic
+      hydra/launcher: basic
+      hydra/output: default
+  verbose: false
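
hydra.yaml records what is needed to re-compose this exact config: `config_name: train_mlp_projector_workspace`, the config dir under `config_sources`, and the group `choices` (`llm: smollm2-135m-instruct`, `task: box-close-v2`, both defaults here since overrides.yaml below is empty). A speculative sketch with Hydra's compose API, assuming an LLM-BC checkout at the recorded path:

```python
from hydra import compose, initialize_config_dir

# config_dir comes from config_sources above; point it at your own checkout.
with initialize_config_dir(
    config_dir="/home/chyang/workspace/LLM-BC/llmbc/config",
    version_base="1.2",
):
    cfg = compose(config_name="train_mlp_projector_workspace")
```
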
2025.10.22/00.25.59_train_mlp_projector_box-close-v2/.hydra/overrides.yaml ADDED
@@ -0,0 +1 @@
+[]
2025.10.22/00.25.59_train_mlp_projector_box-close-v2/checkpoints/epoch=0090-val_loss=1.364.ckpt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f66b3d210e6afe193d4c04215a548c31a34694bea2e4420b85044d2b223dfe76
+size 4112578
2025.10.22/00.25.59_train_mlp_projector_box-close-v2/checkpoints/epoch=0093-val_loss=1.364.ckpt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b19d97440319c1ea7b3b8e802a3298a832bab00573137784daba2b9afbc56747
+size 4112578
2025.10.22/00.25.59_train_mlp_projector_box-close-v2/checkpoints/epoch=0097-val_loss=1.366.ckpt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c3676d660138b8b772a7fd603c5bfe1f63c9edc0a07c6b758eb9646c990f2ba
+size 4112578
2025.10.22/00.25.59_train_mlp_projector_box-close-v2/checkpoints/epoch=0098-val_loss=1.360.ckpt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d846285bee0f7575023c4e4188d174b27a0a1c9b4a366c58bb02723e30d9091d
+size 4112578
2025.10.22/00.25.59_train_mlp_projector_box-close-v2/checkpoints/epoch=0099-val_loss=1.364.ckpt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef2effe7a61726a799273149eb8e144dd1e8d624c2159279b2ac657f133c28d6
+size 4112578
2025.10.22/00.25.59_train_mlp_projector_box-close-v2/checkpoints/latest.ckpt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef2effe7a61726a799273149eb8e144dd1e8d624c2159279b2ac657f133c28d6
+size 4112578
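
Note the bookkeeping here: the five `epoch=*.ckpt` files are what survives the `checkpoint.topk` policy from config.yaml (`k: 5`, `mode: min` on `val_loss`, so epoch 98's 1.360 is the best), and `latest.ckpt` carries the same sha256 `oid` as `epoch=0099-val_loss=1.364.ckpt`, i.e. it is a byte-identical copy of the final checkpoint. The filenames come straight from `format_str`:

```python
# format_str from config.yaml; val_loss taken from wandb-summary.json below.
fmt = "epoch={epoch:04d}-val_loss={val_loss:.3f}.ckpt"
assert fmt.format(epoch=99, val_loss=1.3635742664337158) == "epoch=0099-val_loss=1.364.ckpt"
```
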
2025.10.22/00.25.59_train_mlp_projector_box-close-v2/logs.json.txt ADDED
(Diff too large to render.)
2025.10.22/00.25.59_train_mlp_projector_box-close-v2/train_mlp_projector_workspace.log ADDED
File without changes
2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/debug-internal.log ADDED
@@ -0,0 +1,18 @@
+{"time":"2025-10-22T00:26:02.756579779+08:00","level":"INFO","msg":"using version","core version":"0.18.6"}
+{"time":"2025-10-22T00:26:02.75658787+08:00","level":"INFO","msg":"created symlink","path":"/home/chyang/workspace/LLM-BC/data/outputs/2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/run-20251022_002602-rvk7mjhx/logs/debug-core.log"}
+{"time":"2025-10-22T00:26:02.861085521+08:00","level":"INFO","msg":"created new stream","id":"rvk7mjhx"}
+{"time":"2025-10-22T00:26:02.861104058+08:00","level":"INFO","msg":"stream: started","id":"rvk7mjhx"}
+{"time":"2025-10-22T00:26:02.86116531+08:00","level":"INFO","msg":"handler: started","stream_id":{"value":"rvk7mjhx"}}
+{"time":"2025-10-22T00:26:02.86115692+08:00","level":"INFO","msg":"writer: Do: started","stream_id":{"value":"rvk7mjhx"}}
+{"time":"2025-10-22T00:26:02.861348083+08:00","level":"INFO","msg":"sender: started","stream_id":"rvk7mjhx"}
+{"time":"2025-10-22T00:26:03.535795016+08:00","level":"INFO","msg":"Starting system monitor"}
+{"time":"2025-10-22T05:48:34.287173856+08:00","level":"INFO","msg":"api: retrying HTTP error","status":502,"url":"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/rvk7mjhx/file_stream"}
+{"time":"2025-10-22T06:14:37.009839286+08:00","level":"INFO","msg":"api: retrying HTTP error","status":502,"url":"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/rvk7mjhx/file_stream"}
+{"time":"2025-10-22T23:32:00.707133497+08:00","level":"INFO","msg":"stream: closing","id":"rvk7mjhx"}
+{"time":"2025-10-22T23:32:00.707161267+08:00","level":"INFO","msg":"Stopping system monitor"}
+{"time":"2025-10-22T23:32:00.707804728+08:00","level":"INFO","msg":"Stopped system monitor"}
+{"time":"2025-10-22T23:32:01.791665035+08:00","level":"INFO","msg":"fileTransfer: Close: file transfer manager closed"}
+{"time":"2025-10-22T23:32:02.043511307+08:00","level":"INFO","msg":"handler: closed","stream_id":{"value":"rvk7mjhx"}}
+{"time":"2025-10-22T23:32:02.04355714+08:00","level":"INFO","msg":"writer: Close: closed","stream_id":{"value":"rvk7mjhx"}}
+{"time":"2025-10-22T23:32:02.043570721+08:00","level":"INFO","msg":"sender: closed","stream_id":"rvk7mjhx"}
+{"time":"2025-10-22T23:32:02.043608004+08:00","level":"INFO","msg":"stream: closed","id":"rvk7mjhx"}
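
The only anomalies in this internal log are two 502s from api.wandb.ai (05:48 and 06:14), both retried successfully; the stream still closed cleanly at 23:32. Since every line is a JSON object, such events are easy to pull out. A small sketch, path assumed relative to the run's wandb/ directory:

```python
import json

# Collect retried-HTTP-error events from the line-delimited JSON log.
with open("debug-internal.log") as f:
    retries = [json.loads(line) for line in f if "retrying HTTP error" in line]

for event in retries:
    print(event["time"], event["status"], event["url"])  # two 502s in this run
```
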
2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/debug.log ADDED
@@ -0,0 +1,27 @@
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_setup.py:_flush():79] Current SDK version is 0.18.6
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_setup.py:_flush():79] Configure stats pid to 4075930
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_setup.py:_flush():79] Loading settings from /home/chyang/.config/wandb/settings
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_setup.py:_flush():79] Loading settings from /home/chyang/workspace/LLM-BC/wandb/settings
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_setup.py:_flush():79] Loading settings from environment variables: {}
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_setup.py:_flush():79] Applying setup settings: {'mode': 'online', '_disable_service': None}
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_setup.py:_flush():79] Inferring run settings from compute environment: {'program_relpath': 'llmbc/workspace/train_mlp_projector_workspace.py', 'program_abspath': '/home/chyang/workspace/LLM-BC/llmbc/workspace/train_mlp_projector_workspace.py', 'program': '/home/chyang/workspace/LLM-BC/./llmbc/workspace/train_mlp_projector_workspace.py'}
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_setup.py:_flush():79] Applying login settings: {}
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_init.py:_log_setup():533] Logging user logs to /home/chyang/workspace/LLM-BC/data/outputs/2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/run-20251022_002602-rvk7mjhx/logs/debug.log
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_init.py:_log_setup():534] Logging internal logs to /home/chyang/workspace/LLM-BC/data/outputs/2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/run-20251022_002602-rvk7mjhx/logs/debug-internal.log
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_init.py:init():619] calling init triggers
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_init.py:init():626] wandb.init called with sweep_config: {}
+config: {'name': 'train_mlp_projector', '_target_': 'llmbc.workspace.train_mlp_projector_workspace.TrainMlpProjectorWorkspace', 'obs_dim': 9, 'action_dim': 4, 'task_name': 'box-close-v2', 'exp_name': 'default', 'model_name': 'HuggingFaceTB/SmolLM2-135M-Instruct', 'horizon': 1, 'n_obs_steps': 1, 'n_action_steps': 1, 'n_latency_steps': 0, 'past_action_visible': False, 'llm_translator': {'_target_': 'llmbc.translator.llm_translator.LLMTranslator', 'cfg': {'name': 'HuggingFaceTB/SmolLM2-135M-Instruct', 'model_name': 'SmolLM2-135M-Instruct', 'config_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig', 'causal_lm_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM', 'use_quantization': False, 'use_joint_mlp_projector': True, 'llm_mode': 'pretrained', 'finetune_mode': 'orig', 'checkpoint': '', 'max_length': 100, 'lora_config': {'r': 32, 'lora_alpha': 64, 'lora_dropout': 0.05, 'bias': 'none', 'task_type': 'CAUSAL_LM'}, 'prompter': {'_target_': 'llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter', 'use_joint_mlp_projector': True}, 'hydra': {'job': {'override_dirname': 'HuggingFaceTB/SmolLM2-135M-Instruct'}, 'run': {'dir': 'data/outputs/2025.10.22/00.25.59_HuggingFaceTB/SmolLM2-135M-Instruct'}}}, 'obs_dim': 9, 'action_dim': 4, 'horizon': 1, 'n_obs_steps': 1, 'n_action_steps': 1}, 'dataloader': {'batch_size': 128, 'num_workers': 0, 'shuffle': True, 'pin_memory': False, 'persistent_workers': False}, 'val_dataloader': {'batch_size': 128, 'num_workers': 0, 'shuffle': True, 'pin_memory': False, 'persistent_workers': False}, 'optimizer': {'_target_': 'torch.optim.Adam', 'lr': 0.0001, 'betas': [0.95, 0.999], 'eps': 1e-08, 'weight_decay': 1e-06}, 'training': {'device': 'cuda', 'seed': 42, 'debug': False, 'resume': True, 'lr_scheduler': 'cosine', 'lr_warmup_steps': 10, 'num_epochs': 100, 'gradient_accumulate_every': 2, 'grad_norm_clip': 1, 'enable_normalizer': True, 'checkpoint_every': 1, 'val_every': 1, 'sample_every': 1, 'sample_max_batch': 128, 'max_train_steps': None, 'max_val_steps': None, 'tqdm_interval_sec': 1.0}, 'logging': {'project': 'llm_module_training', 'resume': True, 'mode': 'online', 'name': '2025.10.22-00.26.01_train_mlp_projector_box-close-v2', 'tags': ['train_mlp_projector', 'box-close-v2', 'default'], 'id': None, 'group': None}, 'checkpoint': {'topk': {'monitor_key': 'val_loss', 'mode': 'min', 'k': 5, 'format_str': 'epoch={epoch:04d}-val_loss={val_loss:.3f}.ckpt'}, 'save_last_ckpt': True, 'save_last_snapshot': False}, 'multi_run': {'run_dir': 'data/outputs/2025.10.22/00.25.59_train_mlp_projector_box-close-v2', 'wandb_name_base': '2025.10.22-00.26.01_train_mlp_projector_box-close-v2'}, 'task': {'name': 'box-close-v2', 'obs_dim': 9, 'action_dim': 4, 'env_runner': {'_target_': 'llmbc.env_runner.metaworld_lowdim_runner.MetaworldLowdimRunner', 'env_name': 'llf-metaworld-box-close-v2', 'n_train': 10, 'n_test': 50, 'n_envs': 10, 'max_steps': 30, 'n_obs_steps': 1, 'n_action_steps': 1, 'instruction_type': 'b', 'feedback_type': ['hp', 'hn', 'fp'], 'visual': False, 'discount': 0.9}, 'dataset': {'_target_': 'llmbc.dataset.metaworld_lowdim_dataset.MetaworldLowdimDataset', 'data_path': 'datasets/box-close-v2-general.pt', 'data_path2': 'datasets/box-close-v2.pt', 'horizon': 1, 'pad_before': 0, 'pad_after': 0, 'obs_eef_target': True, 'use_manual_normalizer': False, 'val_ratio': 0.05, 'dummy_normalizer': False}, 'instructor': {'_target_': 'llmbc.translator.instructor.metaworld_instructor.box_close_v2_instructor.BoxCloseV2Instructor'}}, 'llm': {'name': 'HuggingFaceTB/SmolLM2-135M-Instruct', 'model_name': 'SmolLM2-135M-Instruct', 'config_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig', 'causal_lm_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM', 'use_quantization': False, 'use_joint_mlp_projector': True, 'llm_mode': 'pretrained', 'finetune_mode': 'orig', 'checkpoint': '', 'max_length': 100, 'lora_config': {'r': 32, 'lora_alpha': 64, 'lora_dropout': 0.05, 'bias': 'none', 'task_type': 'CAUSAL_LM'}, 'prompter': {'_target_': 'llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter', 'use_joint_mlp_projector': True}, 'hydra': {'job': {'override_dirname': 'HuggingFaceTB/SmolLM2-135M-Instruct'}, 'run': {'dir': 'data/outputs/2025.10.22/00.25.59_HuggingFaceTB/SmolLM2-135M-Instruct'}}}}
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_init.py:init():669] starting backend
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_init.py:init():673] sending inform_init request
+2025-10-22 00:26:02,753 INFO MainThread:4075930 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+2025-10-22 00:26:02,753 INFO MainThread:4075930 [wandb_init.py:init():686] backend started and connected
+2025-10-22 00:26:02,757 INFO MainThread:4075930 [wandb_init.py:init():781] updated telemetry
+2025-10-22 00:26:02,803 INFO MainThread:4075930 [wandb_init.py:init():814] communicating run to backend with 90.0 second timeout
+2025-10-22 00:26:03,532 INFO MainThread:4075930 [wandb_init.py:init():867] starting run threads in backend
+2025-10-22 00:26:03,630 INFO MainThread:4075930 [wandb_run.py:_console_start():2451] atexit reg
+2025-10-22 00:26:03,630 INFO MainThread:4075930 [wandb_run.py:_redirect():2299] redirect: wrap_raw
+2025-10-22 00:26:03,631 INFO MainThread:4075930 [wandb_run.py:_redirect():2364] Wrapping output streams.
+2025-10-22 00:26:03,631 INFO MainThread:4075930 [wandb_run.py:_redirect():2389] Redirects installed.
+2025-10-22 00:26:03,632 INFO MainThread:4075930 [wandb_init.py:init():911] run started, returning control to user process
+2025-10-22 00:26:03,633 INFO MainThread:4075930 [wandb_run.py:_config_callback():1389] config_cb None None {'output_dir': '/home/chyang/workspace/LLM-BC/data/outputs/2025.10.22/00.25.59_train_mlp_projector_box-close-v2'}
+2025-10-22 23:32:00,707 WARNING MsgRouterThr:4075930 [router.py:message_loop():75] message_loop has been closed
2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/run-20251022_002602-rvk7mjhx/files/config.yaml ADDED
@@ -0,0 +1,238 @@
+_target_:
+  value: llmbc.workspace.train_mlp_projector_workspace.TrainMlpProjectorWorkspace
+_wandb:
+  value:
+    cli_version: 0.18.6
+    m: []
+    python_version: 3.9.20
+    t:
+      "1":
+      - 1
+      - 2
+      - 3
+      - 5
+      - 11
+      - 12
+      - 41
+      - 49
+      - 50
+      - 51
+      - 53
+      - 55
+      - 71
+      - 83
+      - 98
+      "2":
+      - 1
+      - 2
+      - 3
+      - 5
+      - 11
+      - 12
+      - 41
+      - 49
+      - 50
+      - 51
+      - 53
+      - 55
+      - 71
+      - 83
+      - 98
+      "3":
+      - 13
+      - 15
+      - 16
+      - 23
+      - 55
+      - 61
+      "4": 3.9.20
+      "5": 0.18.6
+      "6": 4.47.1
+      "8":
+      - 5
+      "12": 0.18.6
+      "13": linux-x86_64
+action_dim:
+  value: 4
+checkpoint:
+  value:
+    save_last_ckpt: true
+    save_last_snapshot: false
+    topk:
+      format_str: epoch={epoch:04d}-val_loss={val_loss:.3f}.ckpt
+      k: 5
+      mode: min
+      monitor_key: val_loss
+dataloader:
+  value:
+    batch_size: 128
+    num_workers: 0
+    persistent_workers: false
+    pin_memory: false
+    shuffle: true
+exp_name:
+  value: default
+horizon:
+  value: 1
+llm:
+  value:
+    causal_lm_target: llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM
+    checkpoint: ""
+    config_target: llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig
+    finetune_mode: orig
+    hydra:
+      job:
+        override_dirname: HuggingFaceTB/SmolLM2-135M-Instruct
+      run:
+        dir: data/outputs/2025.10.22/00.25.59_HuggingFaceTB/SmolLM2-135M-Instruct
+    llm_mode: pretrained
+    lora_config:
+      bias: none
+      lora_alpha: 64
+      lora_dropout: 0.05
+      r: 32
+      task_type: CAUSAL_LM
+    max_length: 100
+    model_name: SmolLM2-135M-Instruct
+    name: HuggingFaceTB/SmolLM2-135M-Instruct
+    prompter:
+      _target_: llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter
+      use_joint_mlp_projector: true
+    use_joint_mlp_projector: true
+    use_quantization: false
+llm_translator:
+  value:
+    _target_: llmbc.translator.llm_translator.LLMTranslator
+    action_dim: 4
+    cfg:
+      causal_lm_target: llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM
+      checkpoint: ""
+      config_target: llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig
+      finetune_mode: orig
+      hydra:
+        job:
+          override_dirname: HuggingFaceTB/SmolLM2-135M-Instruct
+        run:
+          dir: data/outputs/2025.10.22/00.25.59_HuggingFaceTB/SmolLM2-135M-Instruct
+      llm_mode: pretrained
+      lora_config:
+        bias: none
+        lora_alpha: 64
+        lora_dropout: 0.05
+        r: 32
+        task_type: CAUSAL_LM
+      max_length: 100
+      model_name: SmolLM2-135M-Instruct
+      name: HuggingFaceTB/SmolLM2-135M-Instruct
+      prompter:
+        _target_: llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter
+        use_joint_mlp_projector: true
+      use_joint_mlp_projector: true
+      use_quantization: false
+    horizon: 1
+    n_action_steps: 1
+    n_obs_steps: 1
+    obs_dim: 9
+logging:
+  value:
+    group: null
+    id: null
+    mode: online
+    name: 2025.10.22-00.26.01_train_mlp_projector_box-close-v2
+    project: llm_module_training
+    resume: true
+    tags:
+    - train_mlp_projector
+    - box-close-v2
+    - default
+model_name:
+  value: HuggingFaceTB/SmolLM2-135M-Instruct
+multi_run:
+  value:
+    run_dir: data/outputs/2025.10.22/00.25.59_train_mlp_projector_box-close-v2
+    wandb_name_base: 2025.10.22-00.26.01_train_mlp_projector_box-close-v2
+n_action_steps:
+  value: 1
+n_latency_steps:
+  value: 0
+n_obs_steps:
+  value: 1
+name:
+  value: train_mlp_projector
+obs_dim:
+  value: 9
+optimizer:
+  value:
+    _target_: torch.optim.Adam
+    betas:
+    - 0.95
+    - 0.999
+    eps: 1e-08
+    lr: 0.0001
+    weight_decay: 1e-06
+output_dir:
+  value: /home/chyang/workspace/LLM-BC/data/outputs/2025.10.22/00.25.59_train_mlp_projector_box-close-v2
+past_action_visible:
+  value: false
+task:
+  value:
+    action_dim: 4
+    dataset:
+      _target_: llmbc.dataset.metaworld_lowdim_dataset.MetaworldLowdimDataset
+      data_path: datasets/box-close-v2-general.pt
+      data_path2: datasets/box-close-v2.pt
+      dummy_normalizer: false
+      horizon: 1
+      obs_eef_target: true
+      pad_after: 0
+      pad_before: 0
+      use_manual_normalizer: false
+      val_ratio: 0.05
+    env_runner:
+      _target_: llmbc.env_runner.metaworld_lowdim_runner.MetaworldLowdimRunner
+      discount: 0.9
+      env_name: llf-metaworld-box-close-v2
+      feedback_type:
+      - hp
+      - hn
+      - fp
+      instruction_type: b
+      max_steps: 30
+      n_action_steps: 1
+      n_envs: 10
+      n_obs_steps: 1
+      n_test: 50
+      n_train: 10
+      visual: false
+    instructor:
+      _target_: llmbc.translator.instructor.metaworld_instructor.box_close_v2_instructor.BoxCloseV2Instructor
+    name: box-close-v2
+    obs_dim: 9
+task_name:
+  value: box-close-v2
+training:
+  value:
+    checkpoint_every: 1
+    debug: false
+    device: cuda
+    enable_normalizer: true
+    grad_norm_clip: 1
+    gradient_accumulate_every: 2
+    lr_scheduler: cosine
+    lr_warmup_steps: 10
+    max_train_steps: null
+    max_val_steps: null
+    num_epochs: 100
+    resume: true
+    sample_every: 1
+    sample_max_batch: 128
+    seed: 42
+    tqdm_interval_sec: 1
+    val_every: 1
+val_dataloader:
+  value:
+    batch_size: 128
+    num_workers: 0
+    persistent_workers: false
+    pin_memory: false
+    shuffle: true
2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/run-20251022_002602-rvk7mjhx/files/output.log ADDED
@@ -0,0 +1 @@
+
2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/run-20251022_002602-rvk7mjhx/files/wandb-metadata.json ADDED
@@ -0,0 +1,49 @@
+{
+  "os": "Linux-6.8.0-79-generic-x86_64-with-glibc2.35",
+  "python": "3.9.20",
+  "startedAt": "2025-10-21T16:26:02.753656Z",
+  "program": "/home/chyang/workspace/LLM-BC/./llmbc/workspace/train_mlp_projector_workspace.py",
+  "codePath": "llmbc/workspace/train_mlp_projector_workspace.py",
+  "git": {
+    "remote": "https://github.com/CHYang25/LLM-BC.git",
+    "commit": "82d8f9783f620ddedb277dc15adc1220b7a0761c"
+  },
+  "email": "chris920325@gmail.com",
+  "root": "/home/chyang/workspace/LLM-BC/data/outputs/2025.10.22/00.25.59_train_mlp_projector_box-close-v2",
+  "host": "A6000-2",
+  "username": "chyang",
+  "executable": "/home/chyang/miniconda3/envs/llm-bc/bin/python3",
+  "codePathLocal": "llmbc/workspace/train_mlp_projector_workspace.py",
+  "cpu_count": 12,
+  "cpu_count_logical": 24,
+  "gpu": "NVIDIA RTX A6000",
+  "gpu_count": 2,
+  "disk": {
+    "/": {
+      "total": "1967317549056",
+      "used": "1651976560640"
+    }
+  },
+  "memory": {
+    "total": "134538616832"
+  },
+  "cpu": {
+    "count": 12,
+    "countLogical": 24
+  },
+  "gpu_nvidia": [
+    {
+      "name": "NVIDIA RTX A6000",
+      "memoryTotal": "51527024640",
+      "cudaCores": 10752,
+      "architecture": "Ampere"
+    },
+    {
+      "name": "NVIDIA RTX A6000",
+      "memoryTotal": "51527024640",
+      "cudaCores": 10752,
+      "architecture": "Ampere"
+    }
+  ],
+  "cudaVersion": "12.6"
+}
2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/run-20251022_002602-rvk7mjhx/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
+{"global_step":56499,"_step":56499,"_wandb":{"runtime":83157},"_runtime":83157.953493991,"lr":0,"epoch":99,"train_loss":1.3826065704885837,"grad_norm":34.68653106689453,"val_loss":1.3635742664337158,"_timestamp":1.7611471206686912e+09}
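
wandb-summary.json condenses the run: 100 epochs (`epoch` is 0-indexed), 56,499 global steps, final train_loss ≈ 1.383 against val_loss ≈ 1.364, and a runtime of 83,157 s ≈ 23.1 h, consistent with the 00:26 to 23:32 window in the debug logs. A minimal sketch for reading it, path assumed relative to the files/ directory above:

```python
import json

with open("wandb-summary.json") as f:
    s = json.load(f)

print(f"epochs: {s['epoch'] + 1}, global steps: {s['global_step']}")
print(f"train_loss: {s['train_loss']:.4f}, val_loss: {s['val_loss']:.4f}")
print(f"runtime: {s['_wandb']['runtime'] / 3600:.1f} h")  # ~23.1 h
```
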
2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/run-20251022_002602-rvk7mjhx/logs/debug-core.log ADDED
@@ -0,0 +1,14 @@
+{"time":"2025-10-22T00:26:02.11917178+08:00","level":"INFO","msg":"started logging, with flags","port-filename":"/tmp/tmp73fuvlbg/port-4075930.txt","pid":4075930,"debug":false,"disable-analytics":false}
+{"time":"2025-10-22T00:26:02.11919596+08:00","level":"INFO","msg":"FeatureState","shutdownOnParentExitEnabled":false}
+{"time":"2025-10-22T00:26:02.120158635+08:00","level":"INFO","msg":"server is running","addr":{"IP":"127.0.0.1","Port":35269,"Zone":""}}
+{"time":"2025-10-22T00:26:02.120252111+08:00","level":"INFO","msg":"Will exit if parent process dies.","ppid":4075930}
+{"time":"2025-10-22T00:26:02.31670632+08:00","level":"INFO","msg":"connection: ManageConnectionData: new connection created","id":"127.0.0.1:35546"}
+{"time":"2025-10-22T00:26:02.756485777+08:00","level":"INFO","msg":"handleInformInit: received","streamId":"rvk7mjhx","id":"127.0.0.1:35546"}
+{"time":"2025-10-22T00:26:02.861107293+08:00","level":"INFO","msg":"handleInformInit: stream started","streamId":"rvk7mjhx","id":"127.0.0.1:35546"}
+{"time":"2025-10-22T23:32:00.70707028+08:00","level":"INFO","msg":"handleInformTeardown: server teardown initiated","id":"127.0.0.1:35546"}
+{"time":"2025-10-22T23:32:00.707178291+08:00","level":"INFO","msg":"server is shutting down"}
+{"time":"2025-10-22T23:32:00.707210487+08:00","level":"INFO","msg":"connection: Close: initiating connection closure","id":"127.0.0.1:35546"}
+{"time":"2025-10-22T23:32:00.707271333+08:00","level":"INFO","msg":"connection: Close: connection successfully closed","id":"127.0.0.1:35546"}
+{"time":"2025-10-22T23:32:02.043653217+08:00","level":"INFO","msg":"handleInformTeardown: server shutdown complete","id":"127.0.0.1:35546"}
+{"time":"2025-10-22T23:32:02.043673016+08:00","level":"INFO","msg":"connection: ManageConnectionData: connection closed","id":"127.0.0.1:35546"}
+{"time":"2025-10-22T23:32:02.043684701+08:00","level":"INFO","msg":"server is closed"}
2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/run-20251022_002602-rvk7mjhx/logs/debug-internal.log ADDED
@@ -0,0 +1,18 @@
+{"time":"2025-10-22T00:26:02.756579779+08:00","level":"INFO","msg":"using version","core version":"0.18.6"}
+{"time":"2025-10-22T00:26:02.75658787+08:00","level":"INFO","msg":"created symlink","path":"/home/chyang/workspace/LLM-BC/data/outputs/2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/run-20251022_002602-rvk7mjhx/logs/debug-core.log"}
+{"time":"2025-10-22T00:26:02.861085521+08:00","level":"INFO","msg":"created new stream","id":"rvk7mjhx"}
+{"time":"2025-10-22T00:26:02.861104058+08:00","level":"INFO","msg":"stream: started","id":"rvk7mjhx"}
+{"time":"2025-10-22T00:26:02.86116531+08:00","level":"INFO","msg":"handler: started","stream_id":{"value":"rvk7mjhx"}}
+{"time":"2025-10-22T00:26:02.86115692+08:00","level":"INFO","msg":"writer: Do: started","stream_id":{"value":"rvk7mjhx"}}
+{"time":"2025-10-22T00:26:02.861348083+08:00","level":"INFO","msg":"sender: started","stream_id":"rvk7mjhx"}
+{"time":"2025-10-22T00:26:03.535795016+08:00","level":"INFO","msg":"Starting system monitor"}
+{"time":"2025-10-22T05:48:34.287173856+08:00","level":"INFO","msg":"api: retrying HTTP error","status":502,"url":"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/rvk7mjhx/file_stream"}
+{"time":"2025-10-22T06:14:37.009839286+08:00","level":"INFO","msg":"api: retrying HTTP error","status":502,"url":"https://api.wandb.ai/files/chyang25-national-taiwan-university/llm_module_training/rvk7mjhx/file_stream"}
+{"time":"2025-10-22T23:32:00.707133497+08:00","level":"INFO","msg":"stream: closing","id":"rvk7mjhx"}
+{"time":"2025-10-22T23:32:00.707161267+08:00","level":"INFO","msg":"Stopping system monitor"}
+{"time":"2025-10-22T23:32:00.707804728+08:00","level":"INFO","msg":"Stopped system monitor"}
+{"time":"2025-10-22T23:32:01.791665035+08:00","level":"INFO","msg":"fileTransfer: Close: file transfer manager closed"}
+{"time":"2025-10-22T23:32:02.043511307+08:00","level":"INFO","msg":"handler: closed","stream_id":{"value":"rvk7mjhx"}}
+{"time":"2025-10-22T23:32:02.04355714+08:00","level":"INFO","msg":"writer: Close: closed","stream_id":{"value":"rvk7mjhx"}}
+{"time":"2025-10-22T23:32:02.043570721+08:00","level":"INFO","msg":"sender: closed","stream_id":"rvk7mjhx"}
+{"time":"2025-10-22T23:32:02.043608004+08:00","level":"INFO","msg":"stream: closed","id":"rvk7mjhx"}
2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/run-20251022_002602-rvk7mjhx/logs/debug.log ADDED
@@ -0,0 +1,27 @@
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_setup.py:_flush():79] Current SDK version is 0.18.6
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_setup.py:_flush():79] Configure stats pid to 4075930
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_setup.py:_flush():79] Loading settings from /home/chyang/.config/wandb/settings
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_setup.py:_flush():79] Loading settings from /home/chyang/workspace/LLM-BC/wandb/settings
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_setup.py:_flush():79] Loading settings from environment variables: {}
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_setup.py:_flush():79] Applying setup settings: {'mode': 'online', '_disable_service': None}
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_setup.py:_flush():79] Inferring run settings from compute environment: {'program_relpath': 'llmbc/workspace/train_mlp_projector_workspace.py', 'program_abspath': '/home/chyang/workspace/LLM-BC/llmbc/workspace/train_mlp_projector_workspace.py', 'program': '/home/chyang/workspace/LLM-BC/./llmbc/workspace/train_mlp_projector_workspace.py'}
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_setup.py:_flush():79] Applying login settings: {}
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_init.py:_log_setup():533] Logging user logs to /home/chyang/workspace/LLM-BC/data/outputs/2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/run-20251022_002602-rvk7mjhx/logs/debug.log
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_init.py:_log_setup():534] Logging internal logs to /home/chyang/workspace/LLM-BC/data/outputs/2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/run-20251022_002602-rvk7mjhx/logs/debug-internal.log
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_init.py:init():619] calling init triggers
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_init.py:init():626] wandb.init called with sweep_config: {}
+config: {'name': 'train_mlp_projector', '_target_': 'llmbc.workspace.train_mlp_projector_workspace.TrainMlpProjectorWorkspace', 'obs_dim': 9, 'action_dim': 4, 'task_name': 'box-close-v2', 'exp_name': 'default', 'model_name': 'HuggingFaceTB/SmolLM2-135M-Instruct', 'horizon': 1, 'n_obs_steps': 1, 'n_action_steps': 1, 'n_latency_steps': 0, 'past_action_visible': False, 'llm_translator': {'_target_': 'llmbc.translator.llm_translator.LLMTranslator', 'cfg': {'name': 'HuggingFaceTB/SmolLM2-135M-Instruct', 'model_name': 'SmolLM2-135M-Instruct', 'config_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig', 'causal_lm_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM', 'use_quantization': False, 'use_joint_mlp_projector': True, 'llm_mode': 'pretrained', 'finetune_mode': 'orig', 'checkpoint': '', 'max_length': 100, 'lora_config': {'r': 32, 'lora_alpha': 64, 'lora_dropout': 0.05, 'bias': 'none', 'task_type': 'CAUSAL_LM'}, 'prompter': {'_target_': 'llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter', 'use_joint_mlp_projector': True}, 'hydra': {'job': {'override_dirname': 'HuggingFaceTB/SmolLM2-135M-Instruct'}, 'run': {'dir': 'data/outputs/2025.10.22/00.25.59_HuggingFaceTB/SmolLM2-135M-Instruct'}}}, 'obs_dim': 9, 'action_dim': 4, 'horizon': 1, 'n_obs_steps': 1, 'n_action_steps': 1}, 'dataloader': {'batch_size': 128, 'num_workers': 0, 'shuffle': True, 'pin_memory': False, 'persistent_workers': False}, 'val_dataloader': {'batch_size': 128, 'num_workers': 0, 'shuffle': True, 'pin_memory': False, 'persistent_workers': False}, 'optimizer': {'_target_': 'torch.optim.Adam', 'lr': 0.0001, 'betas': [0.95, 0.999], 'eps': 1e-08, 'weight_decay': 1e-06}, 'training': {'device': 'cuda', 'seed': 42, 'debug': False, 'resume': True, 'lr_scheduler': 'cosine', 'lr_warmup_steps': 10, 'num_epochs': 100, 'gradient_accumulate_every': 2, 'grad_norm_clip': 1, 'enable_normalizer': True, 'checkpoint_every': 1, 'val_every': 1, 'sample_every': 1, 'sample_max_batch': 128, 'max_train_steps': None, 'max_val_steps': None, 'tqdm_interval_sec': 1.0}, 'logging': {'project': 'llm_module_training', 'resume': True, 'mode': 'online', 'name': '2025.10.22-00.26.01_train_mlp_projector_box-close-v2', 'tags': ['train_mlp_projector', 'box-close-v2', 'default'], 'id': None, 'group': None}, 'checkpoint': {'topk': {'monitor_key': 'val_loss', 'mode': 'min', 'k': 5, 'format_str': 'epoch={epoch:04d}-val_loss={val_loss:.3f}.ckpt'}, 'save_last_ckpt': True, 'save_last_snapshot': False}, 'multi_run': {'run_dir': 'data/outputs/2025.10.22/00.25.59_train_mlp_projector_box-close-v2', 'wandb_name_base': '2025.10.22-00.26.01_train_mlp_projector_box-close-v2'}, 'task': {'name': 'box-close-v2', 'obs_dim': 9, 'action_dim': 4, 'env_runner': {'_target_': 'llmbc.env_runner.metaworld_lowdim_runner.MetaworldLowdimRunner', 'env_name': 'llf-metaworld-box-close-v2', 'n_train': 10, 'n_test': 50, 'n_envs': 10, 'max_steps': 30, 'n_obs_steps': 1, 'n_action_steps': 1, 'instruction_type': 'b', 'feedback_type': ['hp', 'hn', 'fp'], 'visual': False, 'discount': 0.9}, 'dataset': {'_target_': 'llmbc.dataset.metaworld_lowdim_dataset.MetaworldLowdimDataset', 'data_path': 'datasets/box-close-v2-general.pt', 'data_path2': 'datasets/box-close-v2.pt', 'horizon': 1, 'pad_before': 0, 'pad_after': 0, 'obs_eef_target': True, 'use_manual_normalizer': False, 'val_ratio': 0.05, 'dummy_normalizer': False}, 'instructor': {'_target_': 'llmbc.translator.instructor.metaworld_instructor.box_close_v2_instructor.BoxCloseV2Instructor'}}, 'llm': {'name': 'HuggingFaceTB/SmolLM2-135M-Instruct', 'model_name': 'SmolLM2-135M-Instruct', 'config_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig', 'causal_lm_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM', 'use_quantization': False, 'use_joint_mlp_projector': True, 'llm_mode': 'pretrained', 'finetune_mode': 'orig', 'checkpoint': '', 'max_length': 100, 'lora_config': {'r': 32, 'lora_alpha': 64, 'lora_dropout': 0.05, 'bias': 'none', 'task_type': 'CAUSAL_LM'}, 'prompter': {'_target_': 'llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter', 'use_joint_mlp_projector': True}, 'hydra': {'job': {'override_dirname': 'HuggingFaceTB/SmolLM2-135M-Instruct'}, 'run': {'dir': 'data/outputs/2025.10.22/00.25.59_HuggingFaceTB/SmolLM2-135M-Instruct'}}}}
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_init.py:init():669] starting backend
+2025-10-22 00:26:02,752 INFO MainThread:4075930 [wandb_init.py:init():673] sending inform_init request
+2025-10-22 00:26:02,753 INFO MainThread:4075930 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
+2025-10-22 00:26:02,753 INFO MainThread:4075930 [wandb_init.py:init():686] backend started and connected
+2025-10-22 00:26:02,757 INFO MainThread:4075930 [wandb_init.py:init():781] updated telemetry
+2025-10-22 00:26:02,803 INFO MainThread:4075930 [wandb_init.py:init():814] communicating run to backend with 90.0 second timeout
+2025-10-22 00:26:03,532 INFO MainThread:4075930 [wandb_init.py:init():867] starting run threads in backend
+2025-10-22 00:26:03,630 INFO MainThread:4075930 [wandb_run.py:_console_start():2451] atexit reg
+2025-10-22 00:26:03,630 INFO MainThread:4075930 [wandb_run.py:_redirect():2299] redirect: wrap_raw
+2025-10-22 00:26:03,631 INFO MainThread:4075930 [wandb_run.py:_redirect():2364] Wrapping output streams.
+2025-10-22 00:26:03,631 INFO MainThread:4075930 [wandb_run.py:_redirect():2389] Redirects installed.
+2025-10-22 00:26:03,632 INFO MainThread:4075930 [wandb_init.py:init():911] run started, returning control to user process
+2025-10-22 00:26:03,633 INFO MainThread:4075930 [wandb_run.py:_config_callback():1389] config_cb None None {'output_dir': '/home/chyang/workspace/LLM-BC/data/outputs/2025.10.22/00.25.59_train_mlp_projector_box-close-v2'}
+2025-10-22 23:32:00,707 WARNING MsgRouterThr:4075930 [router.py:message_loop():75] message_loop has been closed
2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/run-20251022_002602-rvk7mjhx/run-rvk7mjhx.wandb ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f644f8feb3ddcb5f8708bf78a4d9bb3c75cf97ee1d37ba30af2f53b01d8cb825
+size 66465965
2025.10.22/00.25.59_train_mlp_projector_box-close-v2/wandb/wandb-resume.json ADDED
@@ -0,0 +1 @@
+{"run_id": "rvk7mjhx"}
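
wandb-resume.json is the small state file that lets `logging.resume: true` reattach to run `rvk7mjhx` instead of opening a new one. A sketch of the equivalent explicit call; the entity is inferred from the api.wandb.ai URLs in debug-internal.log:

```python
import wandb

# Re-attach to the recorded run id rather than starting a fresh run.
run = wandb.init(
    project="llm_module_training",
    entity="chyang25-national-taiwan-university",
    id="rvk7mjhx",
    resume="allow",
)
```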