CHYang25 committed on
Commit e15961f · verified · 1 Parent(s): ab04142

Delete 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1

Files changed (32)
  1. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/.hydra/config.yaml +0 -115
  2. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/.hydra/hydra.yaml +0 -154
  3. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/.hydra/overrides.yaml +0 -1
  4. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/config.json +0 -42
  5. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/generation_config.json +0 -7
  6. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/merges.txt +0 -0
  7. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/mlp_projector.bin +0 -3
  8. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/model.safetensors +0 -3
  9. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/optimizer.pt +0 -3
  10. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/rng_state.pth +0 -3
  11. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/scheduler.pt +0 -3
  12. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/special_tokens_map.json +0 -34
  13. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/tokenizer.json +0 -0
  14. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/tokenizer_config.json +0 -155
  15. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/trainer_state.json +0 -2841
  16. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/training_args.bin +0 -3
  17. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/vocab.json +0 -0
  18. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/config.json +0 -42
  19. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/generation_config.json +0 -7
  20. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/model.safetensors +0 -3
  21. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/normalizer.pt +0 -3
  22. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/train.log +0 -11
  23. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/wandb/debug-internal.log +0 -17
  24. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/wandb/debug.log +0 -35
  25. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/wandb/run-20260327_162056-nhmfpc2t/files/config.yaml +0 -711
  26. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/wandb/run-20260327_162056-nhmfpc2t/files/output.log +0 -509
  27. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/wandb/run-20260327_162056-nhmfpc2t/files/wandb-metadata.json +0 -55
  28. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/wandb/run-20260327_162056-nhmfpc2t/files/wandb-summary.json +0 -1
  29. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/wandb/run-20260327_162056-nhmfpc2t/logs/debug-core.log +0 -16
  30. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/wandb/run-20260327_162056-nhmfpc2t/logs/debug-internal.log +0 -17
  31. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/wandb/run-20260327_162056-nhmfpc2t/logs/debug.log +0 -35
  32. 2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/wandb/run-20260327_162056-nhmfpc2t/run-nhmfpc2t.wandb +0 -3
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/.hydra/config.yaml DELETED
@@ -1,115 +0,0 @@
- name: train_llm_lowdim
- _target_: llmbc.workspace.train_llm_workspace.TrainLLMWorkspace
- obs_dim: ${task.obs_dim}
- action_dim: ${task.action_dim}
- horizon: 1
- n_obs_steps: 1
- n_action_steps: 1
- task_name: ${task.name}
- exp_name: train llm
- model_name: ${llm.name}
- use_quantization: ${llm.use_quantization}
- lora_config: ${llm.lora_config}
- dataset:
-   test_data_ratio: 0.01
-   debug: false
- training:
-   seed: 42
-   per_device_train_batch_size: 128
-   per_device_eval_batch_size: 128
-   gradient_accumulation_steps: 1
-   optim: paged_adamw_32bit
-   num_train_epochs: 10
-   eval_strategy: steps
-   logging_steps: 1
-   warmup_steps: 10
-   logging_strategy: steps
-   learning_rate: 0.0001
-   fp16: false
-   bf16: true
-   tf32: true
-   group_by_length: true
-   report_to: wandb
-   save_steps: 5000
-   eval_steps: 10
-   use_joint_mlp_projector: ${llm.use_joint_mlp_projector}
-   joint_obs_action_mlp_lr: 5.0e-05
- trainer:
-   obs_dim: ${obs_dim}
-   action_dim: ${action_dim}
-   use_joint_mlp_projector: ${llm.use_joint_mlp_projector}
-   max_seq_length: ${llm.max_length}
-   dataset_text_field: text
-   packing: false
- logging:
-   project: llm_module_finetuning
-   resume: true
-   mode: online
-   name: ${now:%Y.%m.%d-%H.%M.%S}_${name}_${task_name}
-   tags:
-   - ${name}
-   - ${task_name}
-   - ${exp_name}
-   id: null
-   group: null
- multi_run:
-   run_dir: data/outputs/${now:%Y.%m.%d}/${now:%H.%M.%S}_${name}_${task_name}
-   wandb_name_base: ${now:%Y.%m.%d-%H.%M.%S}_${name}_${task_name}
- task:
-   name: adroit-hand-hammer-v1
-   obs_dim: 46
-   action_dim: 26
-   env_runner:
-     _target_: llmbc.env_runner.adroit_lowdim_runner.AdroitHandLowdimRunner
-     env_name: llf-adroit-adroit-hand-hammer-v1
-     n_train: 10
-     n_test: 50
-     n_envs: 10
-     max_steps: 150
-     n_obs_steps: ${n_obs_steps}
-     n_action_steps: ${n_action_steps}
-     instruction_type: b
-     feedback_type:
-     - hp
-     - hn
-     - fp
-     visual: false
-     discount: 0.99
-   dataset:
-     _target_: llmbc.dataset.adroit_lowdim_dataset.AdroitHandLowdimDataset
-     data_path: datasets/adroit-hand-hammer-v1-general.pt
-     data_path2: datasets/adroit-hand-hammer-v1.pt
-     horizon: ${horizon}
-     pad_before: ${eval:'${n_obs_steps}-1'}
-     pad_after: ${eval:'${n_action_steps}-1'}
-     obs_eef_target: true
-     use_manual_normalizer: false
-     val_ratio: 0.05
-     dummy_normalizer: false
-   instructor:
-     _target_: llmbc.translator.instructor.adroit_instructor.adroit_hand_hammer_v1_instructor.AdroitHandHammerV1Instructor
- llm:
-   name: HuggingFaceTB/SmolLM2-135M-Instruct
-   model_name: SmolLM2-135M-Instruct
-   config_target: llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig
-   causal_lm_target: llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM
-   use_quantization: false
-   use_joint_mlp_projector: true
-   llm_mode: mlp-finetuned
-   finetune_mode: orig
-   checkpoint: data/outputs/2026.03.27/14.38.20_train_mlp_projector_adroit-hand-hammer-v1/checkpoints/latest.ckpt
-   max_length: 100
-   lora_config:
-     r: 32
-     lora_alpha: 64
-     lora_dropout: 0.05
-     bias: none
-     task_type: CAUSAL_LM
-   prompter:
-     _target_: llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter
-     use_joint_mlp_projector: true
- hydra:
-   job:
-     override_dirname: ${model_name}
-   run:
-     dir: data/outputs/${now:%Y.%m.%d}/${now:%H.%M.%S}_${model_name}
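For context, a config composed like the one above is normally consumed through Hydra's entry-point decorator plus hydra.utils.instantiate. The sketch below is a minimal, hypothetical driver: the llmbc class path and its run() method are read off the _target_ entries, not a verified API, and the ${eval:...} interpolations in the dataset block additionally require a custom OmegaConf resolver to be registered before the config is resolved.

```python
# Minimal sketch of a Hydra entry point for a config like the one deleted
# above. ASSUMPTIONS: the llmbc package layout and TrainLLMWorkspace.run()
# are inferred from the _target_ fields, not confirmed against the repo.
import hydra
from omegaconf import DictConfig


@hydra.main(version_base="1.2", config_path="config/main_table",
            config_name="llmdp_llm_adroit-hand-hammer-v1.yaml")
def main(cfg: DictConfig) -> None:
    # The top-level _target_ points at TrainLLMWorkspace, so instantiate()
    # builds the workspace, recursively constructing nested _target_ nodes.
    workspace = hydra.utils.instantiate(cfg)
    workspace.run()  # hypothetical entry point


if __name__ == "__main__":
    main()
```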
 
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/.hydra/hydra.yaml DELETED
@@ -1,154 +0,0 @@
- hydra:
-   run:
-     dir: data/outputs/${now:%Y.%m.%d}/${now:%H.%M.%S}_${name}_${task_name}
-   sweep:
-     dir: data/outputs/${now:%Y.%m.%d}/${now:%H.%M.%S}_${name}_${task_name}
-     subdir: ${hydra.job.num}
-   launcher:
-     _target_: hydra._internal.core_plugins.basic_launcher.BasicLauncher
-   sweeper:
-     _target_: hydra._internal.core_plugins.basic_sweeper.BasicSweeper
-     max_batch_size: null
-     params: null
-   help:
-     app_name: ${hydra.job.name}
-     header: '${hydra.help.app_name} is powered by Hydra.
-
-       '
-     footer: 'Powered by Hydra (https://hydra.cc)
-
-       Use --hydra-help to view Hydra specific help
-
-       '
-     template: '${hydra.help.header}
-
-       == Configuration groups ==
-
-       Compose your configuration from those groups (group=option)
-
-
-       $APP_CONFIG_GROUPS
-
-
-       == Config ==
-
-       Override anything in the config (foo.bar=value)
-
-
-       $CONFIG
-
-
-       ${hydra.help.footer}
-
-       '
-   hydra_help:
-     template: 'Hydra (${hydra.runtime.version})
-
-       See https://hydra.cc for more info.
-
-
-       == Flags ==
-
-       $FLAGS_HELP
-
-
-       == Configuration groups ==
-
-       Compose your configuration from those groups (For example, append hydra/job_logging=disabled
-       to command line)
-
-
-       $HYDRA_CONFIG_GROUPS
-
-
-       Use ''--cfg hydra'' to Show the Hydra config.
-
-       '
-     hydra_help: ???
-   hydra_logging:
-     version: 1
-     formatters:
-       simple:
-         format: '[%(asctime)s][HYDRA] %(message)s'
-     handlers:
-       console:
-         class: logging.StreamHandler
-         formatter: simple
-         stream: ext://sys.stdout
-     root:
-       level: INFO
-       handlers:
-       - console
-     loggers:
-       logging_example:
-         level: DEBUG
-     disable_existing_loggers: false
-   job_logging:
-     version: 1
-     formatters:
-       simple:
-         format: '[%(asctime)s][%(name)s][%(levelname)s] - %(message)s'
-     handlers:
-       console:
-         class: logging.StreamHandler
-         formatter: simple
-         stream: ext://sys.stdout
-       file:
-         class: logging.FileHandler
-         formatter: simple
-         filename: ${hydra.runtime.output_dir}/${hydra.job.name}.log
-     root:
-       level: INFO
-       handlers:
-       - console
-       - file
-     disable_existing_loggers: false
-   env: {}
-   mode: RUN
-   searchpath: []
-   callbacks: {}
-   output_subdir: .hydra
-   overrides:
-     hydra:
-     - hydra.mode=RUN
-     task: []
-   job:
-     name: train
-     chdir: null
-     override_dirname: ''
-     id: ???
-     num: ???
-     config_name: llmdp_llm_adroit-hand-hammer-v1.yaml
-     env_set: {}
-     env_copy: []
-     config:
-       override_dirname:
-         kv_sep: '='
-         item_sep: ','
-         exclude_keys: []
-   runtime:
-     version: 1.2.0
-     version_base: '1.2'
-     cwd: /tmp2/chyang/workspace/LLM-BC
-     config_sources:
-     - path: hydra.conf
-       schema: pkg
-       provider: hydra
-     - path: /tmp2/chyang/workspace/LLM-BC/config/main_table
-       schema: file
-       provider: main
-     - path: ''
-       schema: structured
-       provider: schema
-     output_dir: /tmp2/chyang/workspace/LLM-BC/data/outputs/2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1
-     choices:
-       hydra/env: default
-       hydra/callbacks: null
-       hydra/job_logging: default
-       hydra/hydra_logging: default
-       hydra/hydra_help: default
-       hydra/help: default
-       hydra/sweeper: basic
-       hydra/launcher: basic
-       hydra/output: default
-   verbose: false
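The job_logging block above is an ordinary Python dictConfig schema that Hydra installs when the job starts. For reference, a standalone sketch of the equivalent setup using only the standard library (the logger name "train" is illustrative):

```python
# Stdlib-only equivalent of the job_logging scheme above; Hydra applies a
# dictConfig like this automatically, so this is for reference only.
import logging
import logging.config

logging.config.dictConfig({
    "version": 1,
    "formatters": {"simple": {
        "format": "[%(asctime)s][%(name)s][%(levelname)s] - %(message)s"}},
    "handlers": {"console": {
        "class": "logging.StreamHandler",
        "formatter": "simple",
        "stream": "ext://sys.stdout"}},
    "root": {"level": "INFO", "handlers": ["console"]},
    "disable_existing_loggers": False,
})
logging.getLogger("train").info("hello")  # -> [timestamp][train][INFO] - hello
```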
 
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/.hydra/overrides.yaml DELETED
@@ -1 +0,0 @@
- []
 
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/config.json DELETED
@@ -1,42 +0,0 @@
- {
-   "_name_or_path": "HuggingFaceTB/SmolLM2-135M-Instruct",
-   "action_dim": 26,
-   "architectures": [
-     "LowdimLlamaForCausalLM"
-   ],
-   "attention_bias": false,
-   "attention_dropout": 0.0,
-   "bos_token_id": 1,
-   "eos_token_id": 2,
-   "head_dim": 64,
-   "hidden_act": "silu",
-   "hidden_size": 576,
-   "initializer_range": 0.041666666666666664,
-   "intermediate_size": 1536,
-   "is_llama_config": true,
-   "max_position_embeddings": 8192,
-   "mlp_bias": false,
-   "model_type": "llama_lowdim",
-   "num_attention_heads": 9,
-   "num_hidden_layers": 30,
-   "num_key_value_heads": 3,
-   "obs_dim": 46,
-   "pad_token_id": 2,
-   "pretraining_tp": 1,
-   "rms_norm_eps": 1e-05,
-   "rope_interleaved": false,
-   "rope_scaling": null,
-   "rope_theta": 100000,
-   "tie_word_embeddings": true,
-   "torch_dtype": "float32",
-   "transformers.js_config": {
-     "kv_cache_dtype": {
-       "fp16": "float16",
-       "q4f16": "float16"
-     }
-   },
-   "transformers_version": "4.47.1",
-   "use_cache": false,
-   "use_joint_mlp_projector": true,
-   "vocab_size": 49152
- }
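Because "model_type" is the custom "llama_lowdim", this checkpoint cannot be loaded through the plain Auto classes unless the custom config/model classes are registered first. A hedged sketch, assuming the class paths given by the config_target/causal_lm_target entries in the training config above:

```python
# Sketch of registering the custom llama_lowdim architecture so the Auto
# classes can load checkpoint-360. ASSUMPTIONS: the llmbc class paths are
# taken from the training config, and the checkpoint path is hypothetical.
from transformers import AutoConfig, AutoModelForCausalLM
from llmbc.model.llm.llama_lowdim_model import (
    LowdimLlamaConfig,
    LowdimLlamaForCausalLM,
)

AutoConfig.register("llama_lowdim", LowdimLlamaConfig)
AutoModelForCausalLM.register(LowdimLlamaConfig, LowdimLlamaForCausalLM)

model = AutoModelForCausalLM.from_pretrained("path/to/checkpoint-360")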
 
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/generation_config.json DELETED
@@ -1,7 +0,0 @@
- {
-   "_from_model_config": true,
-   "bos_token_id": 1,
-   "eos_token_id": 2,
-   "pad_token_id": 2,
-   "transformers_version": "4.47.1"
- }
 
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/merges.txt DELETED
The diff for this file is too large to render. See raw diff
 
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/mlp_projector.bin DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:c4338608ed11f67ca4d076f1596453801d07110a1ba10d38b55690336de76e74
- size 1499776
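This and the following binary artifacts are stored as Git LFS pointers rather than raw bytes; each pointer records only the spec version, a sha256 object id, and the blob size. A minimal parsing sketch for that format, using only the three "key value" lines shown above:

```python
# Minimal parser for the Git LFS pointer format
# (https://git-lfs.github.com/spec/v1): three "key value" lines.
def parse_lfs_pointer(text: str) -> dict:
    fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size_bytes": int(fields["size"]),
    }

ptr = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:c4338608ed11f67ca4d076f1596453801d07110a1ba10d38b55690336de76e74\n"
    "size 1499776\n")
assert ptr["size_bytes"] == 1499776  # the projector weights are ~1.5 MB
```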
 
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/model.safetensors DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:532f9325616dbe87be8846d0b03f80c41a2de5d5a05c7397eb15b3484410ede9
- size 539588496
 
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/optimizer.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:7bc7e722db0999c1636e94a830207f155cf22d2c9d28f129bc987afa57af7a45
- size 1079284794
 
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/rng_state.pth DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:a8c41ff260c47496f4f1ca68f9e267daacc3461dc3a79663c2d83c1ae8fbf495
- size 14244
 
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/scheduler.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:36cd9db9ff66ee06ad412fe768ecde23049517e84a863a3dfc041789e2c58298
- size 1064
 
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/special_tokens_map.json DELETED
@@ -1,34 +0,0 @@
- {
-   "additional_special_tokens": [
-     "<|im_start|>",
-     "<|im_end|>"
-   ],
-   "bos_token": {
-     "content": "<|im_start|>",
-     "lstrip": false,
-     "normalized": false,
-     "rstrip": false,
-     "single_word": false
-   },
-   "eos_token": {
-     "content": "<|im_end|>",
-     "lstrip": false,
-     "normalized": false,
-     "rstrip": false,
-     "single_word": false
-   },
-   "pad_token": {
-     "content": "<|im_end|>",
-     "lstrip": false,
-     "normalized": false,
-     "rstrip": false,
-     "single_word": false
-   },
-   "unk_token": {
-     "content": "<|endoftext|>",
-     "lstrip": false,
-     "normalized": false,
-     "rstrip": false,
-     "single_word": false
-   }
- }
 
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/tokenizer.json DELETED
The diff for this file is too large to render. See raw diff
 
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/tokenizer_config.json DELETED
@@ -1,155 +0,0 @@
- {
-   "add_prefix_space": false,
-   "added_tokens_decoder": {
-     "0": {
-       "content": "<|endoftext|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "1": {
-       "content": "<|im_start|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "2": {
-       "content": "<|im_end|>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "3": {
-       "content": "<repo_name>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "4": {
-       "content": "<reponame>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "5": {
-       "content": "<file_sep>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "6": {
-       "content": "<filename>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "7": {
-       "content": "<gh_stars>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "8": {
-       "content": "<issue_start>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "9": {
-       "content": "<issue_comment>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "10": {
-       "content": "<issue_closed>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "11": {
-       "content": "<jupyter_start>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "12": {
-       "content": "<jupyter_text>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "13": {
-       "content": "<jupyter_code>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "14": {
-       "content": "<jupyter_output>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "15": {
-       "content": "<jupyter_script>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "16": {
-       "content": "<empty_output>",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     }
-   },
-   "additional_special_tokens": [
-     "<|im_start|>",
-     "<|im_end|>"
-   ],
-   "bos_token": "<|im_start|>",
-   "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful AI assistant named SmolLM, trained by Hugging Face<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
-   "clean_up_tokenization_spaces": false,
-   "eos_token": "<|im_end|>",
-   "extra_special_tokens": {},
-   "model_max_length": 8192,
-   "pad_token": "<|im_end|>",
-   "tokenizer_class": "GPT2Tokenizer",
-   "unk_token": "<|endoftext|>",
-   "vocab_size": 49152
- }
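The chat_template above is what shapes prompts at inference time. A short sketch of rendering it through the standard transformers API, loading from a hypothetical local copy of the deleted tokenizer files:

```python
# Render the ChatML-style chat_template shown above via the standard
# transformers API. ASSUMPTION: "path/to/checkpoint-360" is a hypothetical
# local directory containing the deleted tokenizer files.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/checkpoint-360")
text = tok.apply_chat_template(
    [{"role": "user", "content": "hammer the nail"}],
    tokenize=False,
    add_generation_prompt=True,
)
# -> "<|im_start|>system\nYou are a helpful AI assistant named SmolLM, ..."
#    "<|im_start|>user\nhammer the nail<|im_end|>\n<|im_start|>assistant\n"
```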
 
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/trainer_state.json DELETED
@@ -1,2841 +0,0 @@
- {
-   "best_metric": null,
-   "best_model_checkpoint": null,
-   "epoch": 10.0,
-   "eval_steps": 10,
-   "global_step": 360,
-   "is_hyper_param_search": false,
-   "is_local_process_zero": true,
-   "is_world_process_zero": true,
-   "log_history": [
-     {
-       "epoch": 0.027777777777777776,
-       "grad_norm": 13.871646881103516,
-       "learning_rate": 1e-05,
-       "loss": 1.6431,
-       "step": 1
-     },
-     {
-       "epoch": 0.05555555555555555,
-       "grad_norm": 17.890308380126953,
-       "learning_rate": 2e-05,
-       "loss": 1.6018,
-       "step": 2
-     },
-     {
-       "epoch": 0.08333333333333333,
-       "grad_norm": 13.746294021606445,
-       "learning_rate": 3e-05,
-       "loss": 1.5953,
-       "step": 3
-     },
-     {
-       "epoch": 0.1111111111111111,
-       "grad_norm": 15.9970121383667,
-       "learning_rate": 4e-05,
-       "loss": 1.5355,
-       "step": 4
-     },
-     {
-       "epoch": 0.1388888888888889,
-       "grad_norm": 18.634761810302734,
-       "learning_rate": 5e-05,
-       "loss": 1.552,
-       "step": 5
-     },
-     {
-       "epoch": 0.16666666666666666,
-       "grad_norm": 10.22042179107666,
-       "learning_rate": 6e-05,
-       "loss": 1.4926,
-       "step": 6
-     },
-     {
-       "epoch": 0.19444444444444445,
-       "grad_norm": 14.976595878601074,
-       "learning_rate": 7e-05,
-       "loss": 1.1827,
-       "step": 7
-     },
-     {
-       "epoch": 0.2222222222222222,
-       "grad_norm": 34.35334396362305,
-       "learning_rate": 8e-05,
-       "loss": 1.173,
-       "step": 8
-     },
-     {
-       "epoch": 0.25,
-       "grad_norm": 7.392702579498291,
-       "learning_rate": 9e-05,
-       "loss": 0.7987,
-       "step": 9
-     },
-     {
-       "epoch": 0.2777777777777778,
-       "grad_norm": 8.6481294631958,
-       "learning_rate": 0.0001,
-       "loss": 0.7641,
-       "step": 10
-     },
-     {
-       "epoch": 0.2777777777777778,
-       "eval_loss": 0.719104528427124,
-       "eval_runtime": 0.1041,
-       "eval_samples_per_second": 441.967,
-       "eval_steps_per_second": 9.608,
-       "step": 10
-     },
-     {
-       "epoch": 0.3055555555555556,
-       "grad_norm": 11.896777153015137,
-       "learning_rate": 9.971428571428571e-05,
-       "loss": 0.6656,
-       "step": 11
-     },
-     {
-       "epoch": 0.3333333333333333,
-       "grad_norm": 6.002552032470703,
-       "learning_rate": 9.942857142857144e-05,
-       "loss": 0.5743,
-       "step": 12
-     },
-     {
-       "epoch": 0.3611111111111111,
-       "grad_norm": 4.569210529327393,
-       "learning_rate": 9.914285714285715e-05,
-       "loss": 0.5204,
-       "step": 13
-     },
-     {
-       "epoch": 0.3888888888888889,
-       "grad_norm": 3.2792978286743164,
-       "learning_rate": 9.885714285714286e-05,
-       "loss": 0.5217,
-       "step": 14
-     },
-     {
-       "epoch": 0.4166666666666667,
-       "grad_norm": 3.6701369285583496,
-       "learning_rate": 9.857142857142858e-05,
-       "loss": 0.5046,
-       "step": 15
-     },
-     {
-       "epoch": 0.4444444444444444,
-       "grad_norm": 4.064358711242676,
-       "learning_rate": 9.828571428571429e-05,
-       "loss": 0.4803,
-       "step": 16
-     },
-     {
-       "epoch": 0.4722222222222222,
-       "grad_norm": 2.9059529304504395,
-       "learning_rate": 9.8e-05,
-       "loss": 0.4466,
-       "step": 17
-     },
-     {
-       "epoch": 0.5,
-       "grad_norm": 2.499434471130371,
-       "learning_rate": 9.771428571428572e-05,
-       "loss": 0.4453,
-       "step": 18
-     },
-     {
-       "epoch": 0.5277777777777778,
-       "grad_norm": 2.084019899368286,
-       "learning_rate": 9.742857142857143e-05,
-       "loss": 0.4382,
-       "step": 19
-     },
-     {
-       "epoch": 0.5555555555555556,
-       "grad_norm": 1.2787052392959595,
-       "learning_rate": 9.714285714285715e-05,
-       "loss": 0.4208,
-       "step": 20
-     },
-     {
-       "epoch": 0.5555555555555556,
-       "eval_loss": 0.4429771900177002,
-       "eval_runtime": 0.1036,
-       "eval_samples_per_second": 443.983,
-       "eval_steps_per_second": 9.652,
-       "step": 20
-     },
-     {
-       "epoch": 0.5833333333333334,
-       "grad_norm": 1.8704657554626465,
-       "learning_rate": 9.685714285714286e-05,
-       "loss": 0.431,
-       "step": 21
-     },
-     {
-       "epoch": 0.6111111111111112,
-       "grad_norm": 1.7761015892028809,
-       "learning_rate": 9.657142857142858e-05,
-       "loss": 0.4317,
-       "step": 22
-     },
-     {
-       "epoch": 0.6388888888888888,
-       "grad_norm": 1.7499357461929321,
-       "learning_rate": 9.628571428571429e-05,
-       "loss": 0.4229,
-       "step": 23
-     },
-     {
-       "epoch": 0.6666666666666666,
-       "grad_norm": 1.2509069442749023,
-       "learning_rate": 9.6e-05,
-       "loss": 0.4126,
-       "step": 24
-     },
-     {
-       "epoch": 0.6944444444444444,
-       "grad_norm": 1.3415058851242065,
-       "learning_rate": 9.571428571428573e-05,
-       "loss": 0.3888,
-       "step": 25
-     },
-     {
-       "epoch": 0.7222222222222222,
-       "grad_norm": 1.513482689857483,
-       "learning_rate": 9.542857142857143e-05,
-       "loss": 0.4111,
-       "step": 26
-     },
-     {
-       "epoch": 0.75,
-       "grad_norm": 1.0207685232162476,
-       "learning_rate": 9.514285714285714e-05,
-       "loss": 0.3904,
-       "step": 27
-     },
-     {
-       "epoch": 0.7777777777777778,
-       "grad_norm": 1.0765091180801392,
-       "learning_rate": 9.485714285714287e-05,
-       "loss": 0.3911,
-       "step": 28
-     },
-     {
-       "epoch": 0.8055555555555556,
-       "grad_norm": 1.2146029472351074,
-       "learning_rate": 9.457142857142858e-05,
-       "loss": 0.3893,
-       "step": 29
-     },
-     {
-       "epoch": 0.8333333333333334,
-       "grad_norm": 1.302972435951233,
-       "learning_rate": 9.428571428571429e-05,
-       "loss": 0.3915,
-       "step": 30
-     },
-     {
-       "epoch": 0.8333333333333334,
-       "eval_loss": 0.39956018328666687,
-       "eval_runtime": 0.1036,
-       "eval_samples_per_second": 444.089,
-       "eval_steps_per_second": 9.654,
-       "step": 30
-     },
-     {
-       "epoch": 0.8611111111111112,
-       "grad_norm": 1.2195433378219604,
-       "learning_rate": 9.4e-05,
-       "loss": 0.377,
-       "step": 31
-     },
-     {
-       "epoch": 0.8888888888888888,
-       "grad_norm": 1.2320094108581543,
-       "learning_rate": 9.371428571428572e-05,
-       "loss": 0.3837,
-       "step": 32
-     },
-     {
-       "epoch": 0.9166666666666666,
-       "grad_norm": 1.0609043836593628,
-       "learning_rate": 9.342857142857143e-05,
-       "loss": 0.3776,
-       "step": 33
-     },
-     {
-       "epoch": 0.9444444444444444,
-       "grad_norm": 0.9609966278076172,
-       "learning_rate": 9.314285714285715e-05,
-       "loss": 0.3887,
-       "step": 34
-     },
-     {
-       "epoch": 0.9722222222222222,
-       "grad_norm": 1.0595581531524658,
-       "learning_rate": 9.285714285714286e-05,
-       "loss": 0.3761,
-       "step": 35
-     },
-     {
-       "epoch": 1.0,
-       "grad_norm": 0.990327775478363,
-       "learning_rate": 9.257142857142858e-05,
-       "loss": 0.3744,
-       "step": 36
-     },
-     {
-       "epoch": 1.0277777777777777,
-       "grad_norm": 1.272873044013977,
-       "learning_rate": 9.228571428571429e-05,
-       "loss": 0.3625,
-       "step": 37
-     },
-     {
-       "epoch": 1.0555555555555556,
-       "grad_norm": 1.9024567604064941,
-       "learning_rate": 9.200000000000001e-05,
-       "loss": 0.3869,
-       "step": 38
-     },
-     {
-       "epoch": 1.0833333333333333,
-       "grad_norm": 1.3398654460906982,
-       "learning_rate": 9.171428571428572e-05,
-       "loss": 0.3751,
-       "step": 39
-     },
-     {
-       "epoch": 1.1111111111111112,
-       "grad_norm": 1.9176064729690552,
-       "learning_rate": 9.142857142857143e-05,
-       "loss": 0.3662,
-       "step": 40
-     },
-     {
-       "epoch": 1.1111111111111112,
-       "eval_loss": 0.37765416502952576,
-       "eval_runtime": 0.1036,
-       "eval_samples_per_second": 444.001,
-       "eval_steps_per_second": 9.652,
-       "step": 40
-     },
-     {
-       "epoch": 1.1388888888888888,
-       "grad_norm": 1.1852660179138184,
-       "learning_rate": 9.114285714285716e-05,
-       "loss": 0.3665,
-       "step": 41
-     },
-     {
-       "epoch": 1.1666666666666667,
-       "grad_norm": 1.831186056137085,
-       "learning_rate": 9.085714285714286e-05,
-       "loss": 0.3705,
-       "step": 42
-     },
-     {
-       "epoch": 1.1944444444444444,
-       "grad_norm": 1.1574777364730835,
-       "learning_rate": 9.057142857142857e-05,
-       "loss": 0.3582,
-       "step": 43
-     },
-     {
-       "epoch": 1.2222222222222223,
-       "grad_norm": 1.3485198020935059,
-       "learning_rate": 9.028571428571428e-05,
-       "loss": 0.3724,
-       "step": 44
-     },
-     {
-       "epoch": 1.25,
-       "grad_norm": 1.0934721231460571,
-       "learning_rate": 9e-05,
-       "loss": 0.3549,
-       "step": 45
-     },
-     {
-       "epoch": 1.2777777777777777,
-       "grad_norm": 1.2588518857955933,
-       "learning_rate": 8.971428571428571e-05,
-       "loss": 0.3607,
-       "step": 46
-     },
-     {
-       "epoch": 1.3055555555555556,
-       "grad_norm": 0.9038533568382263,
-       "learning_rate": 8.942857142857142e-05,
-       "loss": 0.3492,
-       "step": 47
-     },
-     {
-       "epoch": 1.3333333333333333,
-       "grad_norm": 1.083348274230957,
-       "learning_rate": 8.914285714285715e-05,
-       "loss": 0.361,
-       "step": 48
-     },
-     {
-       "epoch": 1.3611111111111112,
-       "grad_norm": 0.8287424445152283,
-       "learning_rate": 8.885714285714286e-05,
-       "loss": 0.3475,
-       "step": 49
-     },
-     {
-       "epoch": 1.3888888888888888,
-       "grad_norm": 1.3475714921951294,
-       "learning_rate": 8.857142857142857e-05,
-       "loss": 0.363,
-       "step": 50
-     },
-     {
-       "epoch": 1.3888888888888888,
-       "eval_loss": 0.3627206087112427,
-       "eval_runtime": 0.1039,
-       "eval_samples_per_second": 442.561,
-       "eval_steps_per_second": 9.621,
-       "step": 50
-     },
-     {
-       "epoch": 1.4166666666666667,
-       "grad_norm": 1.1012552976608276,
-       "learning_rate": 8.828571428571429e-05,
-       "loss": 0.3527,
-       "step": 51
-     },
-     {
-       "epoch": 1.4444444444444444,
-       "grad_norm": 0.6421935558319092,
-       "learning_rate": 8.800000000000001e-05,
-       "loss": 0.3418,
-       "step": 52
-     },
-     {
-       "epoch": 1.4722222222222223,
-       "grad_norm": 1.1574995517730713,
-       "learning_rate": 8.771428571428572e-05,
-       "loss": 0.3513,
-       "step": 53
-     },
-     {
-       "epoch": 1.5,
-       "grad_norm": 1.0251258611679077,
-       "learning_rate": 8.742857142857144e-05,
-       "loss": 0.3515,
-       "step": 54
-     },
-     {
-       "epoch": 1.5277777777777777,
-       "grad_norm": 0.9864039421081543,
-       "learning_rate": 8.714285714285715e-05,
-       "loss": 0.3609,
-       "step": 55
-     },
-     {
-       "epoch": 1.5555555555555556,
-       "grad_norm": 0.757999062538147,
-       "learning_rate": 8.685714285714286e-05,
-       "loss": 0.3454,
-       "step": 56
-     },
-     {
-       "epoch": 1.5833333333333335,
-       "grad_norm": 1.0983614921569824,
-       "learning_rate": 8.657142857142858e-05,
-       "loss": 0.3488,
-       "step": 57
-     },
-     {
-       "epoch": 1.6111111111111112,
-       "grad_norm": 1.4811136722564697,
-       "learning_rate": 8.62857142857143e-05,
-       "loss": 0.3562,
-       "step": 58
-     },
-     {
-       "epoch": 1.6388888888888888,
-       "grad_norm": 0.9457672834396362,
-       "learning_rate": 8.6e-05,
-       "loss": 0.349,
-       "step": 59
-     },
-     {
-       "epoch": 1.6666666666666665,
-       "grad_norm": 1.4347460269927979,
-       "learning_rate": 8.571428571428571e-05,
-       "loss": 0.3551,
-       "step": 60
-     },
-     {
-       "epoch": 1.6666666666666665,
-       "eval_loss": 0.35965070128440857,
-       "eval_runtime": 0.1045,
-       "eval_samples_per_second": 440.164,
-       "eval_steps_per_second": 9.569,
-       "step": 60
-     },
-     {
-       "epoch": 1.6944444444444444,
-       "grad_norm": 1.0592706203460693,
-       "learning_rate": 8.542857142857144e-05,
-       "loss": 0.3485,
-       "step": 61
-     },
-     {
-       "epoch": 1.7222222222222223,
-       "grad_norm": 1.3444126844406128,
-       "learning_rate": 8.514285714285714e-05,
-       "loss": 0.3512,
-       "step": 62
-     },
-     {
-       "epoch": 1.75,
-       "grad_norm": 0.9045667052268982,
-       "learning_rate": 8.485714285714285e-05,
-       "loss": 0.3525,
-       "step": 63
-     },
-     {
-       "epoch": 1.7777777777777777,
-       "grad_norm": 1.135429859161377,
-       "learning_rate": 8.457142857142858e-05,
-       "loss": 0.3483,
-       "step": 64
-     },
-     {
-       "epoch": 1.8055555555555556,
-       "grad_norm": 0.7742411494255066,
-       "learning_rate": 8.428571428571429e-05,
-       "loss": 0.3445,
-       "step": 65
-     },
-     {
-       "epoch": 1.8333333333333335,
-       "grad_norm": 1.2747840881347656,
-       "learning_rate": 8.4e-05,
-       "loss": 0.3425,
-       "step": 66
-     },
-     {
-       "epoch": 1.8611111111111112,
-       "grad_norm": 1.1280975341796875,
-       "learning_rate": 8.371428571428572e-05,
-       "loss": 0.3506,
-       "step": 67
-     },
-     {
-       "epoch": 1.8888888888888888,
-       "grad_norm": 1.3229925632476807,
-       "learning_rate": 8.342857142857143e-05,
-       "loss": 0.3458,
-       "step": 68
-     },
-     {
-       "epoch": 1.9166666666666665,
-       "grad_norm": 1.0970568656921387,
-       "learning_rate": 8.314285714285715e-05,
-       "loss": 0.3443,
-       "step": 69
-     },
-     {
-       "epoch": 1.9444444444444444,
-       "grad_norm": 1.7599389553070068,
-       "learning_rate": 8.285714285714287e-05,
-       "loss": 0.3612,
-       "step": 70
-     },
-     {
-       "epoch": 1.9444444444444444,
-       "eval_loss": 0.35299769043922424,
-       "eval_runtime": 0.1042,
-       "eval_samples_per_second": 441.433,
-       "eval_steps_per_second": 9.596,
-       "step": 70
-     },
-     {
-       "epoch": 1.9722222222222223,
-       "grad_norm": 0.8275991678237915,
-       "learning_rate": 8.257142857142858e-05,
-       "loss": 0.3373,
-       "step": 71
-     },
-     {
-       "epoch": 2.0,
-       "grad_norm": 1.5045437812805176,
-       "learning_rate": 8.228571428571429e-05,
-       "loss": 0.3624,
-       "step": 72
-     },
-     {
-       "epoch": 2.0277777777777777,
-       "grad_norm": 0.9771829843521118,
-       "learning_rate": 8.2e-05,
-       "loss": 0.3434,
-       "step": 73
-     },
-     {
-       "epoch": 2.0555555555555554,
-       "grad_norm": 0.8552800416946411,
-       "learning_rate": 8.171428571428572e-05,
-       "loss": 0.3347,
-       "step": 74
-     },
-     {
-       "epoch": 2.0833333333333335,
-       "grad_norm": 0.8917291164398193,
-       "learning_rate": 8.142857142857143e-05,
-       "loss": 0.3448,
-       "step": 75
-     },
-     {
-       "epoch": 2.111111111111111,
-       "grad_norm": 0.9143850207328796,
-       "learning_rate": 8.114285714285714e-05,
-       "loss": 0.3309,
-       "step": 76
-     },
-     {
-       "epoch": 2.138888888888889,
-       "grad_norm": 1.359926700592041,
-       "learning_rate": 8.085714285714287e-05,
-       "loss": 0.3471,
-       "step": 77
-     },
-     {
-       "epoch": 2.1666666666666665,
-       "grad_norm": 0.84107506275177,
-       "learning_rate": 8.057142857142857e-05,
-       "loss": 0.3433,
-       "step": 78
-     },
-     {
-       "epoch": 2.1944444444444446,
-       "grad_norm": 1.2953639030456543,
-       "learning_rate": 8.028571428571428e-05,
-       "loss": 0.3495,
-       "step": 79
-     },
-     {
-       "epoch": 2.2222222222222223,
-       "grad_norm": 0.9937311410903931,
-       "learning_rate": 8e-05,
-       "loss": 0.3388,
-       "step": 80
-     },
-     {
-       "epoch": 2.2222222222222223,
-       "eval_loss": 0.3491460978984833,
-       "eval_runtime": 0.1044,
-       "eval_samples_per_second": 440.593,
-       "eval_steps_per_second": 9.578,
-       "step": 80
-     },
-     {
-       "epoch": 2.25,
-       "grad_norm": 1.0681490898132324,
-       "learning_rate": 7.971428571428572e-05,
-       "loss": 0.3435,
-       "step": 81
-     },
-     {
-       "epoch": 2.2777777777777777,
-       "grad_norm": 0.8466928601264954,
-       "learning_rate": 7.942857142857143e-05,
-       "loss": 0.3333,
-       "step": 82
-     },
-     {
-       "epoch": 2.3055555555555554,
-       "grad_norm": 0.8183342814445496,
-       "learning_rate": 7.914285714285715e-05,
-       "loss": 0.3305,
-       "step": 83
-     },
-     {
-       "epoch": 2.3333333333333335,
-       "grad_norm": 0.833314061164856,
-       "learning_rate": 7.885714285714286e-05,
-       "loss": 0.3289,
-       "step": 84
-     },
-     {
-       "epoch": 2.361111111111111,
-       "grad_norm": 0.8347731828689575,
-       "learning_rate": 7.857142857142858e-05,
-       "loss": 0.3331,
-       "step": 85
-     },
-     {
-       "epoch": 2.388888888888889,
-       "grad_norm": 1.0877679586410522,
-       "learning_rate": 7.828571428571429e-05,
-       "loss": 0.3437,
-       "step": 86
-     },
-     {
-       "epoch": 2.4166666666666665,
-       "grad_norm": 0.9570125937461853,
-       "learning_rate": 7.800000000000001e-05,
-       "loss": 0.3331,
-       "step": 87
-     },
-     {
-       "epoch": 2.4444444444444446,
-       "grad_norm": 0.7662280797958374,
-       "learning_rate": 7.771428571428572e-05,
-       "loss": 0.3363,
-       "step": 88
-     },
-     {
-       "epoch": 2.4722222222222223,
-       "grad_norm": 0.9321999549865723,
-       "learning_rate": 7.742857142857143e-05,
-       "loss": 0.3305,
-       "step": 89
-     },
-     {
-       "epoch": 2.5,
-       "grad_norm": 0.8284544348716736,
-       "learning_rate": 7.714285714285715e-05,
-       "loss": 0.3332,
-       "step": 90
-     },
-     {
-       "epoch": 2.5,
-       "eval_loss": 0.34498000144958496,
-       "eval_runtime": 0.1046,
-       "eval_samples_per_second": 439.848,
-       "eval_steps_per_second": 9.562,
-       "step": 90
-     },
-     {
-       "epoch": 2.5277777777777777,
-       "grad_norm": 1.0568827390670776,
-       "learning_rate": 7.685714285714286e-05,
-       "loss": 0.3448,
-       "step": 91
-     },
-     {
-       "epoch": 2.5555555555555554,
-       "grad_norm": 0.9136806130409241,
-       "learning_rate": 7.657142857142857e-05,
-       "loss": 0.334,
-       "step": 92
-     },
-     {
-       "epoch": 2.5833333333333335,
-       "grad_norm": 1.2551990747451782,
-       "learning_rate": 7.62857142857143e-05,
-       "loss": 0.3425,
-       "step": 93
-     },
-     {
-       "epoch": 2.611111111111111,
-       "grad_norm": 0.8284862637519836,
-       "learning_rate": 7.6e-05,
-       "loss": 0.3265,
-       "step": 94
-     },
-     {
-       "epoch": 2.638888888888889,
-       "grad_norm": 0.7161554098129272,
-       "learning_rate": 7.571428571428571e-05,
-       "loss": 0.3267,
-       "step": 95
-     },
-     {
-       "epoch": 2.6666666666666665,
-       "grad_norm": 0.8050905466079712,
-       "learning_rate": 7.542857142857144e-05,
-       "loss": 0.3342,
-       "step": 96
-     },
-     {
-       "epoch": 2.6944444444444446,
-       "grad_norm": 0.7441209554672241,
-       "learning_rate": 7.514285714285715e-05,
-       "loss": 0.3325,
-       "step": 97
-     },
-     {
-       "epoch": 2.7222222222222223,
-       "grad_norm": 0.591927707195282,
-       "learning_rate": 7.485714285714285e-05,
-       "loss": 0.334,
-       "step": 98
-     },
-     {
-       "epoch": 2.75,
-       "grad_norm": 0.8902866244316101,
-       "learning_rate": 7.457142857142856e-05,
-       "loss": 0.3473,
-       "step": 99
-     },
-     {
-       "epoch": 2.7777777777777777,
-       "grad_norm": 0.6760069131851196,
-       "learning_rate": 7.428571428571429e-05,
-       "loss": 0.326,
-       "step": 100
-     },
-     {
-       "epoch": 2.7777777777777777,
-       "eval_loss": 0.3410107493400574,
-       "eval_runtime": 0.1043,
-       "eval_samples_per_second": 441.026,
-       "eval_steps_per_second": 9.588,
-       "step": 100
-     },
-     {
-       "epoch": 2.8055555555555554,
-       "grad_norm": 0.6579345464706421,
-       "learning_rate": 7.4e-05,
-       "loss": 0.3348,
-       "step": 101
-     },
-     {
-       "epoch": 2.8333333333333335,
-       "grad_norm": 1.0648415088653564,
-       "learning_rate": 7.371428571428572e-05,
-       "loss": 0.3253,
-       "step": 102
-     },
-     {
-       "epoch": 2.861111111111111,
-       "grad_norm": 0.6868814826011658,
-       "learning_rate": 7.342857142857144e-05,
-       "loss": 0.3297,
-       "step": 103
-     },
-     {
-       "epoch": 2.888888888888889,
-       "grad_norm": 1.1149464845657349,
-       "learning_rate": 7.314285714285715e-05,
-       "loss": 0.3401,
-       "step": 104
-     },
-     {
-       "epoch": 2.9166666666666665,
-       "grad_norm": 0.8934164047241211,
-       "learning_rate": 7.285714285714286e-05,
-       "loss": 0.3348,
-       "step": 105
-     },
-     {
-       "epoch": 2.9444444444444446,
-       "grad_norm": 1.1119507551193237,
-       "learning_rate": 7.257142857142858e-05,
-       "loss": 0.3427,
-       "step": 106
-     },
-     {
-       "epoch": 2.9722222222222223,
-       "grad_norm": 0.8103634715080261,
-       "learning_rate": 7.228571428571429e-05,
-       "loss": 0.3374,
-       "step": 107
-     },
-     {
-       "epoch": 3.0,
-       "grad_norm": 0.8421126008033752,
-       "learning_rate": 7.2e-05,
-       "loss": 0.3395,
-       "step": 108
-     },
-     {
-       "epoch": 3.0277777777777777,
-       "grad_norm": 0.8583278656005859,
-       "learning_rate": 7.171428571428572e-05,
-       "loss": 0.331,
-       "step": 109
-     },
-     {
-       "epoch": 3.0555555555555554,
-       "grad_norm": 1.2129111289978027,
-       "learning_rate": 7.142857142857143e-05,
-       "loss": 0.3355,
-       "step": 110
-     },
-     {
-       "epoch": 3.0555555555555554,
-       "eval_loss": 0.332057386636734,
-       "eval_runtime": 0.1051,
-       "eval_samples_per_second": 437.685,
-       "eval_steps_per_second": 9.515,
-       "step": 110
-     },
-     {
-       "epoch": 3.0833333333333335,
-       "grad_norm": 0.9463130235671997,
-       "learning_rate": 7.114285714285714e-05,
-       "loss": 0.3294,
-       "step": 111
-     },
-     {
-       "epoch": 3.111111111111111,
-       "grad_norm": 0.9692079424858093,
-       "learning_rate": 7.085714285714285e-05,
-       "loss": 0.3327,
-       "step": 112
-     },
-     {
-       "epoch": 3.138888888888889,
-       "grad_norm": 0.9853659868240356,
-       "learning_rate": 7.057142857142858e-05,
-       "loss": 0.3295,
-       "step": 113
-     },
-     {
-       "epoch": 3.1666666666666665,
-       "grad_norm": 0.7222715616226196,
-       "learning_rate": 7.028571428571428e-05,
-       "loss": 0.3358,
-       "step": 114
-     },
-     {
-       "epoch": 3.1944444444444446,
-       "grad_norm": 1.1528452634811401,
-       "learning_rate": 7e-05,
-       "loss": 0.3406,
-       "step": 115
-     },
-     {
-       "epoch": 3.2222222222222223,
-       "grad_norm": 1.0079970359802246,
-       "learning_rate": 6.971428571428572e-05,
-       "loss": 0.329,
-       "step": 116
-     },
-     {
-       "epoch": 3.25,
-       "grad_norm": 0.7162885665893555,
-       "learning_rate": 6.942857142857143e-05,
-       "loss": 0.327,
-       "step": 117
-     },
-     {
-       "epoch": 3.2777777777777777,
-       "grad_norm": 0.9302375912666321,
-       "learning_rate": 6.914285714285715e-05,
-       "loss": 0.336,
-       "step": 118
-     },
-     {
-       "epoch": 3.3055555555555554,
-       "grad_norm": 0.8540468215942383,
-       "learning_rate": 6.885714285714286e-05,
-       "loss": 0.3356,
-       "step": 119
-     },
-     {
-       "epoch": 3.3333333333333335,
-       "grad_norm": 0.598040759563446,
-       "learning_rate": 6.857142857142858e-05,
-       "loss": 0.3279,
-       "step": 120
-     },
-     {
-       "epoch": 3.3333333333333335,
-       "eval_loss": 0.3290035128593445,
-       "eval_runtime": 0.1049,
-       "eval_samples_per_second": 438.622,
-       "eval_steps_per_second": 9.535,
-       "step": 120
-     },
-     {
-       "epoch": 3.361111111111111,
-       "grad_norm": 0.6981043815612793,
-       "learning_rate": 6.828571428571429e-05,
-       "loss": 0.33,
-       "step": 121
-     },
-     {
-       "epoch": 3.388888888888889,
-       "grad_norm": 0.6710860133171082,
-       "learning_rate": 6.800000000000001e-05,
-       "loss": 0.3262,
-       "step": 122
-     },
-     {
-       "epoch": 3.4166666666666665,
-       "grad_norm": 0.6621596813201904,
-       "learning_rate": 6.771428571428572e-05,
-       "loss": 0.3273,
-       "step": 123
-     },
-     {
-       "epoch": 3.4444444444444446,
-       "grad_norm": 0.8255563974380493,
-       "learning_rate": 6.742857142857143e-05,
-       "loss": 0.3264,
-       "step": 124
-     },
-     {
-       "epoch": 3.4722222222222223,
-       "grad_norm": 0.8350001573562622,
-       "learning_rate": 6.714285714285714e-05,
-       "loss": 0.3157,
-       "step": 125
-     },
-     {
-       "epoch": 3.5,
-       "grad_norm": 0.7275986075401306,
-       "learning_rate": 6.685714285714286e-05,
-       "loss": 0.3205,
-       "step": 126
-     },
-     {
-       "epoch": 3.5277777777777777,
-       "grad_norm": 0.652642548084259,
-       "learning_rate": 6.657142857142857e-05,
-       "loss": 0.3327,
-       "step": 127
-     },
-     {
-       "epoch": 3.5555555555555554,
-       "grad_norm": 0.9522960186004639,
-       "learning_rate": 6.628571428571428e-05,
-       "loss": 0.3319,
-       "step": 128
-     },
-     {
-       "epoch": 3.5833333333333335,
-       "grad_norm": 0.7006963491439819,
-       "learning_rate": 6.6e-05,
-       "loss": 0.3292,
-       "step": 129
-     },
-     {
-       "epoch": 3.611111111111111,
-       "grad_norm": 0.7161970138549805,
-       "learning_rate": 6.571428571428571e-05,
-       "loss": 0.3246,
-       "step": 130
-     },
-     {
-       "epoch": 3.611111111111111,
-       "eval_loss": 0.3442412316799164,
-       "eval_runtime": 0.1052,
-       "eval_samples_per_second": 437.452,
-       "eval_steps_per_second": 9.51,
-       "step": 130
-     },
-     {
-       "epoch": 3.638888888888889,
-       "grad_norm": 1.0642709732055664,
-       "learning_rate": 6.542857142857142e-05,
-       "loss": 0.3289,
-       "step": 131
-     },
-     {
-       "epoch": 3.6666666666666665,
-       "grad_norm": 0.7999193072319031,
-       "learning_rate": 6.514285714285715e-05,
-       "loss": 0.3234,
-       "step": 132
-     },
-     {
-       "epoch": 3.6944444444444446,
-       "grad_norm": 0.8324876427650452,
-       "learning_rate": 6.485714285714286e-05,
-       "loss": 0.3297,
-       "step": 133
-     },
-     {
-       "epoch": 3.7222222222222223,
-       "grad_norm": 0.561801552772522,
-       "learning_rate": 6.457142857142856e-05,
-       "loss": 0.3125,
-       "step": 134
-     },
-     {
-       "epoch": 3.75,
-       "grad_norm": 0.6995918154716492,
-       "learning_rate": 6.428571428571429e-05,
-       "loss": 0.3234,
-       "step": 135
-     },
-     {
-       "epoch": 3.7777777777777777,
-       "grad_norm": 0.6314477920532227,
-       "learning_rate": 6.400000000000001e-05,
-       "loss": 0.3256,
-       "step": 136
-     },
-     {
-       "epoch": 3.8055555555555554,
-       "grad_norm": 0.9092559814453125,
-       "learning_rate": 6.371428571428572e-05,
-       "loss": 0.3315,
-       "step": 137
-     },
-     {
-       "epoch": 3.8333333333333335,
-       "grad_norm": 0.7306588292121887,
-       "learning_rate": 6.342857142857143e-05,
-       "loss": 0.3241,
-       "step": 138
-     },
-     {
-       "epoch": 3.861111111111111,
-       "grad_norm": 0.7943991422653198,
-       "learning_rate": 6.314285714285715e-05,
-       "loss": 0.3323,
-       "step": 139
-     },
-     {
-       "epoch": 3.888888888888889,
-       "grad_norm": 0.8375313878059387,
-       "learning_rate": 6.285714285714286e-05,
-       "loss": 0.3273,
-       "step": 140
-     },
-     {
-       "epoch": 3.888888888888889,
-       "eval_loss": 0.33936312794685364,
-       "eval_runtime": 0.1048,
-       "eval_samples_per_second": 438.851,
-       "eval_steps_per_second": 9.54,
-       "step": 140
-     },
-     {
-       "epoch": 3.9166666666666665,
-       "grad_norm": 0.9479944705963135,
-       "learning_rate": 6.257142857142857e-05,
-       "loss": 0.3258,
-       "step": 141
-     },
-     {
-       "epoch": 3.9444444444444446,
-       "grad_norm": 0.8155922889709473,
-       "learning_rate": 6.22857142857143e-05,
-       "loss": 0.3228,
-       "step": 142
-     },
-     {
-       "epoch": 3.9722222222222223,
-       "grad_norm": 0.8617050647735596,
-       "learning_rate": 6.2e-05,
-       "loss": 0.3217,
-       "step": 143
-     },
-     {
-       "epoch": 4.0,
-       "grad_norm": 1.2106715440750122,
-       "learning_rate": 6.171428571428571e-05,
-       "loss": 0.3309,
-       "step": 144
-     },
-     {
-       "epoch": 4.027777777777778,
-       "grad_norm": 0.8097350001335144,
-       "learning_rate": 6.142857142857143e-05,
-       "loss": 0.3265,
-       "step": 145
-     },
-     {
-       "epoch": 4.055555555555555,
-       "grad_norm": 0.651019811630249,
-       "learning_rate": 6.114285714285714e-05,
-       "loss": 0.3222,
-       "step": 146
-     },
-     {
-       "epoch": 4.083333333333333,
-       "grad_norm": 0.8858047127723694,
-       "learning_rate": 6.085714285714286e-05,
-       "loss": 0.3245,
-       "step": 147
-     },
-     {
-       "epoch": 4.111111111111111,
-       "grad_norm": 0.8602396845817566,
-       "learning_rate": 6.0571428571428576e-05,
-       "loss": 0.3153,
-       "step": 148
-     },
-     {
-       "epoch": 4.138888888888889,
-       "grad_norm": 0.615274965763092,
-       "learning_rate": 6.028571428571429e-05,
-       "loss": 0.313,
-       "step": 149
-     },
-     {
-       "epoch": 4.166666666666667,
-       "grad_norm": 0.9199692010879517,
-       "learning_rate": 6e-05,
-       "loss": 0.3275,
-       "step": 150
-     },
-     {
-       "epoch": 4.166666666666667,
-       "eval_loss": 0.34145644307136536,
-       "eval_runtime": 0.1056,
-       "eval_samples_per_second": 435.469,
-       "eval_steps_per_second": 9.467,
-       "step": 150
-     },
-     {
-       "epoch": 4.194444444444445,
-       "grad_norm": 0.9348899722099304,
-       "learning_rate": 5.9714285714285724e-05,
-       "loss": 0.3287,
-       "step": 151
-     },
-     {
-       "epoch": 4.222222222222222,
-       "grad_norm": 0.7250774502754211,
-       "learning_rate": 5.9428571428571434e-05,
-       "loss": 0.3203,
-       "step": 152
-     },
-     {
-       "epoch": 4.25,
-       "grad_norm": 0.7376280426979065,
-       "learning_rate": 5.914285714285714e-05,
-       "loss": 0.3169,
-       "step": 153
-     },
-     {
-       "epoch": 4.277777777777778,
-       "grad_norm": 0.6010245680809021,
-       "learning_rate": 5.885714285714285e-05,
-       "loss": 0.3215,
-       "step": 154
-     },
-     {
-       "epoch": 4.305555555555555,
-       "grad_norm": 0.7241640686988831,
-       "learning_rate": 5.8571428571428575e-05,
-       "loss": 0.317,
-       "step": 155
-     },
-     {
-       "epoch": 4.333333333333333,
-       "grad_norm": 0.6956952810287476,
-       "learning_rate": 5.828571428571429e-05,
-       "loss": 0.3217,
-       "step": 156
-     },
-     {
-       "epoch": 4.361111111111111,
-       "grad_norm": 0.8463672995567322,
-       "learning_rate": 5.8e-05,
-       "loss": 0.322,
-       "step": 157
-     },
-     {
-       "epoch": 4.388888888888889,
-       "grad_norm": 0.5538536906242371,
-       "learning_rate": 5.771428571428572e-05,
-       "loss": 0.3129,
-       "step": 158
-     },
-     {
-       "epoch": 4.416666666666667,
-       "grad_norm": 0.8398566246032715,
-       "learning_rate": 5.742857142857143e-05,
-       "loss": 0.3275,
-       "step": 159
-     },
-     {
-       "epoch": 4.444444444444445,
-       "grad_norm": 0.5335714221000671,
-       "learning_rate": 5.714285714285714e-05,
-       "loss": 0.3225,
-       "step": 160
-     },
-     {
-       "epoch": 4.444444444444445,
-       "eval_loss": 0.3419412672519684,
-       "eval_runtime": 0.1054,
-       "eval_samples_per_second": 436.373,
-       "eval_steps_per_second": 9.486,
-       "step": 160
-     },
-     {
-       "epoch": 4.472222222222222,
-       "grad_norm": 0.893516480922699,
-       "learning_rate": 5.6857142857142865e-05,
-       "loss": 0.3201,
-       "step": 161
-     },
-     {
-       "epoch": 4.5,
-       "grad_norm": 0.7950851321220398,
-       "learning_rate": 5.6571428571428574e-05,
-       "loss": 0.3246,
-       "step": 162
-     },
-     {
-       "epoch": 4.527777777777778,
-       "grad_norm": 0.6274649500846863,
-       "learning_rate": 5.628571428571428e-05,
-       "loss": 0.3138,
-       "step": 163
-     },
-     {
-       "epoch": 4.555555555555555,
-       "grad_norm": 0.6270793676376343,
-       "learning_rate": 5.6000000000000006e-05,
-       "loss": 0.3225,
-       "step": 164
-     },
-     {
-       "epoch": 4.583333333333333,
-       "grad_norm": 0.6661616563796997,
-       "learning_rate": 5.571428571428572e-05,
-       "loss": 0.3222,
-       "step": 165
-     },
-     {
-       "epoch": 4.611111111111111,
-       "grad_norm": 0.6097862124443054,
-       "learning_rate": 5.542857142857143e-05,
-       "loss": 0.3138,
-       "step": 166
-     },
-     {
-       "epoch": 4.638888888888889,
-       "grad_norm": 0.6743194460868835,
-       "learning_rate": 5.514285714285714e-05,
-       "loss": 0.3109,
-       "step": 167
-     },
-     {
-       "epoch": 4.666666666666667,
-       "grad_norm": 0.6684880256652832,
-       "learning_rate": 5.485714285714286e-05,
-       "loss": 0.3193,
-       "step": 168
-     },
-     {
-       "epoch": 4.694444444444445,
-       "grad_norm": 0.7434603571891785,
-       "learning_rate": 5.457142857142857e-05,
-       "loss": 0.3212,
-       "step": 169
-     },
-     {
-       "epoch": 4.722222222222222,
-       "grad_norm": 0.8257206082344055,
-       "learning_rate": 5.428571428571428e-05,
-       "loss": 0.323,
-       "step": 170
-     },
-     {
-       "epoch": 4.722222222222222,
-       "eval_loss": 0.335285484790802,
-       "eval_runtime": 0.1056,
-       "eval_samples_per_second": 435.635,
-       "eval_steps_per_second": 9.47,
-       "step": 170
-     },
-     {
-       "epoch": 4.75,
-       "grad_norm": 0.5611714720726013,
-       "learning_rate": 5.4000000000000005e-05,
-       "loss": 0.3249,
-       "step": 171
-     },
-     {
-       "epoch": 4.777777777777778,
-       "grad_norm": 0.6146838068962097,
-       "learning_rate": 5.3714285714285714e-05,
-       "loss": 0.3213,
-       "step": 172
-     },
-     {
-       "epoch": 4.805555555555555,
-       "grad_norm": 0.6939036846160889,
-       "learning_rate": 5.342857142857143e-05,
-       "loss": 0.3239,
-       "step": 173
-     },
-     {
-       "epoch": 4.833333333333333,
-       "grad_norm": 0.7213876843452454,
-       "learning_rate": 5.314285714285715e-05,
-       "loss": 0.3234,
-       "step": 174
-     },
-     {
-       "epoch": 4.861111111111111,
-       "grad_norm": 0.6637408137321472,
-       "learning_rate": 5.285714285714286e-05,
-       "loss": 0.3186,
-       "step": 175
-     },
-     {
-       "epoch": 4.888888888888889,
-       "grad_norm": 0.6469508409500122,
-       "learning_rate": 5.257142857142857e-05,
-       "loss": 0.3184,
-       "step": 176
-     },
-     {
-       "epoch": 4.916666666666667,
-       "grad_norm": 0.6262702941894531,
-       "learning_rate": 5.2285714285714294e-05,
-       "loss": 0.3171,
-       "step": 177
-     },
-     {
-       "epoch": 4.944444444444445,
-       "grad_norm": 0.6692309379577637,
-       "learning_rate": 5.2000000000000004e-05,
-       "loss": 0.3271,
-       "step": 178
-     },
-     {
-       "epoch": 4.972222222222222,
-       "grad_norm": 0.611004114151001,
-       "learning_rate": 5.171428571428571e-05,
-       "loss": 0.3229,
-       "step": 179
-     },
-     {
-       "epoch": 5.0,
-       "grad_norm": 0.9707463383674622,
-       "learning_rate": 5.142857142857143e-05,
-       "loss": 0.3223,
-       "step": 180
-     },
-     {
-       "epoch": 5.0,
-       "eval_loss": 0.33622071146965027,
-       "eval_runtime": 0.105,
-       "eval_samples_per_second": 437.997,
-       "eval_steps_per_second": 9.522,
-       "step": 180
-     },
-     {
-       "epoch": 5.027777777777778,
-       "grad_norm": 0.43101295828819275,
-       "learning_rate": 5.1142857142857145e-05,
-       "loss": 0.3106,
-       "step": 181
-     },
-     {
-       "epoch": 5.055555555555555,
-       "grad_norm": 0.7981957793235779,
-       "learning_rate": 5.085714285714286e-05,
-       "loss": 0.3139,
-       "step": 182
-     },
-     {
-       "epoch": 5.083333333333333,
-       "grad_norm": 0.9149967432022095,
-       "learning_rate": 5.057142857142857e-05,
-       "loss": 0.3137,
-       "step": 183
-     },
-     {
-       "epoch": 5.111111111111111,
-       "grad_norm": 0.8689376711845398,
-       "learning_rate": 5.028571428571429e-05,
-       "loss": 0.3185,
-       "step": 184
-     },
-     {
-       "epoch": 5.138888888888889,
-       "grad_norm": 0.6829914450645447,
-       "learning_rate": 5e-05,
-       "loss": 0.3195,
-       "step": 185
-     },
-     {
-       "epoch": 5.166666666666667,
-       "grad_norm": 0.6187098026275635,
-       "learning_rate": 4.971428571428572e-05,
-       "loss": 0.3139,
-       "step": 186
-     },
-     {
-       "epoch": 5.194444444444445,
-       "grad_norm": 0.8703141212463379,
-       "learning_rate": 4.942857142857143e-05,
-       "loss": 0.3147,
-       "step": 187
-     },
-     {
-       "epoch": 5.222222222222222,
-       "grad_norm": 0.6344360709190369,
-       "learning_rate": 4.9142857142857144e-05,
-       "loss": 0.3162,
-       "step": 188
-     },
-     {
-       "epoch": 5.25,
-       "grad_norm": 0.7499691843986511,
-       "learning_rate": 4.885714285714286e-05,
-       "loss": 0.3228,
-       "step": 189
-     },
-     {
-       "epoch": 5.277777777777778,
-       "grad_norm": 0.7664843201637268,
-       "learning_rate": 4.8571428571428576e-05,
-       "loss": 0.3152,
-       "step": 190
-     },
-     {
-       "epoch": 5.277777777777778,
-       "eval_loss": 0.3364305794239044,
-       "eval_runtime": 0.1056,
-       "eval_samples_per_second": 435.778,
-       "eval_steps_per_second": 9.473,
-       "step": 190
-     },
-     {
-       "epoch": 5.305555555555555,
-       "grad_norm": 0.6158504486083984,
-       "learning_rate": 4.828571428571429e-05,
-       "loss": 0.3154,
-       "step": 191
-     },
-     {
-       "epoch": 5.333333333333333,
-       "grad_norm": 0.8614490032196045,
-       "learning_rate": 4.8e-05,
-       "loss": 0.3206,
-       "step": 192
-     },
-     {
-       "epoch": 5.361111111111111,
-       "grad_norm": 0.7699540257453918,
-       "learning_rate": 4.771428571428572e-05,
-       "loss": 0.3159,
-       "step": 193
-     },
-     {
-       "epoch": 5.388888888888889,
-       "grad_norm": 0.9598901867866516,
-       "learning_rate": 4.742857142857143e-05,
-       "loss": 0.3186,
-       "step": 194
-     },
-     {
-       "epoch": 5.416666666666667,
-       "grad_norm": 0.855253279209137,
-       "learning_rate": 4.714285714285714e-05,
-       "loss": 0.3118,
1526
- "step": 195
1527
- },
1528
- {
1529
- "epoch": 5.444444444444445,
1530
- "grad_norm": 0.6478847861289978,
1531
- "learning_rate": 4.685714285714286e-05,
1532
- "loss": 0.3178,
1533
- "step": 196
1534
- },
1535
- {
1536
- "epoch": 5.472222222222222,
1537
- "grad_norm": 0.8028067946434021,
1538
- "learning_rate": 4.6571428571428575e-05,
1539
- "loss": 0.3236,
1540
- "step": 197
1541
- },
1542
- {
1543
- "epoch": 5.5,
1544
- "grad_norm": 0.7795782089233398,
1545
- "learning_rate": 4.628571428571429e-05,
1546
- "loss": 0.3147,
1547
- "step": 198
1548
- },
1549
- {
1550
- "epoch": 5.527777777777778,
1551
- "grad_norm": 0.7845653891563416,
1552
- "learning_rate": 4.600000000000001e-05,
1553
- "loss": 0.3221,
1554
- "step": 199
1555
- },
1556
- {
1557
- "epoch": 5.555555555555555,
1558
- "grad_norm": 1.1422370672225952,
1559
- "learning_rate": 4.5714285714285716e-05,
1560
- "loss": 0.321,
1561
- "step": 200
1562
- },
1563
- {
1564
- "epoch": 5.555555555555555,
1565
- "eval_loss": 0.33535492420196533,
1566
- "eval_runtime": 0.1051,
1567
- "eval_samples_per_second": 437.57,
1568
- "eval_steps_per_second": 9.512,
1569
- "step": 200
1570
- },
1571
- {
1572
- "epoch": 5.583333333333333,
1573
- "grad_norm": 0.7386415600776672,
1574
- "learning_rate": 4.542857142857143e-05,
1575
- "loss": 0.3183,
1576
- "step": 201
1577
- },
1578
- {
1579
- "epoch": 5.611111111111111,
1580
- "grad_norm": 0.6756716966629028,
1581
- "learning_rate": 4.514285714285714e-05,
1582
- "loss": 0.3205,
1583
- "step": 202
1584
- },
1585
- {
1586
- "epoch": 5.638888888888889,
1587
- "grad_norm": 0.7116839289665222,
1588
- "learning_rate": 4.485714285714286e-05,
1589
- "loss": 0.3195,
1590
- "step": 203
1591
- },
1592
- {
1593
- "epoch": 5.666666666666667,
1594
- "grad_norm": 0.7919530272483826,
1595
- "learning_rate": 4.4571428571428574e-05,
1596
- "loss": 0.3248,
1597
- "step": 204
1598
- },
1599
- {
1600
- "epoch": 5.694444444444445,
1601
- "grad_norm": 0.5152342319488525,
1602
- "learning_rate": 4.428571428571428e-05,
1603
- "loss": 0.3124,
1604
- "step": 205
1605
- },
1606
- {
1607
- "epoch": 5.722222222222222,
1608
- "grad_norm": 0.9519732594490051,
1609
- "learning_rate": 4.4000000000000006e-05,
1610
- "loss": 0.3205,
1611
- "step": 206
1612
- },
1613
- {
1614
- "epoch": 5.75,
1615
- "grad_norm": 0.7759018540382385,
1616
- "learning_rate": 4.371428571428572e-05,
1617
- "loss": 0.3188,
1618
- "step": 207
1619
- },
1620
- {
1621
- "epoch": 5.777777777777778,
1622
- "grad_norm": 0.931468665599823,
1623
- "learning_rate": 4.342857142857143e-05,
1624
- "loss": 0.3195,
1625
- "step": 208
1626
- },
1627
- {
1628
- "epoch": 5.805555555555555,
1629
- "grad_norm": 1.0431036949157715,
1630
- "learning_rate": 4.314285714285715e-05,
1631
- "loss": 0.3256,
1632
- "step": 209
1633
- },
1634
- {
1635
- "epoch": 5.833333333333333,
1636
- "grad_norm": 0.6952974200248718,
1637
- "learning_rate": 4.2857142857142856e-05,
1638
- "loss": 0.3189,
1639
- "step": 210
1640
- },
1641
- {
1642
- "epoch": 5.833333333333333,
1643
- "eval_loss": 0.33737656474113464,
1644
- "eval_runtime": 0.1056,
1645
- "eval_samples_per_second": 435.436,
1646
- "eval_steps_per_second": 9.466,
1647
- "step": 210
1648
- },
1649
- {
1650
- "epoch": 5.861111111111111,
1651
- "grad_norm": 0.880944013595581,
1652
- "learning_rate": 4.257142857142857e-05,
1653
- "loss": 0.3159,
1654
- "step": 211
1655
- },
1656
- {
1657
- "epoch": 5.888888888888889,
1658
- "grad_norm": 0.6606886982917786,
1659
- "learning_rate": 4.228571428571429e-05,
1660
- "loss": 0.3177,
1661
- "step": 212
1662
- },
1663
- {
1664
- "epoch": 5.916666666666667,
1665
- "grad_norm": 0.7617785930633545,
1666
- "learning_rate": 4.2e-05,
1667
- "loss": 0.317,
1668
- "step": 213
1669
- },
1670
- {
1671
- "epoch": 5.944444444444445,
1672
- "grad_norm": 0.6381199955940247,
1673
- "learning_rate": 4.1714285714285714e-05,
1674
- "loss": 0.313,
1675
- "step": 214
1676
- },
1677
- {
1678
- "epoch": 5.972222222222222,
1679
- "grad_norm": 0.6644593477249146,
1680
- "learning_rate": 4.1428571428571437e-05,
1681
- "loss": 0.3184,
1682
- "step": 215
1683
- },
1684
- {
1685
- "epoch": 6.0,
1686
- "grad_norm": 0.9742204546928406,
1687
- "learning_rate": 4.1142857142857146e-05,
1688
- "loss": 0.3103,
1689
- "step": 216
1690
- },
1691
- {
1692
- "epoch": 6.027777777777778,
1693
- "grad_norm": 0.6401204466819763,
1694
- "learning_rate": 4.085714285714286e-05,
1695
- "loss": 0.311,
1696
- "step": 217
1697
- },
1698
- {
1699
- "epoch": 6.055555555555555,
1700
- "grad_norm": 0.785503089427948,
1701
- "learning_rate": 4.057142857142857e-05,
1702
- "loss": 0.3155,
1703
- "step": 218
1704
- },
1705
- {
1706
- "epoch": 6.083333333333333,
1707
- "grad_norm": 0.5417785048484802,
1708
- "learning_rate": 4.028571428571429e-05,
1709
- "loss": 0.3109,
1710
- "step": 219
1711
- },
1712
- {
1713
- "epoch": 6.111111111111111,
1714
- "grad_norm": 0.6186631321907043,
1715
- "learning_rate": 4e-05,
1716
- "loss": 0.3114,
1717
- "step": 220
1718
- },
1719
- {
1720
- "epoch": 6.111111111111111,
1721
- "eval_loss": 0.32762280106544495,
1722
- "eval_runtime": 0.1058,
1723
- "eval_samples_per_second": 434.862,
1724
- "eval_steps_per_second": 9.454,
1725
- "step": 220
1726
- },
1727
- {
1728
- "epoch": 6.138888888888889,
1729
- "grad_norm": 0.6042884588241577,
1730
- "learning_rate": 3.971428571428571e-05,
1731
- "loss": 0.3099,
1732
- "step": 221
1733
- },
1734
- {
1735
- "epoch": 6.166666666666667,
1736
- "grad_norm": 0.5823580026626587,
1737
- "learning_rate": 3.942857142857143e-05,
1738
- "loss": 0.3104,
1739
- "step": 222
1740
- },
1741
- {
1742
- "epoch": 6.194444444444445,
1743
- "grad_norm": 0.6572258472442627,
1744
- "learning_rate": 3.9142857142857145e-05,
1745
- "loss": 0.3154,
1746
- "step": 223
1747
- },
1748
- {
1749
- "epoch": 6.222222222222222,
1750
- "grad_norm": 0.565834641456604,
1751
- "learning_rate": 3.885714285714286e-05,
1752
- "loss": 0.3121,
1753
- "step": 224
1754
- },
1755
- {
1756
- "epoch": 6.25,
1757
- "grad_norm": 0.7184673547744751,
1758
- "learning_rate": 3.857142857142858e-05,
1759
- "loss": 0.3119,
1760
- "step": 225
1761
- },
1762
- {
1763
- "epoch": 6.277777777777778,
1764
- "grad_norm": 0.7170347571372986,
1765
- "learning_rate": 3.8285714285714286e-05,
1766
- "loss": 0.3158,
1767
- "step": 226
1768
- },
1769
- {
1770
- "epoch": 6.305555555555555,
1771
- "grad_norm": 0.6102560758590698,
1772
- "learning_rate": 3.8e-05,
1773
- "loss": 0.3038,
1774
- "step": 227
1775
- },
1776
- {
1777
- "epoch": 6.333333333333333,
1778
- "grad_norm": 0.7612823843955994,
1779
- "learning_rate": 3.771428571428572e-05,
1780
- "loss": 0.3124,
1781
- "step": 228
1782
- },
1783
- {
1784
- "epoch": 6.361111111111111,
1785
- "grad_norm": 0.6277872920036316,
1786
- "learning_rate": 3.742857142857143e-05,
1787
- "loss": 0.3028,
1788
- "step": 229
1789
- },
1790
- {
1791
- "epoch": 6.388888888888889,
1792
- "grad_norm": 0.7007192373275757,
1793
- "learning_rate": 3.7142857142857143e-05,
1794
- "loss": 0.3202,
1795
- "step": 230
1796
- },
1797
- {
1798
- "epoch": 6.388888888888889,
1799
- "eval_loss": 0.33139991760253906,
1800
- "eval_runtime": 0.1062,
1801
- "eval_samples_per_second": 433.311,
1802
- "eval_steps_per_second": 9.42,
1803
- "step": 230
1804
- },
1805
- {
1806
- "epoch": 6.416666666666667,
1807
- "grad_norm": 0.6396629810333252,
1808
- "learning_rate": 3.685714285714286e-05,
1809
- "loss": 0.313,
1810
- "step": 231
1811
- },
1812
- {
1813
- "epoch": 6.444444444444445,
1814
- "grad_norm": 0.5031012892723083,
1815
- "learning_rate": 3.6571428571428576e-05,
1816
- "loss": 0.3116,
1817
- "step": 232
1818
- },
1819
- {
1820
- "epoch": 6.472222222222222,
1821
- "grad_norm": 0.7323219776153564,
1822
- "learning_rate": 3.628571428571429e-05,
1823
- "loss": 0.3172,
1824
- "step": 233
1825
- },
1826
- {
1827
- "epoch": 6.5,
1828
- "grad_norm": 0.9094661474227905,
1829
- "learning_rate": 3.6e-05,
1830
- "loss": 0.3103,
1831
- "step": 234
1832
- },
1833
- {
1834
- "epoch": 6.527777777777778,
1835
- "grad_norm": 0.5560885667800903,
1836
- "learning_rate": 3.571428571428572e-05,
1837
- "loss": 0.3056,
1838
- "step": 235
1839
- },
1840
- {
1841
- "epoch": 6.555555555555555,
1842
- "grad_norm": 1.0145907402038574,
1843
- "learning_rate": 3.5428571428571426e-05,
1844
- "loss": 0.3096,
1845
- "step": 236
1846
- },
1847
- {
1848
- "epoch": 6.583333333333333,
1849
- "grad_norm": 0.8287002444267273,
1850
- "learning_rate": 3.514285714285714e-05,
1851
- "loss": 0.3049,
1852
- "step": 237
1853
- },
1854
- {
1855
- "epoch": 6.611111111111111,
1856
- "grad_norm": 0.5207920074462891,
1857
- "learning_rate": 3.485714285714286e-05,
1858
- "loss": 0.3047,
1859
- "step": 238
1860
- },
1861
- {
1862
- "epoch": 6.638888888888889,
1863
- "grad_norm": 1.065272331237793,
1864
- "learning_rate": 3.4571428571428574e-05,
1865
- "loss": 0.3156,
1866
- "step": 239
1867
- },
1868
- {
1869
- "epoch": 6.666666666666667,
1870
- "grad_norm": 0.6712301969528198,
1871
- "learning_rate": 3.428571428571429e-05,
1872
- "loss": 0.3055,
1873
- "step": 240
1874
- },
1875
- {
1876
- "epoch": 6.666666666666667,
1877
- "eval_loss": 0.3387901484966278,
1878
- "eval_runtime": 0.1053,
1879
- "eval_samples_per_second": 436.76,
1880
- "eval_steps_per_second": 9.495,
1881
- "step": 240
1882
- },
1883
- {
1884
- "epoch": 6.694444444444445,
1885
- "grad_norm": 0.9477025866508484,
1886
- "learning_rate": 3.4000000000000007e-05,
1887
- "loss": 0.3164,
1888
- "step": 241
1889
- },
1890
- {
1891
- "epoch": 6.722222222222222,
1892
- "grad_norm": 1.179295301437378,
1893
- "learning_rate": 3.3714285714285716e-05,
1894
- "loss": 0.3263,
1895
- "step": 242
1896
- },
1897
- {
1898
- "epoch": 6.75,
1899
- "grad_norm": 0.6969395875930786,
1900
- "learning_rate": 3.342857142857143e-05,
1901
- "loss": 0.3136,
1902
- "step": 243
1903
- },
1904
- {
1905
- "epoch": 6.777777777777778,
1906
- "grad_norm": 0.7752478718757629,
1907
- "learning_rate": 3.314285714285714e-05,
1908
- "loss": 0.3143,
1909
- "step": 244
1910
- },
1911
- {
1912
- "epoch": 6.805555555555555,
1913
- "grad_norm": 0.7640174031257629,
1914
- "learning_rate": 3.285714285714286e-05,
1915
- "loss": 0.3143,
1916
- "step": 245
1917
- },
1918
- {
1919
- "epoch": 6.833333333333333,
1920
- "grad_norm": 0.9904161691665649,
1921
- "learning_rate": 3.257142857142857e-05,
1922
- "loss": 0.3093,
1923
- "step": 246
1924
- },
1925
- {
1926
- "epoch": 6.861111111111111,
1927
- "grad_norm": 0.7707158923149109,
1928
- "learning_rate": 3.228571428571428e-05,
1929
- "loss": 0.3185,
1930
- "step": 247
1931
- },
1932
- {
1933
- "epoch": 6.888888888888889,
1934
- "grad_norm": 0.8660508394241333,
1935
- "learning_rate": 3.2000000000000005e-05,
1936
- "loss": 0.3122,
1937
- "step": 248
1938
- },
1939
- {
1940
- "epoch": 6.916666666666667,
1941
- "grad_norm": 0.7438889741897583,
1942
- "learning_rate": 3.1714285714285715e-05,
1943
- "loss": 0.3148,
1944
- "step": 249
1945
- },
1946
- {
1947
- "epoch": 6.944444444444445,
1948
- "grad_norm": 0.5746331810951233,
1949
- "learning_rate": 3.142857142857143e-05,
1950
- "loss": 0.3137,
1951
- "step": 250
1952
- },
1953
- {
1954
- "epoch": 6.944444444444445,
1955
- "eval_loss": 0.32808226346969604,
1956
- "eval_runtime": 0.1059,
1957
- "eval_samples_per_second": 434.438,
1958
- "eval_steps_per_second": 9.444,
1959
- "step": 250
1960
- },
1961
- {
1962
- "epoch": 6.972222222222222,
1963
- "grad_norm": 0.7158817052841187,
1964
- "learning_rate": 3.114285714285715e-05,
1965
- "loss": 0.3125,
1966
- "step": 251
1967
- },
1968
- {
1969
- "epoch": 7.0,
1970
- "grad_norm": 0.8010092973709106,
1971
- "learning_rate": 3.0857142857142856e-05,
1972
- "loss": 0.3094,
1973
- "step": 252
1974
- },
1975
- {
1976
- "epoch": 7.027777777777778,
1977
- "grad_norm": 0.7418866157531738,
1978
- "learning_rate": 3.057142857142857e-05,
1979
- "loss": 0.3067,
1980
- "step": 253
1981
- },
1982
- {
1983
- "epoch": 7.055555555555555,
1984
- "grad_norm": 0.6731083989143372,
1985
- "learning_rate": 3.0285714285714288e-05,
1986
- "loss": 0.3068,
1987
- "step": 254
1988
- },
1989
- {
1990
- "epoch": 7.083333333333333,
1991
- "grad_norm": 0.6405408382415771,
1992
- "learning_rate": 3e-05,
1993
- "loss": 0.3075,
1994
- "step": 255
1995
- },
1996
- {
1997
- "epoch": 7.111111111111111,
1998
- "grad_norm": 0.6403458118438721,
1999
- "learning_rate": 2.9714285714285717e-05,
2000
- "loss": 0.3096,
2001
- "step": 256
2002
- },
2003
- {
2004
- "epoch": 7.138888888888889,
2005
- "grad_norm": 0.7583682537078857,
2006
- "learning_rate": 2.9428571428571426e-05,
2007
- "loss": 0.3103,
2008
- "step": 257
2009
- },
2010
- {
2011
- "epoch": 7.166666666666667,
2012
- "grad_norm": 0.8137710094451904,
2013
- "learning_rate": 2.9142857142857146e-05,
2014
- "loss": 0.3064,
2015
- "step": 258
2016
- },
2017
- {
2018
- "epoch": 7.194444444444445,
2019
- "grad_norm": 0.7179896235466003,
2020
- "learning_rate": 2.885714285714286e-05,
2021
- "loss": 0.3067,
2022
- "step": 259
2023
- },
2024
- {
2025
- "epoch": 7.222222222222222,
2026
- "grad_norm": 0.9344987273216248,
2027
- "learning_rate": 2.857142857142857e-05,
2028
- "loss": 0.3081,
2029
- "step": 260
2030
- },
2031
- {
2032
- "epoch": 7.222222222222222,
2033
- "eval_loss": 0.33135512471199036,
2034
- "eval_runtime": 0.1054,
2035
- "eval_samples_per_second": 436.494,
2036
- "eval_steps_per_second": 9.489,
2037
- "step": 260
2038
- },
2039
- {
2040
- "epoch": 7.25,
2041
- "grad_norm": 0.7846106886863708,
2042
- "learning_rate": 2.8285714285714287e-05,
2043
- "loss": 0.3024,
2044
- "step": 261
2045
- },
2046
- {
2047
- "epoch": 7.277777777777778,
2048
- "grad_norm": 0.5884716510772705,
2049
- "learning_rate": 2.8000000000000003e-05,
2050
- "loss": 0.3062,
2051
- "step": 262
2052
- },
2053
- {
2054
- "epoch": 7.305555555555555,
2055
- "grad_norm": 0.7277001738548279,
2056
- "learning_rate": 2.7714285714285716e-05,
2057
- "loss": 0.3017,
2058
- "step": 263
2059
- },
2060
- {
2061
- "epoch": 7.333333333333333,
2062
- "grad_norm": 0.6671104431152344,
2063
- "learning_rate": 2.742857142857143e-05,
2064
- "loss": 0.3109,
2065
- "step": 264
2066
- },
2067
- {
2068
- "epoch": 7.361111111111111,
2069
- "grad_norm": 0.6468051671981812,
2070
- "learning_rate": 2.714285714285714e-05,
2071
- "loss": 0.3051,
2072
- "step": 265
2073
- },
2074
- {
2075
- "epoch": 7.388888888888889,
2076
- "grad_norm": 0.7413132190704346,
2077
- "learning_rate": 2.6857142857142857e-05,
2078
- "loss": 0.3059,
2079
- "step": 266
2080
- },
2081
- {
2082
- "epoch": 7.416666666666667,
2083
- "grad_norm": 0.8842555284500122,
2084
- "learning_rate": 2.6571428571428576e-05,
2085
- "loss": 0.3108,
2086
- "step": 267
2087
- },
2088
- {
2089
- "epoch": 7.444444444444445,
2090
- "grad_norm": 0.7701683044433594,
2091
- "learning_rate": 2.6285714285714286e-05,
2092
- "loss": 0.31,
2093
- "step": 268
2094
- },
2095
- {
2096
- "epoch": 7.472222222222222,
2097
- "grad_norm": 0.6261523962020874,
2098
- "learning_rate": 2.6000000000000002e-05,
2099
- "loss": 0.2966,
2100
- "step": 269
2101
- },
2102
- {
2103
- "epoch": 7.5,
2104
- "grad_norm": 0.6180337071418762,
2105
- "learning_rate": 2.5714285714285714e-05,
2106
- "loss": 0.3063,
2107
- "step": 270
2108
- },
2109
- {
2110
- "epoch": 7.5,
2111
- "eval_loss": 0.33175382018089294,
2112
- "eval_runtime": 0.1062,
2113
- "eval_samples_per_second": 433.162,
2114
- "eval_steps_per_second": 9.417,
2115
- "step": 270
2116
- },
2117
- {
2118
- "epoch": 7.527777777777778,
2119
- "grad_norm": 0.910743236541748,
2120
- "learning_rate": 2.542857142857143e-05,
2121
- "loss": 0.3058,
2122
- "step": 271
2123
- },
2124
- {
2125
- "epoch": 7.555555555555555,
2126
- "grad_norm": 0.8947623372077942,
2127
- "learning_rate": 2.5142857142857147e-05,
2128
- "loss": 0.3092,
2129
- "step": 272
2130
- },
2131
- {
2132
- "epoch": 7.583333333333333,
2133
- "grad_norm": 0.8466401100158691,
2134
- "learning_rate": 2.485714285714286e-05,
2135
- "loss": 0.3069,
2136
- "step": 273
2137
- },
2138
- {
2139
- "epoch": 7.611111111111111,
2140
- "grad_norm": 0.7808226943016052,
2141
- "learning_rate": 2.4571428571428572e-05,
2142
- "loss": 0.316,
2143
- "step": 274
2144
- },
2145
- {
2146
- "epoch": 7.638888888888889,
2147
- "grad_norm": 0.6704230308532715,
2148
- "learning_rate": 2.4285714285714288e-05,
2149
- "loss": 0.2993,
2150
- "step": 275
2151
- },
2152
- {
2153
- "epoch": 7.666666666666667,
2154
- "grad_norm": 0.7090719938278198,
2155
- "learning_rate": 2.4e-05,
2156
- "loss": 0.3034,
2157
- "step": 276
2158
- },
2159
- {
2160
- "epoch": 7.694444444444445,
2161
- "grad_norm": 0.7552341818809509,
2162
- "learning_rate": 2.3714285714285717e-05,
2163
- "loss": 0.3077,
2164
- "step": 277
2165
- },
2166
- {
2167
- "epoch": 7.722222222222222,
2168
- "grad_norm": 0.7747870683670044,
2169
- "learning_rate": 2.342857142857143e-05,
2170
- "loss": 0.3047,
2171
- "step": 278
2172
- },
2173
- {
2174
- "epoch": 7.75,
2175
- "grad_norm": 1.1127567291259766,
2176
- "learning_rate": 2.3142857142857145e-05,
2177
- "loss": 0.3105,
2178
- "step": 279
2179
- },
2180
- {
2181
- "epoch": 7.777777777777778,
2182
- "grad_norm": 0.7083399891853333,
2183
- "learning_rate": 2.2857142857142858e-05,
2184
- "loss": 0.2997,
2185
- "step": 280
2186
- },
2187
- {
2188
- "epoch": 7.777777777777778,
2189
- "eval_loss": 0.3296756446361542,
2190
- "eval_runtime": 0.1057,
2191
- "eval_samples_per_second": 435.219,
2192
- "eval_steps_per_second": 9.461,
2193
- "step": 280
2194
- },
2195
- {
2196
- "epoch": 7.805555555555555,
2197
- "grad_norm": 0.5560160279273987,
2198
- "learning_rate": 2.257142857142857e-05,
2199
- "loss": 0.3035,
2200
- "step": 281
2201
- },
2202
- {
2203
- "epoch": 7.833333333333333,
2204
- "grad_norm": 0.7277750372886658,
2205
- "learning_rate": 2.2285714285714287e-05,
2206
- "loss": 0.3066,
2207
- "step": 282
2208
- },
2209
- {
2210
- "epoch": 7.861111111111111,
2211
- "grad_norm": 0.683189868927002,
2212
- "learning_rate": 2.2000000000000003e-05,
2213
- "loss": 0.3049,
2214
- "step": 283
2215
- },
2216
- {
2217
- "epoch": 7.888888888888889,
2218
- "grad_norm": 0.7173867225646973,
2219
- "learning_rate": 2.1714285714285715e-05,
2220
- "loss": 0.3098,
2221
- "step": 284
2222
- },
2223
- {
2224
- "epoch": 7.916666666666667,
2225
- "grad_norm": 0.698379397392273,
2226
- "learning_rate": 2.1428571428571428e-05,
2227
- "loss": 0.3081,
2228
- "step": 285
2229
- },
2230
- {
2231
- "epoch": 7.944444444444445,
2232
- "grad_norm": 0.5789396166801453,
2233
- "learning_rate": 2.1142857142857144e-05,
2234
- "loss": 0.3109,
2235
- "step": 286
2236
- },
2237
- {
2238
- "epoch": 7.972222222222222,
2239
- "grad_norm": 0.8007158637046814,
2240
- "learning_rate": 2.0857142857142857e-05,
2241
- "loss": 0.3149,
2242
- "step": 287
2243
- },
2244
- {
2245
- "epoch": 8.0,
2246
- "grad_norm": 0.8773916959762573,
2247
- "learning_rate": 2.0571428571428573e-05,
2248
- "loss": 0.3087,
2249
- "step": 288
2250
- },
2251
- {
2252
- "epoch": 8.027777777777779,
2253
- "grad_norm": 0.7212807536125183,
2254
- "learning_rate": 2.0285714285714286e-05,
2255
- "loss": 0.2989,
2256
- "step": 289
2257
- },
2258
- {
2259
- "epoch": 8.055555555555555,
2260
- "grad_norm": 0.6096076369285583,
2261
- "learning_rate": 2e-05,
2262
- "loss": 0.3069,
2263
- "step": 290
2264
- },
2265
- {
2266
- "epoch": 8.055555555555555,
2267
- "eval_loss": 0.33069342374801636,
2268
- "eval_runtime": 0.105,
2269
- "eval_samples_per_second": 438.062,
2270
- "eval_steps_per_second": 9.523,
2271
- "step": 290
2272
- },
2273
- {
2274
- "epoch": 8.083333333333334,
2275
- "grad_norm": 0.5173125267028809,
2276
- "learning_rate": 1.9714285714285714e-05,
2277
- "loss": 0.295,
2278
- "step": 291
2279
- },
2280
- {
2281
- "epoch": 8.11111111111111,
2282
- "grad_norm": 0.6913369297981262,
2283
- "learning_rate": 1.942857142857143e-05,
2284
- "loss": 0.3014,
2285
- "step": 292
2286
- },
2287
- {
2288
- "epoch": 8.13888888888889,
2289
- "grad_norm": 0.7195921540260315,
2290
- "learning_rate": 1.9142857142857143e-05,
2291
- "loss": 0.3037,
2292
- "step": 293
2293
- },
2294
- {
2295
- "epoch": 8.166666666666666,
2296
- "grad_norm": 0.6366473436355591,
2297
- "learning_rate": 1.885714285714286e-05,
2298
- "loss": 0.3031,
2299
- "step": 294
2300
- },
2301
- {
2302
- "epoch": 8.194444444444445,
2303
- "grad_norm": 0.5457173585891724,
2304
- "learning_rate": 1.8571428571428572e-05,
2305
- "loss": 0.2957,
2306
- "step": 295
2307
- },
2308
- {
2309
- "epoch": 8.222222222222221,
2310
- "grad_norm": 0.6149912476539612,
2311
- "learning_rate": 1.8285714285714288e-05,
2312
- "loss": 0.2997,
2313
- "step": 296
2314
- },
2315
- {
2316
- "epoch": 8.25,
2317
- "grad_norm": 0.5352884531021118,
2318
- "learning_rate": 1.8e-05,
2319
- "loss": 0.3048,
2320
- "step": 297
2321
- },
2322
- {
2323
- "epoch": 8.277777777777779,
2324
- "grad_norm": 0.6278409361839294,
2325
- "learning_rate": 1.7714285714285713e-05,
2326
- "loss": 0.308,
2327
- "step": 298
2328
- },
2329
- {
2330
- "epoch": 8.305555555555555,
2331
- "grad_norm": 0.5881698727607727,
2332
- "learning_rate": 1.742857142857143e-05,
2333
- "loss": 0.3005,
2334
- "step": 299
2335
- },
2336
- {
2337
- "epoch": 8.333333333333334,
2338
- "grad_norm": 0.6125136613845825,
2339
- "learning_rate": 1.7142857142857145e-05,
2340
- "loss": 0.302,
2341
- "step": 300
2342
- },
2343
- {
2344
- "epoch": 8.333333333333334,
2345
- "eval_loss": 0.3336547017097473,
2346
- "eval_runtime": 0.1055,
2347
- "eval_samples_per_second": 436.2,
2348
- "eval_steps_per_second": 9.483,
2349
- "step": 300
2350
- },
2351
- {
2352
- "epoch": 8.36111111111111,
2353
- "grad_norm": 0.6722866892814636,
2354
- "learning_rate": 1.6857142857142858e-05,
2355
- "loss": 0.3012,
2356
- "step": 301
2357
- },
2358
- {
2359
- "epoch": 8.38888888888889,
2360
- "grad_norm": 0.6827422976493835,
2361
- "learning_rate": 1.657142857142857e-05,
2362
- "loss": 0.297,
2363
- "step": 302
2364
- },
2365
- {
2366
- "epoch": 8.416666666666666,
2367
- "grad_norm": 0.7612675428390503,
2368
- "learning_rate": 1.6285714285714287e-05,
2369
- "loss": 0.2977,
2370
- "step": 303
2371
- },
2372
- {
2373
- "epoch": 8.444444444444445,
2374
- "grad_norm": 0.5952971577644348,
2375
- "learning_rate": 1.6000000000000003e-05,
2376
- "loss": 0.3009,
2377
- "step": 304
2378
- },
2379
- {
2380
- "epoch": 8.472222222222221,
2381
- "grad_norm": 0.8323265314102173,
2382
- "learning_rate": 1.5714285714285715e-05,
2383
- "loss": 0.3043,
2384
- "step": 305
2385
- },
2386
- {
2387
- "epoch": 8.5,
2388
- "grad_norm": 0.8321357369422913,
2389
- "learning_rate": 1.5428571428571428e-05,
2390
- "loss": 0.2956,
2391
- "step": 306
2392
- },
2393
- {
2394
- "epoch": 8.527777777777779,
2395
- "grad_norm": 0.6457182168960571,
2396
- "learning_rate": 1.5142857142857144e-05,
2397
- "loss": 0.3029,
2398
- "step": 307
2399
- },
2400
- {
2401
- "epoch": 8.555555555555555,
2402
- "grad_norm": 0.5753086805343628,
2403
- "learning_rate": 1.4857142857142858e-05,
2404
- "loss": 0.2972,
2405
- "step": 308
2406
- },
2407
- {
2408
- "epoch": 8.583333333333334,
2409
- "grad_norm": 0.8767444491386414,
2410
- "learning_rate": 1.4571428571428573e-05,
2411
- "loss": 0.2966,
2412
- "step": 309
2413
- },
2414
- {
2415
- "epoch": 8.61111111111111,
2416
- "grad_norm": 0.929669201374054,
2417
- "learning_rate": 1.4285714285714285e-05,
2418
- "loss": 0.3049,
2419
- "step": 310
2420
- },
2421
- {
2422
- "epoch": 8.61111111111111,
2423
- "eval_loss": 0.3303754925727844,
2424
- "eval_runtime": 0.1057,
2425
- "eval_samples_per_second": 435.106,
2426
- "eval_steps_per_second": 9.459,
2427
- "step": 310
2428
- },
2429
- {
2430
- "epoch": 8.63888888888889,
2431
- "grad_norm": 0.7576697468757629,
2432
- "learning_rate": 1.4000000000000001e-05,
2433
- "loss": 0.2989,
2434
- "step": 311
2435
- },
2436
- {
2437
- "epoch": 8.666666666666666,
2438
- "grad_norm": 0.6402246952056885,
2439
- "learning_rate": 1.3714285714285716e-05,
2440
- "loss": 0.3051,
2441
- "step": 312
2442
- },
2443
- {
2444
- "epoch": 8.694444444444445,
2445
- "grad_norm": 0.5665248036384583,
2446
- "learning_rate": 1.3428571428571429e-05,
2447
- "loss": 0.2974,
2448
- "step": 313
2449
- },
2450
- {
2451
- "epoch": 8.722222222222221,
2452
- "grad_norm": 0.9747456312179565,
2453
- "learning_rate": 1.3142857142857143e-05,
2454
- "loss": 0.3061,
2455
- "step": 314
2456
- },
2457
- {
2458
- "epoch": 8.75,
2459
- "grad_norm": 0.657123863697052,
2460
- "learning_rate": 1.2857142857142857e-05,
2461
- "loss": 0.3012,
2462
- "step": 315
2463
- },
2464
- {
2465
- "epoch": 8.777777777777779,
2466
- "grad_norm": 0.7186892032623291,
2467
- "learning_rate": 1.2571428571428573e-05,
2468
- "loss": 0.2995,
2469
- "step": 316
2470
- },
2471
- {
2472
- "epoch": 8.805555555555555,
2473
- "grad_norm": 0.6889364123344421,
2474
- "learning_rate": 1.2285714285714286e-05,
2475
- "loss": 0.3026,
2476
- "step": 317
2477
- },
2478
- {
2479
- "epoch": 8.833333333333334,
2480
- "grad_norm": 0.6299145817756653,
2481
- "learning_rate": 1.2e-05,
2482
- "loss": 0.3009,
2483
- "step": 318
2484
- },
2485
- {
2486
- "epoch": 8.86111111111111,
2487
- "grad_norm": 0.7328559756278992,
2488
- "learning_rate": 1.1714285714285715e-05,
2489
- "loss": 0.3002,
2490
- "step": 319
2491
- },
2492
- {
2493
- "epoch": 8.88888888888889,
2494
- "grad_norm": 0.6111913919448853,
2495
- "learning_rate": 1.1428571428571429e-05,
2496
- "loss": 0.2953,
2497
- "step": 320
2498
- },
2499
- {
2500
- "epoch": 8.88888888888889,
2501
- "eval_loss": 0.3253832757472992,
2502
- "eval_runtime": 0.106,
2503
- "eval_samples_per_second": 434.082,
2504
- "eval_steps_per_second": 9.437,
2505
- "step": 320
2506
- },
2507
- {
2508
- "epoch": 8.916666666666666,
2509
- "grad_norm": 0.6739629507064819,
2510
- "learning_rate": 1.1142857142857143e-05,
2511
- "loss": 0.3023,
2512
- "step": 321
2513
- },
2514
- {
2515
- "epoch": 8.944444444444445,
2516
- "grad_norm": 0.6967675685882568,
2517
- "learning_rate": 1.0857142857142858e-05,
2518
- "loss": 0.2978,
2519
- "step": 322
2520
- },
2521
- {
2522
- "epoch": 8.972222222222221,
2523
- "grad_norm": 0.702989935874939,
2524
- "learning_rate": 1.0571428571428572e-05,
2525
- "loss": 0.3043,
2526
- "step": 323
2527
- },
2528
- {
2529
- "epoch": 9.0,
2530
- "grad_norm": 1.156525731086731,
2531
- "learning_rate": 1.0285714285714286e-05,
2532
- "loss": 0.3034,
2533
- "step": 324
2534
- },
2535
- {
2536
- "epoch": 9.027777777777779,
2537
- "grad_norm": 0.564460277557373,
2538
- "learning_rate": 1e-05,
2539
- "loss": 0.2989,
2540
- "step": 325
2541
- },
2542
- {
2543
- "epoch": 9.055555555555555,
2544
- "grad_norm": 0.5435044169425964,
2545
- "learning_rate": 9.714285714285715e-06,
2546
- "loss": 0.2955,
2547
- "step": 326
2548
- },
2549
- {
2550
- "epoch": 9.083333333333334,
2551
- "grad_norm": 0.511762797832489,
2552
- "learning_rate": 9.42857142857143e-06,
2553
- "loss": 0.2986,
2554
- "step": 327
2555
- },
2556
- {
2557
- "epoch": 9.11111111111111,
2558
- "grad_norm": 0.6208844780921936,
2559
- "learning_rate": 9.142857142857144e-06,
2560
- "loss": 0.2932,
2561
- "step": 328
2562
- },
2563
- {
2564
- "epoch": 9.13888888888889,
2565
- "grad_norm": 0.5209355354309082,
2566
- "learning_rate": 8.857142857142857e-06,
2567
- "loss": 0.2932,
2568
- "step": 329
2569
- },
2570
- {
2571
- "epoch": 9.166666666666666,
2572
- "grad_norm": 0.5852081775665283,
2573
- "learning_rate": 8.571428571428573e-06,
2574
- "loss": 0.302,
2575
- "step": 330
2576
- },
2577
- {
2578
- "epoch": 9.166666666666666,
2579
- "eval_loss": 0.32703322172164917,
2580
- "eval_runtime": 0.1055,
2581
- "eval_samples_per_second": 435.944,
2582
- "eval_steps_per_second": 9.477,
2583
- "step": 330
2584
- },
2585
- {
2586
- "epoch": 9.194444444444445,
2587
- "grad_norm": 0.6613155603408813,
2588
- "learning_rate": 8.285714285714285e-06,
2589
- "loss": 0.2973,
2590
- "step": 331
2591
- },
2592
- {
2593
- "epoch": 9.222222222222221,
2594
- "grad_norm": 0.6458805203437805,
2595
- "learning_rate": 8.000000000000001e-06,
2596
- "loss": 0.296,
2597
- "step": 332
2598
- },
2599
- {
2600
- "epoch": 9.25,
2601
- "grad_norm": 0.5602886080741882,
2602
- "learning_rate": 7.714285714285714e-06,
2603
- "loss": 0.2998,
2604
- "step": 333
2605
- },
2606
- {
2607
- "epoch": 9.277777777777779,
2608
- "grad_norm": 0.5723817348480225,
2609
- "learning_rate": 7.428571428571429e-06,
2610
- "loss": 0.2898,
2611
- "step": 334
2612
- },
2613
- {
2614
- "epoch": 9.305555555555555,
2615
- "grad_norm": 0.6257355213165283,
2616
- "learning_rate": 7.142857142857143e-06,
2617
- "loss": 0.2935,
2618
- "step": 335
2619
- },
2620
- {
2621
- "epoch": 9.333333333333334,
2622
- "grad_norm": 0.6624913811683655,
2623
- "learning_rate": 6.857142857142858e-06,
2624
- "loss": 0.2909,
2625
- "step": 336
2626
- },
2627
- {
2628
- "epoch": 9.36111111111111,
2629
- "grad_norm": 0.5716632604598999,
2630
- "learning_rate": 6.5714285714285714e-06,
2631
- "loss": 0.3001,
2632
- "step": 337
2633
- },
2634
- {
2635
- "epoch": 9.38888888888889,
2636
- "grad_norm": 0.6996496319770813,
2637
- "learning_rate": 6.285714285714287e-06,
2638
- "loss": 0.2964,
2639
- "step": 338
2640
- },
2641
- {
2642
- "epoch": 9.416666666666666,
2643
- "grad_norm": 0.7235862612724304,
2644
- "learning_rate": 6e-06,
2645
- "loss": 0.2927,
2646
- "step": 339
2647
- },
2648
- {
2649
- "epoch": 9.444444444444445,
2650
- "grad_norm": 0.6455687284469604,
2651
- "learning_rate": 5.7142857142857145e-06,
2652
- "loss": 0.2956,
2653
- "step": 340
2654
- },
2655
- {
2656
- "epoch": 9.444444444444445,
2657
- "eval_loss": 0.32588478922843933,
2658
- "eval_runtime": 0.1057,
2659
- "eval_samples_per_second": 435.067,
2660
- "eval_steps_per_second": 9.458,
2661
- "step": 340
2662
- },
2663
- {
2664
- "epoch": 9.472222222222221,
2665
- "grad_norm": 0.5984405279159546,
2666
- "learning_rate": 5.428571428571429e-06,
2667
- "loss": 0.2959,
2668
- "step": 341
2669
- },
2670
- {
2671
- "epoch": 9.5,
2672
- "grad_norm": 0.6240008473396301,
2673
- "learning_rate": 5.142857142857143e-06,
2674
- "loss": 0.2936,
2675
- "step": 342
2676
- },
2677
- {
2678
- "epoch": 9.527777777777779,
2679
- "grad_norm": 0.6871291399002075,
2680
- "learning_rate": 4.857142857142858e-06,
2681
- "loss": 0.2987,
2682
- "step": 343
2683
- },
2684
- {
2685
- "epoch": 9.555555555555555,
2686
- "grad_norm": 0.6369628310203552,
2687
- "learning_rate": 4.571428571428572e-06,
2688
- "loss": 0.2941,
2689
- "step": 344
2690
- },
2691
- {
2692
- "epoch": 9.583333333333334,
2693
- "grad_norm": 0.6651211977005005,
2694
- "learning_rate": 4.285714285714286e-06,
2695
- "loss": 0.2959,
2696
- "step": 345
2697
- },
2698
- {
2699
- "epoch": 9.61111111111111,
2700
- "grad_norm": 0.7005758285522461,
2701
- "learning_rate": 4.000000000000001e-06,
2702
- "loss": 0.2991,
2703
- "step": 346
2704
- },
2705
- {
2706
- "epoch": 9.63888888888889,
2707
- "grad_norm": 0.5685088634490967,
2708
- "learning_rate": 3.7142857142857146e-06,
2709
- "loss": 0.2936,
2710
- "step": 347
2711
- },
2712
- {
2713
- "epoch": 9.666666666666666,
2714
- "grad_norm": 0.6322896480560303,
2715
- "learning_rate": 3.428571428571429e-06,
2716
- "loss": 0.2947,
2717
- "step": 348
2718
- },
2719
- {
2720
- "epoch": 9.694444444444445,
2721
- "grad_norm": 0.6149244904518127,
2722
- "learning_rate": 3.1428571428571433e-06,
2723
- "loss": 0.2951,
2724
- "step": 349
2725
- },
2726
- {
2727
- "epoch": 9.722222222222221,
2728
- "grad_norm": 0.685043215751648,
2729
- "learning_rate": 2.8571428571428573e-06,
2730
- "loss": 0.2975,
2731
- "step": 350
2732
- },
2733
- {
2734
- "epoch": 9.722222222222221,
2735
- "eval_loss": 0.3277616798877716,
2736
- "eval_runtime": 0.1059,
2737
- "eval_samples_per_second": 434.21,
2738
- "eval_steps_per_second": 9.439,
2739
- "step": 350
2740
- },
2741
- {
2742
- "epoch": 9.75,
2743
- "grad_norm": 0.8039355874061584,
2744
- "learning_rate": 2.5714285714285716e-06,
2745
- "loss": 0.2951,
2746
- "step": 351
2747
- },
2748
- {
2749
- "epoch": 9.777777777777779,
2750
- "grad_norm": 0.6388571262359619,
2751
- "learning_rate": 2.285714285714286e-06,
2752
- "loss": 0.2928,
2753
- "step": 352
2754
- },
2755
- {
2756
- "epoch": 9.805555555555555,
2757
- "grad_norm": 0.5934285521507263,
2758
- "learning_rate": 2.0000000000000003e-06,
2759
- "loss": 0.2934,
2760
- "step": 353
2761
- },
2762
- {
2763
- "epoch": 9.833333333333334,
2764
- "grad_norm": 0.5320731401443481,
2765
- "learning_rate": 1.7142857142857145e-06,
2766
- "loss": 0.2952,
2767
- "step": 354
2768
- },
2769
- {
2770
- "epoch": 9.86111111111111,
2771
- "grad_norm": 0.6137614846229553,
2772
- "learning_rate": 1.4285714285714286e-06,
2773
- "loss": 0.2932,
2774
- "step": 355
2775
- },
2776
- {
2777
- "epoch": 9.88888888888889,
2778
- "grad_norm": 0.8172494769096375,
2779
- "learning_rate": 1.142857142857143e-06,
2780
- "loss": 0.2944,
2781
- "step": 356
2782
- },
2783
- {
2784
- "epoch": 9.916666666666666,
2785
- "grad_norm": 0.6931514739990234,
2786
- "learning_rate": 8.571428571428572e-07,
2787
- "loss": 0.2917,
2788
- "step": 357
2789
- },
2790
- {
2791
- "epoch": 9.944444444444445,
2792
- "grad_norm": 0.8408763408660889,
2793
- "learning_rate": 5.714285714285715e-07,
2794
- "loss": 0.2952,
2795
- "step": 358
2796
- },
2797
- {
2798
- "epoch": 9.972222222222221,
2799
- "grad_norm": 0.6687312126159668,
2800
- "learning_rate": 2.8571428571428575e-07,
2801
- "loss": 0.2948,
2802
- "step": 359
2803
- },
2804
- {
2805
- "epoch": 10.0,
2806
- "grad_norm": 0.8545575737953186,
2807
- "learning_rate": 0.0,
2808
- "loss": 0.2944,
2809
- "step": 360
2810
- },
2811
- {
2812
- "epoch": 10.0,
2813
- "eval_loss": 0.3285914361476898,
2814
- "eval_runtime": 0.1058,
2815
- "eval_samples_per_second": 434.729,
2816
- "eval_steps_per_second": 9.451,
2817
- "step": 360
2818
- }
2819
- ],
2820
- "logging_steps": 1,
2821
- "max_steps": 360,
2822
- "num_input_tokens_seen": 0,
2823
- "num_train_epochs": 10,
2824
- "save_steps": 5000,
2825
- "stateful_callbacks": {
2826
- "TrainerControl": {
2827
- "args": {
2828
- "should_epoch_stop": false,
2829
- "should_evaluate": false,
2830
- "should_log": false,
2831
- "should_save": true,
2832
- "should_training_stop": true
2833
- },
2834
- "attributes": {}
2835
- }
2836
- },
2837
- "total_flos": 3831354556001280.0,
2838
- "train_batch_size": 128,
2839
- "trial_name": null,
2840
- "trial_params": null
2841
- }
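
The deleted trainer_state.json ends here. Its logged values are internally consistent with the run's recorded hyperparameters (learning_rate=1e-4, warmup_steps=10, 360 optimizer steps over 10 epochs; see the config captured in wandb/debug.log below). A minimal sketch, assuming the stock linear-with-warmup schedule that Transformers uses by default, reproduces the logged learning rates and epoch fractions:

```python
# Minimal sketch (not from this repo): reproduce the logged schedule, assuming
# the standard linear-with-warmup LR lambda (get_linear_schedule_with_warmup)
# and the recorded settings learning_rate=1e-4, warmup_steps=10, max_steps=360.
BASE_LR, WARMUP, MAX_STEPS = 1e-4, 10, 360

def linear_lr(step: int) -> float:
    """LR after `step` optimizer steps: linear warmup, then linear decay to 0."""
    if step < WARMUP:
        return BASE_LR * (step / WARMUP)
    return BASE_LR * ((MAX_STEPS - step) / (MAX_STEPS - WARMUP))

# Spot-checks against the entries above:
assert abs(linear_lr(161) - 5.6857142857142865e-05) < 1e-15  # step 161
assert linear_lr(360) == 0.0                                 # final step
assert abs(161 / 36 - 4.472222222222222) < 1e-12             # 36 steps/epoch
# Eval throughput implies ~46 eval samples: 0.1054 s * 436.373 samples/s ≈ 46.
```

And with "save_steps": 5000 exceeding the 360 total steps, no periodic checkpoint was ever triggered; checkpoint-360 is the single end-of-training save ("should_save": true, "should_training_stop": true above).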
 
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/training_args.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:e928b416737e3e579d0a564d29d42c7345239d5cd5670a62b4d71260a823a6d5
3
- size 6008
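
Like the other binaries in this commit, training_args.bin is stored as a Git LFS pointer: a three-line text stub giving the spec version, the sha256 of the real object (oid), and its byte size. A minimal sketch of verifying a downloaded copy against the pointer above (the path is a hypothetical placeholder, not a file in this diff):

```python
# Sketch: verify a downloaded artifact against the Git LFS pointer above.
import hashlib

EXPECTED_OID = "e928b416737e3e579d0a564d29d42c7345239d5cd5670a62b4d71260a823a6d5"
EXPECTED_SIZE = 6008  # bytes, from the pointer's "size" line

def verify_lfs_object(path: str) -> bool:
    digest, size = hashlib.sha256(), 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == EXPECTED_OID and size == EXPECTED_SIZE

# verify_lfs_object("checkpoint-360/training_args.bin")  # True if intact
```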
 
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/checkpoint-360/vocab.json DELETED
The diff for this file is too large to render. See raw diff
 
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/config.json DELETED
@@ -1,42 +0,0 @@
1
- {
2
- "_name_or_path": "HuggingFaceTB/SmolLM2-135M-Instruct",
3
- "action_dim": 26,
4
- "architectures": [
5
- "LowdimLlamaForCausalLM"
6
- ],
7
- "attention_bias": false,
8
- "attention_dropout": 0.0,
9
- "bos_token_id": 1,
10
- "eos_token_id": 2,
11
- "head_dim": 64,
12
- "hidden_act": "silu",
13
- "hidden_size": 576,
14
- "initializer_range": 0.041666666666666664,
15
- "intermediate_size": 1536,
16
- "is_llama_config": true,
17
- "max_position_embeddings": 8192,
18
- "mlp_bias": false,
19
- "model_type": "llama_lowdim",
20
- "num_attention_heads": 9,
21
- "num_hidden_layers": 30,
22
- "num_key_value_heads": 3,
23
- "obs_dim": 46,
24
- "pad_token_id": 2,
25
- "pretraining_tp": 1,
26
- "rms_norm_eps": 1e-05,
27
- "rope_interleaved": false,
28
- "rope_scaling": null,
29
- "rope_theta": 100000,
30
- "tie_word_embeddings": true,
31
- "torch_dtype": "float32",
32
- "transformers.js_config": {
33
- "kv_cache_dtype": {
34
- "fp16": "float16",
35
- "q4f16": "float16"
36
- }
37
- },
38
- "transformers_version": "4.47.1",
39
- "use_cache": true,
40
- "use_joint_mlp_projector": true,
41
- "vocab_size": 49152
42
- }
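
The config.json removed here is not a stock SmolLM2 config: model_type is the custom "llama_lowdim", and it carries policy-specific fields (obs_dim=46, action_dim=26, use_joint_mlp_projector). AutoConfig cannot resolve that model_type without the repo's LowdimLlamaConfig class registered, so a plain-JSON inspection is the safest sketch (assumes a local copy of the file shown above):

```python
# Sketch: inspect the deleted config.json without transformers, since the
# custom "llama_lowdim" model_type would not resolve via AutoConfig alone.
import json

with open("config.json") as f:  # hypothetical local copy of the file above
    cfg = json.load(f)

# Internal consistency of the attention geometry:
assert cfg["hidden_size"] // cfg["num_attention_heads"] == cfg["head_dim"]  # 576 / 9 = 64
assert cfg["num_attention_heads"] % cfg["num_key_value_heads"] == 0         # GQA: 3 query heads per KV head
print(cfg["obs_dim"], "->", cfg["action_dim"])  # low-dim observation/action widths: 46 -> 26
```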
 
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/generation_config.json DELETED
@@ -1,7 +0,0 @@
1
- {
2
- "_from_model_config": true,
3
- "bos_token_id": 1,
4
- "eos_token_id": 2,
5
- "pad_token_id": 2,
6
- "transformers_version": "4.47.1"
7
- }
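
One detail worth noting in this generation config: pad_token_id equals eos_token_id (2), the usual fallback for tokenizers that ship no dedicated padding token. A one-line check on a local copy (path hypothetical):

```python
import json

gc = json.load(open("generation_config.json"))  # hypothetical local copy
assert gc["pad_token_id"] == gc["eos_token_id"] == 2  # padding falls back to EOS
```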
 
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/model.safetensors DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:532f9325616dbe87be8846d0b03f80c41a2de5d5a05c7397eb15b3484410ede9
3
- size 539588496
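
The pointer's size field doubles as a quick sanity check on the weights: config.json records torch_dtype float32, so dividing the byte count by 4 should land near the ~135M parameters the model name advertises (the low-dim variant adds a small projector on top of the base SmolLM2-135M). A back-of-envelope sketch, ignoring the small safetensors header:

```python
# Back-of-envelope check: float32 weights -> 4 bytes per parameter.
size_bytes = 539_588_496  # from the LFS pointer above
print(f"~{size_bytes / 4 / 1e6:.1f}M parameters")  # ~134.9M, consistent with SmolLM2-135M
```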
 
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/normalizer.pt DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:1b3226fbcd5e7e17c04b2cf20350d5f793927d16481bc217684ce129a4aa170a
3
- size 5666
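
normalizer.pt is a small (5.7 kB) torch-serialized object; per the run config in wandb/debug.log below, it holds the dataset normalizer fitted for the low-dim observations and actions. A sketch for peeking at it (assumes PyTorch and a local copy; the stored type is defined by the repo's own classes, so loading may require that code to be importable):

```python
import torch

# Hypothetical local copy; weights_only=False because this is a pickled
# object, not a plain tensor state dict (PyTorch 2.2 default behavior).
state = torch.load("normalizer.pt", map_location="cpu", weights_only=False)
print(type(state))
```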
 
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/train.log DELETED
@@ -1,11 +0,0 @@
1
- [2026-03-27 16:20:52,880][numexpr.utils][INFO] - Note: NumExpr detected 24 cores but "NUMEXPR_MAX_THREADS" not set, so enforcing safe limit of 8.
2
- [2026-03-27 16:20:52,880][numexpr.utils][INFO] - NumExpr defaulting to 8 threads.
3
- [2026-03-27 16:20:55,182][datasets][INFO] - PyTorch version 2.2.2 available.
4
- [2026-03-27 16:20:55,182][datasets][INFO] - TensorFlow version 2.15.1 available.
5
- [2026-03-27 16:20:55,182][datasets][INFO] - JAX version 0.4.30 available.
6
- [2026-03-27 16:20:59,889][datasets.arrow_dataset][WARNING] - Setting TOKENIZERS_PARALLELISM=false for forked processes.
7
- [2026-03-27 16:21:01,023][datasets.arrow_dataset][WARNING] - Setting TOKENIZERS_PARALLELISM=false for forked processes.
8
- [2026-03-27 16:21:01,919][root][INFO] - gcc -pthread -B /home/chyang/miniconda3/envs/llm-bc/compiler_compat -Wno-unused-result -Wsign-compare -DNDEBUG -O2 -Wall -fPIC -O2 -isystem /home/chyang/miniconda3/envs/llm-bc/include -I/home/chyang/miniconda3/envs/llm-bc/include -fPIC -O2 -isystem /home/chyang/miniconda3/envs/llm-bc/include -fPIC -c /tmp/tmp7t0nrkuh/test.c -o /tmp/tmp7t0nrkuh/test.o
9
- [2026-03-27 16:21:01,969][root][INFO] - gcc -pthread -B /home/chyang/miniconda3/envs/llm-bc/compiler_compat /tmp/tmp7t0nrkuh/test.o -laio -o /tmp/tmp7t0nrkuh/a.out
10
- [2026-03-27 16:21:02,963][root][INFO] - gcc -pthread -B /home/chyang/miniconda3/envs/llm-bc/compiler_compat -Wno-unused-result -Wsign-compare -DNDEBUG -O2 -Wall -fPIC -O2 -isystem /home/chyang/miniconda3/envs/llm-bc/include -I/home/chyang/miniconda3/envs/llm-bc/include -fPIC -O2 -isystem /home/chyang/miniconda3/envs/llm-bc/include -fPIC -c /tmp/tmps_s5vyh9/test.c -o /tmp/tmps_s5vyh9/test.o
11
- [2026-03-27 16:21:03,018][root][INFO] - gcc -pthread -B /home/chyang/miniconda3/envs/llm-bc/compiler_compat /tmp/tmps_s5vyh9/test.o -L/usr/local/cuda -L/usr/local/cuda/lib64 -lcufile -o /tmp/tmps_s5vyh9/a.out
 
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/wandb/debug-internal.log DELETED
@@ -1,17 +0,0 @@
1
- {"time":"2026-03-27T16:20:56.144221699+08:00","level":"INFO","msg":"using version","core version":"0.18.6"}
2
- {"time":"2026-03-27T16:20:56.144248456+08:00","level":"INFO","msg":"created symlink","path":"/tmp2/chyang/workspace/LLM-BC/data/outputs/2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/wandb/run-20260327_162056-nhmfpc2t/logs/debug-core.log"}
3
- {"time":"2026-03-27T16:20:56.25122394+08:00","level":"INFO","msg":"created new stream","id":"nhmfpc2t"}
4
- {"time":"2026-03-27T16:20:56.25128922+08:00","level":"INFO","msg":"stream: started","id":"nhmfpc2t"}
5
- {"time":"2026-03-27T16:20:56.251326647+08:00","level":"INFO","msg":"sender: started","stream_id":"nhmfpc2t"}
6
- {"time":"2026-03-27T16:20:56.251319685+08:00","level":"INFO","msg":"writer: Do: started","stream_id":{"value":"nhmfpc2t"}}
7
- {"time":"2026-03-27T16:20:56.251303072+08:00","level":"INFO","msg":"handler: started","stream_id":{"value":"nhmfpc2t"}}
8
- {"time":"2026-03-27T16:20:57.417356613+08:00","level":"INFO","msg":"Starting system monitor"}
9
- {"time":"2026-03-27T16:25:43.814415705+08:00","level":"INFO","msg":"Stopping system monitor"}
10
- {"time":"2026-03-27T16:25:43.815067495+08:00","level":"INFO","msg":"Stopped system monitor"}
11
- {"time":"2026-03-27T16:25:44.815119947+08:00","level":"INFO","msg":"handler: operation stats","stats":{"operations":[{"desc":"uploading wandb-summary.json","runtime_seconds":0.136938035,"progress":"495B/495B"},{"desc":"saving job artifact","runtime_seconds":0.037243753}],"total_operations":2}}
12
- {"time":"2026-03-27T16:25:49.240469481+08:00","level":"INFO","msg":"fileTransfer: Close: file transfer manager closed"}
13
- {"time":"2026-03-27T16:25:50.901076411+08:00","level":"INFO","msg":"stream: closing","id":"nhmfpc2t"}
14
- {"time":"2026-03-27T16:25:50.901107424+08:00","level":"INFO","msg":"handler: closed","stream_id":{"value":"nhmfpc2t"}}
15
- {"time":"2026-03-27T16:25:50.901135119+08:00","level":"INFO","msg":"writer: Close: closed","stream_id":{"value":"nhmfpc2t"}}
16
- {"time":"2026-03-27T16:25:50.901152717+08:00","level":"INFO","msg":"sender: closed","stream_id":"nhmfpc2t"}
17
- {"time":"2026-03-27T16:25:50.90122351+08:00","level":"INFO","msg":"stream: closed","id":"nhmfpc2t"}
 
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/wandb/debug.log DELETED
@@ -1,35 +0,0 @@
1
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_setup.py:_flush():79] Current SDK version is 0.18.6
2
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_setup.py:_flush():79] Configure stats pid to 2703097
3
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_setup.py:_flush():79] Loading settings from /home/chyang/.config/wandb/settings
4
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_setup.py:_flush():79] Loading settings from /tmp2/chyang/workspace/LLM-BC/wandb/settings
5
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_setup.py:_flush():79] Loading settings from environment variables: {}
6
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_setup.py:_flush():79] Applying setup settings: {'mode': 'online', '_disable_service': None}
7
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_setup.py:_flush():79] Inferring run settings from compute environment: {'program_relpath': 'train.py', 'program_abspath': '/tmp2/chyang/workspace/LLM-BC/train.py', 'program': '/tmp2/chyang/workspace/LLM-BC/./train.py'}
8
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_setup.py:_flush():79] Applying login settings: {}
9
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_init.py:_log_setup():533] Logging user logs to /tmp2/chyang/workspace/LLM-BC/data/outputs/2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/wandb/run-20260327_162056-nhmfpc2t/logs/debug.log
10
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_init.py:_log_setup():534] Logging internal logs to /tmp2/chyang/workspace/LLM-BC/data/outputs/2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/wandb/run-20260327_162056-nhmfpc2t/logs/debug-internal.log
11
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_init.py:init():619] calling init triggers
12
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_init.py:init():626] wandb.init called with sweep_config: {}
13
- config: {'name': 'train_llm_lowdim', '_target_': 'llmbc.workspace.train_llm_workspace.TrainLLMWorkspace', 'obs_dim': 46, 'action_dim': 26, 'horizon': 1, 'n_obs_steps': 1, 'n_action_steps': 1, 'task_name': 'adroit-hand-hammer-v1', 'exp_name': 'train llm', 'model_name': 'HuggingFaceTB/SmolLM2-135M-Instruct', 'use_quantization': False, 'lora_config': {'r': 32, 'lora_alpha': 64, 'lora_dropout': 0.05, 'bias': 'none', 'task_type': 'CAUSAL_LM'}, 'dataset': {'test_data_ratio': 0.01}, 'debug': False, 'training': {'seed': 42, 'per_device_train_batch_size': 128, 'per_device_eval_batch_size': 128, 'gradient_accumulation_steps': 1, 'optim': 'paged_adamw_32bit', 'num_train_epochs': 10, 'eval_strategy': 'steps', 'logging_steps': 1, 'warmup_steps': 10, 'logging_strategy': 'steps', 'learning_rate': 0.0001, 'fp16': False, 'bf16': True, 'tf32': True, 'group_by_length': True, 'report_to': 'wandb', 'save_steps': 5000, 'eval_steps': 10, 'use_joint_mlp_projector': True, 'joint_obs_action_mlp_lr': 5e-05}, 'trainer': {'obs_dim': 46, 'action_dim': 26, 'use_joint_mlp_projector': True, 'max_seq_length': 100, 'dataset_text_field': 'text', 'packing': False}, 'logging': {'project': 'llm_module_finetuning', 'resume': True, 'mode': 'online', 'name': '2026.03.27-16.20.52_train_llm_lowdim_adroit-hand-hammer-v1', 'tags': ['train_llm_lowdim', 'adroit-hand-hammer-v1', 'train llm'], 'id': None, 'group': None}, 'multi_run': {'run_dir': 'data/outputs/2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1', 'wandb_name_base': '2026.03.27-16.20.52_train_llm_lowdim_adroit-hand-hammer-v1'}, 'task': {'name': 'adroit-hand-hammer-v1', 'obs_dim': 46, 'action_dim': 26, 'env_runner': {'_target_': 'llmbc.env_runner.adroit_lowdim_runner.AdroitHandLowdimRunner', 'env_name': 'llf-adroit-adroit-hand-hammer-v1', 'n_train': 10, 'n_test': 50, 'n_envs': 10, 'max_steps': 150, 'n_obs_steps': 1, 'n_action_steps': 1, 'instruction_type': 'b', 'feedback_type': ['hp', 'hn', 'fp'], 'visual': False, 'discount': 0.99}, 'dataset': {'_target_': 'llmbc.dataset.adroit_lowdim_dataset.AdroitHandLowdimDataset', 'data_path': 'datasets/adroit-hand-hammer-v1-general.pt', 'data_path2': 'datasets/adroit-hand-hammer-v1.pt', 'horizon': 1, 'pad_before': 0, 'pad_after': 0, 'obs_eef_target': True, 'use_manual_normalizer': False, 'val_ratio': 0.05, 'dummy_normalizer': False}, 'instructor': {'_target_': 'llmbc.translator.instructor.adroit_instructor.adroit_hand_hammer_v1_instructor.AdroitHandHammerV1Instructor'}}, 'llm': {'name': 'HuggingFaceTB/SmolLM2-135M-Instruct', 'model_name': 'SmolLM2-135M-Instruct', 'config_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig', 'causal_lm_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM', 'use_quantization': False, 'use_joint_mlp_projector': True, 'llm_mode': 'mlp-finetuned', 'finetune_mode': 'orig', 'checkpoint': 'data/outputs/2026.03.27/14.38.20_train_mlp_projector_adroit-hand-hammer-v1/checkpoints/latest.ckpt', 'max_length': 100, 'lora_config': {'r': 32, 'lora_alpha': 64, 'lora_dropout': 0.05, 'bias': 'none', 'task_type': 'CAUSAL_LM'}, 'prompter': {'_target_': 'llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter', 'use_joint_mlp_projector': True}, 'hydra': {'job': {'override_dirname': 'HuggingFaceTB/SmolLM2-135M-Instruct'}, 'run': {'dir': 'data/outputs/2026.03.27/16.20.52_HuggingFaceTB/SmolLM2-135M-Instruct'}}}}
14
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_init.py:init():669] starting backend
15
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_init.py:init():673] sending inform_init request
16
- 2026-03-27 16:20:56,140 INFO MainThread:2703097 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
17
- 2026-03-27 16:20:56,140 INFO MainThread:2703097 [wandb_init.py:init():686] backend started and connected
18
- 2026-03-27 16:20:56,144 INFO MainThread:2703097 [wandb_init.py:init():781] updated telemetry
19
- 2026-03-27 16:20:56,170 INFO MainThread:2703097 [wandb_init.py:init():814] communicating run to backend with 90.0 second timeout
20
- 2026-03-27 16:20:57,414 INFO MainThread:2703097 [wandb_init.py:init():867] starting run threads in backend
21
- 2026-03-27 16:20:57,521 INFO MainThread:2703097 [wandb_run.py:_console_start():2451] atexit reg
22
- 2026-03-27 16:20:57,522 INFO MainThread:2703097 [wandb_run.py:_redirect():2299] redirect: wrap_raw
23
- 2026-03-27 16:20:57,522 INFO MainThread:2703097 [wandb_run.py:_redirect():2364] Wrapping output streams.
24
- 2026-03-27 16:20:57,522 INFO MainThread:2703097 [wandb_run.py:_redirect():2389] Redirects installed.
25
- 2026-03-27 16:20:57,524 INFO MainThread:2703097 [wandb_init.py:init():911] run started, returning control to user process
26
- 2026-03-27 16:21:04,359 INFO MainThread:2703097 [wandb_run.py:_config_callback():1389] config_cb None None {'obs_dim': 46, 'action_dim': 26, 'use_joint_mlp_projector': True, 'vocab_size': 49152, 'max_position_embeddings': 8192, 'hidden_size': 576, 'intermediate_size': 1536, 'num_hidden_layers': 30, 'num_attention_heads': 9, 'num_key_value_heads': 3, 'hidden_act': 'silu', 'initializer_range': 0.041666666666666664, 'rms_norm_eps': 1e-05, 'pretraining_tp': 1, 'use_cache': False, 'rope_theta': 100000, 'rope_scaling': None, 'attention_bias': False, 'attention_dropout': 0.0, 'mlp_bias': False, 'head_dim': 64, 'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'bfloat16', 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['LlamaForCausalLM'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': 2, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'HuggingFaceTB/SmolLM2-135M-Instruct', '_attn_implementation_autoset': True, 'transformers_version': '4.47.1', 'is_llama_config': True, 'model_type': 'llama_lowdim', 'rope_interleaved': False, 'transformers.js_config': {'kv_cache_dtype': {'q4f16': 'float16', 'fp16': 'float16'}}, 'output_dir': '/tmp2/chyang/workspace/LLM-BC/data/outputs/2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'eval_strategy': 'steps', 'prediction_loss_only': False, 'per_device_train_batch_size': 128, 'per_device_eval_batch_size': 128, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'torch_empty_cache_steps': None, 'learning_rate': 0.0001, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 10, 'max_steps': -1, 'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 10, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/tmp2/chyang/workspace/LLM-BC/data/outputs/2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/runs/Mar27_16-21-01_A6000-2', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 1, 'logging_nan_inf_filter': True, 
'save_strategy': 'steps', 'save_steps': 5000, 'save_total_limit': None, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'restore_callback_states_from_checkpoint': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': True, 'fp16': False, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': True, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': 10, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/tmp2/chyang/workspace/LLM-BC/data/outputs/2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'paged_adamw_32bit', 'optim_args': None, 'adafactor': False, 'group_by_length': True, 'length_column_name': 'length', 'report_to': ['wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': False, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': None, 'hub_always_push': False, 'gradient_checkpointing': False, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'include_for_metrics': [], 'eval_do_concat_batches': True, 'fp16_backend': 'auto', 'evaluation_strategy': None, 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None, 'optim_target_modules': None, 'batch_eval_metrics': False, 'eval_on_start': False, 'use_liger_kernel': False, 'eval_use_gather_object': False, 'average_tokens_across_devices': False, 'dataset_text_field': 'text', 'packing': False, 'max_seq_length': 100, 'dataset_num_proc': None, 'dataset_batch_size': 1000, 'model_init_kwargs': None, 'dataset_kwargs': {}, 'eval_packing': None, 'num_of_sequences': 1024, 'chars_per_token': '<CHARS_PER_TOKEN>', 'use_liger': False, 'joint_obs_action_mlp_lr': 5e-05, 'obs_mlp_lr': None, 'action_mlp_lr': None}
27
- 2026-03-27 16:21:04,361 INFO MainThread:2703097 [wandb_config.py:__setitem__():154] config set model/num_parameters = 134889408 - <bound method Run._config_callback of <wandb.sdk.wandb_run.Run object at 0x7e6758745670>>
28
- 2026-03-27 16:21:04,361 INFO MainThread:2703097 [wandb_run.py:_config_callback():1389] config_cb model/num_parameters 134889408 None
29
- 2026-03-27 16:25:43,813 INFO MainThread:2703097 [wandb_run.py:_finish():2146] finishing run chyang25-national-taiwan-university/llm_module_finetuning/nhmfpc2t
30
- 2026-03-27 16:25:43,813 INFO MainThread:2703097 [wandb_run.py:_atexit_cleanup():2414] got exitcode: 0
31
- 2026-03-27 16:25:43,813 INFO MainThread:2703097 [wandb_run.py:_restore():2396] restore
32
- 2026-03-27 16:25:43,814 INFO MainThread:2703097 [wandb_run.py:_restore():2402] restore done
33
- 2026-03-27 16:25:50,896 INFO MainThread:2703097 [wandb_run.py:_footer_history_summary_info():3963] rendering history
34
- 2026-03-27 16:25:50,896 INFO MainThread:2703097 [wandb_run.py:_footer_history_summary_info():3995] rendering summary
35
- 2026-03-27 16:25:50,900 INFO MainThread:2703097 [wandb_run.py:_footer_sync_info():3922] logging synced files
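The debug log above shows run nhmfpc2t starting, training for about five minutes, and finishing with exitcode 0. Purely as an illustration (not this repository's code), an equivalent wandb.init call assembled from the `logging` block of the config dump:

import wandb

run = wandb.init(
    project="llm_module_finetuning",
    name="2026.03.27-16.20.52_train_llm_lowdim_adroit-hand-hammer-v1",
    tags=["train_llm_lowdim", "adroit-hand-hammer-v1", "train llm"],
    mode="online",
    resume=True,
)
# ... training runs here; the HF Trainer logs via report_to="wandb" ...
run.finish()  # corresponds to the _finish() / exitcode 0 lines above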
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/wandb/run-20260327_162056-nhmfpc2t/files/config.yaml DELETED
@@ -1,711 +0,0 @@
1
- _attn_implementation_autoset:
2
- value: true
3
- _name_or_path:
4
- value: HuggingFaceTB/SmolLM2-135M-Instruct
5
- _target_:
6
- value: llmbc.workspace.train_llm_workspace.TrainLLMWorkspace
7
- _wandb:
8
- value:
9
- cli_version: 0.18.6
10
- m:
11
- - "1": train/loss
12
- "5": 2
13
- "6":
14
- - 1
15
- - 3
16
- "7": []
17
- - "1": train/global_step
18
- "6":
19
- - 3
20
- "7": []
21
- - "1": train/learning_rate
22
- "5": 2
23
- "6":
24
- - 1
25
- - 3
26
- "7": []
27
- - "1": train/epoch
28
- "5": 2
29
- "6":
30
- - 1
31
- - 3
32
- "7": []
33
- - "1": train/grad_norm
34
- "5": 2
35
- "6":
36
- - 1
37
- - 3
38
- "7": []
39
- - "1": eval/loss
40
- "5": 2
41
- "6":
42
- - 1
43
- - 3
44
- "7": []
45
- - "1": eval/runtime
46
- "5": 2
47
- "6":
48
- - 1
49
- - 3
50
- "7": []
51
- - "1": eval/samples_per_second
52
- "5": 2
53
- "6":
54
- - 1
55
- - 3
56
- "7": []
57
- - "1": eval/steps_per_second
58
- "5": 2
59
- "6":
60
- - 1
61
- - 3
62
- "7": []
63
- python_version: 3.9.20
64
- t:
65
- "1":
66
- - 1
67
- - 2
68
- - 3
69
- - 5
70
- - 11
71
- - 12
72
- - 41
73
- - 49
74
- - 50
75
- - 51
76
- - 53
77
- - 55
78
- - 71
79
- - 84
80
- - 98
81
- "2":
82
- - 1
83
- - 2
84
- - 3
85
- - 5
86
- - 11
87
- - 12
88
- - 41
89
- - 49
90
- - 50
91
- - 51
92
- - 53
93
- - 55
94
- - 71
95
- - 84
96
- - 98
97
- "3":
98
- - 2
99
- - 7
100
- - 13
101
- - 15
102
- - 16
103
- - 19
104
- - 23
105
- - 55
106
- - 62
107
- - 66
108
- "4": 3.9.20
109
- "5": 0.18.6
110
- "6": 4.47.1
111
- "8":
112
- - 5
113
- "9":
114
- "1": transformers_trainer
115
- "12": 0.18.6
116
- "13": linux-x86_64
117
- accelerator_config:
118
- value:
119
- dispatch_batches: null
120
- even_batches: true
121
- gradient_accumulation_kwargs: null
122
- non_blocking: false
123
- split_batches: false
124
- use_seedable_sampler: true
125
- action_dim:
126
- value: 26
127
- action_mlp_lr:
128
- value: null
129
- adafactor:
130
- value: false
131
- adam_beta1:
132
- value: 0.9
133
- adam_beta2:
134
- value: 0.999
135
- adam_epsilon:
136
- value: 1e-08
137
- add_cross_attention:
138
- value: false
139
- architectures:
140
- value:
141
- - LlamaForCausalLM
142
- attention_bias:
143
- value: false
144
- attention_dropout:
145
- value: 0
146
- auto_find_batch_size:
147
- value: false
148
- average_tokens_across_devices:
149
- value: false
150
- bad_words_ids:
151
- value: null
152
- batch_eval_metrics:
153
- value: false
154
- begin_suppress_tokens:
155
- value: null
156
- bf16:
157
- value: true
158
- bf16_full_eval:
159
- value: false
160
- bos_token_id:
161
- value: 1
162
- chars_per_token:
163
- value: <CHARS_PER_TOKEN>
164
- chunk_size_feed_forward:
165
- value: 0
166
- cross_attention_hidden_size:
167
- value: null
168
- data_seed:
169
- value: null
170
- dataloader_drop_last:
171
- value: false
172
- dataloader_num_workers:
173
- value: 0
174
- dataloader_persistent_workers:
175
- value: false
176
- dataloader_pin_memory:
177
- value: true
178
- dataloader_prefetch_factor:
179
- value: null
180
- dataset:
181
- value:
182
- test_data_ratio: 0.01
183
- dataset_batch_size:
184
- value: 1000
185
- dataset_num_proc:
186
- value: null
187
- dataset_text_field:
188
- value: text
189
- ddp_backend:
190
- value: null
191
- ddp_broadcast_buffers:
192
- value: null
193
- ddp_bucket_cap_mb:
194
- value: null
195
- ddp_find_unused_parameters:
196
- value: null
197
- ddp_timeout:
198
- value: 1800
199
- debug:
200
- value: []
201
- decoder_start_token_id:
202
- value: null
203
- deepspeed:
204
- value: null
205
- disable_tqdm:
206
- value: false
207
- dispatch_batches:
208
- value: null
209
- diversity_penalty:
210
- value: 0
211
- do_eval:
212
- value: true
213
- do_predict:
214
- value: false
215
- do_sample:
216
- value: false
217
- do_train:
218
- value: false
219
- early_stopping:
220
- value: false
221
- encoder_no_repeat_ngram_size:
222
- value: 0
223
- eos_token_id:
224
- value: 2
225
- eval_accumulation_steps:
226
- value: null
227
- eval_delay:
228
- value: 0
229
- eval_do_concat_batches:
230
- value: true
231
- eval_on_start:
232
- value: false
233
- eval_packing:
234
- value: null
235
- eval_steps:
236
- value: 10
237
- eval_strategy:
238
- value: steps
239
- eval_use_gather_object:
240
- value: false
241
- evaluation_strategy:
242
- value: null
243
- exp_name:
244
- value: train llm
245
- exponential_decay_length_penalty:
246
- value: null
247
- finetuning_task:
248
- value: null
249
- forced_bos_token_id:
250
- value: null
251
- forced_eos_token_id:
252
- value: null
253
- fp16:
254
- value: false
255
- fp16_backend:
256
- value: auto
257
- fp16_full_eval:
258
- value: false
259
- fp16_opt_level:
260
- value: O1
261
- fsdp:
262
- value: []
263
- fsdp_config:
264
- value:
265
- min_num_params: 0
266
- xla: false
267
- xla_fsdp_grad_ckpt: false
268
- xla_fsdp_v2: false
269
- fsdp_min_num_params:
270
- value: 0
271
- fsdp_transformer_layer_cls_to_wrap:
272
- value: null
273
- full_determinism:
274
- value: false
275
- gradient_accumulation_steps:
276
- value: 1
277
- gradient_checkpointing:
278
- value: false
279
- gradient_checkpointing_kwargs:
280
- value: null
281
- greater_is_better:
282
- value: null
283
- group_by_length:
284
- value: true
285
- half_precision_backend:
286
- value: auto
287
- head_dim:
288
- value: 64
289
- hidden_act:
290
- value: silu
291
- hidden_size:
292
- value: 576
293
- horizon:
294
- value: 1
295
- hub_always_push:
296
- value: false
297
- hub_model_id:
298
- value: null
299
- hub_private_repo:
300
- value: null
301
- hub_strategy:
302
- value: every_save
303
- hub_token:
304
- value: <HUB_TOKEN>
305
- id2label:
306
- value:
307
- "0": LABEL_0
308
- "1": LABEL_1
309
- ignore_data_skip:
310
- value: false
311
- include_for_metrics:
312
- value: []
313
- include_inputs_for_metrics:
314
- value: false
315
- include_num_input_tokens_seen:
316
- value: false
317
- include_tokens_per_second:
318
- value: false
319
- initializer_range:
320
- value: 0.041666666666666664
321
- intermediate_size:
322
- value: 1536
323
- is_decoder:
324
- value: false
325
- is_encoder_decoder:
326
- value: false
327
- is_llama_config:
328
- value: true
329
- jit_mode_eval:
330
- value: false
331
- joint_obs_action_mlp_lr:
332
- value: 5e-05
333
- label_names:
334
- value: null
335
- label_smoothing_factor:
336
- value: 0
337
- label2id:
338
- value:
339
- LABEL_0: 0
340
- LABEL_1: 1
341
- learning_rate:
342
- value: 0.0001
343
- length_column_name:
344
- value: length
345
- length_penalty:
346
- value: 1
347
- llm:
348
- value:
349
- causal_lm_target: llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM
350
- checkpoint: data/outputs/2026.03.27/14.38.20_train_mlp_projector_adroit-hand-hammer-v1/checkpoints/latest.ckpt
351
- config_target: llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig
352
- finetune_mode: orig
353
- hydra:
354
- job:
355
- override_dirname: HuggingFaceTB/SmolLM2-135M-Instruct
356
- run:
357
- dir: data/outputs/2026.03.27/16.20.52_HuggingFaceTB/SmolLM2-135M-Instruct
358
- llm_mode: mlp-finetuned
359
- lora_config:
360
- bias: none
361
- lora_alpha: 64
362
- lora_dropout: 0.05
363
- r: 32
364
- task_type: CAUSAL_LM
365
- max_length: 100
366
- model_name: SmolLM2-135M-Instruct
367
- name: HuggingFaceTB/SmolLM2-135M-Instruct
368
- prompter:
369
- _target_: llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter
370
- use_joint_mlp_projector: true
371
- use_joint_mlp_projector: true
372
- use_quantization: false
373
- load_best_model_at_end:
374
- value: false
375
- local_rank:
376
- value: 0
377
- log_level:
378
- value: passive
379
- log_level_replica:
380
- value: warning
381
- log_on_each_node:
382
- value: true
383
- logging:
384
- value:
385
- group: null
386
- id: null
387
- mode: online
388
- name: 2026.03.27-16.20.52_train_llm_lowdim_adroit-hand-hammer-v1
389
- project: llm_module_finetuning
390
- resume: true
391
- tags:
392
- - train_llm_lowdim
393
- - adroit-hand-hammer-v1
394
- - train llm
395
- logging_dir:
396
- value: /tmp2/chyang/workspace/LLM-BC/data/outputs/2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/runs/Mar27_16-21-01_A6000-2
397
- logging_first_step:
398
- value: false
399
- logging_nan_inf_filter:
400
- value: true
401
- logging_steps:
402
- value: 1
403
- logging_strategy:
404
- value: steps
405
- lora_config:
406
- value:
407
- bias: none
408
- lora_alpha: 64
409
- lora_dropout: 0.05
410
- r: 32
411
- task_type: CAUSAL_LM
412
- lr_scheduler_type:
413
- value: linear
414
- max_grad_norm:
415
- value: 1
416
- max_length:
417
- value: 20
418
- max_position_embeddings:
419
- value: 8192
420
- max_seq_length:
421
- value: 100
422
- max_steps:
423
- value: -1
424
- metric_for_best_model:
425
- value: null
426
- min_length:
427
- value: 0
428
- mlp_bias:
429
- value: false
430
- model/num_parameters:
431
- value: 134889408
432
- model_init_kwargs:
433
- value: null
434
- model_name:
435
- value: HuggingFaceTB/SmolLM2-135M-Instruct
436
- model_type:
437
- value: llama_lowdim
438
- mp_parameters:
439
- value: ""
440
- multi_run:
441
- value:
442
- run_dir: data/outputs/2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1
443
- wandb_name_base: 2026.03.27-16.20.52_train_llm_lowdim_adroit-hand-hammer-v1
444
- n_action_steps:
445
- value: 1
446
- n_obs_steps:
447
- value: 1
448
- name:
449
- value: train_llm_lowdim
450
- neftune_noise_alpha:
451
- value: null
452
- no_cuda:
453
- value: false
454
- no_repeat_ngram_size:
455
- value: 0
456
- num_attention_heads:
457
- value: 9
458
- num_beam_groups:
459
- value: 1
460
- num_beams:
461
- value: 1
462
- num_hidden_layers:
463
- value: 30
464
- num_key_value_heads:
465
- value: 3
466
- num_of_sequences:
467
- value: 1024
468
- num_return_sequences:
469
- value: 1
470
- num_train_epochs:
471
- value: 10
472
- obs_dim:
473
- value: 46
474
- obs_mlp_lr:
475
- value: null
476
- optim:
477
- value: paged_adamw_32bit
478
- optim_args:
479
- value: null
480
- optim_target_modules:
481
- value: null
482
- output_attentions:
483
- value: false
484
- output_dir:
485
- value: /tmp2/chyang/workspace/LLM-BC/data/outputs/2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1
486
- output_hidden_states:
487
- value: false
488
- output_scores:
489
- value: false
490
- overwrite_output_dir:
491
- value: false
492
- packing:
493
- value: false
494
- pad_token_id:
495
- value: 2
496
- past_index:
497
- value: -1
498
- per_device_eval_batch_size:
499
- value: 128
500
- per_device_train_batch_size:
501
- value: 128
502
- per_gpu_eval_batch_size:
503
- value: null
504
- per_gpu_train_batch_size:
505
- value: null
506
- prediction_loss_only:
507
- value: false
508
- prefix:
509
- value: null
510
- pretraining_tp:
511
- value: 1
512
- problem_type:
513
- value: null
514
- push_to_hub:
515
- value: false
516
- push_to_hub_model_id:
517
- value: null
518
- push_to_hub_organization:
519
- value: null
520
- push_to_hub_token:
521
- value: <PUSH_TO_HUB_TOKEN>
522
- ray_scope:
523
- value: last
524
- remove_invalid_values:
525
- value: false
526
- remove_unused_columns:
527
- value: true
528
- repetition_penalty:
529
- value: 1
530
- report_to:
531
- value:
532
- - wandb
533
- restore_callback_states_from_checkpoint:
534
- value: false
535
- resume_from_checkpoint:
536
- value: null
537
- return_dict:
538
- value: true
539
- return_dict_in_generate:
540
- value: false
541
- rms_norm_eps:
542
- value: 1e-05
543
- rope_interleaved:
544
- value: false
545
- rope_scaling:
546
- value: null
547
- rope_theta:
548
- value: 100000
549
- run_name:
550
- value: /tmp2/chyang/workspace/LLM-BC/data/outputs/2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1
551
- save_on_each_node:
552
- value: false
553
- save_only_model:
554
- value: false
555
- save_safetensors:
556
- value: true
557
- save_steps:
558
- value: 5000
559
- save_strategy:
560
- value: steps
561
- save_total_limit:
562
- value: null
563
- seed:
564
- value: 42
565
- sep_token_id:
566
- value: null
567
- skip_memory_metrics:
568
- value: true
569
- split_batches:
570
- value: null
571
- suppress_tokens:
572
- value: null
573
- task:
574
- value:
575
- action_dim: 26
576
- dataset:
577
- _target_: llmbc.dataset.adroit_lowdim_dataset.AdroitHandLowdimDataset
578
- data_path: datasets/adroit-hand-hammer-v1-general.pt
579
- data_path2: datasets/adroit-hand-hammer-v1.pt
580
- dummy_normalizer: false
581
- horizon: 1
582
- obs_eef_target: true
583
- pad_after: 0
584
- pad_before: 0
585
- use_manual_normalizer: false
586
- val_ratio: 0.05
587
- env_runner:
588
- _target_: llmbc.env_runner.adroit_lowdim_runner.AdroitHandLowdimRunner
589
- discount: 0.99
590
- env_name: llf-adroit-adroit-hand-hammer-v1
591
- feedback_type:
592
- - hp
593
- - hn
594
- - fp
595
- instruction_type: b
596
- max_steps: 150
597
- n_action_steps: 1
598
- n_envs: 10
599
- n_obs_steps: 1
600
- n_test: 50
601
- n_train: 10
602
- visual: false
603
- instructor:
604
- _target_: llmbc.translator.instructor.adroit_instructor.adroit_hand_hammer_v1_instructor.AdroitHandHammerV1Instructor
605
- name: adroit-hand-hammer-v1
606
- obs_dim: 46
607
- task_name:
608
- value: adroit-hand-hammer-v1
609
- task_specific_params:
610
- value: null
611
- temperature:
612
- value: 1
613
- tf_legacy_loss:
614
- value: false
615
- tf32:
616
- value: true
617
- tie_encoder_decoder:
618
- value: false
619
- tie_word_embeddings:
620
- value: true
621
- tokenizer_class:
622
- value: null
623
- top_k:
624
- value: 50
625
- top_p:
626
- value: 1
627
- torch_compile:
628
- value: false
629
- torch_compile_backend:
630
- value: null
631
- torch_compile_mode:
632
- value: null
633
- torch_dtype:
634
- value: bfloat16
635
- torch_empty_cache_steps:
636
- value: null
637
- torchdynamo:
638
- value: null
639
- torchscript:
640
- value: false
641
- tpu_metrics_debug:
642
- value: false
643
- tpu_num_cores:
644
- value: null
645
- trainer:
646
- value:
647
- action_dim: 26
648
- dataset_text_field: text
649
- max_seq_length: 100
650
- obs_dim: 46
651
- packing: false
652
- use_joint_mlp_projector: true
653
- training:
654
- value:
655
- bf16: true
656
- eval_steps: 10
657
- eval_strategy: steps
658
- fp16: false
659
- gradient_accumulation_steps: 1
660
- group_by_length: true
661
- joint_obs_action_mlp_lr: 5e-05
662
- learning_rate: 0.0001
663
- logging_steps: 1
664
- logging_strategy: steps
665
- num_train_epochs: 10
666
- optim: paged_adamw_32bit
667
- per_device_eval_batch_size: 128
668
- per_device_train_batch_size: 128
669
- report_to: wandb
670
- save_steps: 5000
671
- seed: 42
672
- tf32: true
673
- use_joint_mlp_projector: true
674
- warmup_steps: 10
675
- transformers.js_config:
676
- value:
677
- kv_cache_dtype:
678
- fp16: float16
679
- q4f16: float16
680
- transformers_version:
681
- value: 4.47.1
682
- typical_p:
683
- value: 1
684
- use_bfloat16:
685
- value: false
686
- use_cache:
687
- value: false
688
- use_cpu:
689
- value: false
690
- use_ipex:
691
- value: false
692
- use_joint_mlp_projector:
693
- value: true
694
- use_legacy_prediction_loop:
695
- value: false
696
- use_liger:
697
- value: false
698
- use_liger_kernel:
699
- value: false
700
- use_mps_device:
701
- value: false
702
- use_quantization:
703
- value: false
704
- vocab_size:
705
- value: 49152
706
- warmup_ratio:
707
- value: 0
708
- warmup_steps:
709
- value: 10
710
- weight_decay:
711
- value: 0
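The `training` block recorded above maps almost one-to-one onto TRL's SFTConfig; the output.log below shows the run actually passed max_seq_length and dataset_text_field to SFTTrainer directly (hence the deprecation warnings there). A minimal sketch restating the recorded hyperparameters, with the long output_dir shortened and the placement of the SFT-specific fields on SFTConfig an assumption about newer TRL versions:

from trl import SFTConfig

args = SFTConfig(
    output_dir="out",           # real path: data/outputs/2026.03.27/... (see above)
    per_device_train_batch_size=128,
    per_device_eval_batch_size=128,
    num_train_epochs=10,
    learning_rate=1e-4,
    warmup_steps=10,
    lr_scheduler_type="linear",
    optim="paged_adamw_32bit",  # requires bitsandbytes
    bf16=True,
    tf32=True,
    group_by_length=True,
    logging_strategy="steps",
    logging_steps=1,
    eval_strategy="steps",
    eval_steps=10,
    save_steps=5000,
    report_to="wandb",
    seed=42,
    max_seq_length=100,
    dataset_text_field="text",
    packing=False,
)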
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/wandb/run-20260327_162056-nhmfpc2t/files/output.log DELETED
@@ -1,509 +0,0 @@
1
- You are using a model of type llama to instantiate a model of type llama_lowdim. This is not supported for all configurations of models and can yield errors.
2
- Some weights of LowdimLlamaForCausalLM were not initialized from the model checkpoint at HuggingFaceTB/SmolLM2-135M-Instruct and are newly initialized: ['model.joint_obs_action_projector.projector.0.bias', 'model.joint_obs_action_projector.projector.0.weight', 'model.joint_obs_action_projector.projector.2.bias', 'model.joint_obs_action_projector.projector.2.weight']
3
- You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
4
- Loading from mlp projector checkpoint: data/outputs/2026.03.27/14.38.20_train_mlp_projector_adroit-hand-hammer-v1/checkpoints/latest.ckpt
5
- Finetune the whole original LLM SmolLM2-135M-Instruct.
6
- Multistep Flattening Dataset: 100%|██████████████████████████████████████████████████████████████████| 4585/4585 [00:00<00:00, 6904.74it/s]
7
- Setting TOKENIZERS_PARALLELISM=false for forked processes.
8
- [2026-03-27 16:20:59,889][datasets.arrow_dataset][WARNING] - Setting TOKENIZERS_PARALLELISM=false for forked processes.
9
- Map (num_proc=4): 100%|███████████████████████████████████████████████████████████████████████| 4585/4585 [00:00<00:00, 4937.91 examples/s]
10
- Setting TOKENIZERS_PARALLELISM=false for forked processes.
11
- [2026-03-27 16:21:01,023][datasets.arrow_dataset][WARNING] - Setting TOKENIZERS_PARALLELISM=false for forked processes.
12
- Map (num_proc=4): 100%|███████████████████████████████████████████████████████████████████████| 4585/4585 [00:00<00:00, 9277.34 examples/s]
13
- DatasetDict({
14
- train: Dataset({
15
- features: ['obs', 'action', 'description', 'input', 'output', 'text', 'input_ids', 'labels'],
16
- num_rows: 4539
17
- })
18
- test: Dataset({
19
- features: ['obs', 'action', 'description', 'input', 'output', 'text', 'input_ids', 'labels'],
20
- num_rows: 46
21
- })
22
- })
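The 4539/46 split printed above is consistent with the configured test_data_ratio of 0.01 applied to the 4585 flattened examples; the exact rounding rule is an assumption:

# Split-size check; round-to-nearest is assumed for the test share.
n_total = 4585
n_test = round(n_total * 0.01)  # 45.85 -> 46
n_train = n_total - n_test      # 4539
assert (n_train, n_test) == (4539, 46)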
23
- /home/chyang/miniconda3/envs/llm-bc/lib/python3.9/site-packages/huggingface_hub/utils/_deprecation.py:100: FutureWarning: Deprecated argument(s) used in '__init__': max_seq_length, dataset_text_field. Will not be supported from version '1.0.0'.
24
-
25
- Deprecated positional argument(s) used in SFTTrainer, please use the SFTConfig to set these arguments instead.
26
- warnings.warn(message, FutureWarning)
27
- /home/chyang/miniconda3/envs/llm-bc/lib/python3.9/site-packages/trl/trainer/sft_trainer.py:283: UserWarning: You passed a `max_seq_length` argument to the SFTTrainer, the value you passed will override the one in the `SFTConfig`.
28
- warnings.warn(
29
- /home/chyang/miniconda3/envs/llm-bc/lib/python3.9/site-packages/trl/trainer/sft_trainer.py:321: UserWarning: You passed a `dataset_text_field` argument to the SFTTrainer, the value you passed will override the one in the `SFTConfig`.
30
- warnings.warn(
31
- /home/chyang/miniconda3/envs/llm-bc/lib/python3.9/site-packages/trl/trainer/sft_trainer.py:401: FutureWarning: `tokenizer` is deprecated and will be removed in version 5.0.0 for `LowdimSFTTrainer.__init__`. Use `processing_class` instead.
32
- super().__init__(
33
- [2026-03-27 16:21:01,783] [INFO] [real_accelerator.py:219:get_accelerator] Setting ds_accelerator to cuda (auto detect)
34
- [2026-03-27 16:21:01,919][root][INFO] - gcc -pthread -B /home/chyang/miniconda3/envs/llm-bc/compiler_compat -Wno-unused-result -Wsign-compare -DNDEBUG -O2 -Wall -fPIC -O2 -isystem /home/chyang/miniconda3/envs/llm-bc/include -I/home/chyang/miniconda3/envs/llm-bc/include -fPIC -O2 -isystem /home/chyang/miniconda3/envs/llm-bc/include -fPIC -c /tmp/tmp7t0nrkuh/test.c -o /tmp/tmp7t0nrkuh/test.o
35
- [2026-03-27 16:21:01,969][root][INFO] - gcc -pthread -B /home/chyang/miniconda3/envs/llm-bc/compiler_compat /tmp/tmp7t0nrkuh/test.o -laio -o /tmp/tmp7t0nrkuh/a.out
36
- [2026-03-27 16:21:02,963][root][INFO] - gcc -pthread -B /home/chyang/miniconda3/envs/llm-bc/compiler_compat -Wno-unused-result -Wsign-compare -DNDEBUG -O2 -Wall -fPIC -O2 -isystem /home/chyang/miniconda3/envs/llm-bc/include -I/home/chyang/miniconda3/envs/llm-bc/include -fPIC -O2 -isystem /home/chyang/miniconda3/envs/llm-bc/include -fPIC -c /tmp/tmps_s5vyh9/test.c -o /tmp/tmps_s5vyh9/test.o
37
- [2026-03-27 16:21:03,018][root][INFO] - gcc -pthread -B /home/chyang/miniconda3/envs/llm-bc/compiler_compat /tmp/tmps_s5vyh9/test.o -L/usr/local/cuda -L/usr/local/cuda/lib64 -lcufile -o /tmp/tmps_s5vyh9/a.out
38
- wandb: WARNING The `run_name` is currently set to the same value as `TrainingArguments.output_dir`. If this was not intended, please specify a different run name by setting the `TrainingArguments.run_name` parameter.
39
- 3%|██▊ | 10/360 [00:08<04:32, 1.29it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
40
- {'loss': 1.6431, 'grad_norm': 13.871646881103516, 'learning_rate': 1e-05, 'epoch': 0.03}
41
- {'loss': 1.6018, 'grad_norm': 17.890308380126953, 'learning_rate': 2e-05, 'epoch': 0.06}
42
- {'loss': 1.5953, 'grad_norm': 13.746294021606445, 'learning_rate': 3e-05, 'epoch': 0.08}
43
- {'loss': 1.5355, 'grad_norm': 15.9970121383667, 'learning_rate': 4e-05, 'epoch': 0.11}
44
- {'loss': 1.552, 'grad_norm': 18.634761810302734, 'learning_rate': 5e-05, 'epoch': 0.14}
45
- {'loss': 1.4926, 'grad_norm': 10.22042179107666, 'learning_rate': 6e-05, 'epoch': 0.17}
46
- {'loss': 1.1827, 'grad_norm': 14.976595878601074, 'learning_rate': 7e-05, 'epoch': 0.19}
47
- {'loss': 1.173, 'grad_norm': 34.35334396362305, 'learning_rate': 8e-05, 'epoch': 0.22}
48
- {'loss': 0.7987, 'grad_norm': 7.392702579498291, 'learning_rate': 9e-05, 'epoch': 0.25}
49
- {'loss': 0.7641, 'grad_norm': 8.6481294631958, 'learning_rate': 0.0001, 'epoch': 0.28}
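The learning_rate column follows transformers' standard linear schedule: warmup to the configured 1e-4 over 10 steps, then linear decay over the remaining 350 of 360 total optimizer steps (4539 train rows at batch size 128 gives 36 steps per epoch, times 10 epochs). A small check that reproduces the logged values:

# Linear warmup + linear decay, matching get_linear_schedule_with_warmup.
base_lr, warmup, total = 1e-4, 10, 360

def lr_at(step: int) -> float:
    if step < warmup:
        return base_lr * step / warmup
    return base_lr * (total - step) / (total - warmup)

print(lr_at(1))   # 1e-05, the first logged step
print(lr_at(10))  # 0.0001, warmup complete
print(lr_at(11))  # ~9.9714e-05, matching the first post-warmup step below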
50
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
51
- 6%|█████▌ | 20/360 [00:15<04:21, 1.30it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
52
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
53
- {'eval_loss': 0.719104528427124, 'eval_runtime': 0.1041, 'eval_samples_per_second': 441.967, 'eval_steps_per_second': 9.608, 'epoch': 0.28}
54
- {'loss': 0.6656, 'grad_norm': 11.896777153015137, 'learning_rate': 9.971428571428571e-05, 'epoch': 0.31}
55
- {'loss': 0.5743, 'grad_norm': 6.002552032470703, 'learning_rate': 9.942857142857144e-05, 'epoch': 0.33}
56
- {'loss': 0.5204, 'grad_norm': 4.569210529327393, 'learning_rate': 9.914285714285715e-05, 'epoch': 0.36}
57
- {'loss': 0.5217, 'grad_norm': 3.2792978286743164, 'learning_rate': 9.885714285714286e-05, 'epoch': 0.39}
58
- {'loss': 0.5046, 'grad_norm': 3.6701369285583496, 'learning_rate': 9.857142857142858e-05, 'epoch': 0.42}
59
- {'loss': 0.4803, 'grad_norm': 4.064358711242676, 'learning_rate': 9.828571428571429e-05, 'epoch': 0.44}
60
- {'loss': 0.4466, 'grad_norm': 2.9059529304504395, 'learning_rate': 9.8e-05, 'epoch': 0.47}
61
- {'loss': 0.4453, 'grad_norm': 2.499434471130371, 'learning_rate': 9.771428571428572e-05, 'epoch': 0.5}
62
- {'loss': 0.4382, 'grad_norm': 2.084019899368286, 'learning_rate': 9.742857142857143e-05, 'epoch': 0.53}
63
- {'loss': 0.4208, 'grad_norm': 1.2787052392959595, 'learning_rate': 9.714285714285715e-05, 'epoch': 0.56}
64
- 8%|████████▍ | 30/360 [00:23<04:10, 1.32it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
65
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
66
- {'eval_loss': 0.4429771900177002, 'eval_runtime': 0.1036, 'eval_samples_per_second': 443.983, 'eval_steps_per_second': 9.652, 'epoch': 0.56}
67
- {'loss': 0.431, 'grad_norm': 1.8704657554626465, 'learning_rate': 9.685714285714286e-05, 'epoch': 0.58}
68
- {'loss': 0.4317, 'grad_norm': 1.7761015892028809, 'learning_rate': 9.657142857142858e-05, 'epoch': 0.61}
69
- {'loss': 0.4229, 'grad_norm': 1.7499357461929321, 'learning_rate': 9.628571428571429e-05, 'epoch': 0.64}
70
- {'loss': 0.4126, 'grad_norm': 1.2509069442749023, 'learning_rate': 9.6e-05, 'epoch': 0.67}
71
- {'loss': 0.3888, 'grad_norm': 1.3415058851242065, 'learning_rate': 9.571428571428573e-05, 'epoch': 0.69}
72
- {'loss': 0.4111, 'grad_norm': 1.513482689857483, 'learning_rate': 9.542857142857143e-05, 'epoch': 0.72}
73
- {'loss': 0.3904, 'grad_norm': 1.0207685232162476, 'learning_rate': 9.514285714285714e-05, 'epoch': 0.75}
74
- {'loss': 0.3911, 'grad_norm': 1.0765091180801392, 'learning_rate': 9.485714285714287e-05, 'epoch': 0.78}
75
- {'loss': 0.3893, 'grad_norm': 1.2146029472351074, 'learning_rate': 9.457142857142858e-05, 'epoch': 0.81}
76
- {'loss': 0.3915, 'grad_norm': 1.302972435951233, 'learning_rate': 9.428571428571429e-05, 'epoch': 0.83}
77
- 11%|███████████▏ | 40/360 [00:30<03:54, 1.36it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
78
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
79
- {'eval_loss': 0.39956018328666687, 'eval_runtime': 0.1036, 'eval_samples_per_second': 444.089, 'eval_steps_per_second': 9.654, 'epoch': 0.83}
80
- {'loss': 0.377, 'grad_norm': 1.2195433378219604, 'learning_rate': 9.4e-05, 'epoch': 0.86}
81
- {'loss': 0.3837, 'grad_norm': 1.2320094108581543, 'learning_rate': 9.371428571428572e-05, 'epoch': 0.89}
82
- {'loss': 0.3776, 'grad_norm': 1.0609043836593628, 'learning_rate': 9.342857142857143e-05, 'epoch': 0.92}
83
- {'loss': 0.3887, 'grad_norm': 0.9609966278076172, 'learning_rate': 9.314285714285715e-05, 'epoch': 0.94}
84
- {'loss': 0.3761, 'grad_norm': 1.0595581531524658, 'learning_rate': 9.285714285714286e-05, 'epoch': 0.97}
85
- {'loss': 0.3744, 'grad_norm': 0.990327775478363, 'learning_rate': 9.257142857142858e-05, 'epoch': 1.0}
86
- {'loss': 0.3625, 'grad_norm': 1.272873044013977, 'learning_rate': 9.228571428571429e-05, 'epoch': 1.03}
87
- {'loss': 0.3869, 'grad_norm': 1.9024567604064941, 'learning_rate': 9.200000000000001e-05, 'epoch': 1.06}
88
- {'loss': 0.3751, 'grad_norm': 1.3398654460906982, 'learning_rate': 9.171428571428572e-05, 'epoch': 1.08}
89
- {'loss': 0.3662, 'grad_norm': 1.9176064729690552, 'learning_rate': 9.142857142857143e-05, 'epoch': 1.11}
90
- 14%|██████████████ | 50/360 [00:38<03:57, 1.31it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
91
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
92
- {'eval_loss': 0.37765416502952576, 'eval_runtime': 0.1036, 'eval_samples_per_second': 444.001, 'eval_steps_per_second': 9.652, 'epoch': 1.11}
93
- {'loss': 0.3665, 'grad_norm': 1.1852660179138184, 'learning_rate': 9.114285714285716e-05, 'epoch': 1.14}
94
- {'loss': 0.3705, 'grad_norm': 1.831186056137085, 'learning_rate': 9.085714285714286e-05, 'epoch': 1.17}
95
- {'loss': 0.3582, 'grad_norm': 1.1574777364730835, 'learning_rate': 9.057142857142857e-05, 'epoch': 1.19}
96
- {'loss': 0.3724, 'grad_norm': 1.3485198020935059, 'learning_rate': 9.028571428571428e-05, 'epoch': 1.22}
97
- {'loss': 0.3549, 'grad_norm': 1.0934721231460571, 'learning_rate': 9e-05, 'epoch': 1.25}
98
- {'loss': 0.3607, 'grad_norm': 1.2588518857955933, 'learning_rate': 8.971428571428571e-05, 'epoch': 1.28}
99
- {'loss': 0.3492, 'grad_norm': 0.9038533568382263, 'learning_rate': 8.942857142857142e-05, 'epoch': 1.31}
100
- {'loss': 0.361, 'grad_norm': 1.083348274230957, 'learning_rate': 8.914285714285715e-05, 'epoch': 1.33}
101
- {'loss': 0.3475, 'grad_norm': 0.8287424445152283, 'learning_rate': 8.885714285714286e-05, 'epoch': 1.36}
102
- {'loss': 0.363, 'grad_norm': 1.3475714921951294, 'learning_rate': 8.857142857142857e-05, 'epoch': 1.39}
103
- 17%|████████████████▊ | 60/360 [00:46<03:47, 1.32it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
104
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
105
- {'eval_loss': 0.3627206087112427, 'eval_runtime': 0.1039, 'eval_samples_per_second': 442.561, 'eval_steps_per_second': 9.621, 'epoch': 1.39}
106
- {'loss': 0.3527, 'grad_norm': 1.1012552976608276, 'learning_rate': 8.828571428571429e-05, 'epoch': 1.42}
107
- {'loss': 0.3418, 'grad_norm': 0.6421935558319092, 'learning_rate': 8.800000000000001e-05, 'epoch': 1.44}
108
- {'loss': 0.3513, 'grad_norm': 1.1574995517730713, 'learning_rate': 8.771428571428572e-05, 'epoch': 1.47}
109
- {'loss': 0.3515, 'grad_norm': 1.0251258611679077, 'learning_rate': 8.742857142857144e-05, 'epoch': 1.5}
110
- {'loss': 0.3609, 'grad_norm': 0.9864039421081543, 'learning_rate': 8.714285714285715e-05, 'epoch': 1.53}
111
- {'loss': 0.3454, 'grad_norm': 0.757999062538147, 'learning_rate': 8.685714285714286e-05, 'epoch': 1.56}
112
- {'loss': 0.3488, 'grad_norm': 1.0983614921569824, 'learning_rate': 8.657142857142858e-05, 'epoch': 1.58}
113
- {'loss': 0.3562, 'grad_norm': 1.4811136722564697, 'learning_rate': 8.62857142857143e-05, 'epoch': 1.61}
114
- {'loss': 0.349, 'grad_norm': 0.9457672834396362, 'learning_rate': 8.6e-05, 'epoch': 1.64}
115
- {'loss': 0.3551, 'grad_norm': 1.4347460269927979, 'learning_rate': 8.571428571428571e-05, 'epoch': 1.67}
116
- 19%|███████████████████▋ | 70/360 [00:54<03:43, 1.30it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
117
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
118
- {'eval_loss': 0.35965070128440857, 'eval_runtime': 0.1045, 'eval_samples_per_second': 440.164, 'eval_steps_per_second': 9.569, 'epoch': 1.67}
119
- {'loss': 0.3485, 'grad_norm': 1.0592706203460693, 'learning_rate': 8.542857142857144e-05, 'epoch': 1.69}
120
- {'loss': 0.3512, 'grad_norm': 1.3444126844406128, 'learning_rate': 8.514285714285714e-05, 'epoch': 1.72}
121
- {'loss': 0.3525, 'grad_norm': 0.9045667052268982, 'learning_rate': 8.485714285714285e-05, 'epoch': 1.75}
122
- {'loss': 0.3483, 'grad_norm': 1.135429859161377, 'learning_rate': 8.457142857142858e-05, 'epoch': 1.78}
123
- {'loss': 0.3445, 'grad_norm': 0.7742411494255066, 'learning_rate': 8.428571428571429e-05, 'epoch': 1.81}
124
- {'loss': 0.3425, 'grad_norm': 1.2747840881347656, 'learning_rate': 8.4e-05, 'epoch': 1.83}
125
- {'loss': 0.3506, 'grad_norm': 1.1280975341796875, 'learning_rate': 8.371428571428572e-05, 'epoch': 1.86}
126
- {'loss': 0.3458, 'grad_norm': 1.3229925632476807, 'learning_rate': 8.342857142857143e-05, 'epoch': 1.89}
127
- {'loss': 0.3443, 'grad_norm': 1.0970568656921387, 'learning_rate': 8.314285714285715e-05, 'epoch': 1.92}
128
- {'loss': 0.3612, 'grad_norm': 1.7599389553070068, 'learning_rate': 8.285714285714287e-05, 'epoch': 1.94}
129
- 22%|██████████████████████▍ | 80/360 [01:01<03:32, 1.32it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
130
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
131
- {'eval_loss': 0.35299769043922424, 'eval_runtime': 0.1042, 'eval_samples_per_second': 441.433, 'eval_steps_per_second': 9.596, 'epoch': 1.94}
132
- {'loss': 0.3373, 'grad_norm': 0.8275991678237915, 'learning_rate': 8.257142857142858e-05, 'epoch': 1.97}
133
- {'loss': 0.3624, 'grad_norm': 1.5045437812805176, 'learning_rate': 8.228571428571429e-05, 'epoch': 2.0}
134
- {'loss': 0.3434, 'grad_norm': 0.9771829843521118, 'learning_rate': 8.2e-05, 'epoch': 2.03}
135
- {'loss': 0.3347, 'grad_norm': 0.8552800416946411, 'learning_rate': 8.171428571428572e-05, 'epoch': 2.06}
136
- {'loss': 0.3448, 'grad_norm': 0.8917291164398193, 'learning_rate': 8.142857142857143e-05, 'epoch': 2.08}
137
- {'loss': 0.3309, 'grad_norm': 0.9143850207328796, 'learning_rate': 8.114285714285714e-05, 'epoch': 2.11}
138
- {'loss': 0.3471, 'grad_norm': 1.359926700592041, 'learning_rate': 8.085714285714287e-05, 'epoch': 2.14}
139
- {'loss': 0.3433, 'grad_norm': 0.84107506275177, 'learning_rate': 8.057142857142857e-05, 'epoch': 2.17}
140
- {'loss': 0.3495, 'grad_norm': 1.2953639030456543, 'learning_rate': 8.028571428571428e-05, 'epoch': 2.19}
141
- {'loss': 0.3388, 'grad_norm': 0.9937311410903931, 'learning_rate': 8e-05, 'epoch': 2.22}
142
- 25%|█████████████████████████▎ | 90/360 [01:09<03:26, 1.31it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
143
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
144
- {'eval_loss': 0.3491460978984833, 'eval_runtime': 0.1044, 'eval_samples_per_second': 440.593, 'eval_steps_per_second': 9.578, 'epoch': 2.22}
145
- {'loss': 0.3435, 'grad_norm': 1.0681490898132324, 'learning_rate': 7.971428571428572e-05, 'epoch': 2.25}
146
- {'loss': 0.3333, 'grad_norm': 0.8466928601264954, 'learning_rate': 7.942857142857143e-05, 'epoch': 2.28}
147
- {'loss': 0.3305, 'grad_norm': 0.8183342814445496, 'learning_rate': 7.914285714285715e-05, 'epoch': 2.31}
148
- {'loss': 0.3289, 'grad_norm': 0.833314061164856, 'learning_rate': 7.885714285714286e-05, 'epoch': 2.33}
149
- {'loss': 0.3331, 'grad_norm': 0.8347731828689575, 'learning_rate': 7.857142857142858e-05, 'epoch': 2.36}
150
- {'loss': 0.3437, 'grad_norm': 1.0877679586410522, 'learning_rate': 7.828571428571429e-05, 'epoch': 2.39}
151
- {'loss': 0.3331, 'grad_norm': 0.9570125937461853, 'learning_rate': 7.800000000000001e-05, 'epoch': 2.42}
152
- {'loss': 0.3363, 'grad_norm': 0.7662280797958374, 'learning_rate': 7.771428571428572e-05, 'epoch': 2.44}
153
- {'loss': 0.3305, 'grad_norm': 0.9321999549865723, 'learning_rate': 7.742857142857143e-05, 'epoch': 2.47}
154
- {'loss': 0.3332, 'grad_norm': 0.8284544348716736, 'learning_rate': 7.714285714285715e-05, 'epoch': 2.5}
155
- 28%|███████████████████████████▊ | 100/360 [01:17<03:19, 1.30it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
156
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
157
- {'eval_loss': 0.34498000144958496, 'eval_runtime': 0.1046, 'eval_samples_per_second': 439.848, 'eval_steps_per_second': 9.562, 'epoch': 2.5}
158
- {'loss': 0.3448, 'grad_norm': 1.0568827390670776, 'learning_rate': 7.685714285714286e-05, 'epoch': 2.53}
159
- {'loss': 0.334, 'grad_norm': 0.9136806130409241, 'learning_rate': 7.657142857142857e-05, 'epoch': 2.56}
160
- {'loss': 0.3425, 'grad_norm': 1.2551990747451782, 'learning_rate': 7.62857142857143e-05, 'epoch': 2.58}
161
- {'loss': 0.3265, 'grad_norm': 0.8284862637519836, 'learning_rate': 7.6e-05, 'epoch': 2.61}
162
- {'loss': 0.3267, 'grad_norm': 0.7161554098129272, 'learning_rate': 7.571428571428571e-05, 'epoch': 2.64}
163
- {'loss': 0.3342, 'grad_norm': 0.8050905466079712, 'learning_rate': 7.542857142857144e-05, 'epoch': 2.67}
164
- {'loss': 0.3325, 'grad_norm': 0.7441209554672241, 'learning_rate': 7.514285714285715e-05, 'epoch': 2.69}
165
- {'loss': 0.334, 'grad_norm': 0.591927707195282, 'learning_rate': 7.485714285714285e-05, 'epoch': 2.72}
166
- {'loss': 0.3473, 'grad_norm': 0.8902866244316101, 'learning_rate': 7.457142857142856e-05, 'epoch': 2.75}
167
- {'loss': 0.326, 'grad_norm': 0.6760069131851196, 'learning_rate': 7.428571428571429e-05, 'epoch': 2.78}
168
- 31%|██████████████████████████████▌ | 110/360 [01:24<02:57, 1.41it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
169
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
170
- {'eval_loss': 0.3410107493400574, 'eval_runtime': 0.1043, 'eval_samples_per_second': 441.026, 'eval_steps_per_second': 9.588, 'epoch': 2.78}
171
- {'loss': 0.3348, 'grad_norm': 0.6579345464706421, 'learning_rate': 7.4e-05, 'epoch': 2.81}
172
- {'loss': 0.3253, 'grad_norm': 1.0648415088653564, 'learning_rate': 7.371428571428572e-05, 'epoch': 2.83}
173
- {'loss': 0.3297, 'grad_norm': 0.6868814826011658, 'learning_rate': 7.342857142857144e-05, 'epoch': 2.86}
174
- {'loss': 0.3401, 'grad_norm': 1.1149464845657349, 'learning_rate': 7.314285714285715e-05, 'epoch': 2.89}
175
- {'loss': 0.3348, 'grad_norm': 0.8934164047241211, 'learning_rate': 7.285714285714286e-05, 'epoch': 2.92}
176
- {'loss': 0.3427, 'grad_norm': 1.1119507551193237, 'learning_rate': 7.257142857142858e-05, 'epoch': 2.94}
177
- {'loss': 0.3374, 'grad_norm': 0.8103634715080261, 'learning_rate': 7.228571428571429e-05, 'epoch': 2.97}
178
- {'loss': 0.3395, 'grad_norm': 0.8421126008033752, 'learning_rate': 7.2e-05, 'epoch': 3.0}
179
- {'loss': 0.331, 'grad_norm': 0.8583278656005859, 'learning_rate': 7.171428571428572e-05, 'epoch': 3.03}
180
- {'loss': 0.3355, 'grad_norm': 1.2129111289978027, 'learning_rate': 7.142857142857143e-05, 'epoch': 3.06}
181
- 33%|█████████████████████████████████▎ | 120/360 [01:32<03:02, 1.32it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
182
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
183
- {'eval_loss': 0.332057386636734, 'eval_runtime': 0.1051, 'eval_samples_per_second': 437.685, 'eval_steps_per_second': 9.515, 'epoch': 3.06}
184
- {'loss': 0.3294, 'grad_norm': 0.9463130235671997, 'learning_rate': 7.114285714285714e-05, 'epoch': 3.08}
185
- {'loss': 0.3327, 'grad_norm': 0.9692079424858093, 'learning_rate': 7.085714285714285e-05, 'epoch': 3.11}
186
- {'loss': 0.3295, 'grad_norm': 0.9853659868240356, 'learning_rate': 7.057142857142858e-05, 'epoch': 3.14}
187
- {'loss': 0.3358, 'grad_norm': 0.7222715616226196, 'learning_rate': 7.028571428571428e-05, 'epoch': 3.17}
188
- {'loss': 0.3406, 'grad_norm': 1.1528452634811401, 'learning_rate': 7e-05, 'epoch': 3.19}
189
- {'loss': 0.329, 'grad_norm': 1.0079970359802246, 'learning_rate': 6.971428571428572e-05, 'epoch': 3.22}
190
- {'loss': 0.327, 'grad_norm': 0.7162885665893555, 'learning_rate': 6.942857142857143e-05, 'epoch': 3.25}
191
- {'loss': 0.336, 'grad_norm': 0.9302375912666321, 'learning_rate': 6.914285714285715e-05, 'epoch': 3.28}
192
- {'loss': 0.3356, 'grad_norm': 0.8540468215942383, 'learning_rate': 6.885714285714286e-05, 'epoch': 3.31}
193
- {'loss': 0.3279, 'grad_norm': 0.598040759563446, 'learning_rate': 6.857142857142858e-05, 'epoch': 3.33}
194
- 36%|████████████████████████████████████ | 130/360 [01:40<02:57, 1.30it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
195
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
196
- {'eval_loss': 0.3290035128593445, 'eval_runtime': 0.1049, 'eval_samples_per_second': 438.622, 'eval_steps_per_second': 9.535, 'epoch': 3.33}
197
- {'loss': 0.33, 'grad_norm': 0.6981043815612793, 'learning_rate': 6.828571428571429e-05, 'epoch': 3.36}
198
- {'loss': 0.3262, 'grad_norm': 0.6710860133171082, 'learning_rate': 6.800000000000001e-05, 'epoch': 3.39}
199
- {'loss': 0.3273, 'grad_norm': 0.6621596813201904, 'learning_rate': 6.771428571428572e-05, 'epoch': 3.42}
200
- {'loss': 0.3264, 'grad_norm': 0.8255563974380493, 'learning_rate': 6.742857142857143e-05, 'epoch': 3.44}
201
- {'loss': 0.3157, 'grad_norm': 0.8350001573562622, 'learning_rate': 6.714285714285714e-05, 'epoch': 3.47}
202
- {'loss': 0.3205, 'grad_norm': 0.7275986075401306, 'learning_rate': 6.685714285714286e-05, 'epoch': 3.5}
203
- {'loss': 0.3327, 'grad_norm': 0.652642548084259, 'learning_rate': 6.657142857142857e-05, 'epoch': 3.53}
204
- {'loss': 0.3319, 'grad_norm': 0.9522960186004639, 'learning_rate': 6.628571428571428e-05, 'epoch': 3.56}
205
- {'loss': 0.3292, 'grad_norm': 0.7006963491439819, 'learning_rate': 6.6e-05, 'epoch': 3.58}
206
- {'loss': 0.3246, 'grad_norm': 0.7161970138549805, 'learning_rate': 6.571428571428571e-05, 'epoch': 3.61}
207
- 39%|██████████████████████████████████████▉ | 140/360 [01:48<02:51, 1.28it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
208
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
209
- {'eval_loss': 0.3442412316799164, 'eval_runtime': 0.1052, 'eval_samples_per_second': 437.452, 'eval_steps_per_second': 9.51, 'epoch': 3.61}
210
- {'loss': 0.3289, 'grad_norm': 1.0642709732055664, 'learning_rate': 6.542857142857142e-05, 'epoch': 3.64}
211
- {'loss': 0.3234, 'grad_norm': 0.7999193072319031, 'learning_rate': 6.514285714285715e-05, 'epoch': 3.67}
212
- {'loss': 0.3297, 'grad_norm': 0.8324876427650452, 'learning_rate': 6.485714285714286e-05, 'epoch': 3.69}
213
- {'loss': 0.3125, 'grad_norm': 0.561801552772522, 'learning_rate': 6.457142857142856e-05, 'epoch': 3.72}
214
- {'loss': 0.3234, 'grad_norm': 0.6995918154716492, 'learning_rate': 6.428571428571429e-05, 'epoch': 3.75}
215
- {'loss': 0.3256, 'grad_norm': 0.6314477920532227, 'learning_rate': 6.400000000000001e-05, 'epoch': 3.78}
216
- {'loss': 0.3315, 'grad_norm': 0.9092559814453125, 'learning_rate': 6.371428571428572e-05, 'epoch': 3.81}
217
- {'loss': 0.3241, 'grad_norm': 0.7306588292121887, 'learning_rate': 6.342857142857143e-05, 'epoch': 3.83}
218
- {'loss': 0.3323, 'grad_norm': 0.7943991422653198, 'learning_rate': 6.314285714285715e-05, 'epoch': 3.86}
219
- {'loss': 0.3273, 'grad_norm': 0.8375313878059387, 'learning_rate': 6.285714285714286e-05, 'epoch': 3.89}
220
- 42%|█████████████████████████████████████████▋ | 150/360 [01:55<02:38, 1.33it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
221
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
222
- {'eval_loss': 0.33936312794685364, 'eval_runtime': 0.1048, 'eval_samples_per_second': 438.851, 'eval_steps_per_second': 9.54, 'epoch': 3.89}
223
- {'loss': 0.3258, 'grad_norm': 0.9479944705963135, 'learning_rate': 6.257142857142857e-05, 'epoch': 3.92}
224
- {'loss': 0.3228, 'grad_norm': 0.8155922889709473, 'learning_rate': 6.22857142857143e-05, 'epoch': 3.94}
225
- {'loss': 0.3217, 'grad_norm': 0.8617050647735596, 'learning_rate': 6.2e-05, 'epoch': 3.97}
226
- {'loss': 0.3309, 'grad_norm': 1.2106715440750122, 'learning_rate': 6.171428571428571e-05, 'epoch': 4.0}
227
- {'loss': 0.3265, 'grad_norm': 0.8097350001335144, 'learning_rate': 6.142857142857143e-05, 'epoch': 4.03}
228
- {'loss': 0.3222, 'grad_norm': 0.651019811630249, 'learning_rate': 6.114285714285714e-05, 'epoch': 4.06}
229
- {'loss': 0.3245, 'grad_norm': 0.8858047127723694, 'learning_rate': 6.085714285714286e-05, 'epoch': 4.08}
230
- {'loss': 0.3153, 'grad_norm': 0.8602396845817566, 'learning_rate': 6.0571428571428576e-05, 'epoch': 4.11}
231
- {'loss': 0.313, 'grad_norm': 0.615274965763092, 'learning_rate': 6.028571428571429e-05, 'epoch': 4.14}
232
- {'loss': 0.3275, 'grad_norm': 0.9199692010879517, 'learning_rate': 6e-05, 'epoch': 4.17}
233
- 44%|████████████████████████████████████████████▍ | 160/360 [02:03<02:34, 1.29it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
234
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
235
- {'eval_loss': 0.34145644307136536, 'eval_runtime': 0.1056, 'eval_samples_per_second': 435.469, 'eval_steps_per_second': 9.467, 'epoch': 4.17}
236
- {'loss': 0.3287, 'grad_norm': 0.9348899722099304, 'learning_rate': 5.9714285714285724e-05, 'epoch': 4.19}
237
- {'loss': 0.3203, 'grad_norm': 0.7250774502754211, 'learning_rate': 5.9428571428571434e-05, 'epoch': 4.22}
238
- {'loss': 0.3169, 'grad_norm': 0.7376280426979065, 'learning_rate': 5.914285714285714e-05, 'epoch': 4.25}
239
- {'loss': 0.3215, 'grad_norm': 0.6010245680809021, 'learning_rate': 5.885714285714285e-05, 'epoch': 4.28}
- {'loss': 0.317, 'grad_norm': 0.7241640686988831, 'learning_rate': 5.8571428571428575e-05, 'epoch': 4.31}
- {'loss': 0.3217, 'grad_norm': 0.6956952810287476, 'learning_rate': 5.828571428571429e-05, 'epoch': 4.33}
- {'loss': 0.322, 'grad_norm': 0.8463672995567322, 'learning_rate': 5.8e-05, 'epoch': 4.36}
- {'loss': 0.3129, 'grad_norm': 0.5538536906242371, 'learning_rate': 5.771428571428572e-05, 'epoch': 4.39}
- {'loss': 0.3275, 'grad_norm': 0.8398566246032715, 'learning_rate': 5.742857142857143e-05, 'epoch': 4.42}
- {'loss': 0.3225, 'grad_norm': 0.5335714221000671, 'learning_rate': 5.714285714285714e-05, 'epoch': 4.44}
- 47%|███████████████████████████████████████████████▏                                                     | 170/360 [02:11<02:27, 1.29it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- {'eval_loss': 0.3419412672519684, 'eval_runtime': 0.1054, 'eval_samples_per_second': 436.373, 'eval_steps_per_second': 9.486, 'epoch': 4.44}
- {'loss': 0.3201, 'grad_norm': 0.893516480922699, 'learning_rate': 5.6857142857142865e-05, 'epoch': 4.47}
- {'loss': 0.3246, 'grad_norm': 0.7950851321220398, 'learning_rate': 5.6571428571428574e-05, 'epoch': 4.5}
- {'loss': 0.3138, 'grad_norm': 0.6274649500846863, 'learning_rate': 5.628571428571428e-05, 'epoch': 4.53}
- {'loss': 0.3225, 'grad_norm': 0.6270793676376343, 'learning_rate': 5.6000000000000006e-05, 'epoch': 4.56}
- {'loss': 0.3222, 'grad_norm': 0.6661616563796997, 'learning_rate': 5.571428571428572e-05, 'epoch': 4.58}
- {'loss': 0.3138, 'grad_norm': 0.6097862124443054, 'learning_rate': 5.542857142857143e-05, 'epoch': 4.61}
- {'loss': 0.3109, 'grad_norm': 0.6743194460868835, 'learning_rate': 5.514285714285714e-05, 'epoch': 4.64}
- {'loss': 0.3193, 'grad_norm': 0.6684880256652832, 'learning_rate': 5.485714285714286e-05, 'epoch': 4.67}
- {'loss': 0.3212, 'grad_norm': 0.7434603571891785, 'learning_rate': 5.457142857142857e-05, 'epoch': 4.69}
- {'loss': 0.323, 'grad_norm': 0.8257206082344055, 'learning_rate': 5.428571428571428e-05, 'epoch': 4.72}
- 50%|██████████████████████████████████████████████████                                                   | 180/360 [02:18<01:58, 1.52it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- {'eval_loss': 0.335285484790802, 'eval_runtime': 0.1056, 'eval_samples_per_second': 435.635, 'eval_steps_per_second': 9.47, 'epoch': 4.72}
- {'loss': 0.3249, 'grad_norm': 0.5611714720726013, 'learning_rate': 5.4000000000000005e-05, 'epoch': 4.75}
- {'loss': 0.3213, 'grad_norm': 0.6146838068962097, 'learning_rate': 5.3714285714285714e-05, 'epoch': 4.78}
- {'loss': 0.3239, 'grad_norm': 0.6939036846160889, 'learning_rate': 5.342857142857143e-05, 'epoch': 4.81}
- {'loss': 0.3234, 'grad_norm': 0.7213876843452454, 'learning_rate': 5.314285714285715e-05, 'epoch': 4.83}
- {'loss': 0.3186, 'grad_norm': 0.6637408137321472, 'learning_rate': 5.285714285714286e-05, 'epoch': 4.86}
- {'loss': 0.3184, 'grad_norm': 0.6469508409500122, 'learning_rate': 5.257142857142857e-05, 'epoch': 4.89}
- {'loss': 0.3171, 'grad_norm': 0.6262702941894531, 'learning_rate': 5.2285714285714294e-05, 'epoch': 4.92}
- {'loss': 0.3271, 'grad_norm': 0.6692309379577637, 'learning_rate': 5.2000000000000004e-05, 'epoch': 4.94}
- {'loss': 0.3229, 'grad_norm': 0.611004114151001, 'learning_rate': 5.171428571428571e-05, 'epoch': 4.97}
- {'loss': 0.3223, 'grad_norm': 0.9707463383674622, 'learning_rate': 5.142857142857143e-05, 'epoch': 5.0}
- 53%|████████████████████████████████████████████████████▊                                                | 190/360 [02:26<02:11, 1.29it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- {'eval_loss': 0.33622071146965027, 'eval_runtime': 0.105, 'eval_samples_per_second': 437.997, 'eval_steps_per_second': 9.522, 'epoch': 5.0}
- {'loss': 0.3106, 'grad_norm': 0.43101295828819275, 'learning_rate': 5.1142857142857145e-05, 'epoch': 5.03}
- {'loss': 0.3139, 'grad_norm': 0.7981957793235779, 'learning_rate': 5.085714285714286e-05, 'epoch': 5.06}
- {'loss': 0.3137, 'grad_norm': 0.9149967432022095, 'learning_rate': 5.057142857142857e-05, 'epoch': 5.08}
- {'loss': 0.3185, 'grad_norm': 0.8689376711845398, 'learning_rate': 5.028571428571429e-05, 'epoch': 5.11}
- {'loss': 0.3195, 'grad_norm': 0.6829914450645447, 'learning_rate': 5e-05, 'epoch': 5.14}
- {'loss': 0.3139, 'grad_norm': 0.6187098026275635, 'learning_rate': 4.971428571428572e-05, 'epoch': 5.17}
- {'loss': 0.3147, 'grad_norm': 0.8703141212463379, 'learning_rate': 4.942857142857143e-05, 'epoch': 5.19}
- {'loss': 0.3162, 'grad_norm': 0.6344360709190369, 'learning_rate': 4.9142857142857144e-05, 'epoch': 5.22}
- {'loss': 0.3228, 'grad_norm': 0.7499691843986511, 'learning_rate': 4.885714285714286e-05, 'epoch': 5.25}
- {'loss': 0.3152, 'grad_norm': 0.7664843201637268, 'learning_rate': 4.8571428571428576e-05, 'epoch': 5.28}
- 56%|███████████████████████████████████████████████████████▌                                             | 200/360 [02:34<02:03, 1.29it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- {'eval_loss': 0.3364305794239044, 'eval_runtime': 0.1056, 'eval_samples_per_second': 435.778, 'eval_steps_per_second': 9.473, 'epoch': 5.28}
- {'loss': 0.3154, 'grad_norm': 0.6158504486083984, 'learning_rate': 4.828571428571429e-05, 'epoch': 5.31}
- {'loss': 0.3206, 'grad_norm': 0.8614490032196045, 'learning_rate': 4.8e-05, 'epoch': 5.33}
- {'loss': 0.3159, 'grad_norm': 0.7699540257453918, 'learning_rate': 4.771428571428572e-05, 'epoch': 5.36}
- {'loss': 0.3186, 'grad_norm': 0.9598901867866516, 'learning_rate': 4.742857142857143e-05, 'epoch': 5.39}
- {'loss': 0.3118, 'grad_norm': 0.855253279209137, 'learning_rate': 4.714285714285714e-05, 'epoch': 5.42}
- {'loss': 0.3178, 'grad_norm': 0.6478847861289978, 'learning_rate': 4.685714285714286e-05, 'epoch': 5.44}
- {'loss': 0.3236, 'grad_norm': 0.8028067946434021, 'learning_rate': 4.6571428571428575e-05, 'epoch': 5.47}
- {'loss': 0.3147, 'grad_norm': 0.7795782089233398, 'learning_rate': 4.628571428571429e-05, 'epoch': 5.5}
- {'loss': 0.3221, 'grad_norm': 0.7845653891563416, 'learning_rate': 4.600000000000001e-05, 'epoch': 5.53}
- {'loss': 0.321, 'grad_norm': 1.1422370672225952, 'learning_rate': 4.5714285714285716e-05, 'epoch': 5.56}
- 58%|██████████████████████████████████████████████████████████▎                                          | 210/360 [02:42<01:55, 1.30it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- {'eval_loss': 0.33535492420196533, 'eval_runtime': 0.1051, 'eval_samples_per_second': 437.57, 'eval_steps_per_second': 9.512, 'epoch': 5.56}
- {'loss': 0.3183, 'grad_norm': 0.7386415600776672, 'learning_rate': 4.542857142857143e-05, 'epoch': 5.58}
- {'loss': 0.3205, 'grad_norm': 0.6756716966629028, 'learning_rate': 4.514285714285714e-05, 'epoch': 5.61}
- {'loss': 0.3195, 'grad_norm': 0.7116839289665222, 'learning_rate': 4.485714285714286e-05, 'epoch': 5.64}
- {'loss': 0.3248, 'grad_norm': 0.7919530272483826, 'learning_rate': 4.4571428571428574e-05, 'epoch': 5.67}
- {'loss': 0.3124, 'grad_norm': 0.5152342319488525, 'learning_rate': 4.428571428571428e-05, 'epoch': 5.69}
- {'loss': 0.3205, 'grad_norm': 0.9519732594490051, 'learning_rate': 4.4000000000000006e-05, 'epoch': 5.72}
- {'loss': 0.3188, 'grad_norm': 0.7759018540382385, 'learning_rate': 4.371428571428572e-05, 'epoch': 5.75}
- {'loss': 0.3195, 'grad_norm': 0.931468665599823, 'learning_rate': 4.342857142857143e-05, 'epoch': 5.78}
- {'loss': 0.3256, 'grad_norm': 1.0431036949157715, 'learning_rate': 4.314285714285715e-05, 'epoch': 5.81}
- {'loss': 0.3189, 'grad_norm': 0.6952974200248718, 'learning_rate': 4.2857142857142856e-05, 'epoch': 5.83}
- 61%|█████████████████████████████████████████████████████████████                                        | 220/360 [02:49<01:45, 1.33it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- {'eval_loss': 0.33737656474113464, 'eval_runtime': 0.1056, 'eval_samples_per_second': 435.436, 'eval_steps_per_second': 9.466, 'epoch': 5.83}
- {'loss': 0.3159, 'grad_norm': 0.880944013595581, 'learning_rate': 4.257142857142857e-05, 'epoch': 5.86}
- {'loss': 0.3177, 'grad_norm': 0.6606886982917786, 'learning_rate': 4.228571428571429e-05, 'epoch': 5.89}
- {'loss': 0.317, 'grad_norm': 0.7617785930633545, 'learning_rate': 4.2e-05, 'epoch': 5.92}
- {'loss': 0.313, 'grad_norm': 0.6381199955940247, 'learning_rate': 4.1714285714285714e-05, 'epoch': 5.94}
- {'loss': 0.3184, 'grad_norm': 0.6644593477249146, 'learning_rate': 4.1428571428571437e-05, 'epoch': 5.97}
- {'loss': 0.3103, 'grad_norm': 0.9742204546928406, 'learning_rate': 4.1142857142857146e-05, 'epoch': 6.0}
- {'loss': 0.311, 'grad_norm': 0.6401204466819763, 'learning_rate': 4.085714285714286e-05, 'epoch': 6.03}
- {'loss': 0.3155, 'grad_norm': 0.785503089427948, 'learning_rate': 4.057142857142857e-05, 'epoch': 6.06}
- {'loss': 0.3109, 'grad_norm': 0.5417785048484802, 'learning_rate': 4.028571428571429e-05, 'epoch': 6.08}
- {'loss': 0.3114, 'grad_norm': 0.6186631321907043, 'learning_rate': 4e-05, 'epoch': 6.11}
- 64%|███████████████████████████████████████████████████████████████▉                                     | 230/360 [02:57<01:40, 1.30it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- {'eval_loss': 0.32762280106544495, 'eval_runtime': 0.1058, 'eval_samples_per_second': 434.862, 'eval_steps_per_second': 9.454, 'epoch': 6.11}
- {'loss': 0.3099, 'grad_norm': 0.6042884588241577, 'learning_rate': 3.971428571428571e-05, 'epoch': 6.14}
- {'loss': 0.3104, 'grad_norm': 0.5823580026626587, 'learning_rate': 3.942857142857143e-05, 'epoch': 6.17}
- {'loss': 0.3154, 'grad_norm': 0.6572258472442627, 'learning_rate': 3.9142857142857145e-05, 'epoch': 6.19}
- {'loss': 0.3121, 'grad_norm': 0.565834641456604, 'learning_rate': 3.885714285714286e-05, 'epoch': 6.22}
- {'loss': 0.3119, 'grad_norm': 0.7184673547744751, 'learning_rate': 3.857142857142858e-05, 'epoch': 6.25}
- {'loss': 0.3158, 'grad_norm': 0.7170347571372986, 'learning_rate': 3.8285714285714286e-05, 'epoch': 6.28}
- {'loss': 0.3038, 'grad_norm': 0.6102560758590698, 'learning_rate': 3.8e-05, 'epoch': 6.31}
- {'loss': 0.3124, 'grad_norm': 0.7612823843955994, 'learning_rate': 3.771428571428572e-05, 'epoch': 6.33}
- {'loss': 0.3028, 'grad_norm': 0.6277872920036316, 'learning_rate': 3.742857142857143e-05, 'epoch': 6.36}
- {'loss': 0.3202, 'grad_norm': 0.7007192373275757, 'learning_rate': 3.7142857142857143e-05, 'epoch': 6.39}
- 67%|██████████████████████████████████████████████████████████████████▋                                  | 240/360 [03:05<01:33, 1.29it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- {'eval_loss': 0.33139991760253906, 'eval_runtime': 0.1062, 'eval_samples_per_second': 433.311, 'eval_steps_per_second': 9.42, 'epoch': 6.39}
- {'loss': 0.313, 'grad_norm': 0.6396629810333252, 'learning_rate': 3.685714285714286e-05, 'epoch': 6.42}
- {'loss': 0.3116, 'grad_norm': 0.5031012892723083, 'learning_rate': 3.6571428571428576e-05, 'epoch': 6.44}
- {'loss': 0.3172, 'grad_norm': 0.7323219776153564, 'learning_rate': 3.628571428571429e-05, 'epoch': 6.47}
- {'loss': 0.3103, 'grad_norm': 0.9094661474227905, 'learning_rate': 3.6e-05, 'epoch': 6.5}
- {'loss': 0.3056, 'grad_norm': 0.5560885667800903, 'learning_rate': 3.571428571428572e-05, 'epoch': 6.53}
- {'loss': 0.3096, 'grad_norm': 1.0145907402038574, 'learning_rate': 3.5428571428571426e-05, 'epoch': 6.56}
- {'loss': 0.3049, 'grad_norm': 0.8287002444267273, 'learning_rate': 3.514285714285714e-05, 'epoch': 6.58}
- {'loss': 0.3047, 'grad_norm': 0.5207920074462891, 'learning_rate': 3.485714285714286e-05, 'epoch': 6.61}
- {'loss': 0.3156, 'grad_norm': 1.065272331237793, 'learning_rate': 3.4571428571428574e-05, 'epoch': 6.64}
- {'loss': 0.3055, 'grad_norm': 0.6712301969528198, 'learning_rate': 3.428571428571429e-05, 'epoch': 6.67}
- 69%|█████████████████████████████████████████████████████████████████████▍                               | 250/360 [03:13<01:25, 1.29it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- {'eval_loss': 0.3387901484966278, 'eval_runtime': 0.1053, 'eval_samples_per_second': 436.76, 'eval_steps_per_second': 9.495, 'epoch': 6.67}
- {'loss': 0.3164, 'grad_norm': 0.9477025866508484, 'learning_rate': 3.4000000000000007e-05, 'epoch': 6.69}
- {'loss': 0.3263, 'grad_norm': 1.179295301437378, 'learning_rate': 3.3714285714285716e-05, 'epoch': 6.72}
- {'loss': 0.3136, 'grad_norm': 0.6969395875930786, 'learning_rate': 3.342857142857143e-05, 'epoch': 6.75}
- {'loss': 0.3143, 'grad_norm': 0.7752478718757629, 'learning_rate': 3.314285714285714e-05, 'epoch': 6.78}
- {'loss': 0.3143, 'grad_norm': 0.7640174031257629, 'learning_rate': 3.285714285714286e-05, 'epoch': 6.81}
- {'loss': 0.3093, 'grad_norm': 0.9904161691665649, 'learning_rate': 3.257142857142857e-05, 'epoch': 6.83}
- {'loss': 0.3185, 'grad_norm': 0.7707158923149109, 'learning_rate': 3.228571428571428e-05, 'epoch': 6.86}
- {'loss': 0.3122, 'grad_norm': 0.8660508394241333, 'learning_rate': 3.2000000000000005e-05, 'epoch': 6.89}
- {'loss': 0.3148, 'grad_norm': 0.7438889741897583, 'learning_rate': 3.1714285714285715e-05, 'epoch': 6.92}
- {'loss': 0.3137, 'grad_norm': 0.5746331810951233, 'learning_rate': 3.142857142857143e-05, 'epoch': 6.94}
- 72%|████████████████████████████████████████████████████████████████████████▏                            | 260/360 [03:20<01:17, 1.29it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- {'eval_loss': 0.32808226346969604, 'eval_runtime': 0.1059, 'eval_samples_per_second': 434.438, 'eval_steps_per_second': 9.444, 'epoch': 6.94}
- {'loss': 0.3125, 'grad_norm': 0.7158817052841187, 'learning_rate': 3.114285714285715e-05, 'epoch': 6.97}
- {'loss': 0.3094, 'grad_norm': 0.8010092973709106, 'learning_rate': 3.0857142857142856e-05, 'epoch': 7.0}
- {'loss': 0.3067, 'grad_norm': 0.7418866157531738, 'learning_rate': 3.057142857142857e-05, 'epoch': 7.03}
- {'loss': 0.3068, 'grad_norm': 0.6731083989143372, 'learning_rate': 3.0285714285714288e-05, 'epoch': 7.06}
- {'loss': 0.3075, 'grad_norm': 0.6405408382415771, 'learning_rate': 3e-05, 'epoch': 7.08}
- {'loss': 0.3096, 'grad_norm': 0.6403458118438721, 'learning_rate': 2.9714285714285717e-05, 'epoch': 7.11}
- {'loss': 0.3103, 'grad_norm': 0.7583682537078857, 'learning_rate': 2.9428571428571426e-05, 'epoch': 7.14}
- {'loss': 0.3064, 'grad_norm': 0.8137710094451904, 'learning_rate': 2.9142857142857146e-05, 'epoch': 7.17}
- {'loss': 0.3067, 'grad_norm': 0.7179896235466003, 'learning_rate': 2.885714285714286e-05, 'epoch': 7.19}
- {'loss': 0.3081, 'grad_norm': 0.9344987273216248, 'learning_rate': 2.857142857142857e-05, 'epoch': 7.22}
- 75%|███████████████████████████████████████████████████████████████████████████                          | 270/360 [03:28<01:10, 1.28it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- {'eval_loss': 0.33135512471199036, 'eval_runtime': 0.1054, 'eval_samples_per_second': 436.494, 'eval_steps_per_second': 9.489, 'epoch': 7.22}
- {'loss': 0.3024, 'grad_norm': 0.7846106886863708, 'learning_rate': 2.8285714285714287e-05, 'epoch': 7.25}
- {'loss': 0.3062, 'grad_norm': 0.5884716510772705, 'learning_rate': 2.8000000000000003e-05, 'epoch': 7.28}
- {'loss': 0.3017, 'grad_norm': 0.7277001738548279, 'learning_rate': 2.7714285714285716e-05, 'epoch': 7.31}
- {'loss': 0.3109, 'grad_norm': 0.6671104431152344, 'learning_rate': 2.742857142857143e-05, 'epoch': 7.33}
- {'loss': 0.3051, 'grad_norm': 0.6468051671981812, 'learning_rate': 2.714285714285714e-05, 'epoch': 7.36}
- {'loss': 0.3059, 'grad_norm': 0.7413132190704346, 'learning_rate': 2.6857142857142857e-05, 'epoch': 7.39}
- {'loss': 0.3108, 'grad_norm': 0.8842555284500122, 'learning_rate': 2.6571428571428576e-05, 'epoch': 7.42}
- {'loss': 0.31, 'grad_norm': 0.7701683044433594, 'learning_rate': 2.6285714285714286e-05, 'epoch': 7.44}
- {'loss': 0.2966, 'grad_norm': 0.6261523962020874, 'learning_rate': 2.6000000000000002e-05, 'epoch': 7.47}
- {'loss': 0.3063, 'grad_norm': 0.6180337071418762, 'learning_rate': 2.5714285714285714e-05, 'epoch': 7.5}
- 78%|█████████████████████████████████████████████████████████████████████████████▊                       | 280/360 [03:36<01:00, 1.32it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- {'eval_loss': 0.33175382018089294, 'eval_runtime': 0.1062, 'eval_samples_per_second': 433.162, 'eval_steps_per_second': 9.417, 'epoch': 7.5}
- {'loss': 0.3058, 'grad_norm': 0.910743236541748, 'learning_rate': 2.542857142857143e-05, 'epoch': 7.53}
- {'loss': 0.3092, 'grad_norm': 0.8947623372077942, 'learning_rate': 2.5142857142857147e-05, 'epoch': 7.56}
- {'loss': 0.3069, 'grad_norm': 0.8466401100158691, 'learning_rate': 2.485714285714286e-05, 'epoch': 7.58}
- {'loss': 0.316, 'grad_norm': 0.7808226943016052, 'learning_rate': 2.4571428571428572e-05, 'epoch': 7.61}
- {'loss': 0.2993, 'grad_norm': 0.6704230308532715, 'learning_rate': 2.4285714285714288e-05, 'epoch': 7.64}
- {'loss': 0.3034, 'grad_norm': 0.7090719938278198, 'learning_rate': 2.4e-05, 'epoch': 7.67}
- {'loss': 0.3077, 'grad_norm': 0.7552341818809509, 'learning_rate': 2.3714285714285717e-05, 'epoch': 7.69}
- {'loss': 0.3047, 'grad_norm': 0.7747870683670044, 'learning_rate': 2.342857142857143e-05, 'epoch': 7.72}
- {'loss': 0.3105, 'grad_norm': 1.1127567291259766, 'learning_rate': 2.3142857142857145e-05, 'epoch': 7.75}
- {'loss': 0.2997, 'grad_norm': 0.7083399891853333, 'learning_rate': 2.2857142857142858e-05, 'epoch': 7.78}
- 81%|████████████████████████████████████████████████████████████████████████████████▌                    | 290/360 [03:43<00:50, 1.40it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- {'eval_loss': 0.3296756446361542, 'eval_runtime': 0.1057, 'eval_samples_per_second': 435.219, 'eval_steps_per_second': 9.461, 'epoch': 7.78}
- {'loss': 0.3035, 'grad_norm': 0.5560160279273987, 'learning_rate': 2.257142857142857e-05, 'epoch': 7.81}
- {'loss': 0.3066, 'grad_norm': 0.7277750372886658, 'learning_rate': 2.2285714285714287e-05, 'epoch': 7.83}
- {'loss': 0.3049, 'grad_norm': 0.683189868927002, 'learning_rate': 2.2000000000000003e-05, 'epoch': 7.86}
- {'loss': 0.3098, 'grad_norm': 0.7173867225646973, 'learning_rate': 2.1714285714285715e-05, 'epoch': 7.89}
- {'loss': 0.3081, 'grad_norm': 0.698379397392273, 'learning_rate': 2.1428571428571428e-05, 'epoch': 7.92}
- {'loss': 0.3109, 'grad_norm': 0.5789396166801453, 'learning_rate': 2.1142857142857144e-05, 'epoch': 7.94}
- {'loss': 0.3149, 'grad_norm': 0.8007158637046814, 'learning_rate': 2.0857142857142857e-05, 'epoch': 7.97}
- {'loss': 0.3087, 'grad_norm': 0.8773916959762573, 'learning_rate': 2.0571428571428573e-05, 'epoch': 8.0}
- {'loss': 0.2989, 'grad_norm': 0.7212807536125183, 'learning_rate': 2.0285714285714286e-05, 'epoch': 8.03}
- {'loss': 0.3069, 'grad_norm': 0.6096076369285583, 'learning_rate': 2e-05, 'epoch': 8.06}
- 83%|███████████████████████████████████████████████████████████████████████████████████▎                 | 300/360 [03:51<00:46, 1.29it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- {'eval_loss': 0.33069342374801636, 'eval_runtime': 0.105, 'eval_samples_per_second': 438.062, 'eval_steps_per_second': 9.523, 'epoch': 8.06}
- {'loss': 0.295, 'grad_norm': 0.5173125267028809, 'learning_rate': 1.9714285714285714e-05, 'epoch': 8.08}
- {'loss': 0.3014, 'grad_norm': 0.6913369297981262, 'learning_rate': 1.942857142857143e-05, 'epoch': 8.11}
- {'loss': 0.3037, 'grad_norm': 0.7195921540260315, 'learning_rate': 1.9142857142857143e-05, 'epoch': 8.14}
- {'loss': 0.3031, 'grad_norm': 0.6366473436355591, 'learning_rate': 1.885714285714286e-05, 'epoch': 8.17}
- {'loss': 0.2957, 'grad_norm': 0.5457173585891724, 'learning_rate': 1.8571428571428572e-05, 'epoch': 8.19}
- {'loss': 0.2997, 'grad_norm': 0.6149912476539612, 'learning_rate': 1.8285714285714288e-05, 'epoch': 8.22}
- {'loss': 0.3048, 'grad_norm': 0.5352884531021118, 'learning_rate': 1.8e-05, 'epoch': 8.25}
- {'loss': 0.308, 'grad_norm': 0.6278409361839294, 'learning_rate': 1.7714285714285713e-05, 'epoch': 8.28}
- {'loss': 0.3005, 'grad_norm': 0.5881698727607727, 'learning_rate': 1.742857142857143e-05, 'epoch': 8.31}
- {'loss': 0.302, 'grad_norm': 0.6125136613845825, 'learning_rate': 1.7142857142857145e-05, 'epoch': 8.33}
- 86%|██████████████████████████████████████████████████████████████████████████████████████               | 310/360 [03:59<00:38, 1.29it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- {'eval_loss': 0.3336547017097473, 'eval_runtime': 0.1055, 'eval_samples_per_second': 436.2, 'eval_steps_per_second': 9.483, 'epoch': 8.33}
- {'loss': 0.3012, 'grad_norm': 0.6722866892814636, 'learning_rate': 1.6857142857142858e-05, 'epoch': 8.36}
- {'loss': 0.297, 'grad_norm': 0.6827422976493835, 'learning_rate': 1.657142857142857e-05, 'epoch': 8.39}
- {'loss': 0.2977, 'grad_norm': 0.7612675428390503, 'learning_rate': 1.6285714285714287e-05, 'epoch': 8.42}
- {'loss': 0.3009, 'grad_norm': 0.5952971577644348, 'learning_rate': 1.6000000000000003e-05, 'epoch': 8.44}
- {'loss': 0.3043, 'grad_norm': 0.8323265314102173, 'learning_rate': 1.5714285714285715e-05, 'epoch': 8.47}
- {'loss': 0.2956, 'grad_norm': 0.8321357369422913, 'learning_rate': 1.5428571428571428e-05, 'epoch': 8.5}
- {'loss': 0.3029, 'grad_norm': 0.6457182168960571, 'learning_rate': 1.5142857142857144e-05, 'epoch': 8.53}
- {'loss': 0.2972, 'grad_norm': 0.5753086805343628, 'learning_rate': 1.4857142857142858e-05, 'epoch': 8.56}
- {'loss': 0.2966, 'grad_norm': 0.8767444491386414, 'learning_rate': 1.4571428571428573e-05, 'epoch': 8.58}
- {'loss': 0.3049, 'grad_norm': 0.929669201374054, 'learning_rate': 1.4285714285714285e-05, 'epoch': 8.61}
- 89%|████████████████████████████████████████████████████████████████████████████████████████▉            | 320/360 [04:07<00:31, 1.29it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- {'eval_loss': 0.3303754925727844, 'eval_runtime': 0.1057, 'eval_samples_per_second': 435.106, 'eval_steps_per_second': 9.459, 'epoch': 8.61}
- {'loss': 0.2989, 'grad_norm': 0.7576697468757629, 'learning_rate': 1.4000000000000001e-05, 'epoch': 8.64}
- {'loss': 0.3051, 'grad_norm': 0.6402246952056885, 'learning_rate': 1.3714285714285716e-05, 'epoch': 8.67}
- {'loss': 0.2974, 'grad_norm': 0.5665248036384583, 'learning_rate': 1.3428571428571429e-05, 'epoch': 8.69}
- {'loss': 0.3061, 'grad_norm': 0.9747456312179565, 'learning_rate': 1.3142857142857143e-05, 'epoch': 8.72}
- {'loss': 0.3012, 'grad_norm': 0.657123863697052, 'learning_rate': 1.2857142857142857e-05, 'epoch': 8.75}
- {'loss': 0.2995, 'grad_norm': 0.7186892032623291, 'learning_rate': 1.2571428571428573e-05, 'epoch': 8.78}
- {'loss': 0.3026, 'grad_norm': 0.6889364123344421, 'learning_rate': 1.2285714285714286e-05, 'epoch': 8.81}
- {'loss': 0.3009, 'grad_norm': 0.6299145817756653, 'learning_rate': 1.2e-05, 'epoch': 8.83}
- {'loss': 0.3002, 'grad_norm': 0.7328559756278992, 'learning_rate': 1.1714285714285715e-05, 'epoch': 8.86}
- {'loss': 0.2953, 'grad_norm': 0.6111913919448853, 'learning_rate': 1.1428571428571429e-05, 'epoch': 8.89}
- 92%|███████████████████████████████████████████████████████████████████████████████████████████▋         | 330/360 [04:14<00:22, 1.31it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- {'eval_loss': 0.3253832757472992, 'eval_runtime': 0.106, 'eval_samples_per_second': 434.082, 'eval_steps_per_second': 9.437, 'epoch': 8.89}
- {'loss': 0.3023, 'grad_norm': 0.6739629507064819, 'learning_rate': 1.1142857142857143e-05, 'epoch': 8.92}
- {'loss': 0.2978, 'grad_norm': 0.6967675685882568, 'learning_rate': 1.0857142857142858e-05, 'epoch': 8.94}
- {'loss': 0.3043, 'grad_norm': 0.702989935874939, 'learning_rate': 1.0571428571428572e-05, 'epoch': 8.97}
- {'loss': 0.3034, 'grad_norm': 1.156525731086731, 'learning_rate': 1.0285714285714286e-05, 'epoch': 9.0}
- {'loss': 0.2989, 'grad_norm': 0.564460277557373, 'learning_rate': 1e-05, 'epoch': 9.03}
- {'loss': 0.2955, 'grad_norm': 0.5435044169425964, 'learning_rate': 9.714285714285715e-06, 'epoch': 9.06}
- {'loss': 0.2986, 'grad_norm': 0.511762797832489, 'learning_rate': 9.42857142857143e-06, 'epoch': 9.08}
- {'loss': 0.2932, 'grad_norm': 0.6208844780921936, 'learning_rate': 9.142857142857144e-06, 'epoch': 9.11}
- {'loss': 0.2932, 'grad_norm': 0.5209355354309082, 'learning_rate': 8.857142857142857e-06, 'epoch': 9.14}
- {'loss': 0.302, 'grad_norm': 0.5852081775665283, 'learning_rate': 8.571428571428573e-06, 'epoch': 9.17}
- 94%|██████████████████████████████████████████████████████████████████████████████████████████████▍      | 340/360 [04:22<00:15, 1.28it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- {'eval_loss': 0.32703322172164917, 'eval_runtime': 0.1055, 'eval_samples_per_second': 435.944, 'eval_steps_per_second': 9.477, 'epoch': 9.17}
- {'loss': 0.2973, 'grad_norm': 0.6613155603408813, 'learning_rate': 8.285714285714285e-06, 'epoch': 9.19}
- {'loss': 0.296, 'grad_norm': 0.6458805203437805, 'learning_rate': 8.000000000000001e-06, 'epoch': 9.22}
- {'loss': 0.2998, 'grad_norm': 0.5602886080741882, 'learning_rate': 7.714285714285714e-06, 'epoch': 9.25}
- {'loss': 0.2898, 'grad_norm': 0.5723817348480225, 'learning_rate': 7.428571428571429e-06, 'epoch': 9.28}
- {'loss': 0.2935, 'grad_norm': 0.6257355213165283, 'learning_rate': 7.142857142857143e-06, 'epoch': 9.31}
- {'loss': 0.2909, 'grad_norm': 0.6624913811683655, 'learning_rate': 6.857142857142858e-06, 'epoch': 9.33}
- {'loss': 0.3001, 'grad_norm': 0.5716632604598999, 'learning_rate': 6.5714285714285714e-06, 'epoch': 9.36}
- {'loss': 0.2964, 'grad_norm': 0.6996496319770813, 'learning_rate': 6.285714285714287e-06, 'epoch': 9.39}
- {'loss': 0.2927, 'grad_norm': 0.7235862612724304, 'learning_rate': 6e-06, 'epoch': 9.42}
- {'loss': 0.2956, 'grad_norm': 0.6455687284469604, 'learning_rate': 5.7142857142857145e-06, 'epoch': 9.44}
- 97%|█████████████████████████████████████████████████████████████████████████████████████████████████▏   | 350/360 [04:30<00:07, 1.28it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- {'eval_loss': 0.32588478922843933, 'eval_runtime': 0.1057, 'eval_samples_per_second': 435.067, 'eval_steps_per_second': 9.458, 'epoch': 9.44}
- {'loss': 0.2959, 'grad_norm': 0.5984405279159546, 'learning_rate': 5.428571428571429e-06, 'epoch': 9.47}
- {'loss': 0.2936, 'grad_norm': 0.6240008473396301, 'learning_rate': 5.142857142857143e-06, 'epoch': 9.5}
- {'loss': 0.2987, 'grad_norm': 0.6871291399002075, 'learning_rate': 4.857142857142858e-06, 'epoch': 9.53}
- {'loss': 0.2941, 'grad_norm': 0.6369628310203552, 'learning_rate': 4.571428571428572e-06, 'epoch': 9.56}
- {'loss': 0.2959, 'grad_norm': 0.6651211977005005, 'learning_rate': 4.285714285714286e-06, 'epoch': 9.58}
- {'loss': 0.2991, 'grad_norm': 0.7005758285522461, 'learning_rate': 4.000000000000001e-06, 'epoch': 9.61}
- {'loss': 0.2936, 'grad_norm': 0.5685088634490967, 'learning_rate': 3.7142857142857146e-06, 'epoch': 9.64}
- {'loss': 0.2947, 'grad_norm': 0.6322896480560303, 'learning_rate': 3.428571428571429e-06, 'epoch': 9.67}
- {'loss': 0.2951, 'grad_norm': 0.6149244904518127, 'learning_rate': 3.1428571428571433e-06, 'epoch': 9.69}
- {'loss': 0.2975, 'grad_norm': 0.685043215751648, 'learning_rate': 2.8571428571428573e-06, 'epoch': 9.72}
- 100%|████████████████████████████████████████████████████████████████████████████████████████████████████| 360/360 [04:37<00:00, 1.54it/s]Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- Trainer.tokenizer is now deprecated. You should use Trainer.processing_class instead.
- {'eval_loss': 0.3277616798877716, 'eval_runtime': 0.1059, 'eval_samples_per_second': 434.21, 'eval_steps_per_second': 9.439, 'epoch': 9.72}
- {'loss': 0.2951, 'grad_norm': 0.8039355874061584, 'learning_rate': 2.5714285714285716e-06, 'epoch': 9.75}
- {'loss': 0.2928, 'grad_norm': 0.6388571262359619, 'learning_rate': 2.285714285714286e-06, 'epoch': 9.78}
- {'loss': 0.2934, 'grad_norm': 0.5934285521507263, 'learning_rate': 2.0000000000000003e-06, 'epoch': 9.81}
- {'loss': 0.2952, 'grad_norm': 0.5320731401443481, 'learning_rate': 1.7142857142857145e-06, 'epoch': 9.83}
- {'loss': 0.2932, 'grad_norm': 0.6137614846229553, 'learning_rate': 1.4285714285714286e-06, 'epoch': 9.86}
- {'loss': 0.2944, 'grad_norm': 0.8172494769096375, 'learning_rate': 1.142857142857143e-06, 'epoch': 9.89}
- {'loss': 0.2917, 'grad_norm': 0.6931514739990234, 'learning_rate': 8.571428571428572e-07, 'epoch': 9.92}
- {'loss': 0.2952, 'grad_norm': 0.8408763408660889, 'learning_rate': 5.714285714285715e-07, 'epoch': 9.94}
- {'loss': 0.2948, 'grad_norm': 0.6687312126159668, 'learning_rate': 2.8571428571428575e-07, 'epoch': 9.97}
- {'loss': 0.2944, 'grad_norm': 0.8545575737953186, 'learning_rate': 0.0, 'epoch': 10.0}
- 100%|████████████████████████████████████████████████████████████████████████████████████████████████████| 360/360 [04:39<00:00, 1.29it/s]
-
- {'eval_loss': 0.3285914361476898, 'eval_runtime': 0.1058, 'eval_samples_per_second': 434.729, 'eval_steps_per_second': 9.451, 'epoch': 10.0}
- {'train_runtime': 279.4567, 'train_samples_per_second': 162.422, 'train_steps_per_second': 1.288, 'train_loss': 0.35597237489289707, 'epoch': 10.0}
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/wandb/run-20260327_162056-nhmfpc2t/files/wandb-metadata.json DELETED
@@ -1,55 +0,0 @@
- {
- "os": "Linux-6.8.0-101-generic-x86_64-with-glibc2.35",
- "python": "3.9.20",
- "startedAt": "2026-03-27T08:20:56.140561Z",
- "args": [
- "--config-path",
- "config/main_table",
- "--config-name",
- "llmdp_llm_adroit-hand-hammer-v1.yaml"
- ],
- "program": "/tmp2/chyang/workspace/LLM-BC/./train.py",
- "codePath": "train.py",
- "git": {
- "remote": "https://github.com/CHYang25/LLM-BC.git",
- "commit": "2e85824cdb13f64f31923d9430e890dadc78d394"
- },
- "email": "chris920325@gmail.com",
- "root": "/tmp2/chyang/workspace/LLM-BC/data/outputs/2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1",
- "host": "A6000-2",
- "username": "chyang",
- "executable": "/home/chyang/miniconda3/envs/llm-bc/bin/python3",
- "codePathLocal": "train.py",
- "cpu_count": 12,
- "cpu_count_logical": 24,
- "gpu": "NVIDIA RTX A6000",
- "gpu_count": 2,
- "disk": {
- "/": {
- "total": "1967317549056",
- "used": "733081182208"
- }
- },
- "memory": {
- "total": "134538502144"
- },
- "cpu": {
- "count": 12,
- "countLogical": 24
- },
- "gpu_nvidia": [
- {
- "name": "NVIDIA RTX A6000",
- "memoryTotal": "51527024640",
- "cudaCores": 10752,
- "architecture": "Ampere"
- },
- {
- "name": "NVIDIA RTX A6000",
- "memoryTotal": "51527024640",
- "cudaCores": 10752,
- "architecture": "Ampere"
- }
- ],
- "cudaVersion": "12.6"
- }
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/wandb/run-20260327_162056-nhmfpc2t/files/wandb-summary.json DELETED
@@ -1 +0,0 @@
- {"eval/loss":0.3285914361476898,"train_loss":0.35597237489289707,"eval/steps_per_second":9.451,"train/global_step":360,"train/grad_norm":0.8545575737953186,"train/learning_rate":0,"_timestamp":1.774599943812631e+09,"_step":396,"eval/runtime":0.1058,"train_steps_per_second":1.288,"eval/samples_per_second":434.729,"_runtime":287.672274788,"train_samples_per_second":162.422,"train/loss":0.2944,"_wandb":{"runtime":287},"train_runtime":279.4567,"train/epoch":10,"total_flos":3.83135455600128e+15}
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/wandb/run-20260327_162056-nhmfpc2t/logs/debug-core.log DELETED
@@ -1,16 +0,0 @@
- {"time":"2026-03-27T16:20:55.459583429+08:00","level":"INFO","msg":"started logging, with flags","port-filename":"/tmp/tmp0ig_2z3y/port-2703097.txt","pid":2703097,"debug":false,"disable-analytics":false}
- {"time":"2026-03-27T16:20:55.4596054+08:00","level":"INFO","msg":"FeatureState","shutdownOnParentExitEnabled":false}
- {"time":"2026-03-27T16:20:55.460720879+08:00","level":"INFO","msg":"server is running","addr":{"IP":"127.0.0.1","Port":44997,"Zone":""}}
- {"time":"2026-03-27T16:20:55.460793184+08:00","level":"INFO","msg":"Will exit if parent process dies.","ppid":2703097}
- {"time":"2026-03-27T16:20:55.657444973+08:00","level":"INFO","msg":"connection: ManageConnectionData: new connection created","id":"127.0.0.1:60248"}
- {"time":"2026-03-27T16:20:56.143936837+08:00","level":"INFO","msg":"handleInformInit: received","streamId":"nhmfpc2t","id":"127.0.0.1:60248"}
- {"time":"2026-03-27T16:20:56.25130017+08:00","level":"INFO","msg":"handleInformInit: stream started","streamId":"nhmfpc2t","id":"127.0.0.1:60248"}
- {"time":"2026-03-27T16:25:50.90098896+08:00","level":"INFO","msg":"handleInformFinish: finish message received","streamId":"nhmfpc2t","id":"127.0.0.1:60248"}
- {"time":"2026-03-27T16:25:50.901237575+08:00","level":"INFO","msg":"handleInformFinish: stream closed","streamId":"nhmfpc2t","id":"127.0.0.1:60248"}
- {"time":"2026-03-27T16:25:51.590522527+08:00","level":"INFO","msg":"handleInformTeardown: server teardown initiated","id":"127.0.0.1:60248"}
- {"time":"2026-03-27T16:25:51.59056499+08:00","level":"INFO","msg":"handleInformTeardown: server shutdown complete","id":"127.0.0.1:60248"}
- {"time":"2026-03-27T16:25:51.590573489+08:00","level":"INFO","msg":"server is shutting down"}
- {"time":"2026-03-27T16:25:51.590600946+08:00","level":"INFO","msg":"connection: Close: initiating connection closure","id":"127.0.0.1:60248"}
- {"time":"2026-03-27T16:25:51.590777149+08:00","level":"INFO","msg":"connection: Close: connection successfully closed","id":"127.0.0.1:60248"}
- {"time":"2026-03-27T16:25:51.590829802+08:00","level":"INFO","msg":"connection: ManageConnectionData: connection closed","id":"127.0.0.1:60248"}
- {"time":"2026-03-27T16:25:51.590853327+08:00","level":"INFO","msg":"server is closed"}
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/wandb/run-20260327_162056-nhmfpc2t/logs/debug-internal.log DELETED
@@ -1,17 +0,0 @@
- {"time":"2026-03-27T16:20:56.144221699+08:00","level":"INFO","msg":"using version","core version":"0.18.6"}
- {"time":"2026-03-27T16:20:56.144248456+08:00","level":"INFO","msg":"created symlink","path":"/tmp2/chyang/workspace/LLM-BC/data/outputs/2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/wandb/run-20260327_162056-nhmfpc2t/logs/debug-core.log"}
- {"time":"2026-03-27T16:20:56.25122394+08:00","level":"INFO","msg":"created new stream","id":"nhmfpc2t"}
- {"time":"2026-03-27T16:20:56.25128922+08:00","level":"INFO","msg":"stream: started","id":"nhmfpc2t"}
- {"time":"2026-03-27T16:20:56.251326647+08:00","level":"INFO","msg":"sender: started","stream_id":"nhmfpc2t"}
- {"time":"2026-03-27T16:20:56.251319685+08:00","level":"INFO","msg":"writer: Do: started","stream_id":{"value":"nhmfpc2t"}}
- {"time":"2026-03-27T16:20:56.251303072+08:00","level":"INFO","msg":"handler: started","stream_id":{"value":"nhmfpc2t"}}
- {"time":"2026-03-27T16:20:57.417356613+08:00","level":"INFO","msg":"Starting system monitor"}
- {"time":"2026-03-27T16:25:43.814415705+08:00","level":"INFO","msg":"Stopping system monitor"}
- {"time":"2026-03-27T16:25:43.815067495+08:00","level":"INFO","msg":"Stopped system monitor"}
- {"time":"2026-03-27T16:25:44.815119947+08:00","level":"INFO","msg":"handler: operation stats","stats":{"operations":[{"desc":"uploading wandb-summary.json","runtime_seconds":0.136938035,"progress":"495B/495B"},{"desc":"saving job artifact","runtime_seconds":0.037243753}],"total_operations":2}}
- {"time":"2026-03-27T16:25:49.240469481+08:00","level":"INFO","msg":"fileTransfer: Close: file transfer manager closed"}
- {"time":"2026-03-27T16:25:50.901076411+08:00","level":"INFO","msg":"stream: closing","id":"nhmfpc2t"}
- {"time":"2026-03-27T16:25:50.901107424+08:00","level":"INFO","msg":"handler: closed","stream_id":{"value":"nhmfpc2t"}}
- {"time":"2026-03-27T16:25:50.901135119+08:00","level":"INFO","msg":"writer: Close: closed","stream_id":{"value":"nhmfpc2t"}}
- {"time":"2026-03-27T16:25:50.901152717+08:00","level":"INFO","msg":"sender: closed","stream_id":"nhmfpc2t"}
- {"time":"2026-03-27T16:25:50.90122351+08:00","level":"INFO","msg":"stream: closed","id":"nhmfpc2t"}
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/wandb/run-20260327_162056-nhmfpc2t/logs/debug.log DELETED
@@ -1,35 +0,0 @@
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_setup.py:_flush():79] Current SDK version is 0.18.6
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_setup.py:_flush():79] Configure stats pid to 2703097
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_setup.py:_flush():79] Loading settings from /home/chyang/.config/wandb/settings
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_setup.py:_flush():79] Loading settings from /tmp2/chyang/workspace/LLM-BC/wandb/settings
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_setup.py:_flush():79] Loading settings from environment variables: {}
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_setup.py:_flush():79] Applying setup settings: {'mode': 'online', '_disable_service': None}
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_setup.py:_flush():79] Inferring run settings from compute environment: {'program_relpath': 'train.py', 'program_abspath': '/tmp2/chyang/workspace/LLM-BC/train.py', 'program': '/tmp2/chyang/workspace/LLM-BC/./train.py'}
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_setup.py:_flush():79] Applying login settings: {}
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_init.py:_log_setup():533] Logging user logs to /tmp2/chyang/workspace/LLM-BC/data/outputs/2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/wandb/run-20260327_162056-nhmfpc2t/logs/debug.log
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_init.py:_log_setup():534] Logging internal logs to /tmp2/chyang/workspace/LLM-BC/data/outputs/2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/wandb/run-20260327_162056-nhmfpc2t/logs/debug-internal.log
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_init.py:init():619] calling init triggers
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_init.py:init():626] wandb.init called with sweep_config: {}
- config: {'name': 'train_llm_lowdim', '_target_': 'llmbc.workspace.train_llm_workspace.TrainLLMWorkspace', 'obs_dim': 46, 'action_dim': 26, 'horizon': 1, 'n_obs_steps': 1, 'n_action_steps': 1, 'task_name': 'adroit-hand-hammer-v1', 'exp_name': 'train llm', 'model_name': 'HuggingFaceTB/SmolLM2-135M-Instruct', 'use_quantization': False, 'lora_config': {'r': 32, 'lora_alpha': 64, 'lora_dropout': 0.05, 'bias': 'none', 'task_type': 'CAUSAL_LM'}, 'dataset': {'test_data_ratio': 0.01}, 'debug': False, 'training': {'seed': 42, 'per_device_train_batch_size': 128, 'per_device_eval_batch_size': 128, 'gradient_accumulation_steps': 1, 'optim': 'paged_adamw_32bit', 'num_train_epochs': 10, 'eval_strategy': 'steps', 'logging_steps': 1, 'warmup_steps': 10, 'logging_strategy': 'steps', 'learning_rate': 0.0001, 'fp16': False, 'bf16': True, 'tf32': True, 'group_by_length': True, 'report_to': 'wandb', 'save_steps': 5000, 'eval_steps': 10, 'use_joint_mlp_projector': True, 'joint_obs_action_mlp_lr': 5e-05}, 'trainer': {'obs_dim': 46, 'action_dim': 26, 'use_joint_mlp_projector': True, 'max_seq_length': 100, 'dataset_text_field': 'text', 'packing': False}, 'logging': {'project': 'llm_module_finetuning', 'resume': True, 'mode': 'online', 'name': '2026.03.27-16.20.52_train_llm_lowdim_adroit-hand-hammer-v1', 'tags': ['train_llm_lowdim', 'adroit-hand-hammer-v1', 'train llm'], 'id': None, 'group': None}, 'multi_run': {'run_dir': 'data/outputs/2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1', 'wandb_name_base': '2026.03.27-16.20.52_train_llm_lowdim_adroit-hand-hammer-v1'}, 'task': {'name': 'adroit-hand-hammer-v1', 'obs_dim': 46, 'action_dim': 26, 'env_runner': {'_target_': 'llmbc.env_runner.adroit_lowdim_runner.AdroitHandLowdimRunner', 'env_name': 'llf-adroit-adroit-hand-hammer-v1', 'n_train': 10, 'n_test': 50, 'n_envs': 10, 'max_steps': 150, 'n_obs_steps': 1, 'n_action_steps': 1, 'instruction_type': 'b', 'feedback_type': ['hp', 'hn', 'fp'], 'visual': False, 'discount': 0.99}, 'dataset': {'_target_': 'llmbc.dataset.adroit_lowdim_dataset.AdroitHandLowdimDataset', 'data_path': 'datasets/adroit-hand-hammer-v1-general.pt', 'data_path2': 'datasets/adroit-hand-hammer-v1.pt', 'horizon': 1, 'pad_before': 0, 'pad_after': 0, 'obs_eef_target': True, 'use_manual_normalizer': False, 'val_ratio': 0.05, 'dummy_normalizer': False}, 'instructor': {'_target_': 'llmbc.translator.instructor.adroit_instructor.adroit_hand_hammer_v1_instructor.AdroitHandHammerV1Instructor'}}, 'llm': {'name': 'HuggingFaceTB/SmolLM2-135M-Instruct', 'model_name': 'SmolLM2-135M-Instruct', 'config_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaConfig', 'causal_lm_target': 'llmbc.model.llm.llama_lowdim_model.LowdimLlamaForCausalLM', 'use_quantization': False, 'use_joint_mlp_projector': True, 'llm_mode': 'mlp-finetuned', 'finetune_mode': 'orig', 'checkpoint': 'data/outputs/2026.03.27/14.38.20_train_mlp_projector_adroit-hand-hammer-v1/checkpoints/latest.ckpt', 'max_length': 100, 'lora_config': {'r': 32, 'lora_alpha': 64, 'lora_dropout': 0.05, 'bias': 'none', 'task_type': 'CAUSAL_LM'}, 'prompter': {'_target_': 'llmbc.translator.prompter.smollm2_prompter.SmolLM2Prompter', 'use_joint_mlp_projector': True}, 'hydra': {'job': {'override_dirname': 'HuggingFaceTB/SmolLM2-135M-Instruct'}, 'run': {'dir': 'data/outputs/2026.03.27/16.20.52_HuggingFaceTB/SmolLM2-135M-Instruct'}}}}
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_init.py:init():669] starting backend
- 2026-03-27 16:20:56,139 INFO MainThread:2703097 [wandb_init.py:init():673] sending inform_init request
- 2026-03-27 16:20:56,140 INFO MainThread:2703097 [backend.py:_multiprocessing_setup():104] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
- 2026-03-27 16:20:56,140 INFO MainThread:2703097 [wandb_init.py:init():686] backend started and connected
- 2026-03-27 16:20:56,144 INFO MainThread:2703097 [wandb_init.py:init():781] updated telemetry
- 2026-03-27 16:20:56,170 INFO MainThread:2703097 [wandb_init.py:init():814] communicating run to backend with 90.0 second timeout
- 2026-03-27 16:20:57,414 INFO MainThread:2703097 [wandb_init.py:init():867] starting run threads in backend
- 2026-03-27 16:20:57,521 INFO MainThread:2703097 [wandb_run.py:_console_start():2451] atexit reg
- 2026-03-27 16:20:57,522 INFO MainThread:2703097 [wandb_run.py:_redirect():2299] redirect: wrap_raw
- 2026-03-27 16:20:57,522 INFO MainThread:2703097 [wandb_run.py:_redirect():2364] Wrapping output streams.
- 2026-03-27 16:20:57,522 INFO MainThread:2703097 [wandb_run.py:_redirect():2389] Redirects installed.
- 2026-03-27 16:20:57,524 INFO MainThread:2703097 [wandb_init.py:init():911] run started, returning control to user process
- 2026-03-27 16:21:04,359 INFO MainThread:2703097 [wandb_run.py:_config_callback():1389] config_cb None None {'obs_dim': 46, 'action_dim': 26, 'use_joint_mlp_projector': True, 'vocab_size': 49152, 'max_position_embeddings': 8192, 'hidden_size': 576, 'intermediate_size': 1536, 'num_hidden_layers': 30, 'num_attention_heads': 9, 'num_key_value_heads': 3, 'hidden_act': 'silu', 'initializer_range': 0.041666666666666664, 'rms_norm_eps': 1e-05, 'pretraining_tp': 1, 'use_cache': False, 'rope_theta': 100000, 'rope_scaling': None, 'attention_bias': False, 'attention_dropout': 0.0, 'mlp_bias': False, 'head_dim': 64, 'return_dict': True, 'output_hidden_states': False, 'output_attentions': False, 'torchscript': False, 'torch_dtype': 'bfloat16', 'use_bfloat16': False, 'tf_legacy_loss': False, 'pruned_heads': {}, 'tie_word_embeddings': True, 'chunk_size_feed_forward': 0, 'is_encoder_decoder': False, 'is_decoder': False, 'cross_attention_hidden_size': None, 'add_cross_attention': False, 'tie_encoder_decoder': False, 'max_length': 20, 'min_length': 0, 'do_sample': False, 'early_stopping': False, 'num_beams': 1, 'num_beam_groups': 1, 'diversity_penalty': 0.0, 'temperature': 1.0, 'top_k': 50, 'top_p': 1.0, 'typical_p': 1.0, 'repetition_penalty': 1.0, 'length_penalty': 1.0, 'no_repeat_ngram_size': 0, 'encoder_no_repeat_ngram_size': 0, 'bad_words_ids': None, 'num_return_sequences': 1, 'output_scores': False, 'return_dict_in_generate': False, 'forced_bos_token_id': None, 'forced_eos_token_id': None, 'remove_invalid_values': False, 'exponential_decay_length_penalty': None, 'suppress_tokens': None, 'begin_suppress_tokens': None, 'architectures': ['LlamaForCausalLM'], 'finetuning_task': None, 'id2label': {0: 'LABEL_0', 1: 'LABEL_1'}, 'label2id': {'LABEL_0': 0, 'LABEL_1': 1}, 'tokenizer_class': None, 'prefix': None, 'bos_token_id': 1, 'pad_token_id': 2, 'eos_token_id': 2, 'sep_token_id': None, 'decoder_start_token_id': None, 'task_specific_params': None, 'problem_type': None, '_name_or_path': 'HuggingFaceTB/SmolLM2-135M-Instruct', '_attn_implementation_autoset': True, 'transformers_version': '4.47.1', 'is_llama_config': True, 'model_type': 'llama_lowdim', 'rope_interleaved': False, 'transformers.js_config': {'kv_cache_dtype': {'q4f16': 'float16', 'fp16': 'float16'}}, 'output_dir': '/tmp2/chyang/workspace/LLM-BC/data/outputs/2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1', 'overwrite_output_dir': False, 'do_train': False, 'do_eval': True, 'do_predict': False, 'eval_strategy': 'steps', 'prediction_loss_only': False, 'per_device_train_batch_size': 128, 'per_device_eval_batch_size': 128, 'per_gpu_train_batch_size': None, 'per_gpu_eval_batch_size': None, 'gradient_accumulation_steps': 1, 'eval_accumulation_steps': None, 'eval_delay': 0, 'torch_empty_cache_steps': None, 'learning_rate': 0.0001, 'weight_decay': 0.0, 'adam_beta1': 0.9, 'adam_beta2': 0.999, 'adam_epsilon': 1e-08, 'max_grad_norm': 1.0, 'num_train_epochs': 10, 'max_steps': -1, 'lr_scheduler_type': 'linear', 'lr_scheduler_kwargs': {}, 'warmup_ratio': 0.0, 'warmup_steps': 10, 'log_level': 'passive', 'log_level_replica': 'warning', 'log_on_each_node': True, 'logging_dir': '/tmp2/chyang/workspace/LLM-BC/data/outputs/2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1/runs/Mar27_16-21-01_A6000-2', 'logging_strategy': 'steps', 'logging_first_step': False, 'logging_steps': 1, 'logging_nan_inf_filter': True, 'save_strategy': 'steps', 'save_steps': 5000, 'save_total_limit': None, 'save_safetensors': True, 'save_on_each_node': False, 'save_only_model': False, 'restore_callback_states_from_checkpoint': False, 'no_cuda': False, 'use_cpu': False, 'use_mps_device': False, 'seed': 42, 'data_seed': None, 'jit_mode_eval': False, 'use_ipex': False, 'bf16': True, 'fp16': False, 'fp16_opt_level': 'O1', 'half_precision_backend': 'auto', 'bf16_full_eval': False, 'fp16_full_eval': False, 'tf32': True, 'local_rank': 0, 'ddp_backend': None, 'tpu_num_cores': None, 'tpu_metrics_debug': False, 'debug': [], 'dataloader_drop_last': False, 'eval_steps': 10, 'dataloader_num_workers': 0, 'dataloader_prefetch_factor': None, 'past_index': -1, 'run_name': '/tmp2/chyang/workspace/LLM-BC/data/outputs/2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/HuggingFaceTB/SmolLM2-135M-Instruct-finetuned-adroit-hand-hammer-v1', 'disable_tqdm': False, 'remove_unused_columns': True, 'label_names': None, 'load_best_model_at_end': False, 'metric_for_best_model': None, 'greater_is_better': None, 'ignore_data_skip': False, 'fsdp': [], 'fsdp_min_num_params': 0, 'fsdp_config': {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}, 'fsdp_transformer_layer_cls_to_wrap': None, 'accelerator_config': {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}, 'deepspeed': None, 'label_smoothing_factor': 0.0, 'optim': 'paged_adamw_32bit', 'optim_args': None, 'adafactor': False, 'group_by_length': True, 'length_column_name': 'length', 'report_to': ['wandb'], 'ddp_find_unused_parameters': None, 'ddp_bucket_cap_mb': None, 'ddp_broadcast_buffers': None, 'dataloader_pin_memory': True, 'dataloader_persistent_workers': False, 'skip_memory_metrics': True, 'use_legacy_prediction_loop': False, 'push_to_hub': False, 'resume_from_checkpoint': None, 'hub_model_id': None, 'hub_strategy': 'every_save', 'hub_token': '<HUB_TOKEN>', 'hub_private_repo': None, 'hub_always_push': False, 'gradient_checkpointing': False, 'gradient_checkpointing_kwargs': None, 'include_inputs_for_metrics': False, 'include_for_metrics': [], 'eval_do_concat_batches': True, 'fp16_backend': 'auto', 'evaluation_strategy': None, 'push_to_hub_model_id': None, 'push_to_hub_organization': None, 'push_to_hub_token': '<PUSH_TO_HUB_TOKEN>', 'mp_parameters': '', 'auto_find_batch_size': False, 'full_determinism': False, 'torchdynamo': None, 'ray_scope': 'last', 'ddp_timeout': 1800, 'torch_compile': False, 'torch_compile_backend': None, 'torch_compile_mode': None, 'dispatch_batches': None, 'split_batches': None, 'include_tokens_per_second': False, 'include_num_input_tokens_seen': False, 'neftune_noise_alpha': None, 'optim_target_modules': None, 'batch_eval_metrics': False, 'eval_on_start': False, 'use_liger_kernel': False, 'eval_use_gather_object': False, 'average_tokens_across_devices': False, 'dataset_text_field': 'text', 'packing': False, 'max_seq_length': 100, 'dataset_num_proc': None, 'dataset_batch_size': 1000, 'model_init_kwargs': None, 'dataset_kwargs': {}, 'eval_packing': None, 'num_of_sequences': 1024, 'chars_per_token': '<CHARS_PER_TOKEN>', 'use_liger': False, 'joint_obs_action_mlp_lr': 5e-05, 'obs_mlp_lr': None, 'action_mlp_lr': None}
- 2026-03-27 16:21:04,361 INFO MainThread:2703097 [wandb_config.py:__setitem__():154] config set model/num_parameters = 134889408 - <bound method Run._config_callback of <wandb.sdk.wandb_run.Run object at 0x7e6758745670>>
- 2026-03-27 16:21:04,361 INFO MainThread:2703097 [wandb_run.py:_config_callback():1389] config_cb model/num_parameters 134889408 None
- 2026-03-27 16:25:43,813 INFO MainThread:2703097 [wandb_run.py:_finish():2146] finishing run chyang25-national-taiwan-university/llm_module_finetuning/nhmfpc2t
- 2026-03-27 16:25:43,813 INFO MainThread:2703097 [wandb_run.py:_atexit_cleanup():2414] got exitcode: 0
- 2026-03-27 16:25:43,813 INFO MainThread:2703097 [wandb_run.py:_restore():2396] restore
- 2026-03-27 16:25:43,814 INFO MainThread:2703097 [wandb_run.py:_restore():2402] restore done
- 2026-03-27 16:25:50,896 INFO MainThread:2703097 [wandb_run.py:_footer_history_summary_info():3963] rendering history
- 2026-03-27 16:25:50,896 INFO MainThread:2703097 [wandb_run.py:_footer_history_summary_info():3995] rendering summary
- 2026-03-27 16:25:50,900 INFO MainThread:2703097 [wandb_run.py:_footer_sync_info():3922] logging synced files
2026.03.27/16.20.52_train_llm_lowdim_adroit-hand-hammer-v1/wandb/run-20260327_162056-nhmfpc2t/run-nhmfpc2t.wandb DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:19281f2406b878cd01e76af72f5dd82f13698026367ccc8dd2b76d39269c8f31
- size 858565