aadityap committed on
Commit e025dda · verified · 1 Parent(s): da258c6

Model save
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,61 @@
+ ---
+ library_name: peft
+ license: mit
+ base_model: deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
+ tags:
+ - trl
+ - sft
+ - generated_from_trainer
+ model-index:
+ - name: model_3k_forcing_022225_step1_1500buffer
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # model_3k_forcing_022225_step1_1500buffer
+
+ This model is a fine-tuned version of [deepseek-ai/DeepSeek-R1-Distill-Qwen-32B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Qwen-32B) on an unknown dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 8e-05
+ - train_batch_size: 1
+ - eval_batch_size: 1
+ - seed: 100
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - total_train_batch_size: 8
+ - total_eval_batch_size: 8
+ - optimizer: adamw_torch with betas=(0.9, 0.999), epsilon=1e-08, and no additional optimizer arguments
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 3
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - PEFT 0.13.2
+ - Transformers 4.47.0.dev0
+ - Pytorch 2.4.0+cu121
+ - Datasets 3.1.0
+ - Tokenizers 0.20.3
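
The hyperparameters in the model card above, together with the `trl`/`sft` tags and the LoRA settings in `adapter_config.json` below, point to a TRL `SFTTrainer` + PEFT run. The actual training script is not part of this commit, so the following is only a minimal sketch of how such a run could be expressed; the output directory and the tiny stand-in dataset are placeholders, not details taken from this repo.

```python
# Minimal sketch only -- the real training script is not included in this commit.
# Hyperparameters are copied from the model card; the dataset is a dummy stand-in.
from datasets import Dataset
from peft import LoraConfig
from trl import SFTConfig, SFTTrainer

train_dataset = Dataset.from_dict({"text": ["placeholder example"]})  # not the real data

lora_config = LoraConfig(
    r=128,
    lora_alpha=32,
    lora_dropout=0.0,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
    task_type="CAUSAL_LM",
)

args = SFTConfig(
    output_dir="model_3k_forcing_022225_step1_1500buffer",  # assumed output dir
    learning_rate=8e-5,
    per_device_train_batch_size=1,   # x 8 GPUs -> total train batch size 8
    per_device_eval_batch_size=1,
    seed=100,
    num_train_epochs=3,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    optim="adamw_torch",
)

trainer = SFTTrainer(
    model="deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
    args=args,
    train_dataset=train_dataset,
    peft_config=lora_config,  # trains LoRA adapters instead of full weights
)
trainer.train()
```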
adapter_config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_dropout": 0.0,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 128,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "down_proj",
+     "v_proj",
+     "gate_proj",
+     "k_proj",
+     "o_proj",
+     "q_proj",
+     "up_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
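
For reference, a usage sketch of how a LoRA adapter with this configuration is typically attached to the base model with PEFT for inference. The adapter repo id is assumed from the commit author and model name and may differ.

```python
# Usage sketch (assumption): load the base model, then attach the LoRA adapter
# stored in adapter_model.safetensors according to adapter_config.json.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B"
adapter_id = "aadityap/model_3k_forcing_022225_step1_1500buffer"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(
    base_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)
model = PeftModel.from_pretrained(base_model, adapter_id)  # loads the r=128 LoRA weights

inputs = tokenizer("Hello, world!", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```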
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ed758a33700d7b883c35053b2488346dcf47d43362fe1552f970f211e92b91a
+ size 2147607752
special_tokens_map.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "bos_token": {
+     "content": "<|begin▁of▁sentence|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "<|end▁of▁sentence|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|end▁of▁sentence|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa00f924d6bcc8400fc15a30c4cae46269afbd4f0d04c848c9126a0c7431c9ee
+ size 11422878
tokenizer_config.json ADDED
@@ -0,0 +1,194 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "add_prefix_space": null,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|end▁of▁sentence|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|User|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151645": {
+       "content": "<|Assistant|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151646": {
+       "content": "<|begin▁of▁sentence|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151647": {
+       "content": "<|EOT|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151648": {
+       "content": "<think>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151649": {
+       "content": "</think>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151650": {
+       "content": "<|quad_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151651": {
+       "content": "<|quad_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151652": {
+       "content": "<|vision_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151653": {
+       "content": "<|vision_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151654": {
+       "content": "<|vision_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151655": {
+       "content": "<|image_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151656": {
+       "content": "<|video_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151657": {
+       "content": "<tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151658": {
+       "content": "</tool_call>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151659": {
+       "content": "<|fim_prefix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151660": {
+       "content": "<|fim_middle|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151661": {
+       "content": "<|fim_suffix|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151662": {
+       "content": "<|fim_pad|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151663": {
+       "content": "<|repo_name|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     },
+     "151664": {
+       "content": "<|file_sep|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": false
+     }
+   },
+   "bos_token": "<|begin▁of▁sentence|>",
+   "chat_template": "{% if not add_generation_prompt is defined %}{% set add_generation_prompt = false %}{% endif %}{% set ns = namespace(is_first=false, is_tool=false, is_output_first=true, system_prompt='') %}{%- for message in messages %}{%- if message['role'] == 'system' %}{% set ns.system_prompt = message['content'] %}{%- endif %}{%- endfor %}{{bos_token}}{{ns.system_prompt}}{%- for message in messages %}{%- if message['role'] == 'user' %}{%- set ns.is_tool = false -%}{{'<|User|>' + message['content']}}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is none %}{%- set ns.is_tool = false -%}{%- for tool in message['tool_calls']%}{%- if not ns.is_first %}{{'<|Assistant|><|tool▁calls▁begin|><|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{%- set ns.is_first = true -%}{%- else %}{{'\\n' + '<|tool▁call▁begin|>' + tool['type'] + '<|tool▁sep|>' + tool['function']['name'] + '\\n' + '```json' + '\\n' + tool['function']['arguments'] + '\\n' + '```' + '<|tool▁call▁end|>'}}{{'<|tool▁calls▁end|><|end▁of▁sentence|>'}}{%- endif %}{%- endfor %}{%- endif %}{%- if message['role'] == 'assistant' and message['content'] is not none %}{%- if ns.is_tool %}{{'<|tool▁outputs▁end|>' + message['content'] + '<|end▁of▁sentence|>'}}{%- set ns.is_tool = false -%}{%- else %}{% set content = message['content'] %}{% if '</think>' in content %}{% set content = content.split('</think>')[-1] %}{% endif %}{{'<|Assistant|>' + content + '<|end▁of▁sentence|>'}}{%- endif %}{%- endif %}{%- if message['role'] == 'tool' %}{%- set ns.is_tool = true -%}{%- if ns.is_output_first %}{{'<|tool▁outputs▁begin|><|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- set ns.is_output_first = false %}{%- else %}{{'\\n<|tool▁output▁begin|>' + message['content'] + '<|tool▁output▁end|>'}}{%- endif %}{%- endif %}{%- endfor -%}{% if ns.is_tool %}{{'<|tool▁outputs▁end|>'}}{% endif %}{% if add_generation_prompt and not ns.is_tool %}{{'<|Assistant|><think>\\n'}}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|end▁of▁sentence|>",
+   "legacy": true,
+   "model_max_length": 16384,
+   "pad_token": "<|end▁of▁sentence|>",
+   "sp_model_kwargs": {},
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": null,
+   "use_default_system_prompt": false
+ }
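
The `chat_template` above is the DeepSeek-R1 conversation format: it strips `<think>…</think>` reasoning from earlier assistant turns and, when `add_generation_prompt=True`, ends the prompt with `<|Assistant|><think>\n` so the model opens a fresh reasoning block. A small sketch of how it renders; the expected string in the comment is an assumption from reading the template, not a captured output.

```python
from transformers import AutoTokenizer

# Base-model tokenizer; it ships the same chat template as this file.
tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-R1-Distill-Qwen-32B")

messages = [
    {"role": "user", "content": "What is 2 + 2?"},
    {"role": "assistant", "content": "<think>Simple arithmetic.</think>2 + 2 = 4."},
    {"role": "user", "content": "And times 3?"},
]
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
# Expected shape (assumption): the earlier <think>...</think> span is dropped and the
# prompt ends with the assistant/think prefix, roughly:
# <|begin▁of▁sentence|><|User|>What is 2 + 2?<|Assistant|>2 + 2 = 4.<|end▁of▁sentence|><|User|>And times 3?<|Assistant|><think>\n
```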
trainer_state.json ADDED
@@ -0,0 +1,2142 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 3.0,
5
+ "eval_steps": 500,
6
+ "global_step": 300,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.01,
13
+ "grad_norm": 0.00975973888931374,
14
+ "learning_rate": 2.666666666666667e-06,
15
+ "loss": 0.1789,
16
+ "step": 1
17
+ },
18
+ {
19
+ "epoch": 0.02,
20
+ "grad_norm": 0.009969429066062586,
21
+ "learning_rate": 5.333333333333334e-06,
22
+ "loss": 0.1742,
23
+ "step": 2
24
+ },
25
+ {
26
+ "epoch": 0.03,
27
+ "grad_norm": 0.009285111583115442,
28
+ "learning_rate": 8.000000000000001e-06,
29
+ "loss": 0.178,
30
+ "step": 3
31
+ },
32
+ {
33
+ "epoch": 0.04,
34
+ "grad_norm": 0.009718578509161862,
35
+ "learning_rate": 1.0666666666666667e-05,
36
+ "loss": 0.1834,
37
+ "step": 4
38
+ },
39
+ {
40
+ "epoch": 0.05,
41
+ "grad_norm": 0.008978663147300486,
42
+ "learning_rate": 1.3333333333333333e-05,
43
+ "loss": 0.1862,
44
+ "step": 5
45
+ },
46
+ {
47
+ "epoch": 0.06,
48
+ "grad_norm": 0.009330523259646785,
49
+ "learning_rate": 1.6000000000000003e-05,
50
+ "loss": 0.1826,
51
+ "step": 6
52
+ },
53
+ {
54
+ "epoch": 0.07,
55
+ "grad_norm": 0.009481936990823576,
56
+ "learning_rate": 1.866666666666667e-05,
57
+ "loss": 0.1842,
58
+ "step": 7
59
+ },
60
+ {
61
+ "epoch": 0.08,
62
+ "grad_norm": 0.0096143452400136,
63
+ "learning_rate": 2.1333333333333335e-05,
64
+ "loss": 0.1773,
65
+ "step": 8
66
+ },
67
+ {
68
+ "epoch": 0.09,
69
+ "grad_norm": 0.010401070952623532,
70
+ "learning_rate": 2.4e-05,
71
+ "loss": 0.1825,
72
+ "step": 9
73
+ },
74
+ {
75
+ "epoch": 0.1,
76
+ "grad_norm": 0.010633511876520278,
77
+ "learning_rate": 2.6666666666666667e-05,
78
+ "loss": 0.1858,
79
+ "step": 10
80
+ },
81
+ {
82
+ "epoch": 0.11,
83
+ "grad_norm": 0.012753339317992721,
84
+ "learning_rate": 2.9333333333333333e-05,
85
+ "loss": 0.1787,
86
+ "step": 11
87
+ },
88
+ {
89
+ "epoch": 0.12,
90
+ "grad_norm": 0.012737915674957006,
91
+ "learning_rate": 3.2000000000000005e-05,
92
+ "loss": 0.1735,
93
+ "step": 12
94
+ },
95
+ {
96
+ "epoch": 0.13,
97
+ "grad_norm": 0.014237237743851433,
98
+ "learning_rate": 3.466666666666667e-05,
99
+ "loss": 0.1737,
100
+ "step": 13
101
+ },
102
+ {
103
+ "epoch": 0.14,
104
+ "grad_norm": 0.013481977038419132,
105
+ "learning_rate": 3.733333333333334e-05,
106
+ "loss": 0.1771,
107
+ "step": 14
108
+ },
109
+ {
110
+ "epoch": 0.15,
111
+ "grad_norm": 0.014211210454419258,
112
+ "learning_rate": 4e-05,
113
+ "loss": 0.18,
114
+ "step": 15
115
+ },
116
+ {
117
+ "epoch": 0.16,
118
+ "grad_norm": 0.01406671564349945,
119
+ "learning_rate": 4.266666666666667e-05,
120
+ "loss": 0.164,
121
+ "step": 16
122
+ },
123
+ {
124
+ "epoch": 0.17,
125
+ "grad_norm": 0.013382254076509769,
126
+ "learning_rate": 4.5333333333333335e-05,
127
+ "loss": 0.1627,
128
+ "step": 17
129
+ },
130
+ {
131
+ "epoch": 0.18,
132
+ "grad_norm": 0.012462250592034499,
133
+ "learning_rate": 4.8e-05,
134
+ "loss": 0.1599,
135
+ "step": 18
136
+ },
137
+ {
138
+ "epoch": 0.19,
139
+ "grad_norm": 0.011623909141186887,
140
+ "learning_rate": 5.066666666666667e-05,
141
+ "loss": 0.1561,
142
+ "step": 19
143
+ },
144
+ {
145
+ "epoch": 0.2,
146
+ "grad_norm": 0.010567055994868338,
147
+ "learning_rate": 5.333333333333333e-05,
148
+ "loss": 0.1608,
149
+ "step": 20
150
+ },
151
+ {
152
+ "epoch": 0.21,
153
+ "grad_norm": 0.011578898312408135,
154
+ "learning_rate": 5.6e-05,
155
+ "loss": 0.149,
156
+ "step": 21
157
+ },
158
+ {
159
+ "epoch": 0.22,
160
+ "grad_norm": 0.01263043598230939,
161
+ "learning_rate": 5.8666666666666665e-05,
162
+ "loss": 0.1502,
163
+ "step": 22
164
+ },
165
+ {
166
+ "epoch": 0.23,
167
+ "grad_norm": 0.014619527333110586,
168
+ "learning_rate": 6.133333333333334e-05,
169
+ "loss": 0.147,
170
+ "step": 23
171
+ },
172
+ {
173
+ "epoch": 0.24,
174
+ "grad_norm": 0.015562732552308571,
175
+ "learning_rate": 6.400000000000001e-05,
176
+ "loss": 0.1477,
177
+ "step": 24
178
+ },
179
+ {
180
+ "epoch": 0.25,
181
+ "grad_norm": 0.01988850057450678,
182
+ "learning_rate": 6.666666666666667e-05,
183
+ "loss": 0.1315,
184
+ "step": 25
185
+ },
186
+ {
187
+ "epoch": 0.26,
188
+ "grad_norm": 0.01592720334780788,
189
+ "learning_rate": 6.933333333333334e-05,
190
+ "loss": 0.1381,
191
+ "step": 26
192
+ },
193
+ {
194
+ "epoch": 0.27,
195
+ "grad_norm": 0.01213399023718665,
196
+ "learning_rate": 7.2e-05,
197
+ "loss": 0.1284,
198
+ "step": 27
199
+ },
200
+ {
201
+ "epoch": 0.28,
202
+ "grad_norm": 0.010747416386187284,
203
+ "learning_rate": 7.466666666666667e-05,
204
+ "loss": 0.1266,
205
+ "step": 28
206
+ },
207
+ {
208
+ "epoch": 0.29,
209
+ "grad_norm": 0.012311136597983292,
210
+ "learning_rate": 7.733333333333333e-05,
211
+ "loss": 0.1224,
212
+ "step": 29
213
+ },
214
+ {
215
+ "epoch": 0.3,
216
+ "grad_norm": 0.010735578550951882,
217
+ "learning_rate": 8e-05,
218
+ "loss": 0.1204,
219
+ "step": 30
220
+ },
221
+ {
222
+ "epoch": 0.31,
223
+ "grad_norm": 0.011081512456623081,
224
+ "learning_rate": 7.99972923201505e-05,
225
+ "loss": 0.1219,
226
+ "step": 31
227
+ },
228
+ {
229
+ "epoch": 0.32,
230
+ "grad_norm": 0.010968193335996256,
231
+ "learning_rate": 7.998916964717848e-05,
232
+ "loss": 0.1034,
233
+ "step": 32
234
+ },
235
+ {
236
+ "epoch": 0.33,
237
+ "grad_norm": 0.011606564844140702,
238
+ "learning_rate": 7.997563308076384e-05,
239
+ "loss": 0.1023,
240
+ "step": 33
241
+ },
242
+ {
243
+ "epoch": 0.34,
244
+ "grad_norm": 0.010825973020931393,
245
+ "learning_rate": 7.9956684453541e-05,
246
+ "loss": 0.1048,
247
+ "step": 34
248
+ },
249
+ {
250
+ "epoch": 0.35,
251
+ "grad_norm": 0.010361943126689283,
252
+ "learning_rate": 7.993232633085074e-05,
253
+ "loss": 0.1091,
254
+ "step": 35
255
+ },
256
+ {
257
+ "epoch": 0.36,
258
+ "grad_norm": 0.010438830032426665,
259
+ "learning_rate": 7.990256201039297e-05,
260
+ "loss": 0.1057,
261
+ "step": 36
262
+ },
263
+ {
264
+ "epoch": 0.37,
265
+ "grad_norm": 0.009998624784288302,
266
+ "learning_rate": 7.986739552178024e-05,
267
+ "loss": 0.1013,
268
+ "step": 37
269
+ },
270
+ {
271
+ "epoch": 0.38,
272
+ "grad_norm": 0.009377781647301162,
273
+ "learning_rate": 7.982683162599218e-05,
274
+ "loss": 0.0912,
275
+ "step": 38
276
+ },
277
+ {
278
+ "epoch": 0.39,
279
+ "grad_norm": 0.009251354252244855,
280
+ "learning_rate": 7.978087581473094e-05,
281
+ "loss": 0.0892,
282
+ "step": 39
283
+ },
284
+ {
285
+ "epoch": 0.4,
286
+ "grad_norm": 0.009398969645095491,
287
+ "learning_rate": 7.972953430967773e-05,
288
+ "loss": 0.0889,
289
+ "step": 40
290
+ },
291
+ {
292
+ "epoch": 0.41,
293
+ "grad_norm": 0.008917003010329064,
294
+ "learning_rate": 7.967281406165047e-05,
295
+ "loss": 0.0836,
296
+ "step": 41
297
+ },
298
+ {
299
+ "epoch": 0.42,
300
+ "grad_norm": 0.008369371218976972,
301
+ "learning_rate": 7.961072274966282e-05,
302
+ "loss": 0.0881,
303
+ "step": 42
304
+ },
305
+ {
306
+ "epoch": 0.43,
307
+ "grad_norm": 0.008645208177411155,
308
+ "learning_rate": 7.954326877988446e-05,
309
+ "loss": 0.0784,
310
+ "step": 43
311
+ },
312
+ {
313
+ "epoch": 0.44,
314
+ "grad_norm": 0.010050615184569308,
315
+ "learning_rate": 7.947046128450319e-05,
316
+ "loss": 0.0836,
317
+ "step": 44
318
+ },
319
+ {
320
+ "epoch": 0.45,
321
+ "grad_norm": 0.009987779690915787,
322
+ "learning_rate": 7.939231012048833e-05,
323
+ "loss": 0.0935,
324
+ "step": 45
325
+ },
326
+ {
327
+ "epoch": 0.46,
328
+ "grad_norm": 0.009980160564781465,
329
+ "learning_rate": 7.930882586825653e-05,
330
+ "loss": 0.0763,
331
+ "step": 46
332
+ },
333
+ {
334
+ "epoch": 0.47,
335
+ "grad_norm": 0.009655054144220078,
336
+ "learning_rate": 7.922001983023918e-05,
337
+ "loss": 0.0848,
338
+ "step": 47
339
+ },
340
+ {
341
+ "epoch": 0.48,
342
+ "grad_norm": 0.011213143459668503,
343
+ "learning_rate": 7.912590402935223e-05,
344
+ "loss": 0.0731,
345
+ "step": 48
346
+ },
347
+ {
348
+ "epoch": 0.49,
349
+ "grad_norm": 0.009739459573280887,
350
+ "learning_rate": 7.902649120736858e-05,
351
+ "loss": 0.0757,
352
+ "step": 49
353
+ },
354
+ {
355
+ "epoch": 0.5,
356
+ "grad_norm": 0.011246440365487474,
357
+ "learning_rate": 7.892179482319297e-05,
358
+ "loss": 0.0719,
359
+ "step": 50
360
+ },
361
+ {
362
+ "epoch": 0.51,
363
+ "grad_norm": 0.010656568377013767,
364
+ "learning_rate": 7.881182905103986e-05,
365
+ "loss": 0.0665,
366
+ "step": 51
367
+ },
368
+ {
369
+ "epoch": 0.52,
370
+ "grad_norm": 0.010705942104817151,
371
+ "learning_rate": 7.869660877851456e-05,
372
+ "loss": 0.068,
373
+ "step": 52
374
+ },
375
+ {
376
+ "epoch": 0.53,
377
+ "grad_norm": 0.013046752315013016,
378
+ "learning_rate": 7.857614960459756e-05,
379
+ "loss": 0.0705,
380
+ "step": 53
381
+ },
382
+ {
383
+ "epoch": 0.54,
384
+ "grad_norm": 0.01138775668359661,
385
+ "learning_rate": 7.845046783753276e-05,
386
+ "loss": 0.0607,
387
+ "step": 54
388
+ },
389
+ {
390
+ "epoch": 0.55,
391
+ "grad_norm": 0.01483943168489928,
392
+ "learning_rate": 7.831958049261956e-05,
393
+ "loss": 0.071,
394
+ "step": 55
395
+ },
396
+ {
397
+ "epoch": 0.56,
398
+ "grad_norm": 0.013889643142845373,
399
+ "learning_rate": 7.818350528990929e-05,
400
+ "loss": 0.064,
401
+ "step": 56
402
+ },
403
+ {
404
+ "epoch": 0.57,
405
+ "grad_norm": 0.014913835125302678,
406
+ "learning_rate": 7.804226065180615e-05,
407
+ "loss": 0.0627,
408
+ "step": 57
409
+ },
410
+ {
411
+ "epoch": 0.58,
412
+ "grad_norm": 0.013783358495679316,
413
+ "learning_rate": 7.789586570057317e-05,
414
+ "loss": 0.0628,
415
+ "step": 58
416
+ },
417
+ {
418
+ "epoch": 0.59,
419
+ "grad_norm": 0.012881559012047194,
420
+ "learning_rate": 7.774434025574335e-05,
421
+ "loss": 0.0583,
422
+ "step": 59
423
+ },
424
+ {
425
+ "epoch": 0.6,
426
+ "grad_norm": 0.009203767304861093,
427
+ "learning_rate": 7.758770483143634e-05,
428
+ "loss": 0.0618,
429
+ "step": 60
430
+ },
431
+ {
432
+ "epoch": 0.61,
433
+ "grad_norm": 0.007757149142200283,
434
+ "learning_rate": 7.742598063358127e-05,
435
+ "loss": 0.0611,
436
+ "step": 61
437
+ },
438
+ {
439
+ "epoch": 0.62,
440
+ "grad_norm": 0.011419138129620103,
441
+ "learning_rate": 7.72591895570457e-05,
442
+ "loss": 0.0592,
443
+ "step": 62
444
+ },
445
+ {
446
+ "epoch": 0.63,
447
+ "grad_norm": 0.009871217127379156,
448
+ "learning_rate": 7.70873541826715e-05,
449
+ "loss": 0.0648,
450
+ "step": 63
451
+ },
452
+ {
453
+ "epoch": 0.64,
454
+ "grad_norm": 0.013741722887867229,
455
+ "learning_rate": 7.69104977742177e-05,
456
+ "loss": 0.0551,
457
+ "step": 64
458
+ },
459
+ {
460
+ "epoch": 0.65,
461
+ "grad_norm": 0.0075585956152919576,
462
+ "learning_rate": 7.672864427521097e-05,
463
+ "loss": 0.0614,
464
+ "step": 65
465
+ },
466
+ {
467
+ "epoch": 0.66,
468
+ "grad_norm": 0.01180566302338812,
469
+ "learning_rate": 7.654181830570404e-05,
470
+ "loss": 0.0587,
471
+ "step": 66
472
+ },
473
+ {
474
+ "epoch": 0.67,
475
+ "grad_norm": 0.014184242311912936,
476
+ "learning_rate": 7.635004515894258e-05,
477
+ "loss": 0.0543,
478
+ "step": 67
479
+ },
480
+ {
481
+ "epoch": 0.68,
482
+ "grad_norm": 0.012798772576387809,
483
+ "learning_rate": 7.615335079794083e-05,
484
+ "loss": 0.0672,
485
+ "step": 68
486
+ },
487
+ {
488
+ "epoch": 0.69,
489
+ "grad_norm": 0.017304492348577144,
490
+ "learning_rate": 7.595176185196669e-05,
491
+ "loss": 0.0532,
492
+ "step": 69
493
+ },
494
+ {
495
+ "epoch": 0.7,
496
+ "grad_norm": 0.010226411164996575,
497
+ "learning_rate": 7.57453056129365e-05,
498
+ "loss": 0.0617,
499
+ "step": 70
500
+ },
501
+ {
502
+ "epoch": 0.71,
503
+ "grad_norm": 0.03880893650808376,
504
+ "learning_rate": 7.553401003172018e-05,
505
+ "loss": 0.0657,
506
+ "step": 71
507
+ },
508
+ {
509
+ "epoch": 0.72,
510
+ "grad_norm": 0.009123262384208996,
511
+ "learning_rate": 7.531790371435709e-05,
512
+ "loss": 0.0585,
513
+ "step": 72
514
+ },
515
+ {
516
+ "epoch": 0.73,
517
+ "grad_norm": 0.0310525568086072,
518
+ "learning_rate": 7.509701591818328e-05,
519
+ "loss": 0.0632,
520
+ "step": 73
521
+ },
522
+ {
523
+ "epoch": 0.74,
524
+ "grad_norm": 0.010585387066448279,
525
+ "learning_rate": 7.48713765478705e-05,
526
+ "loss": 0.0607,
527
+ "step": 74
528
+ },
529
+ {
530
+ "epoch": 0.75,
531
+ "grad_norm": 0.009989777131862311,
532
+ "learning_rate": 7.464101615137756e-05,
533
+ "loss": 0.0585,
534
+ "step": 75
535
+ },
536
+ {
537
+ "epoch": 0.76,
538
+ "grad_norm": 0.014604925899626916,
539
+ "learning_rate": 7.440596591581463e-05,
540
+ "loss": 0.061,
541
+ "step": 76
542
+ },
543
+ {
544
+ "epoch": 0.77,
545
+ "grad_norm": 0.009727832966346583,
546
+ "learning_rate": 7.416625766322104e-05,
547
+ "loss": 0.06,
548
+ "step": 77
549
+ },
550
+ {
551
+ "epoch": 0.78,
552
+ "grad_norm": 0.008571172568577179,
553
+ "learning_rate": 7.392192384625704e-05,
554
+ "loss": 0.0584,
555
+ "step": 78
556
+ },
557
+ {
558
+ "epoch": 0.79,
559
+ "grad_norm": 0.011006866583398434,
560
+ "learning_rate": 7.36729975438103e-05,
561
+ "loss": 0.0673,
562
+ "step": 79
563
+ },
564
+ {
565
+ "epoch": 0.8,
566
+ "grad_norm": 0.008511317955868526,
567
+ "learning_rate": 7.341951245651747e-05,
568
+ "loss": 0.0637,
569
+ "step": 80
570
+ },
571
+ {
572
+ "epoch": 0.81,
573
+ "grad_norm": 0.009222703977899139,
574
+ "learning_rate": 7.316150290220167e-05,
575
+ "loss": 0.0576,
576
+ "step": 81
577
+ },
578
+ {
579
+ "epoch": 0.82,
580
+ "grad_norm": 0.007069722171739626,
581
+ "learning_rate": 7.28990038112265e-05,
582
+ "loss": 0.0595,
583
+ "step": 82
584
+ },
585
+ {
586
+ "epoch": 0.83,
587
+ "grad_norm": 0.008636882699174933,
588
+ "learning_rate": 7.26320507217669e-05,
589
+ "loss": 0.0538,
590
+ "step": 83
591
+ },
592
+ {
593
+ "epoch": 0.84,
594
+ "grad_norm": 0.006728336243965822,
595
+ "learning_rate": 7.236067977499791e-05,
596
+ "loss": 0.0765,
597
+ "step": 84
598
+ },
599
+ {
600
+ "epoch": 0.85,
601
+ "grad_norm": 0.008128573468751916,
602
+ "learning_rate": 7.208492771020176e-05,
603
+ "loss": 0.0592,
604
+ "step": 85
605
+ },
606
+ {
607
+ "epoch": 0.86,
608
+ "grad_norm": 0.006526872173502966,
609
+ "learning_rate": 7.180483185979392e-05,
610
+ "loss": 0.0633,
611
+ "step": 86
612
+ },
613
+ {
614
+ "epoch": 0.87,
615
+ "grad_norm": 0.006782232793070975,
616
+ "learning_rate": 7.152043014426888e-05,
617
+ "loss": 0.0529,
618
+ "step": 87
619
+ },
620
+ {
621
+ "epoch": 0.88,
622
+ "grad_norm": 0.00755302555939971,
623
+ "learning_rate": 7.123176106706638e-05,
624
+ "loss": 0.0592,
625
+ "step": 88
626
+ },
627
+ {
628
+ "epoch": 0.89,
629
+ "grad_norm": 0.006827724421058533,
630
+ "learning_rate": 7.093886370935857e-05,
631
+ "loss": 0.053,
632
+ "step": 89
633
+ },
634
+ {
635
+ "epoch": 0.9,
636
+ "grad_norm": 0.006668204199113761,
637
+ "learning_rate": 7.064177772475912e-05,
638
+ "loss": 0.0553,
639
+ "step": 90
640
+ },
641
+ {
642
+ "epoch": 0.91,
643
+ "grad_norm": 0.006661786544837106,
644
+ "learning_rate": 7.034054333395477e-05,
645
+ "loss": 0.0529,
646
+ "step": 91
647
+ },
648
+ {
649
+ "epoch": 0.92,
650
+ "grad_norm": 0.006619846137095159,
651
+ "learning_rate": 7.003520131925997e-05,
652
+ "loss": 0.0535,
653
+ "step": 92
654
+ },
655
+ {
656
+ "epoch": 0.93,
657
+ "grad_norm": 0.0070011563265403465,
658
+ "learning_rate": 6.972579301909577e-05,
659
+ "loss": 0.0634,
660
+ "step": 93
661
+ },
662
+ {
663
+ "epoch": 0.94,
664
+ "grad_norm": 0.0061134490312462,
665
+ "learning_rate": 6.941236032239316e-05,
666
+ "loss": 0.0615,
667
+ "step": 94
668
+ },
669
+ {
670
+ "epoch": 0.95,
671
+ "grad_norm": 0.006537916555469948,
672
+ "learning_rate": 6.909494566292195e-05,
673
+ "loss": 0.0603,
674
+ "step": 95
675
+ },
676
+ {
677
+ "epoch": 0.96,
678
+ "grad_norm": 0.007553056346823884,
679
+ "learning_rate": 6.877359201354606e-05,
680
+ "loss": 0.0539,
681
+ "step": 96
682
+ },
683
+ {
684
+ "epoch": 0.97,
685
+ "grad_norm": 0.006093993013213828,
686
+ "learning_rate": 6.844834288040548e-05,
687
+ "loss": 0.0554,
688
+ "step": 97
689
+ },
690
+ {
691
+ "epoch": 0.98,
692
+ "grad_norm": 0.006310803981190944,
693
+ "learning_rate": 6.811924229702648e-05,
694
+ "loss": 0.0571,
695
+ "step": 98
696
+ },
697
+ {
698
+ "epoch": 0.99,
699
+ "grad_norm": 0.007570131129374107,
700
+ "learning_rate": 6.778633481835989e-05,
701
+ "loss": 0.0557,
702
+ "step": 99
703
+ },
704
+ {
705
+ "epoch": 1.0,
706
+ "grad_norm": 0.007487562294217722,
707
+ "learning_rate": 6.744966551474936e-05,
708
+ "loss": 0.0583,
709
+ "step": 100
710
+ },
711
+ {
712
+ "epoch": 1.01,
713
+ "grad_norm": 0.006359837564784104,
714
+ "learning_rate": 6.71092799658293e-05,
715
+ "loss": 0.0628,
716
+ "step": 101
717
+ },
718
+ {
719
+ "epoch": 1.02,
720
+ "grad_norm": 0.006138388003369084,
721
+ "learning_rate": 6.676522425435433e-05,
722
+ "loss": 0.0547,
723
+ "step": 102
724
+ },
725
+ {
726
+ "epoch": 1.03,
727
+ "grad_norm": 0.006703101906314633,
728
+ "learning_rate": 6.641754495996031e-05,
729
+ "loss": 0.0461,
730
+ "step": 103
731
+ },
732
+ {
733
+ "epoch": 1.04,
734
+ "grad_norm": 0.0062725720861589685,
735
+ "learning_rate": 6.606628915285822e-05,
736
+ "loss": 0.0527,
737
+ "step": 104
738
+ },
739
+ {
740
+ "epoch": 1.05,
741
+ "grad_norm": 0.0063865423382953946,
742
+ "learning_rate": 6.571150438746157e-05,
743
+ "loss": 0.0506,
744
+ "step": 105
745
+ },
746
+ {
747
+ "epoch": 1.06,
748
+ "grad_norm": 0.005990465904804074,
749
+ "learning_rate": 6.53532386959484e-05,
750
+ "loss": 0.0537,
751
+ "step": 106
752
+ },
753
+ {
754
+ "epoch": 1.07,
755
+ "grad_norm": 0.009220140374447247,
756
+ "learning_rate": 6.499154058175841e-05,
757
+ "loss": 0.0522,
758
+ "step": 107
759
+ },
760
+ {
761
+ "epoch": 1.08,
762
+ "grad_norm": 0.00540998255469808,
763
+ "learning_rate": 6.462645901302633e-05,
764
+ "loss": 0.0516,
765
+ "step": 108
766
+ },
767
+ {
768
+ "epoch": 1.09,
769
+ "grad_norm": 0.006217449253873114,
770
+ "learning_rate": 6.425804341595255e-05,
771
+ "loss": 0.0518,
772
+ "step": 109
773
+ },
774
+ {
775
+ "epoch": 1.1,
776
+ "grad_norm": 0.006471438407438729,
777
+ "learning_rate": 6.388634366811146e-05,
778
+ "loss": 0.0496,
779
+ "step": 110
780
+ },
781
+ {
782
+ "epoch": 1.11,
783
+ "grad_norm": 0.007161952818479216,
784
+ "learning_rate": 6.351141009169893e-05,
785
+ "loss": 0.0582,
786
+ "step": 111
787
+ },
788
+ {
789
+ "epoch": 1.12,
790
+ "grad_norm": 0.0068160508026970205,
791
+ "learning_rate": 6.313329344671946e-05,
792
+ "loss": 0.0546,
793
+ "step": 112
794
+ },
795
+ {
796
+ "epoch": 1.13,
797
+ "grad_norm": 0.00590373606923259,
798
+ "learning_rate": 6.275204492411408e-05,
799
+ "loss": 0.0519,
800
+ "step": 113
801
+ },
802
+ {
803
+ "epoch": 1.1400000000000001,
804
+ "grad_norm": 0.006433610458016062,
805
+ "learning_rate": 6.236771613882987e-05,
806
+ "loss": 0.0553,
807
+ "step": 114
808
+ },
809
+ {
810
+ "epoch": 1.15,
811
+ "grad_norm": 0.006835005084642541,
812
+ "learning_rate": 6.198035912283225e-05,
813
+ "loss": 0.0474,
814
+ "step": 115
815
+ },
816
+ {
817
+ "epoch": 1.16,
818
+ "grad_norm": 0.0065486780447602125,
819
+ "learning_rate": 6.159002631806052e-05,
820
+ "loss": 0.0527,
821
+ "step": 116
822
+ },
823
+ {
824
+ "epoch": 1.17,
825
+ "grad_norm": 0.006115599641372958,
826
+ "learning_rate": 6.11967705693282e-05,
827
+ "loss": 0.0593,
828
+ "step": 117
829
+ },
830
+ {
831
+ "epoch": 1.18,
832
+ "grad_norm": 0.006410277773462344,
833
+ "learning_rate": 6.0800645117168616e-05,
834
+ "loss": 0.0495,
835
+ "step": 118
836
+ },
837
+ {
838
+ "epoch": 1.19,
839
+ "grad_norm": 0.00624207587040104,
840
+ "learning_rate": 6.040170359062702e-05,
841
+ "loss": 0.056,
842
+ "step": 119
843
+ },
844
+ {
845
+ "epoch": 1.2,
846
+ "grad_norm": 0.006452317218711539,
847
+ "learning_rate": 6.000000000000001e-05,
848
+ "loss": 0.0537,
849
+ "step": 120
850
+ },
851
+ {
852
+ "epoch": 1.21,
853
+ "grad_norm": 0.007465405568860055,
854
+ "learning_rate": 5.959558872952349e-05,
855
+ "loss": 0.0539,
856
+ "step": 121
857
+ },
858
+ {
859
+ "epoch": 1.22,
860
+ "grad_norm": 0.006441358307073292,
861
+ "learning_rate": 5.918852453000986e-05,
862
+ "loss": 0.0567,
863
+ "step": 122
864
+ },
865
+ {
866
+ "epoch": 1.23,
867
+ "grad_norm": 0.007033751482965206,
868
+ "learning_rate": 5.877886251143564e-05,
869
+ "loss": 0.0488,
870
+ "step": 123
871
+ },
872
+ {
873
+ "epoch": 1.24,
874
+ "grad_norm": 0.006453511474714476,
875
+ "learning_rate": 5.836665813548047e-05,
876
+ "loss": 0.0517,
877
+ "step": 124
878
+ },
879
+ {
880
+ "epoch": 1.25,
881
+ "grad_norm": 0.006202405357875232,
882
+ "learning_rate": 5.79519672080185e-05,
883
+ "loss": 0.0531,
884
+ "step": 125
885
+ },
886
+ {
887
+ "epoch": 1.26,
888
+ "grad_norm": 0.0063350763498242054,
889
+ "learning_rate": 5.75348458715631e-05,
890
+ "loss": 0.0542,
891
+ "step": 126
892
+ },
893
+ {
894
+ "epoch": 1.27,
895
+ "grad_norm": 0.006585920886645376,
896
+ "learning_rate": 5.711535059766617e-05,
897
+ "loss": 0.0511,
898
+ "step": 127
899
+ },
900
+ {
901
+ "epoch": 1.28,
902
+ "grad_norm": 0.007114089940186752,
903
+ "learning_rate": 5.669353817927272e-05,
904
+ "loss": 0.0507,
905
+ "step": 128
906
+ },
907
+ {
908
+ "epoch": 1.29,
909
+ "grad_norm": 0.0063686838958942745,
910
+ "learning_rate": 5.626946572303202e-05,
911
+ "loss": 0.0517,
912
+ "step": 129
913
+ },
914
+ {
915
+ "epoch": 1.3,
916
+ "grad_norm": 0.007118365650739293,
917
+ "learning_rate": 5.584319064156628e-05,
918
+ "loss": 0.0617,
919
+ "step": 130
920
+ },
921
+ {
922
+ "epoch": 1.31,
923
+ "grad_norm": 0.009985325431920227,
924
+ "learning_rate": 5.541477064569794e-05,
925
+ "loss": 0.06,
926
+ "step": 131
927
+ },
928
+ {
929
+ "epoch": 1.32,
930
+ "grad_norm": 0.007584023655357705,
931
+ "learning_rate": 5.4984263736636494e-05,
932
+ "loss": 0.0524,
933
+ "step": 132
934
+ },
935
+ {
936
+ "epoch": 1.33,
937
+ "grad_norm": 0.008166009187369436,
938
+ "learning_rate": 5.4551728198126066e-05,
939
+ "loss": 0.0568,
940
+ "step": 133
941
+ },
942
+ {
943
+ "epoch": 1.34,
944
+ "grad_norm": 0.007807828168493425,
945
+ "learning_rate": 5.4117222588554756e-05,
946
+ "loss": 0.0528,
947
+ "step": 134
948
+ },
949
+ {
950
+ "epoch": 1.35,
951
+ "grad_norm": 0.007275027029853539,
952
+ "learning_rate": 5.368080573302676e-05,
953
+ "loss": 0.0523,
954
+ "step": 135
955
+ },
956
+ {
957
+ "epoch": 1.3599999999999999,
958
+ "grad_norm": 0.009982025448270036,
959
+ "learning_rate": 5.324253671539833e-05,
960
+ "loss": 0.0561,
961
+ "step": 136
962
+ },
963
+ {
964
+ "epoch": 1.37,
965
+ "grad_norm": 0.007917078584406574,
966
+ "learning_rate": 5.280247487027886e-05,
967
+ "loss": 0.0529,
968
+ "step": 137
969
+ },
970
+ {
971
+ "epoch": 1.38,
972
+ "grad_norm": 0.007156905015064814,
973
+ "learning_rate": 5.23606797749979e-05,
974
+ "loss": 0.0513,
975
+ "step": 138
976
+ },
977
+ {
978
+ "epoch": 1.3900000000000001,
979
+ "grad_norm": 0.007915384457070518,
980
+ "learning_rate": 5.191721124153928e-05,
981
+ "loss": 0.0504,
982
+ "step": 139
983
+ },
984
+ {
985
+ "epoch": 1.4,
986
+ "grad_norm": 0.0071454076520816884,
987
+ "learning_rate": 5.1472129308443616e-05,
988
+ "loss": 0.0505,
989
+ "step": 140
990
+ },
991
+ {
992
+ "epoch": 1.41,
993
+ "grad_norm": 0.007440864860172307,
994
+ "learning_rate": 5.102549423267997e-05,
995
+ "loss": 0.0513,
996
+ "step": 141
997
+ },
998
+ {
999
+ "epoch": 1.42,
1000
+ "grad_norm": 0.006781489697269766,
1001
+ "learning_rate": 5.05773664814881e-05,
1002
+ "loss": 0.0557,
1003
+ "step": 142
1004
+ },
1005
+ {
1006
+ "epoch": 1.43,
1007
+ "grad_norm": 0.0076834133529257624,
1008
+ "learning_rate": 5.012780672419208e-05,
1009
+ "loss": 0.0544,
1010
+ "step": 143
1011
+ },
1012
+ {
1013
+ "epoch": 1.44,
1014
+ "grad_norm": 0.007302367654679525,
1015
+ "learning_rate": 4.967687582398671e-05,
1016
+ "loss": 0.0582,
1017
+ "step": 144
1018
+ },
1019
+ {
1020
+ "epoch": 1.45,
1021
+ "grad_norm": 0.007567079535660508,
1022
+ "learning_rate": 4.922463482969761e-05,
1023
+ "loss": 0.058,
1024
+ "step": 145
1025
+ },
1026
+ {
1027
+ "epoch": 1.46,
1028
+ "grad_norm": 0.008102048310381298,
1029
+ "learning_rate": 4.877114496751613e-05,
1030
+ "loss": 0.0552,
1031
+ "step": 146
1032
+ },
1033
+ {
1034
+ "epoch": 1.47,
1035
+ "grad_norm": 0.007034402622201175,
1036
+ "learning_rate": 4.831646763271037e-05,
1037
+ "loss": 0.0613,
1038
+ "step": 147
1039
+ },
1040
+ {
1041
+ "epoch": 1.48,
1042
+ "grad_norm": 0.00724145626141598,
1043
+ "learning_rate": 4.786066438131321e-05,
1044
+ "loss": 0.0561,
1045
+ "step": 148
1046
+ },
1047
+ {
1048
+ "epoch": 1.49,
1049
+ "grad_norm": 0.007102845006067121,
1050
+ "learning_rate": 4.740379692178858e-05,
1051
+ "loss": 0.0554,
1052
+ "step": 149
1053
+ },
1054
+ {
1055
+ "epoch": 1.5,
1056
+ "grad_norm": 0.009236373859778914,
1057
+ "learning_rate": 4.694592710667723e-05,
1058
+ "loss": 0.0495,
1059
+ "step": 150
1060
+ },
1061
+ {
1062
+ "epoch": 1.51,
1063
+ "grad_norm": 0.00714453517731578,
1064
+ "learning_rate": 4.648711692422271e-05,
1065
+ "loss": 0.0556,
1066
+ "step": 151
1067
+ },
1068
+ {
1069
+ "epoch": 1.52,
1070
+ "grad_norm": 0.007085617774269391,
1071
+ "learning_rate": 4.602742848997933e-05,
1072
+ "loss": 0.056,
1073
+ "step": 152
1074
+ },
1075
+ {
1076
+ "epoch": 1.53,
1077
+ "grad_norm": 0.008430545103813795,
1078
+ "learning_rate": 4.556692403840262e-05,
1079
+ "loss": 0.0524,
1080
+ "step": 153
1081
+ },
1082
+ {
1083
+ "epoch": 1.54,
1084
+ "grad_norm": 0.007287113804117382,
1085
+ "learning_rate": 4.51056659144238e-05,
1086
+ "loss": 0.0516,
1087
+ "step": 154
1088
+ },
1089
+ {
1090
+ "epoch": 1.55,
1091
+ "grad_norm": 0.008349043652432166,
1092
+ "learning_rate": 4.464371656500921e-05,
1093
+ "loss": 0.0631,
1094
+ "step": 155
1095
+ },
1096
+ {
1097
+ "epoch": 1.56,
1098
+ "grad_norm": 0.007598972785573559,
1099
+ "learning_rate": 4.418113853070614e-05,
1100
+ "loss": 0.0587,
1101
+ "step": 156
1102
+ },
1103
+ {
1104
+ "epoch": 1.5699999999999998,
1105
+ "grad_norm": 0.007569896238311314,
1106
+ "learning_rate": 4.37179944371757e-05,
1107
+ "loss": 0.055,
1108
+ "step": 157
1109
+ },
1110
+ {
1111
+ "epoch": 1.58,
1112
+ "grad_norm": 0.007131863103812141,
1113
+ "learning_rate": 4.3254346986714334e-05,
1114
+ "loss": 0.0584,
1115
+ "step": 158
1116
+ },
1117
+ {
1118
+ "epoch": 1.5899999999999999,
1119
+ "grad_norm": 0.007080408626366854,
1120
+ "learning_rate": 4.2790258949765014e-05,
1121
+ "loss": 0.0519,
1122
+ "step": 159
1123
+ },
1124
+ {
1125
+ "epoch": 1.6,
1126
+ "grad_norm": 0.008114262196864164,
1127
+ "learning_rate": 4.2325793156419035e-05,
1128
+ "loss": 0.0482,
1129
+ "step": 160
1130
+ },
1131
+ {
1132
+ "epoch": 1.6099999999999999,
1133
+ "grad_norm": 0.008190674953788812,
1134
+ "learning_rate": 4.186101248790988e-05,
1135
+ "loss": 0.0484,
1136
+ "step": 161
1137
+ },
1138
+ {
1139
+ "epoch": 1.62,
1140
+ "grad_norm": 0.007020507001428916,
1141
+ "learning_rate": 4.139597986810005e-05,
1142
+ "loss": 0.0521,
1143
+ "step": 162
1144
+ },
1145
+ {
1146
+ "epoch": 1.63,
1147
+ "grad_norm": 0.007650354154095339,
1148
+ "learning_rate": 4.093075825496225e-05,
1149
+ "loss": 0.0568,
1150
+ "step": 163
1151
+ },
1152
+ {
1153
+ "epoch": 1.6400000000000001,
1154
+ "grad_norm": 0.008182046330938405,
1155
+ "learning_rate": 4.046541063205589e-05,
1156
+ "loss": 0.0516,
1157
+ "step": 164
1158
+ },
1159
+ {
1160
+ "epoch": 1.65,
1161
+ "grad_norm": 0.007465154265220012,
1162
+ "learning_rate": 4e-05,
1163
+ "loss": 0.0482,
1164
+ "step": 165
1165
+ },
1166
+ {
1167
+ "epoch": 1.6600000000000001,
1168
+ "grad_norm": 0.008452117611695243,
1169
+ "learning_rate": 3.953458936794413e-05,
1170
+ "loss": 0.05,
1171
+ "step": 166
1172
+ },
1173
+ {
1174
+ "epoch": 1.67,
1175
+ "grad_norm": 0.008074789265636787,
1176
+ "learning_rate": 3.9069241745037753e-05,
1177
+ "loss": 0.0513,
1178
+ "step": 167
1179
+ },
1180
+ {
1181
+ "epoch": 1.6800000000000002,
1182
+ "grad_norm": 0.009042811977756882,
1183
+ "learning_rate": 3.860402013189998e-05,
1184
+ "loss": 0.0485,
1185
+ "step": 168
1186
+ },
1187
+ {
1188
+ "epoch": 1.69,
1189
+ "grad_norm": 0.00762433164262109,
1190
+ "learning_rate": 3.813898751209013e-05,
1191
+ "loss": 0.0512,
1192
+ "step": 169
1193
+ },
1194
+ {
1195
+ "epoch": 1.7,
1196
+ "grad_norm": 0.008811253254955806,
1197
+ "learning_rate": 3.767420684358097e-05,
1198
+ "loss": 0.0521,
1199
+ "step": 170
1200
+ },
1201
+ {
1202
+ "epoch": 1.71,
1203
+ "grad_norm": 0.0075299486046352315,
1204
+ "learning_rate": 3.720974105023499e-05,
1205
+ "loss": 0.0504,
1206
+ "step": 171
1207
+ },
1208
+ {
1209
+ "epoch": 1.72,
1210
+ "grad_norm": 0.007798017673528889,
1211
+ "learning_rate": 3.674565301328568e-05,
1212
+ "loss": 0.0577,
1213
+ "step": 172
1214
+ },
1215
+ {
1216
+ "epoch": 1.73,
1217
+ "grad_norm": 0.007622433433874874,
1218
+ "learning_rate": 3.628200556282432e-05,
1219
+ "loss": 0.0518,
1220
+ "step": 173
1221
+ },
1222
+ {
1223
+ "epoch": 1.74,
1224
+ "grad_norm": 0.008467491545172878,
1225
+ "learning_rate": 3.581886146929387e-05,
1226
+ "loss": 0.0522,
1227
+ "step": 174
1228
+ },
1229
+ {
1230
+ "epoch": 1.75,
1231
+ "grad_norm": 0.007506213631952832,
1232
+ "learning_rate": 3.535628343499079e-05,
1233
+ "loss": 0.0581,
1234
+ "step": 175
1235
+ },
1236
+ {
1237
+ "epoch": 1.76,
1238
+ "grad_norm": 0.008763895813966242,
1239
+ "learning_rate": 3.4894334085576215e-05,
1240
+ "loss": 0.058,
1241
+ "step": 176
1242
+ },
1243
+ {
1244
+ "epoch": 1.77,
1245
+ "grad_norm": 0.007669952397480411,
1246
+ "learning_rate": 3.44330759615974e-05,
1247
+ "loss": 0.052,
1248
+ "step": 177
1249
+ },
1250
+ {
1251
+ "epoch": 1.78,
1252
+ "grad_norm": 0.007054625454305199,
1253
+ "learning_rate": 3.397257151002068e-05,
1254
+ "loss": 0.0549,
1255
+ "step": 178
1256
+ },
1257
+ {
1258
+ "epoch": 1.79,
1259
+ "grad_norm": 0.009288756626833644,
1260
+ "learning_rate": 3.351288307577731e-05,
1261
+ "loss": 0.0554,
1262
+ "step": 179
1263
+ },
1264
+ {
1265
+ "epoch": 1.8,
1266
+ "grad_norm": 0.008233310749170903,
1267
+ "learning_rate": 3.305407289332279e-05,
1268
+ "loss": 0.0565,
1269
+ "step": 180
1270
+ },
1271
+ {
1272
+ "epoch": 1.81,
1273
+ "grad_norm": 0.007833837204017957,
1274
+ "learning_rate": 3.2596203078211424e-05,
1275
+ "loss": 0.0467,
1276
+ "step": 181
1277
+ },
1278
+ {
1279
+ "epoch": 1.8199999999999998,
1280
+ "grad_norm": 0.007089341594394346,
1281
+ "learning_rate": 3.213933561868679e-05,
1282
+ "loss": 0.052,
1283
+ "step": 182
1284
+ },
1285
+ {
1286
+ "epoch": 1.83,
1287
+ "grad_norm": 0.007473306580606742,
1288
+ "learning_rate": 3.168353236728964e-05,
1289
+ "loss": 0.0487,
1290
+ "step": 183
1291
+ },
1292
+ {
1293
+ "epoch": 1.8399999999999999,
1294
+ "grad_norm": 0.008372809544562218,
1295
+ "learning_rate": 3.122885503248386e-05,
1296
+ "loss": 0.0492,
1297
+ "step": 184
1298
+ },
1299
+ {
1300
+ "epoch": 1.85,
1301
+ "grad_norm": 0.0071497318025102955,
1302
+ "learning_rate": 3.07753651703024e-05,
1303
+ "loss": 0.0608,
1304
+ "step": 185
1305
+ },
1306
+ {
1307
+ "epoch": 1.8599999999999999,
1308
+ "grad_norm": 0.007493452653104603,
1309
+ "learning_rate": 3.0323124176013297e-05,
1310
+ "loss": 0.0478,
1311
+ "step": 186
1312
+ },
1313
+ {
1314
+ "epoch": 1.87,
1315
+ "grad_norm": 0.007202322342127042,
1316
+ "learning_rate": 2.9872193275807933e-05,
1317
+ "loss": 0.0542,
1318
+ "step": 187
1319
+ },
1320
+ {
1321
+ "epoch": 1.88,
1322
+ "grad_norm": 0.00732058753845859,
1323
+ "learning_rate": 2.9422633518511926e-05,
1324
+ "loss": 0.0518,
1325
+ "step": 188
1326
+ },
1327
+ {
1328
+ "epoch": 1.8900000000000001,
1329
+ "grad_norm": 0.007508749793397996,
1330
+ "learning_rate": 2.8974505767320037e-05,
1331
+ "loss": 0.052,
1332
+ "step": 189
1333
+ },
1334
+ {
1335
+ "epoch": 1.9,
1336
+ "grad_norm": 0.00766611988918056,
1337
+ "learning_rate": 2.8527870691556404e-05,
1338
+ "loss": 0.0516,
1339
+ "step": 190
1340
+ },
1341
+ {
1342
+ "epoch": 1.9100000000000001,
1343
+ "grad_norm": 0.007388182140516179,
1344
+ "learning_rate": 2.808278875846072e-05,
1345
+ "loss": 0.0457,
1346
+ "step": 191
1347
+ },
1348
+ {
1349
+ "epoch": 1.92,
1350
+ "grad_norm": 0.008028421092160346,
1351
+ "learning_rate": 2.7639320225002108e-05,
1352
+ "loss": 0.0559,
1353
+ "step": 192
1354
+ },
1355
+ {
1356
+ "epoch": 1.9300000000000002,
1357
+ "grad_norm": 0.0074198301332096695,
1358
+ "learning_rate": 2.7197525129721138e-05,
1359
+ "loss": 0.0559,
1360
+ "step": 193
1361
+ },
1362
+ {
1363
+ "epoch": 1.94,
1364
+ "grad_norm": 0.008148658738087657,
1365
+ "learning_rate": 2.6757463284601682e-05,
1366
+ "loss": 0.0506,
1367
+ "step": 194
1368
+ },
1369
+ {
1370
+ "epoch": 1.95,
1371
+ "grad_norm": 0.0075285729396853684,
1372
+ "learning_rate": 2.6319194266973256e-05,
1373
+ "loss": 0.0505,
1374
+ "step": 195
1375
+ },
1376
+ {
1377
+ "epoch": 1.96,
1378
+ "grad_norm": 0.007423097745506451,
1379
+ "learning_rate": 2.5882777411445254e-05,
1380
+ "loss": 0.048,
1381
+ "step": 196
1382
+ },
1383
+ {
1384
+ "epoch": 1.97,
1385
+ "grad_norm": 0.00710016303959488,
1386
+ "learning_rate": 2.5448271801873957e-05,
1387
+ "loss": 0.0506,
1388
+ "step": 197
1389
+ },
1390
+ {
1391
+ "epoch": 1.98,
1392
+ "grad_norm": 0.008113205739100461,
1393
+ "learning_rate": 2.501573626336352e-05,
1394
+ "loss": 0.0609,
1395
+ "step": 198
1396
+ },
1397
+ {
1398
+ "epoch": 1.99,
1399
+ "grad_norm": 0.008764046165710386,
1400
+ "learning_rate": 2.4585229354302077e-05,
1401
+ "loss": 0.0454,
1402
+ "step": 199
1403
+ },
1404
+ {
1405
+ "epoch": 2.0,
1406
+ "grad_norm": 0.00821153513962698,
1407
+ "learning_rate": 2.4156809358433728e-05,
1408
+ "loss": 0.0606,
1409
+ "step": 200
1410
+ },
1411
+ {
1412
+ "epoch": 2.01,
1413
+ "grad_norm": 0.007680698523728549,
1414
+ "learning_rate": 2.3730534276968e-05,
1415
+ "loss": 0.0476,
1416
+ "step": 201
1417
+ },
1418
+ {
1419
+ "epoch": 2.02,
1420
+ "grad_norm": 0.0072058892547023625,
1421
+ "learning_rate": 2.330646182072729e-05,
1422
+ "loss": 0.0452,
1423
+ "step": 202
1424
+ },
1425
+ {
1426
+ "epoch": 2.03,
1427
+ "grad_norm": 0.007455986857229059,
1428
+ "learning_rate": 2.288464940233384e-05,
1429
+ "loss": 0.0532,
1430
+ "step": 203
1431
+ },
1432
+ {
1433
+ "epoch": 2.04,
1434
+ "grad_norm": 0.008403832982531088,
1435
+ "learning_rate": 2.24651541284369e-05,
1436
+ "loss": 0.059,
1437
+ "step": 204
1438
+ },
1439
+ {
1440
+ "epoch": 2.05,
1441
+ "grad_norm": 0.008164085332218317,
1442
+ "learning_rate": 2.2048032791981515e-05,
1443
+ "loss": 0.051,
1444
+ "step": 205
1445
+ },
1446
+ {
1447
+ "epoch": 2.06,
1448
+ "grad_norm": 0.008887334881050327,
1449
+ "learning_rate": 2.1633341864519526e-05,
1450
+ "loss": 0.0472,
1451
+ "step": 206
1452
+ },
1453
+ {
1454
+ "epoch": 2.07,
1455
+ "grad_norm": 0.006913827555493125,
1456
+ "learning_rate": 2.122113748856438e-05,
1457
+ "loss": 0.0495,
1458
+ "step": 207
1459
+ },
1460
+ {
1461
+ "epoch": 2.08,
1462
+ "grad_norm": 0.007764341801266272,
1463
+ "learning_rate": 2.0811475469990167e-05,
1464
+ "loss": 0.0521,
1465
+ "step": 208
1466
+ },
1467
+ {
1468
+ "epoch": 2.09,
1469
+ "grad_norm": 0.00796973985641782,
1470
+ "learning_rate": 2.0404411270476527e-05,
1471
+ "loss": 0.042,
1472
+ "step": 209
1473
+ },
1474
+ {
1475
+ "epoch": 2.1,
1476
+ "grad_norm": 0.007679599584638068,
1477
+ "learning_rate": 2.0000000000000012e-05,
1478
+ "loss": 0.043,
1479
+ "step": 210
1480
+ },
1481
+ {
1482
+ "epoch": 2.11,
1483
+ "grad_norm": 0.007741532652410615,
1484
+ "learning_rate": 1.959829640937299e-05,
1485
+ "loss": 0.0513,
1486
+ "step": 211
1487
+ },
1488
+ {
1489
+ "epoch": 2.12,
1490
+ "grad_norm": 0.008209511734690264,
1491
+ "learning_rate": 1.9199354882831387e-05,
1492
+ "loss": 0.0449,
1493
+ "step": 212
1494
+ },
1495
+ {
1496
+ "epoch": 2.13,
1497
+ "grad_norm": 0.00748273073895644,
1498
+ "learning_rate": 1.880322943067181e-05,
1499
+ "loss": 0.0516,
1500
+ "step": 213
1501
+ },
1502
+ {
1503
+ "epoch": 2.14,
1504
+ "grad_norm": 0.008058884957159639,
1505
+ "learning_rate": 1.8409973681939498e-05,
1506
+ "loss": 0.0491,
1507
+ "step": 214
1508
+ },
1509
+ {
1510
+ "epoch": 2.15,
1511
+ "grad_norm": 0.007896720503622225,
1512
+ "learning_rate": 1.8019640877167763e-05,
1513
+ "loss": 0.0508,
1514
+ "step": 215
1515
+ },
1516
+ {
1517
+ "epoch": 2.16,
1518
+ "grad_norm": 0.0076269938815235775,
1519
+ "learning_rate": 1.7632283861170135e-05,
1520
+ "loss": 0.0486,
1521
+ "step": 216
1522
+ },
1523
+ {
1524
+ "epoch": 2.17,
1525
+ "grad_norm": 0.007351997904615261,
1526
+ "learning_rate": 1.7247955075885938e-05,
1527
+ "loss": 0.0538,
1528
+ "step": 217
1529
+ },
1530
+ {
1531
+ "epoch": 2.18,
1532
+ "grad_norm": 0.008997677074227179,
1533
+ "learning_rate": 1.686670655328054e-05,
1534
+ "loss": 0.0428,
1535
+ "step": 218
1536
+ },
1537
+ {
1538
+ "epoch": 2.19,
1539
+ "grad_norm": 0.007628347779794642,
1540
+ "learning_rate": 1.648858990830108e-05,
1541
+ "loss": 0.0519,
1542
+ "step": 219
1543
+ },
1544
+ {
1545
+ "epoch": 2.2,
1546
+ "grad_norm": 0.007848198798166058,
1547
+ "learning_rate": 1.6113656331888563e-05,
1548
+ "loss": 0.0396,
1549
+ "step": 220
1550
+ },
1551
+ {
1552
+ "epoch": 2.21,
1553
+ "grad_norm": 0.00818161455778652,
1554
+ "learning_rate": 1.5741956584047478e-05,
1555
+ "loss": 0.0402,
1556
+ "step": 221
1557
+ },
1558
+ {
1559
+ "epoch": 2.22,
1560
+ "grad_norm": 0.008100183400441816,
1561
+ "learning_rate": 1.537354098697367e-05,
1562
+ "loss": 0.0453,
1563
+ "step": 222
1564
+ },
1565
+ {
1566
+ "epoch": 2.23,
1567
+ "grad_norm": 0.00794023122702451,
1568
+ "learning_rate": 1.5008459418241601e-05,
1569
+ "loss": 0.0472,
1570
+ "step": 223
1571
+ },
1572
+ {
1573
+ "epoch": 2.24,
1574
+ "grad_norm": 0.009769674822868987,
1575
+ "learning_rate": 1.4646761304051587e-05,
1576
+ "loss": 0.0465,
1577
+ "step": 224
1578
+ },
1579
+ {
1580
+ "epoch": 2.25,
1581
+ "grad_norm": 0.00783561890669205,
1582
+ "learning_rate": 1.4288495612538427e-05,
1583
+ "loss": 0.0466,
1584
+ "step": 225
1585
+ },
1586
+ {
1587
+ "epoch": 2.26,
1588
+ "grad_norm": 0.007694729315287434,
1589
+ "learning_rate": 1.3933710847141795e-05,
1590
+ "loss": 0.0507,
1591
+ "step": 226
1592
+ },
1593
+ {
1594
+ "epoch": 2.27,
1595
+ "grad_norm": 0.0075756047835537,
1596
+ "learning_rate": 1.3582455040039699e-05,
1597
+ "loss": 0.0518,
1598
+ "step": 227
1599
+ },
1600
+ {
1601
+ "epoch": 2.2800000000000002,
1602
+ "grad_norm": 0.009015454228459657,
1603
+ "learning_rate": 1.3234775745645684e-05,
1604
+ "loss": 0.0499,
1605
+ "step": 228
1606
+ },
1607
+ {
1608
+ "epoch": 2.29,
1609
+ "grad_norm": 0.00921692465849527,
1610
+ "learning_rate": 1.2890720034170712e-05,
1611
+ "loss": 0.0561,
1612
+ "step": 229
1613
+ },
1614
+ {
1615
+ "epoch": 2.3,
1616
+ "grad_norm": 0.007278637992549301,
1617
+ "learning_rate": 1.2550334485250661e-05,
1618
+ "loss": 0.0453,
1619
+ "step": 230
1620
+ },
1621
+ {
1622
+ "epoch": 2.31,
1623
+ "grad_norm": 0.008589962697891675,
1624
+ "learning_rate": 1.2213665181640106e-05,
1625
+ "loss": 0.0502,
1626
+ "step": 231
1627
+ },
1628
+ {
1629
+ "epoch": 2.32,
1630
+ "grad_norm": 0.007498032503423722,
1631
+ "learning_rate": 1.1880757702973531e-05,
1632
+ "loss": 0.0544,
1633
+ "step": 232
1634
+ },
1635
+ {
1636
+ "epoch": 2.33,
1637
+ "grad_norm": 0.00711635543171786,
1638
+ "learning_rate": 1.1551657119594517e-05,
1639
+ "loss": 0.042,
1640
+ "step": 233
1641
+ },
1642
+ {
1643
+ "epoch": 2.34,
1644
+ "grad_norm": 0.008216933132714868,
1645
+ "learning_rate": 1.1226407986453963e-05,
1646
+ "loss": 0.0504,
1647
+ "step": 234
1648
+ },
1649
+ {
1650
+ "epoch": 2.35,
1651
+ "grad_norm": 0.008104809687511125,
1652
+ "learning_rate": 1.0905054337078051e-05,
1653
+ "loss": 0.0458,
1654
+ "step": 235
1655
+ },
1656
+ {
1657
+ "epoch": 2.36,
1658
+ "grad_norm": 0.007698489937667685,
1659
+ "learning_rate": 1.0587639677606857e-05,
1660
+ "loss": 0.0484,
1661
+ "step": 236
1662
+ },
1663
+ {
1664
+ "epoch": 2.37,
1665
+ "grad_norm": 0.008350303786833624,
1666
+ "learning_rate": 1.0274206980904226e-05,
1667
+ "loss": 0.053,
1668
+ "step": 237
1669
+ },
1670
+ {
1671
+ "epoch": 2.38,
1672
+ "grad_norm": 0.00787749812285185,
1673
+ "learning_rate": 9.964798680740033e-06,
1674
+ "loss": 0.0583,
1675
+ "step": 238
1676
+ },
1677
+ {
1678
+ "epoch": 2.39,
1679
+ "grad_norm": 0.007815320631397374,
1680
+ "learning_rate": 9.659456666045247e-06,
1681
+ "loss": 0.0505,
1682
+ "step": 239
1683
+ },
1684
+ {
1685
+ "epoch": 2.4,
1686
+ "grad_norm": 0.007102002524475694,
1687
+ "learning_rate": 9.358222275240884e-06,
1688
+ "loss": 0.0492,
1689
+ "step": 240
1690
+ },
1691
+ {
1692
+ "epoch": 2.41,
1693
+ "grad_norm": 0.007621055980572488,
1694
+ "learning_rate": 9.061136290641448e-06,
1695
+ "loss": 0.05,
1696
+ "step": 241
1697
+ },
1698
+ {
1699
+ "epoch": 2.42,
1700
+ "grad_norm": 0.011297785905862773,
1701
+ "learning_rate": 8.768238932933632e-06,
1702
+ "loss": 0.0468,
1703
+ "step": 242
1704
+ },
1705
+ {
1706
+ "epoch": 2.43,
1707
+ "grad_norm": 0.008601495621167033,
1708
+ "learning_rate": 8.479569855731125e-06,
1709
+ "loss": 0.0535,
1710
+ "step": 243
1711
+ },
1712
+ {
1713
+ "epoch": 2.44,
1714
+ "grad_norm": 0.007878980554869908,
1715
+ "learning_rate": 8.195168140206084e-06,
1716
+ "loss": 0.0451,
1717
+ "step": 244
1718
+ },
1719
+ {
1720
+ "epoch": 2.45,
1721
+ "grad_norm": 0.0074909986850030886,
1722
+ "learning_rate": 7.915072289798247e-06,
1723
+ "loss": 0.0521,
1724
+ "step": 245
1725
+ },
1726
+ {
1727
+ "epoch": 2.46,
1728
+ "grad_norm": 0.00848573741778448,
1729
+ "learning_rate": 7.639320225002106e-06,
1730
+ "loss": 0.0452,
1731
+ "step": 246
1732
+ },
1733
+ {
1734
+ "epoch": 2.4699999999999998,
1735
+ "grad_norm": 0.00790578716548238,
1736
+ "learning_rate": 7.367949278233126e-06,
1737
+ "loss": 0.0539,
1738
+ "step": 247
1739
+ },
1740
+ {
1741
+ "epoch": 2.48,
1742
+ "grad_norm": 0.008232378642808477,
1743
+ "learning_rate": 7.1009961887735075e-06,
1744
+ "loss": 0.0464,
1745
+ "step": 248
1746
+ },
1747
+ {
1748
+ "epoch": 2.49,
1749
+ "grad_norm": 0.007433470022835041,
1750
+ "learning_rate": 6.838497097798336e-06,
1751
+ "loss": 0.0467,
1752
+ "step": 249
1753
+ },
1754
+ {
1755
+ "epoch": 2.5,
1756
+ "grad_norm": 0.007738887190809639,
1757
+ "learning_rate": 6.58048754348255e-06,
1758
+ "loss": 0.0587,
1759
+ "step": 250
1760
+ },
1761
+ {
1762
+ "epoch": 2.51,
1763
+ "grad_norm": 0.00880827917948987,
1764
+ "learning_rate": 6.327002456189699e-06,
1765
+ "loss": 0.054,
1766
+ "step": 251
1767
+ },
1768
+ {
1769
+ "epoch": 2.52,
1770
+ "grad_norm": 0.007965487571289098,
1771
+ "learning_rate": 6.078076153742962e-06,
1772
+ "loss": 0.0512,
1773
+ "step": 252
1774
+ },
1775
+ {
1776
+ "epoch": 2.5300000000000002,
1777
+ "grad_norm": 0.007469099301562679,
1778
+ "learning_rate": 5.833742336778981e-06,
1779
+ "loss": 0.0477,
1780
+ "step": 253
1781
+ },
1782
+ {
1783
+ "epoch": 2.54,
1784
+ "grad_norm": 0.008484709423463018,
1785
+ "learning_rate": 5.5940340841853915e-06,
1786
+ "loss": 0.0494,
1787
+ "step": 254
1788
+ },
1789
+ {
1790
+ "epoch": 2.55,
1791
+ "grad_norm": 0.0077139322549110954,
1792
+ "learning_rate": 5.358983848622452e-06,
1793
+ "loss": 0.0431,
1794
+ "step": 255
1795
+ },
1796
+ {
1797
+ "epoch": 2.56,
1798
+ "grad_norm": 0.007419456022316517,
1799
+ "learning_rate": 5.128623452129508e-06,
1800
+ "loss": 0.0458,
1801
+ "step": 256
1802
+ },
1803
+ {
1804
+ "epoch": 2.57,
1805
+ "grad_norm": 0.007497816758801154,
1806
+ "learning_rate": 4.902984081816717e-06,
1807
+ "loss": 0.0525,
1808
+ "step": 257
1809
+ },
1810
+ {
1811
+ "epoch": 2.58,
1812
+ "grad_norm": 0.00773414898278967,
1813
+ "learning_rate": 4.6820962856429205e-06,
1814
+ "loss": 0.0501,
1815
+ "step": 258
1816
+ },
1817
+ {
1818
+ "epoch": 2.59,
1819
+ "grad_norm": 0.008029479128266754,
1820
+ "learning_rate": 4.4659899682798446e-06,
1821
+ "loss": 0.058,
1822
+ "step": 259
1823
+ },
1824
+ {
1825
+ "epoch": 2.6,
1826
+ "grad_norm": 0.00817201238048331,
1827
+ "learning_rate": 4.254694387063514e-06,
1828
+ "loss": 0.0517,
1829
+ "step": 260
1830
+ },
1831
+ {
1832
+ "epoch": 2.61,
1833
+ "grad_norm": 0.007833872573765003,
1834
+ "learning_rate": 4.048238148033328e-06,
1835
+ "loss": 0.0447,
1836
+ "step": 261
1837
+ },
1838
+ {
1839
+ "epoch": 2.62,
1840
+ "grad_norm": 0.007283917683521356,
1841
+ "learning_rate": 3.846649202059181e-06,
1842
+ "loss": 0.0507,
1843
+ "step": 262
1844
+ },
1845
+ {
1846
+ "epoch": 2.63,
1847
+ "grad_norm": 0.007281030186329541,
1848
+ "learning_rate": 3.6499548410574303e-06,
1849
+ "loss": 0.0513,
1850
+ "step": 263
1851
+ },
1852
+ {
1853
+ "epoch": 2.64,
1854
+ "grad_norm": 0.008227694669530704,
1855
+ "learning_rate": 3.458181694295961e-06,
1856
+ "loss": 0.0479,
1857
+ "step": 264
1858
+ },
1859
+ {
1860
+ "epoch": 2.65,
1861
+ "grad_norm": 0.008050977932771895,
1862
+ "learning_rate": 3.2713557247890447e-06,
1863
+ "loss": 0.0454,
1864
+ "step": 265
1865
+ },
1866
+ {
1867
+ "epoch": 2.66,
1868
+ "grad_norm": 0.008423892005773738,
1869
+ "learning_rate": 3.0895022257823083e-06,
1870
+ "loss": 0.0468,
1871
+ "step": 266
1872
+ },
1873
+ {
1874
+ "epoch": 2.67,
1875
+ "grad_norm": 0.007881578517262917,
1876
+ "learning_rate": 2.9126458173285077e-06,
1877
+ "loss": 0.0407,
1878
+ "step": 267
1879
+ },
1880
+ {
1881
+ "epoch": 2.68,
1882
+ "grad_norm": 0.007992533123823718,
1883
+ "learning_rate": 2.7408104429543025e-06,
1884
+ "loss": 0.0494,
1885
+ "step": 268
1886
+ },
1887
+ {
1888
+ "epoch": 2.69,
1889
+ "grad_norm": 0.007436604743620323,
1890
+ "learning_rate": 2.574019366418745e-06,
1891
+ "loss": 0.0549,
1892
+ "step": 269
1893
+ },
1894
+ {
1895
+ "epoch": 2.7,
1896
+ "grad_norm": 0.007663239639283421,
1897
+ "learning_rate": 2.4122951685636674e-06,
1898
+ "loss": 0.0475,
1899
+ "step": 270
1900
+ },
1901
+ {
1902
+ "epoch": 2.71,
1903
+ "grad_norm": 0.007838468915328882,
1904
+ "learning_rate": 2.25565974425666e-06,
1905
+ "loss": 0.0464,
1906
+ "step": 271
1907
+ },
1908
+ {
1909
+ "epoch": 2.7199999999999998,
1910
+ "grad_norm": 0.00914425008382166,
1911
+ "learning_rate": 2.104134299426832e-06,
1912
+ "loss": 0.0383,
1913
+ "step": 272
1914
+ },
1915
+ {
1916
+ "epoch": 2.73,
1917
+ "grad_norm": 0.007947682642468833,
1918
+ "learning_rate": 1.957739348193859e-06,
1919
+ "loss": 0.0519,
1920
+ "step": 273
1921
+ },
1922
+ {
1923
+ "epoch": 2.74,
1924
+ "grad_norm": 0.0070935838733125825,
1925
+ "learning_rate": 1.8164947100907238e-06,
1926
+ "loss": 0.0465,
1927
+ "step": 274
1928
+ },
1929
+ {
1930
+ "epoch": 2.75,
1931
+ "grad_norm": 0.008179536491946084,
1932
+ "learning_rate": 1.6804195073804442e-06,
1933
+ "loss": 0.0459,
1934
+ "step": 275
1935
+ },
1936
+ {
1937
+ "epoch": 2.76,
1938
+ "grad_norm": 0.007753507765305779,
1939
+ "learning_rate": 1.5495321624672443e-06,
1940
+ "loss": 0.0468,
1941
+ "step": 276
1942
+ },
1943
+ {
1944
+ "epoch": 2.77,
1945
+ "grad_norm": 0.007199406022151337,
1946
+ "learning_rate": 1.423850395402444e-06,
1947
+ "loss": 0.0514,
1948
+ "step": 277
1949
+ },
1950
+ {
1951
+ "epoch": 2.7800000000000002,
1952
+ "grad_norm": 0.008333418338222438,
1953
+ "learning_rate": 1.3033912214854482e-06,
1954
+ "loss": 0.05,
1955
+ "step": 278
1956
+ },
1957
+ {
1958
+ "epoch": 2.79,
1959
+ "grad_norm": 0.008081800995925644,
1960
+ "learning_rate": 1.1881709489601413e-06,
1961
+ "loss": 0.0545,
1962
+ "step": 279
1963
+ },
1964
+ {
1965
+ "epoch": 2.8,
1966
+ "grad_norm": 0.007699849451471151,
1967
+ "learning_rate": 1.0782051768070477e-06,
1968
+ "loss": 0.0458,
1969
+ "step": 280
1970
+ },
1971
+ {
1972
+ "epoch": 2.81,
1973
+ "grad_norm": 0.008054323381976133,
1974
+ "learning_rate": 9.735087926314324e-07,
1975
+ "loss": 0.0431,
1976
+ "step": 281
1977
+ },
1978
+ {
1979
+ "epoch": 2.82,
1980
+ "grad_norm": 0.008480619590691966,
1981
+ "learning_rate": 8.740959706477725e-07,
1982
+ "loss": 0.0549,
1983
+ "step": 282
1984
+ },
1985
+ {
1986
+ "epoch": 2.83,
1987
+ "grad_norm": 0.00767280596192848,
1988
+ "learning_rate": 7.799801697608278e-07,
1989
+ "loss": 0.0488,
1990
+ "step": 283
1991
+ },
1992
+ {
1993
+ "epoch": 2.84,
1994
+ "grad_norm": 0.007292852487745584,
1995
+ "learning_rate": 6.911741317434706e-07,
1996
+ "loss": 0.0483,
1997
+ "step": 284
1998
+ },
1999
+ {
2000
+ "epoch": 2.85,
2001
+ "grad_norm": 0.00795167167707837,
2002
+ "learning_rate": 6.076898795116792e-07,
2003
+ "loss": 0.0457,
2004
+ "step": 285
2005
+ },
2006
+ {
2007
+ "epoch": 2.86,
2008
+ "grad_norm": 0.007486907497841981,
2009
+ "learning_rate": 5.295387154968312e-07,
2010
+ "loss": 0.0485,
2011
+ "step": 286
2012
+ },
2013
+ {
2014
+ "epoch": 2.87,
2015
+ "grad_norm": 0.00743096916544247,
2016
+ "learning_rate": 4.5673122011553605e-07,
2017
+ "loss": 0.0519,
2018
+ "step": 287
2019
+ },
2020
+ {
2021
+ "epoch": 2.88,
2022
+ "grad_norm": 0.008243440663466602,
2023
+ "learning_rate": 3.8927725033718553e-07,
2024
+ "loss": 0.0597,
2025
+ "step": 288
2026
+ },
2027
+ {
2028
+ "epoch": 2.89,
2029
+ "grad_norm": 0.008492105017339772,
2030
+ "learning_rate": 3.2718593834953237e-07,
2031
+ "loss": 0.0454,
2032
+ "step": 289
2033
+ },
2034
+ {
2035
+ "epoch": 2.9,
2036
+ "grad_norm": 0.007494753123818296,
2037
+ "learning_rate": 2.704656903222791e-07,
2038
+ "loss": 0.0481,
2039
+ "step": 290
2040
+ },
2041
+ {
2042
+ "epoch": 2.91,
2043
+ "grad_norm": 0.007642204782814251,
2044
+ "learning_rate": 2.1912418526906841e-07,
2045
+ "loss": 0.0482,
2046
+ "step": 291
2047
+ },
2048
+ {
2049
+ "epoch": 2.92,
2050
+ "grad_norm": 0.007883004012316045,
2051
+ "learning_rate": 1.7316837400782604e-07,
2052
+ "loss": 0.0455,
2053
+ "step": 292
2054
+ },
2055
+ {
2056
+ "epoch": 2.93,
2057
+ "grad_norm": 0.007928279317356624,
2058
+ "learning_rate": 1.3260447821975775e-07,
2059
+ "loss": 0.0456,
2060
+ "step": 293
2061
+ },
2062
+ {
2063
+ "epoch": 2.94,
2064
+ "grad_norm": 0.007445473197648637,
2065
+ "learning_rate": 9.74379896070321e-08,
2066
+ "loss": 0.0405,
2067
+ "step": 294
2068
+ },
2069
+ {
2070
+ "epoch": 2.95,
2071
+ "grad_norm": 0.008153580081268767,
2072
+ "learning_rate": 6.767366914927298e-08,
2073
+ "loss": 0.0458,
2074
+ "step": 295
2075
+ },
2076
+ {
2077
+ "epoch": 2.96,
2078
+ "grad_norm": 0.007821316249018594,
2079
+ "learning_rate": 4.331554645901737e-08,
2080
+ "loss": 0.0481,
2081
+ "step": 296
2082
+ },
2083
+ {
2084
+ "epoch": 2.9699999999999998,
2085
+ "grad_norm": 0.007329108353965065,
2086
+ "learning_rate": 2.4366919236169518e-08,
2087
+ "loss": 0.0526,
2088
+ "step": 297
2089
+ },
2090
+ {
2091
+ "epoch": 2.98,
2092
+ "grad_norm": 0.007906132996865366,
2093
+ "learning_rate": 1.0830352821531442e-08,
2094
+ "loss": 0.0392,
2095
+ "step": 298
2096
+ },
2097
+ {
2098
+ "epoch": 2.99,
2099
+ "grad_norm": 0.008017394398038166,
2100
+ "learning_rate": 2.7076798495118127e-09,
2101
+ "loss": 0.0467,
2102
+ "step": 299
2103
+ },
2104
+ {
2105
+ "epoch": 3.0,
2106
+ "grad_norm": 0.00838965584897803,
2107
+ "learning_rate": 0.0,
2108
+ "loss": 0.0492,
2109
+ "step": 300
2110
+ },
2111
+ {
2112
+ "epoch": 3.0,
2113
+ "step": 300,
2114
+ "total_flos": 225998445477888.0,
2115
+ "train_loss": 0.066388065914313,
2116
+ "train_runtime": 2513.5793,
2117
+ "train_samples_per_second": 0.955,
2118
+ "train_steps_per_second": 0.119
2119
+ }
2120
+ ],
2121
+ "logging_steps": 1,
2122
+ "max_steps": 300,
2123
+ "num_input_tokens_seen": 0,
2124
+ "num_train_epochs": 3,
2125
+ "save_steps": 500,
2126
+ "stateful_callbacks": {
2127
+ "TrainerControl": {
2128
+ "args": {
2129
+ "should_epoch_stop": false,
2130
+ "should_evaluate": false,
2131
+ "should_log": false,
2132
+ "should_save": false,
2133
+ "should_training_stop": false
2134
+ },
2135
+ "attributes": {}
2136
+ }
2137
+ },
2138
+ "total_flos": 225998445477888.0,
2139
+ "train_batch_size": 1,
2140
+ "trial_name": null,
2141
+ "trial_params": null
2142
+ }
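The records above follow the per-step `log_history` layout that the Hugging Face `Trainer` writes to its state file: each entry carries `epoch`, `grad_norm`, `learning_rate`, `loss`, and `step`, and the final entry summarizes the run (`total_flos`, `train_loss`, `train_runtime`, throughput). A minimal sketch of inspecting such a file after cloning the repository — the filename `trainer_state.json` and the summary printed below are illustrative assumptions, not part of this commit:

```python
import json

# Assumed filename for the Trainer state file this diff appears to add.
STATE_PATH = "trainer_state.json"

with open(STATE_PATH) as f:
    state = json.load(f)

# Per-step records like the ones shown above; the run-summary entry
# (train_loss, train_runtime, ...) has no "learning_rate" key, so it
# is filtered out here.
steps = [r for r in state["log_history"] if "loss" in r and "learning_rate" in r]

last = steps[-1]
print(f"logged steps: {len(steps)} of max_steps={state['max_steps']}")
print(f"step {last['step']} (epoch {last['epoch']}): loss={last['loss']}, lr={last['learning_rate']}")
print(f"mean loss over final 10 steps: {sum(r['loss'] for r in steps[-10:]) / 10:.4f}")
```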
training_args.bin ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e30bcfbda170d3f3fb7df7709d0674a5f23427dfcede9b0ffdfa92382ba2bd95
3
+ size 7288
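The three lines added for `training_args.bin` are only a Git LFS pointer; the 7,288-byte payload itself lives in LFS storage, as with the adapter weights. By `Trainer` convention this file is the pickled `TrainingArguments` object saved alongside the run, so after `git lfs pull` it can be inspected with `torch.load` — a hedged sketch, assuming a compatible `transformers` installation is available for unpickling:

```python
import torch

# training_args.bin is, by Trainer convention, a torch.save()-pickled
# TrainingArguments object; weights_only=False is required because it is
# an arbitrary pickled Python object rather than a plain tensor file.
args = torch.load("training_args.bin", weights_only=False)

print(type(args).__name__)               # typically "TrainingArguments"
print(args.learning_rate)                # configured peak learning rate
print(args.num_train_epochs)             # number of training epochs
print(args.per_device_train_batch_size)  # per-device batch size
```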