slseanwu committed on
Commit 628e395 · 1 Parent(s): 356b516

add weights

config.json ADDED
@@ -0,0 +1,31 @@
+ {
+ "_name_or_path": "/home/slseanwu/11891_codegen/air2/runs/deepseek-1.3b-full-arith-hard-PT-promptC12-interm-bs32-30K-240416/checkpoint-20000",
+ "architectures": [
+ "LlamaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 32013,
+ "eos_token_id": 32021,
+ "hidden_act": "silu",
+ "hidden_size": 2048,
+ "initializer_range": 0.02,
+ "intermediate_size": 5504,
+ "max_position_embeddings": 16384,
+ "model_type": "llama",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 24,
+ "num_key_value_heads": 16,
+ "pretraining_tp": 1,
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": {
+ "factor": 4.0,
+ "type": "linear"
+ },
+ "rope_theta": 100000,
+ "tie_word_embeddings": false,
+ "torch_dtype": "float32",
+ "transformers_version": "4.38.2",
+ "use_cache": true,
+ "vocab_size": 32256
+ }
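
This config.json describes a 1.3B-parameter LLaMA-architecture causal LM (24 layers, hidden size 2048, 16 heads, vocab 32256) stored in float32, with linear RoPE scaling (factor 4.0) behind the 16384-token max_position_embeddings. A minimal loading sketch, assuming the transformers library is installed and these files sit under a hypothetical local path (the path below is a placeholder, not this repo's identifier):

# Loading sketch; "./checkpoint-20000" is an assumed local directory containing the files in this commit.
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer

path = "./checkpoint-20000"
config = AutoConfig.from_pretrained(path)
print(config.model_type, config.hidden_size, config.rope_scaling)
# expected: llama 2048 {'factor': 4.0, 'type': 'linear'}

model = AutoModelForCausalLM.from_pretrained(path, torch_dtype="auto")
tokenizer = AutoTokenizer.from_pretrained(path)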
generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 32013,
+ "eos_token_id": 32021,
+ "transformers_version": "4.38.2"
+ }
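
generation_config.json only pins the special-token ids (BOS 32013, EOS 32021) and otherwise inherits from the model config. A generation sketch reusing model and tokenizer from the loading sketch above; the prompt and the pad-token choice are illustrative assumptions, not part of this repo:

# Greedy generation sketch; eos_token_id matches generation_config.json above.
inputs = tokenizer("def add(a, b):", return_tensors="pt")
output_ids = model.generate(
    **inputs,
    max_new_tokens=64,
    eos_token_id=32021,
    pad_token_id=32021,  # assumption: reuse EOS as pad to avoid the missing-pad warning
)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))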
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dc0e472ea796a4b0e3ba96f79740fb6f66fd0ccf1d32f212f9853fecd655ba44
+ size 4986380064
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:299028d5a9b41e2203b779cbc459e20821401373c9c1988bcd73bed7552c2c64
+ size 399532808
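
The two .safetensors entries above are Git LFS pointers: the repository itself records only a SHA-256 oid and byte size (roughly 5.0 GB and 0.4 GB), while the shards live in LFS storage. A standard-library sketch, assuming the first shard has already been downloaded into the working directory, that re-hashes it against its pointer:

# Verify a downloaded shard against the LFS pointer above.
import hashlib

expected = "dc0e472ea796a4b0e3ba96f79740fb6f66fd0ccf1d32f212f9853fecd655ba44"
h = hashlib.sha256()
with open("model-00001-of-00002.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)
print(h.hexdigest() == expected)  # True if the file matches the pointer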
model.safetensors.index.json ADDED
@@ -0,0 +1,226 @@
+ {
+ "metadata": {
+ "total_size": 5385887744
+ },
+ "weight_map": {
+ "lm_head.weight": "model-00002-of-00002.safetensors",
+ "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.21.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.22.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+ "model.layers.23.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.23.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.23.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.23.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+ "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+ "model.norm.weight": "model-00002-of-00002.safetensors"
+ }
+ }
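
The index maps every tensor name to the shard that stores it; here everything lives in shard 00001 except lm_head.weight, model.norm.weight, and layer 23's layernorm and MLP weights, which land in shard 00002. A sketch of using the map to read one tensor without loading the whole model, assuming the safetensors package and locally available shards:

# Resolve a tensor name to its shard via the weight map, then load just that tensor.
import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "lm_head.weight"
shard = index["weight_map"][name]  # -> "model-00002-of-00002.safetensors"
with safe_open(shard, framework="pt") as f:
    tensor = f.get_tensor(name)
print(shard, tuple(tensor.shape))  # expected shape (32256, 2048) for this config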
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fe34b13dee7d9da42d001abf122dbaecf50b6bf7f52cb9d0a5447bdaa240d3a5
+ size 2699039674
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:183fdf51dbe79231d6619388215f2ac4056fa6643f2257500e69174f47856ec1
+ size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f728bf1bb63de4031722624d28ecc479007645e4c48e28a89d409237d3a10ed4
+ size 1064
trainer_state.json ADDED
@@ -0,0 +1,1119 @@
1
+ {
2
+ "best_metric": null,
3
+ "best_model_checkpoint": null,
4
+ "epoch": 3.0,
5
+ "eval_steps": 2500,
6
+ "global_step": 15000,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.02,
13
+ "grad_norm": 1.482173204421997,
14
+ "learning_rate": 4.000000000000001e-06,
15
+ "loss": 1.4165,
16
+ "step": 100
17
+ },
18
+ {
19
+ "epoch": 0.04,
20
+ "grad_norm": 1.0766079425811768,
21
+ "learning_rate": 8.000000000000001e-06,
22
+ "loss": 1.0351,
23
+ "step": 200
24
+ },
25
+ {
26
+ "epoch": 0.06,
27
+ "grad_norm": 0.8878026008605957,
28
+ "learning_rate": 1.2e-05,
29
+ "loss": 0.9911,
30
+ "step": 300
31
+ },
32
+ {
33
+ "epoch": 0.08,
34
+ "grad_norm": 0.8634878396987915,
35
+ "learning_rate": 1.6000000000000003e-05,
36
+ "loss": 0.9833,
37
+ "step": 400
38
+ },
39
+ {
40
+ "epoch": 0.1,
41
+ "grad_norm": 0.8714269399642944,
42
+ "learning_rate": 2e-05,
43
+ "loss": 0.9708,
44
+ "step": 500
45
+ },
46
+ {
47
+ "epoch": 0.12,
48
+ "grad_norm": 1.1247708797454834,
49
+ "learning_rate": 1.9862068965517244e-05,
50
+ "loss": 0.9742,
51
+ "step": 600
52
+ },
53
+ {
54
+ "epoch": 0.14,
55
+ "grad_norm": 0.8334652781486511,
56
+ "learning_rate": 1.9724137931034483e-05,
57
+ "loss": 0.9529,
58
+ "step": 700
59
+ },
60
+ {
61
+ "epoch": 0.16,
62
+ "grad_norm": 0.8450727462768555,
63
+ "learning_rate": 1.9586206896551725e-05,
64
+ "loss": 0.9545,
65
+ "step": 800
66
+ },
67
+ {
68
+ "epoch": 0.18,
69
+ "grad_norm": 0.9118452072143555,
70
+ "learning_rate": 1.9448275862068968e-05,
71
+ "loss": 0.9456,
72
+ "step": 900
73
+ },
74
+ {
75
+ "epoch": 0.2,
76
+ "grad_norm": 0.7823046445846558,
77
+ "learning_rate": 1.931034482758621e-05,
78
+ "loss": 0.9338,
79
+ "step": 1000
80
+ },
81
+ {
82
+ "epoch": 0.22,
83
+ "grad_norm": 1.1707388162612915,
84
+ "learning_rate": 1.917241379310345e-05,
85
+ "loss": 0.9433,
86
+ "step": 1100
87
+ },
88
+ {
89
+ "epoch": 0.24,
90
+ "grad_norm": 0.9278285503387451,
91
+ "learning_rate": 1.903448275862069e-05,
92
+ "loss": 0.9442,
93
+ "step": 1200
94
+ },
95
+ {
96
+ "epoch": 0.26,
97
+ "grad_norm": 0.8357234001159668,
98
+ "learning_rate": 1.8896551724137934e-05,
99
+ "loss": 0.9439,
100
+ "step": 1300
101
+ },
102
+ {
103
+ "epoch": 0.28,
104
+ "grad_norm": 0.8239058256149292,
105
+ "learning_rate": 1.8758620689655173e-05,
106
+ "loss": 0.9428,
107
+ "step": 1400
108
+ },
109
+ {
110
+ "epoch": 0.3,
111
+ "grad_norm": 0.8225810527801514,
112
+ "learning_rate": 1.8620689655172415e-05,
113
+ "loss": 0.945,
114
+ "step": 1500
115
+ },
116
+ {
117
+ "epoch": 0.32,
118
+ "grad_norm": 5.317204475402832,
119
+ "learning_rate": 1.8482758620689657e-05,
120
+ "loss": 0.9333,
121
+ "step": 1600
122
+ },
123
+ {
124
+ "epoch": 0.34,
125
+ "grad_norm": 0.8159579634666443,
126
+ "learning_rate": 1.8344827586206896e-05,
127
+ "loss": 0.9462,
128
+ "step": 1700
129
+ },
130
+ {
131
+ "epoch": 0.36,
132
+ "grad_norm": 0.8454849123954773,
133
+ "learning_rate": 1.820689655172414e-05,
134
+ "loss": 0.9283,
135
+ "step": 1800
136
+ },
137
+ {
138
+ "epoch": 0.38,
139
+ "grad_norm": 0.8753693103790283,
140
+ "learning_rate": 1.806896551724138e-05,
141
+ "loss": 0.9193,
142
+ "step": 1900
143
+ },
144
+ {
145
+ "epoch": 0.4,
146
+ "grad_norm": 0.808141827583313,
147
+ "learning_rate": 1.7931034482758623e-05,
148
+ "loss": 0.9568,
149
+ "step": 2000
150
+ },
151
+ {
152
+ "epoch": 0.42,
153
+ "grad_norm": 0.9040705561637878,
154
+ "learning_rate": 1.7793103448275862e-05,
155
+ "loss": 0.9189,
156
+ "step": 2100
157
+ },
158
+ {
159
+ "epoch": 0.44,
160
+ "grad_norm": 0.7747160792350769,
161
+ "learning_rate": 1.7655172413793105e-05,
162
+ "loss": 0.9285,
163
+ "step": 2200
164
+ },
165
+ {
166
+ "epoch": 0.46,
167
+ "grad_norm": 0.7610200643539429,
168
+ "learning_rate": 1.7517241379310347e-05,
169
+ "loss": 0.9339,
170
+ "step": 2300
171
+ },
172
+ {
173
+ "epoch": 0.48,
174
+ "grad_norm": 0.8340559601783752,
175
+ "learning_rate": 1.7379310344827586e-05,
176
+ "loss": 0.9362,
177
+ "step": 2400
178
+ },
179
+ {
180
+ "epoch": 0.5,
181
+ "grad_norm": 0.7427037954330444,
182
+ "learning_rate": 1.7241379310344828e-05,
183
+ "loss": 0.9302,
184
+ "step": 2500
185
+ },
186
+ {
187
+ "epoch": 0.5,
188
+ "eval_loss": 0.9145726561546326,
189
+ "eval_runtime": 40.1434,
190
+ "eval_samples_per_second": 24.911,
191
+ "eval_steps_per_second": 6.228,
192
+ "step": 2500
193
+ },
194
+ {
195
+ "epoch": 0.52,
196
+ "grad_norm": 0.7336578369140625,
197
+ "learning_rate": 1.710344827586207e-05,
198
+ "loss": 0.9184,
199
+ "step": 2600
200
+ },
201
+ {
202
+ "epoch": 0.54,
203
+ "grad_norm": 0.7962681651115417,
204
+ "learning_rate": 1.6965517241379313e-05,
205
+ "loss": 0.9207,
206
+ "step": 2700
207
+ },
208
+ {
209
+ "epoch": 0.56,
210
+ "grad_norm": 0.7666469812393188,
211
+ "learning_rate": 1.6827586206896552e-05,
212
+ "loss": 0.9169,
213
+ "step": 2800
214
+ },
215
+ {
216
+ "epoch": 0.58,
217
+ "grad_norm": 0.7549653053283691,
218
+ "learning_rate": 1.6689655172413794e-05,
219
+ "loss": 0.9231,
220
+ "step": 2900
221
+ },
222
+ {
223
+ "epoch": 0.6,
224
+ "grad_norm": 0.8372851610183716,
225
+ "learning_rate": 1.6551724137931037e-05,
226
+ "loss": 0.9189,
227
+ "step": 3000
228
+ },
229
+ {
230
+ "epoch": 0.62,
231
+ "grad_norm": 0.9210222363471985,
232
+ "learning_rate": 1.6413793103448276e-05,
233
+ "loss": 0.9227,
234
+ "step": 3100
235
+ },
236
+ {
237
+ "epoch": 0.64,
238
+ "grad_norm": 0.9357787370681763,
239
+ "learning_rate": 1.6275862068965518e-05,
240
+ "loss": 0.9192,
241
+ "step": 3200
242
+ },
243
+ {
244
+ "epoch": 0.66,
245
+ "grad_norm": 0.7520227432250977,
246
+ "learning_rate": 1.613793103448276e-05,
247
+ "loss": 0.8994,
248
+ "step": 3300
249
+ },
250
+ {
251
+ "epoch": 0.68,
252
+ "grad_norm": 0.9676304459571838,
253
+ "learning_rate": 1.6000000000000003e-05,
254
+ "loss": 0.9105,
255
+ "step": 3400
256
+ },
257
+ {
258
+ "epoch": 0.7,
259
+ "grad_norm": 0.8729006052017212,
260
+ "learning_rate": 1.586206896551724e-05,
261
+ "loss": 0.9184,
262
+ "step": 3500
263
+ },
264
+ {
265
+ "epoch": 0.72,
266
+ "grad_norm": 0.899910032749176,
267
+ "learning_rate": 1.5724137931034484e-05,
268
+ "loss": 0.9051,
269
+ "step": 3600
270
+ },
271
+ {
272
+ "epoch": 0.74,
273
+ "grad_norm": 0.874957799911499,
274
+ "learning_rate": 1.5586206896551726e-05,
275
+ "loss": 0.9252,
276
+ "step": 3700
277
+ },
278
+ {
279
+ "epoch": 0.76,
280
+ "grad_norm": 0.8180311322212219,
281
+ "learning_rate": 1.5448275862068965e-05,
282
+ "loss": 0.9204,
283
+ "step": 3800
284
+ },
285
+ {
286
+ "epoch": 0.78,
287
+ "grad_norm": 0.8456066846847534,
288
+ "learning_rate": 1.5310344827586208e-05,
289
+ "loss": 0.9159,
290
+ "step": 3900
291
+ },
292
+ {
293
+ "epoch": 0.8,
294
+ "grad_norm": 0.895601212978363,
295
+ "learning_rate": 1.5172413793103448e-05,
296
+ "loss": 0.9075,
297
+ "step": 4000
298
+ },
299
+ {
300
+ "epoch": 0.82,
301
+ "grad_norm": 0.7567282915115356,
302
+ "learning_rate": 1.503448275862069e-05,
303
+ "loss": 0.8972,
304
+ "step": 4100
305
+ },
306
+ {
307
+ "epoch": 0.84,
308
+ "grad_norm": 0.7225831151008606,
309
+ "learning_rate": 1.4896551724137933e-05,
310
+ "loss": 0.9193,
311
+ "step": 4200
312
+ },
313
+ {
314
+ "epoch": 0.86,
315
+ "grad_norm": 0.8456130623817444,
316
+ "learning_rate": 1.4758620689655174e-05,
317
+ "loss": 0.9043,
318
+ "step": 4300
319
+ },
320
+ {
321
+ "epoch": 0.88,
322
+ "grad_norm": 0.7458916306495667,
323
+ "learning_rate": 1.4620689655172416e-05,
324
+ "loss": 0.9069,
325
+ "step": 4400
326
+ },
327
+ {
328
+ "epoch": 0.9,
329
+ "grad_norm": 0.7301814556121826,
330
+ "learning_rate": 1.4482758620689657e-05,
331
+ "loss": 0.9143,
332
+ "step": 4500
333
+ },
334
+ {
335
+ "epoch": 0.92,
336
+ "grad_norm": 0.8358351588249207,
337
+ "learning_rate": 1.4344827586206897e-05,
338
+ "loss": 0.9024,
339
+ "step": 4600
340
+ },
341
+ {
342
+ "epoch": 0.94,
343
+ "grad_norm": 0.9452328085899353,
344
+ "learning_rate": 1.4206896551724138e-05,
345
+ "loss": 0.909,
346
+ "step": 4700
347
+ },
348
+ {
349
+ "epoch": 0.96,
350
+ "grad_norm": 0.7513816952705383,
351
+ "learning_rate": 1.406896551724138e-05,
352
+ "loss": 0.9049,
353
+ "step": 4800
354
+ },
355
+ {
356
+ "epoch": 0.98,
357
+ "grad_norm": 0.7348505258560181,
358
+ "learning_rate": 1.3931034482758621e-05,
359
+ "loss": 0.9024,
360
+ "step": 4900
361
+ },
362
+ {
363
+ "epoch": 1.0,
364
+ "grad_norm": 0.7450273633003235,
365
+ "learning_rate": 1.3793103448275863e-05,
366
+ "loss": 0.903,
367
+ "step": 5000
368
+ },
369
+ {
370
+ "epoch": 1.0,
371
+ "eval_loss": 0.9057763814926147,
372
+ "eval_runtime": 40.9147,
373
+ "eval_samples_per_second": 24.441,
374
+ "eval_steps_per_second": 6.11,
375
+ "step": 5000
376
+ },
377
+ {
378
+ "epoch": 1.02,
379
+ "grad_norm": 0.8260780572891235,
380
+ "learning_rate": 1.3655172413793106e-05,
381
+ "loss": 0.797,
382
+ "step": 5100
383
+ },
384
+ {
385
+ "epoch": 1.04,
386
+ "grad_norm": 0.9057289958000183,
387
+ "learning_rate": 1.3517241379310346e-05,
388
+ "loss": 0.7991,
389
+ "step": 5200
390
+ },
391
+ {
392
+ "epoch": 1.06,
393
+ "grad_norm": 0.7878429889678955,
394
+ "learning_rate": 1.3379310344827587e-05,
395
+ "loss": 0.7986,
396
+ "step": 5300
397
+ },
398
+ {
399
+ "epoch": 1.08,
400
+ "grad_norm": 0.8928379416465759,
401
+ "learning_rate": 1.324137931034483e-05,
402
+ "loss": 0.8137,
403
+ "step": 5400
404
+ },
405
+ {
406
+ "epoch": 1.1,
407
+ "grad_norm": 0.8249024152755737,
408
+ "learning_rate": 1.310344827586207e-05,
409
+ "loss": 0.7987,
410
+ "step": 5500
411
+ },
412
+ {
413
+ "epoch": 1.12,
414
+ "grad_norm": 0.7873898148536682,
415
+ "learning_rate": 1.296551724137931e-05,
416
+ "loss": 0.8021,
417
+ "step": 5600
418
+ },
419
+ {
420
+ "epoch": 1.14,
421
+ "grad_norm": 0.9511479735374451,
422
+ "learning_rate": 1.2827586206896551e-05,
423
+ "loss": 0.8004,
424
+ "step": 5700
425
+ },
426
+ {
427
+ "epoch": 1.16,
428
+ "grad_norm": 0.9020041227340698,
429
+ "learning_rate": 1.2689655172413795e-05,
430
+ "loss": 0.7941,
431
+ "step": 5800
432
+ },
433
+ {
434
+ "epoch": 1.18,
435
+ "grad_norm": 0.8485173583030701,
436
+ "learning_rate": 1.2551724137931036e-05,
437
+ "loss": 0.7892,
438
+ "step": 5900
439
+ },
440
+ {
441
+ "epoch": 1.2,
442
+ "grad_norm": 0.827191174030304,
443
+ "learning_rate": 1.2413793103448277e-05,
444
+ "loss": 0.8055,
445
+ "step": 6000
446
+ },
447
+ {
448
+ "epoch": 1.22,
449
+ "grad_norm": 0.7705573439598083,
450
+ "learning_rate": 1.2275862068965519e-05,
451
+ "loss": 0.7992,
452
+ "step": 6100
453
+ },
454
+ {
455
+ "epoch": 1.24,
456
+ "grad_norm": 0.806735634803772,
457
+ "learning_rate": 1.213793103448276e-05,
458
+ "loss": 0.8084,
459
+ "step": 6200
460
+ },
461
+ {
462
+ "epoch": 1.26,
463
+ "grad_norm": 0.7915472388267517,
464
+ "learning_rate": 1.2e-05,
465
+ "loss": 0.7968,
466
+ "step": 6300
467
+ },
468
+ {
469
+ "epoch": 1.28,
470
+ "grad_norm": 0.8447715044021606,
471
+ "learning_rate": 1.1862068965517241e-05,
472
+ "loss": 0.7857,
473
+ "step": 6400
474
+ },
475
+ {
476
+ "epoch": 1.3,
477
+ "grad_norm": 0.8174277544021606,
478
+ "learning_rate": 1.1724137931034483e-05,
479
+ "loss": 0.8081,
480
+ "step": 6500
481
+ },
482
+ {
483
+ "epoch": 1.32,
484
+ "grad_norm": 0.8464189767837524,
485
+ "learning_rate": 1.1586206896551726e-05,
486
+ "loss": 0.7923,
487
+ "step": 6600
488
+ },
489
+ {
490
+ "epoch": 1.34,
491
+ "grad_norm": 0.8201343417167664,
492
+ "learning_rate": 1.1448275862068966e-05,
493
+ "loss": 0.8015,
494
+ "step": 6700
495
+ },
496
+ {
497
+ "epoch": 1.36,
498
+ "grad_norm": 0.834135890007019,
499
+ "learning_rate": 1.1310344827586209e-05,
500
+ "loss": 0.7984,
501
+ "step": 6800
502
+ },
503
+ {
504
+ "epoch": 1.38,
505
+ "grad_norm": 0.8788368701934814,
506
+ "learning_rate": 1.117241379310345e-05,
507
+ "loss": 0.7932,
508
+ "step": 6900
509
+ },
510
+ {
511
+ "epoch": 1.4,
512
+ "grad_norm": 0.7641535401344299,
513
+ "learning_rate": 1.103448275862069e-05,
514
+ "loss": 0.7959,
515
+ "step": 7000
516
+ },
517
+ {
518
+ "epoch": 1.42,
519
+ "grad_norm": 0.8290014863014221,
520
+ "learning_rate": 1.0896551724137932e-05,
521
+ "loss": 0.7908,
522
+ "step": 7100
523
+ },
524
+ {
525
+ "epoch": 1.44,
526
+ "grad_norm": 0.9937741160392761,
527
+ "learning_rate": 1.0758620689655173e-05,
528
+ "loss": 0.818,
529
+ "step": 7200
530
+ },
531
+ {
532
+ "epoch": 1.46,
533
+ "grad_norm": 0.8346081972122192,
534
+ "learning_rate": 1.0620689655172414e-05,
535
+ "loss": 0.8101,
536
+ "step": 7300
537
+ },
538
+ {
539
+ "epoch": 1.48,
540
+ "grad_norm": 0.8698582053184509,
541
+ "learning_rate": 1.0482758620689658e-05,
542
+ "loss": 0.8128,
543
+ "step": 7400
544
+ },
545
+ {
546
+ "epoch": 1.5,
547
+ "grad_norm": 0.8040934801101685,
548
+ "learning_rate": 1.0344827586206898e-05,
549
+ "loss": 0.7991,
550
+ "step": 7500
551
+ },
552
+ {
553
+ "epoch": 1.5,
554
+ "eval_loss": 0.911612331867218,
555
+ "eval_runtime": 40.9979,
556
+ "eval_samples_per_second": 24.392,
557
+ "eval_steps_per_second": 6.098,
558
+ "step": 7500
559
+ },
560
+ {
561
+ "epoch": 1.52,
562
+ "grad_norm": 0.8177987933158875,
563
+ "learning_rate": 1.0206896551724139e-05,
564
+ "loss": 0.7867,
565
+ "step": 7600
566
+ },
567
+ {
568
+ "epoch": 1.54,
569
+ "grad_norm": 0.7633827924728394,
570
+ "learning_rate": 1.006896551724138e-05,
571
+ "loss": 0.7916,
572
+ "step": 7700
573
+ },
574
+ {
575
+ "epoch": 1.56,
576
+ "grad_norm": 0.8105002641677856,
577
+ "learning_rate": 9.931034482758622e-06,
578
+ "loss": 0.8111,
579
+ "step": 7800
580
+ },
581
+ {
582
+ "epoch": 1.58,
583
+ "grad_norm": 0.893203616142273,
584
+ "learning_rate": 9.793103448275863e-06,
585
+ "loss": 0.7984,
586
+ "step": 7900
587
+ },
588
+ {
589
+ "epoch": 1.6,
590
+ "grad_norm": 0.7978829145431519,
591
+ "learning_rate": 9.655172413793105e-06,
592
+ "loss": 0.8086,
593
+ "step": 8000
594
+ },
595
+ {
596
+ "epoch": 1.62,
597
+ "grad_norm": 0.8793106079101562,
598
+ "learning_rate": 9.517241379310346e-06,
599
+ "loss": 0.8047,
600
+ "step": 8100
601
+ },
602
+ {
603
+ "epoch": 1.64,
604
+ "grad_norm": 0.8467621803283691,
605
+ "learning_rate": 9.379310344827586e-06,
606
+ "loss": 0.8008,
607
+ "step": 8200
608
+ },
609
+ {
610
+ "epoch": 1.66,
611
+ "grad_norm": 0.8126978278160095,
612
+ "learning_rate": 9.241379310344829e-06,
613
+ "loss": 0.7989,
614
+ "step": 8300
615
+ },
616
+ {
617
+ "epoch": 1.68,
618
+ "grad_norm": 0.8217782378196716,
619
+ "learning_rate": 9.10344827586207e-06,
620
+ "loss": 0.7927,
621
+ "step": 8400
622
+ },
623
+ {
624
+ "epoch": 1.7,
625
+ "grad_norm": 0.7457430958747864,
626
+ "learning_rate": 8.965517241379312e-06,
627
+ "loss": 0.794,
628
+ "step": 8500
629
+ },
630
+ {
631
+ "epoch": 1.72,
632
+ "grad_norm": 0.8164616823196411,
633
+ "learning_rate": 8.827586206896552e-06,
634
+ "loss": 0.8057,
635
+ "step": 8600
636
+ },
637
+ {
638
+ "epoch": 1.74,
639
+ "grad_norm": 0.8319987058639526,
640
+ "learning_rate": 8.689655172413793e-06,
641
+ "loss": 0.789,
642
+ "step": 8700
643
+ },
644
+ {
645
+ "epoch": 1.76,
646
+ "grad_norm": 0.9327766299247742,
647
+ "learning_rate": 8.553103448275863e-06,
648
+ "loss": 0.786,
649
+ "step": 8800
650
+ },
651
+ {
652
+ "epoch": 1.78,
653
+ "grad_norm": 0.8273203372955322,
654
+ "learning_rate": 8.415172413793105e-06,
655
+ "loss": 0.7883,
656
+ "step": 8900
657
+ },
658
+ {
659
+ "epoch": 1.8,
660
+ "grad_norm": 0.9450199604034424,
661
+ "learning_rate": 8.277241379310346e-06,
662
+ "loss": 0.8003,
663
+ "step": 9000
664
+ },
665
+ {
666
+ "epoch": 1.82,
667
+ "grad_norm": 0.7524954080581665,
668
+ "learning_rate": 8.139310344827586e-06,
669
+ "loss": 0.7968,
670
+ "step": 9100
671
+ },
672
+ {
673
+ "epoch": 1.84,
674
+ "grad_norm": 0.8359555602073669,
675
+ "learning_rate": 8.001379310344829e-06,
676
+ "loss": 0.801,
677
+ "step": 9200
678
+ },
679
+ {
680
+ "epoch": 1.86,
681
+ "grad_norm": 0.8040117025375366,
682
+ "learning_rate": 7.86344827586207e-06,
683
+ "loss": 0.7914,
684
+ "step": 9300
685
+ },
686
+ {
687
+ "epoch": 1.88,
688
+ "grad_norm": 0.8810254335403442,
689
+ "learning_rate": 7.725517241379312e-06,
690
+ "loss": 0.8007,
691
+ "step": 9400
692
+ },
693
+ {
694
+ "epoch": 1.9,
695
+ "grad_norm": 0.83013516664505,
696
+ "learning_rate": 7.587586206896552e-06,
697
+ "loss": 0.7911,
698
+ "step": 9500
699
+ },
700
+ {
701
+ "epoch": 1.92,
702
+ "grad_norm": 0.8463912010192871,
703
+ "learning_rate": 7.449655172413793e-06,
704
+ "loss": 0.793,
705
+ "step": 9600
706
+ },
707
+ {
708
+ "epoch": 1.94,
709
+ "grad_norm": 0.8582866787910461,
710
+ "learning_rate": 7.311724137931035e-06,
711
+ "loss": 0.7897,
712
+ "step": 9700
713
+ },
714
+ {
715
+ "epoch": 1.96,
716
+ "grad_norm": 0.8311706185340881,
717
+ "learning_rate": 7.173793103448277e-06,
718
+ "loss": 0.7946,
719
+ "step": 9800
720
+ },
721
+ {
722
+ "epoch": 1.98,
723
+ "grad_norm": 0.8774682879447937,
724
+ "learning_rate": 7.0358620689655175e-06,
725
+ "loss": 0.788,
726
+ "step": 9900
727
+ },
728
+ {
729
+ "epoch": 2.0,
730
+ "grad_norm": 0.8584392666816711,
731
+ "learning_rate": 6.897931034482759e-06,
732
+ "loss": 0.7889,
733
+ "step": 10000
734
+ },
735
+ {
736
+ "epoch": 2.0,
737
+ "eval_loss": 0.8923286199569702,
738
+ "eval_runtime": 40.2115,
739
+ "eval_samples_per_second": 24.869,
740
+ "eval_steps_per_second": 6.217,
741
+ "step": 10000
742
+ },
743
+ {
744
+ "epoch": 2.02,
745
+ "grad_norm": 0.8329530954360962,
746
+ "learning_rate": 6.760000000000001e-06,
747
+ "loss": 0.7004,
748
+ "step": 10100
749
+ },
750
+ {
751
+ "epoch": 2.04,
752
+ "grad_norm": 0.8917152881622314,
753
+ "learning_rate": 6.622068965517242e-06,
754
+ "loss": 0.695,
755
+ "step": 10200
756
+ },
757
+ {
758
+ "epoch": 2.06,
759
+ "grad_norm": 1.0538383722305298,
760
+ "learning_rate": 6.4841379310344835e-06,
761
+ "loss": 0.6897,
762
+ "step": 10300
763
+ },
764
+ {
765
+ "epoch": 2.08,
766
+ "grad_norm": 0.8790658712387085,
767
+ "learning_rate": 6.346206896551724e-06,
768
+ "loss": 0.6994,
769
+ "step": 10400
770
+ },
771
+ {
772
+ "epoch": 2.1,
773
+ "grad_norm": 1.0203146934509277,
774
+ "learning_rate": 6.209655172413793e-06,
775
+ "loss": 0.698,
776
+ "step": 10500
777
+ },
778
+ {
779
+ "epoch": 2.12,
780
+ "grad_norm": 0.8630858063697815,
781
+ "learning_rate": 6.071724137931035e-06,
782
+ "loss": 0.6928,
783
+ "step": 10600
784
+ },
785
+ {
786
+ "epoch": 2.14,
787
+ "grad_norm": 0.8968507051467896,
788
+ "learning_rate": 5.933793103448277e-06,
789
+ "loss": 0.6933,
790
+ "step": 10700
791
+ },
792
+ {
793
+ "epoch": 2.16,
794
+ "grad_norm": 0.8782464861869812,
795
+ "learning_rate": 5.7958620689655175e-06,
796
+ "loss": 0.6946,
797
+ "step": 10800
798
+ },
799
+ {
800
+ "epoch": 2.18,
801
+ "grad_norm": 0.9616362452507019,
802
+ "learning_rate": 5.657931034482759e-06,
803
+ "loss": 0.6903,
804
+ "step": 10900
805
+ },
806
+ {
807
+ "epoch": 2.2,
808
+ "grad_norm": 0.9654956459999084,
809
+ "learning_rate": 5.5200000000000005e-06,
810
+ "loss": 0.6919,
811
+ "step": 11000
812
+ },
813
+ {
814
+ "epoch": 2.22,
815
+ "grad_norm": 0.9196248054504395,
816
+ "learning_rate": 5.382068965517242e-06,
817
+ "loss": 0.6853,
818
+ "step": 11100
819
+ },
820
+ {
821
+ "epoch": 2.24,
822
+ "grad_norm": 0.9637214541435242,
823
+ "learning_rate": 5.2441379310344835e-06,
824
+ "loss": 0.6927,
825
+ "step": 11200
826
+ },
827
+ {
828
+ "epoch": 2.26,
829
+ "grad_norm": 0.972012460231781,
830
+ "learning_rate": 5.106206896551724e-06,
831
+ "loss": 0.697,
832
+ "step": 11300
833
+ },
834
+ {
835
+ "epoch": 2.28,
836
+ "grad_norm": 1.038694143295288,
837
+ "learning_rate": 4.968275862068966e-06,
838
+ "loss": 0.6964,
839
+ "step": 11400
840
+ },
841
+ {
842
+ "epoch": 2.3,
843
+ "grad_norm": 0.946384072303772,
844
+ "learning_rate": 4.830344827586207e-06,
845
+ "loss": 0.7077,
846
+ "step": 11500
847
+ },
848
+ {
849
+ "epoch": 2.32,
850
+ "grad_norm": 0.897548496723175,
851
+ "learning_rate": 4.692413793103449e-06,
852
+ "loss": 0.6941,
853
+ "step": 11600
854
+ },
855
+ {
856
+ "epoch": 2.34,
857
+ "grad_norm": 0.9227349162101746,
858
+ "learning_rate": 4.55448275862069e-06,
859
+ "loss": 0.705,
860
+ "step": 11700
861
+ },
862
+ {
863
+ "epoch": 2.36,
864
+ "grad_norm": 0.9196397066116333,
865
+ "learning_rate": 4.416551724137932e-06,
866
+ "loss": 0.6989,
867
+ "step": 11800
868
+ },
869
+ {
870
+ "epoch": 2.38,
871
+ "grad_norm": 0.9564076662063599,
872
+ "learning_rate": 4.278620689655173e-06,
873
+ "loss": 0.688,
874
+ "step": 11900
875
+ },
876
+ {
877
+ "epoch": 2.4,
878
+ "grad_norm": 0.9881793260574341,
879
+ "learning_rate": 4.140689655172414e-06,
880
+ "loss": 0.6917,
881
+ "step": 12000
882
+ },
883
+ {
884
+ "epoch": 2.42,
885
+ "grad_norm": 0.92747962474823,
886
+ "learning_rate": 4.002758620689655e-06,
887
+ "loss": 0.6871,
888
+ "step": 12100
889
+ },
890
+ {
891
+ "epoch": 2.44,
892
+ "grad_norm": 0.9182614088058472,
893
+ "learning_rate": 3.864827586206897e-06,
894
+ "loss": 0.6868,
895
+ "step": 12200
896
+ },
897
+ {
898
+ "epoch": 2.46,
899
+ "grad_norm": 1.0040738582611084,
900
+ "learning_rate": 3.7268965517241383e-06,
901
+ "loss": 0.7016,
902
+ "step": 12300
903
+ },
904
+ {
905
+ "epoch": 2.48,
906
+ "grad_norm": 0.8651072382926941,
907
+ "learning_rate": 3.5889655172413794e-06,
908
+ "loss": 0.6906,
909
+ "step": 12400
910
+ },
911
+ {
912
+ "epoch": 2.5,
913
+ "grad_norm": 0.945471465587616,
914
+ "learning_rate": 3.4510344827586214e-06,
915
+ "loss": 0.6963,
916
+ "step": 12500
917
+ },
918
+ {
919
+ "epoch": 2.5,
920
+ "eval_loss": 0.8942536115646362,
921
+ "eval_runtime": 40.9486,
922
+ "eval_samples_per_second": 24.421,
923
+ "eval_steps_per_second": 6.105,
924
+ "step": 12500
925
+ },
926
+ {
927
+ "epoch": 2.52,
928
+ "grad_norm": 0.864122748374939,
929
+ "learning_rate": 3.3131034482758624e-06,
930
+ "loss": 0.7025,
931
+ "step": 12600
932
+ },
933
+ {
934
+ "epoch": 2.54,
935
+ "grad_norm": 1.153102993965149,
936
+ "learning_rate": 3.175172413793104e-06,
937
+ "loss": 0.699,
938
+ "step": 12700
939
+ },
940
+ {
941
+ "epoch": 2.56,
942
+ "grad_norm": 0.9909245371818542,
943
+ "learning_rate": 3.037241379310345e-06,
944
+ "loss": 0.6848,
945
+ "step": 12800
946
+ },
947
+ {
948
+ "epoch": 2.58,
949
+ "grad_norm": 0.9752694368362427,
950
+ "learning_rate": 2.8993103448275865e-06,
951
+ "loss": 0.6756,
952
+ "step": 12900
953
+ },
954
+ {
955
+ "epoch": 2.6,
956
+ "grad_norm": 1.0156644582748413,
957
+ "learning_rate": 2.7613793103448276e-06,
958
+ "loss": 0.7047,
959
+ "step": 13000
960
+ },
961
+ {
962
+ "epoch": 2.62,
963
+ "grad_norm": 0.9422524571418762,
964
+ "learning_rate": 2.6234482758620695e-06,
965
+ "loss": 0.6999,
966
+ "step": 13100
967
+ },
968
+ {
969
+ "epoch": 2.64,
970
+ "grad_norm": 0.9309016466140747,
971
+ "learning_rate": 2.4855172413793106e-06,
972
+ "loss": 0.6982,
973
+ "step": 13200
974
+ },
975
+ {
976
+ "epoch": 2.66,
977
+ "grad_norm": 0.9932184815406799,
978
+ "learning_rate": 2.3475862068965517e-06,
979
+ "loss": 0.6878,
980
+ "step": 13300
981
+ },
982
+ {
983
+ "epoch": 2.68,
984
+ "grad_norm": 1.0034270286560059,
985
+ "learning_rate": 2.209655172413793e-06,
986
+ "loss": 0.6943,
987
+ "step": 13400
988
+ },
989
+ {
990
+ "epoch": 2.7,
991
+ "grad_norm": 0.9979943037033081,
992
+ "learning_rate": 2.0717241379310347e-06,
993
+ "loss": 0.6874,
994
+ "step": 13500
995
+ },
996
+ {
997
+ "epoch": 2.72,
998
+ "grad_norm": 0.9405415058135986,
999
+ "learning_rate": 1.933793103448276e-06,
1000
+ "loss": 0.675,
1001
+ "step": 13600
1002
+ },
1003
+ {
1004
+ "epoch": 2.74,
1005
+ "grad_norm": 0.973407506942749,
1006
+ "learning_rate": 1.7958620689655173e-06,
1007
+ "loss": 0.6751,
1008
+ "step": 13700
1009
+ },
1010
+ {
1011
+ "epoch": 2.76,
1012
+ "grad_norm": 0.9658547043800354,
1013
+ "learning_rate": 1.6579310344827588e-06,
1014
+ "loss": 0.7019,
1015
+ "step": 13800
1016
+ },
1017
+ {
1018
+ "epoch": 2.78,
1019
+ "grad_norm": 0.9559837579727173,
1020
+ "learning_rate": 1.52e-06,
1021
+ "loss": 0.6795,
1022
+ "step": 13900
1023
+ },
1024
+ {
1025
+ "epoch": 2.8,
1026
+ "grad_norm": 0.9583409428596497,
1027
+ "learning_rate": 1.3820689655172416e-06,
1028
+ "loss": 0.6943,
1029
+ "step": 14000
1030
+ },
1031
+ {
1032
+ "epoch": 2.82,
1033
+ "grad_norm": 0.8599256873130798,
1034
+ "learning_rate": 1.2441379310344829e-06,
1035
+ "loss": 0.6915,
1036
+ "step": 14100
1037
+ },
1038
+ {
1039
+ "epoch": 2.84,
1040
+ "grad_norm": 0.9818318486213684,
1041
+ "learning_rate": 1.1062068965517241e-06,
1042
+ "loss": 0.6845,
1043
+ "step": 14200
1044
+ },
1045
+ {
1046
+ "epoch": 2.86,
1047
+ "grad_norm": 1.0233429670333862,
1048
+ "learning_rate": 9.682758620689656e-07,
1049
+ "loss": 0.6936,
1050
+ "step": 14300
1051
+ },
1052
+ {
1053
+ "epoch": 2.88,
1054
+ "grad_norm": 0.9797420501708984,
1055
+ "learning_rate": 8.303448275862069e-07,
1056
+ "loss": 0.6903,
1057
+ "step": 14400
1058
+ },
1059
+ {
1060
+ "epoch": 2.9,
1061
+ "grad_norm": 0.9917966723442078,
1062
+ "learning_rate": 6.924137931034483e-07,
1063
+ "loss": 0.7028,
1064
+ "step": 14500
1065
+ },
1066
+ {
1067
+ "epoch": 2.92,
1068
+ "grad_norm": 0.9207854270935059,
1069
+ "learning_rate": 5.544827586206897e-07,
1070
+ "loss": 0.6739,
1071
+ "step": 14600
1072
+ },
1073
+ {
1074
+ "epoch": 2.94,
1075
+ "grad_norm": 0.9161053895950317,
1076
+ "learning_rate": 4.1655172413793107e-07,
1077
+ "loss": 0.7003,
1078
+ "step": 14700
1079
+ },
1080
+ {
1081
+ "epoch": 2.96,
1082
+ "grad_norm": 1.0565011501312256,
1083
+ "learning_rate": 2.7862068965517247e-07,
1084
+ "loss": 0.6872,
1085
+ "step": 14800
1086
+ },
1087
+ {
1088
+ "epoch": 2.98,
1089
+ "grad_norm": 1.0897576808929443,
1090
+ "learning_rate": 1.406896551724138e-07,
1091
+ "loss": 0.6919,
1092
+ "step": 14900
1093
+ },
1094
+ {
1095
+ "epoch": 3.0,
1096
+ "grad_norm": 0.9112550020217896,
1097
+ "learning_rate": 2.758620689655173e-09,
1098
+ "loss": 0.6804,
1099
+ "step": 15000
1100
+ },
1101
+ {
1102
+ "epoch": 3.0,
1103
+ "eval_loss": 0.9123731255531311,
1104
+ "eval_runtime": 40.7263,
1105
+ "eval_samples_per_second": 24.554,
1106
+ "eval_steps_per_second": 6.139,
1107
+ "step": 15000
1108
+ }
1109
+ ],
1110
+ "logging_steps": 100,
1111
+ "max_steps": 15000,
1112
+ "num_input_tokens_seen": 0,
1113
+ "num_train_epochs": 3,
1114
+ "save_steps": 2500,
1115
+ "total_flos": 1.88804379967488e+18,
1116
+ "train_batch_size": 4,
1117
+ "trial_name": null,
1118
+ "trial_params": null
1119
+ }
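
trainer_state.json holds the Trainer's log_history: a loss/learning-rate entry every 100 steps and an eval entry every 2500 steps across 3 epochs (15000 steps); the lowest eval_loss in this log is about 0.892 at step 10000. A small standard-library sketch for pulling out that eval curve, assuming the file sits in the working directory:

# Extract (step, eval_loss) pairs from the Trainer log above.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
for step, loss in evals:
    print(f"step {step:5d}  eval_loss {loss:.4f}")

best_step, best_loss = min(evals, key=lambda x: x[1])
print("best:", best_step, round(best_loss, 4))  # step 10000, ~0.8923 in this run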
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f2844010f60d782f48a1430f48583b118c9c59b86c24f41fb14bb3f5174b5022
+ size 4920