SystemAdmin123 committed
Commit 68a6197 · verified · 1 Parent(s): 2331579

Training in progress, step 400, checkpoint

last-checkpoint/added_tokens.json ADDED
@@ -0,0 +1,5 @@
+ {
+   "<|endoftext|>": 151643,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644
+ }
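For reference, these are the three ChatML control tokens that Qwen1.5-style chat models append after the 151,643 base BPE entries; added_tokens.json is what lets a freshly loaded tokenizer map them back to their IDs. A minimal check, assuming transformers is installed and the checkpoint sits under the illustrative local path last-checkpoint:

# Sketch: confirm the control tokens resolve to the IDs listed above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("last-checkpoint")
for t in ("<|endoftext|>", "<|im_start|>", "<|im_end|>"):
    print(t, tok.convert_tokens_to_ids(t))
# Expected: 151643, 151644, 151645, matching added_tokens.json.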
last-checkpoint/config.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "_name_or_path": "Qwen/Qwen1.5-0.5B-Chat",
+   "architectures": [
+     "Qwen2ForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "eos_token_id": 151645,
+   "hidden_act": "silu",
+   "hidden_size": 1024,
+   "initializer_range": 0.02,
+   "intermediate_size": 2816,
+   "max_position_embeddings": 32768,
+   "max_window_layers": 21,
+   "model_type": "qwen2",
+   "num_attention_heads": 16,
+   "num_hidden_layers": 24,
+   "num_key_value_heads": 16,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 1000000.0,
+   "sliding_window": null,
+   "tie_word_embeddings": true,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.48.1",
+   "use_cache": false,
+   "use_sliding_window": false,
+   "vocab_size": 151646
+ }
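The geometry above (hidden_size 1024, 24 layers, intermediate_size 2816, vocab 151646, tied embeddings) pins down the parameter count, which is worth sanity-checking against the model.safetensors size further down. A back-of-envelope sketch, assuming the standard Qwen2 block layout (Q/K/V projections with bias, no bias on the output projection or MLP, SwiGLU MLP, two RMSNorms per layer):

# Sketch: parameter count implied by config.json.
h, inter, layers, vocab = 1024, 2816, 24, 151646

embed = vocab * h                  # input embeddings (output head is tied)
attn  = 4 * h * h + 3 * h          # q/k/v/o weights + q/k/v biases
mlp   = 3 * h * inter              # gate, up, down projections
norms = 2 * h                      # two RMSNorm weight vectors per layer
total = embed + layers * (attn + mlp + norms) + h  # + final norm

print(total)      # 463,690,752 parameters
print(total * 2)  # 927,381,504 bytes in bfloat16

At 2 bytes per bfloat16 parameter this lands within ~33 kB of the 927,414,184-byte model.safetensors below; the remainder is the safetensors header.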
last-checkpoint/generation_config.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "bos_token_id": 151643,
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151643
+   ],
+   "pad_token_id": 151643,
+   "repetition_penalty": 1.1,
+   "top_p": 0.8,
+   "transformers_version": "4.48.1"
+ }
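These values are the sampling defaults that model.generate() falls back to when no arguments are passed; either <|im_end|> or <|endoftext|> ends generation. A minimal sketch, again assuming the illustrative local path:

# Sketch: generate with the checkpoint's default sampling settings
# (do_sample=True, top_p=0.8, repetition_penalty=1.1 from the file above).
from transformers import AutoModelForCausalLM, AutoTokenizer

tok = AutoTokenizer.from_pretrained("last-checkpoint")
model = AutoModelForCausalLM.from_pretrained("last-checkpoint")

prompt = "<|im_start|>user\nHi!<|im_end|>\n<|im_start|>assistant\n"
ids = tok(prompt, return_tensors="pt").input_ids
out = model.generate(ids, max_new_tokens=32)
print(tok.decode(out[0], skip_special_tokens=True))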
last-checkpoint/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
last-checkpoint/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ce784b84b672fff299f815541e54ec587d2cdcded415ef1365a2384d855b9be7
+ size 927414184
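This is a Git LFS pointer, not the weights themselves: the 927 MB object is fetched on clone or download, and the oid lets you check the transfer. A sketch, with an assumed local download path:

# Sketch: verify a downloaded LFS object against the pointer's sha256.
import hashlib

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

print(sha256_of("last-checkpoint/model.safetensors"))
# Should print ce784b84b672fff299f815541e54ec587d2cdcded415ef1365a2384d855b9be7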
last-checkpoint/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da41eca7c23aafd158d2a8f1c9db1d385409206f293c330f6bf158e90e3328b1
+ size 942882362
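One detail worth noting: 942,882,362 bytes over the ~463.7M parameters computed above is roughly 2 bytes per parameter, far below the ~8 bytes per parameter that fp32 AdamW moments would need. That would be consistent with quantized (e.g. 8-bit) optimizer states, but the actual choice is recorded in training_args.bin, not in this diff:

# Sketch: optimizer state is ~2 bytes/param, not the ~8 of fp32 AdamW.
params = 463_690_752             # from the config.json arithmetic above
print(942_882_362 / params)      # ≈ 2.03 bytes per parameter
# fp32 AdamW would need ~3.7 GB (two fp32 moments per parameter);
# ~2 bytes/param suggests quantized states, though this is an inference.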
last-checkpoint/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9feae33b2fec0a6229240e7adaee6ecc8f5cfdf1a8bd0e827b1d8a241424e3c0
+ size 14244
last-checkpoint/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a673aaf85c0fe6b6c29cb8f3e7dbd829eef637110e4ad9a775f3fcf001c92591
+ size 1064
last-checkpoint/special_tokens_map.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "eos_token": {
+     "content": "<|im_end|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<|endoftext|>",
+     "lstrip": false,
+     "normalized": false,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
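Note that padding (<|endoftext|>) is deliberately distinct from end-of-turn (<|im_end|>), so padded positions can be masked out while the model still learns to emit the stop token. A quick check, with the same illustrative path as above:

# Sketch: pad and eos resolve to different IDs, as mapped above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("last-checkpoint")
print(tok.pad_token, tok.pad_token_id)  # <|endoftext|> 151643
print(tok.eos_token, tok.eos_token_id)  # <|im_end|> 151645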
last-checkpoint/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bcfe42da0a4497e8b2b172c1f9f4ec423a46dc12907f4349c55025f670422ba9
+ size 11418266
last-checkpoint/tokenizer_config.json ADDED
@@ -0,0 +1,45 @@
+ {
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "151643": {
+       "content": "<|endoftext|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151644": {
+       "content": "<|im_start|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "151645": {
+       "content": "<|im_end|>",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "additional_special_tokens": [
+     "<|im_start|>",
+     "<|im_end|>"
+   ],
+   "bos_token": null,
+   "chat_template": "{% for message in messages %}{% if loop.first and messages[0]['role'] != 'system' %}{{ '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}{% endif %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "<|im_end|>",
+   "errors": "replace",
+   "extra_special_tokens": {},
+   "model_max_length": 32768,
+   "pad_token": "<|endoftext|>",
+   "split_special_tokens": false,
+   "tokenizer_class": "Qwen2Tokenizer",
+   "unk_token": null,
+   "use_fast": true
+ }
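The chat_template above is the stock ChatML recipe: it injects a default system message when the conversation does not start with one, wraps every turn in <|im_start|>role … <|im_end|>, and optionally appends an open assistant turn. A sketch of how it renders, path illustrative as before:

# Sketch: render the chat template for a single user turn.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("last-checkpoint")
text = tok.apply_chat_template(
    [{"role": "user", "content": "Hello"}],
    tokenize=False,
    add_generation_prompt=True,
)
print(text)
# <|im_start|>system
# You are a helpful assistant.<|im_end|>
# <|im_start|>user
# Hello<|im_end|>
# <|im_start|>assistant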
last-checkpoint/trainer_state.json ADDED
@@ -0,0 +1,337 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 0.11837821840781296,
+   "eval_steps": 200,
+   "global_step": 400,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.0002959455460195324,
+       "eval_loss": 3.6609864234924316,
+       "eval_runtime": 59.7747,
+       "eval_samples_per_second": 25.128,
+       "eval_steps_per_second": 6.29,
+       "step": 1
+     },
+     {
+       "epoch": 0.002959455460195324,
+       "grad_norm": 131.0,
+       "learning_rate": 1.6000000000000003e-05,
+       "loss": 3.0588,
+       "step": 10
+     },
+     {
+       "epoch": 0.005918910920390648,
+       "grad_norm": 15.875,
+       "learning_rate": 3.2000000000000005e-05,
+       "loss": 2.8042,
+       "step": 20
+     },
+     {
+       "epoch": 0.008878366380585973,
+       "grad_norm": 20.125,
+       "learning_rate": 4.8e-05,
+       "loss": 2.3522,
+       "step": 30
+     },
+     {
+       "epoch": 0.011837821840781295,
+       "grad_norm": 27.5,
+       "learning_rate": 6.400000000000001e-05,
+       "loss": 2.5387,
+       "step": 40
+     },
+     {
+       "epoch": 0.01479727730097662,
+       "grad_norm": 73.0,
+       "learning_rate": 8e-05,
+       "loss": 2.3299,
+       "step": 50
+     },
+     {
+       "epoch": 0.017756732761171946,
+       "grad_norm": 12.5,
+       "learning_rate": 9.6e-05,
+       "loss": 2.654,
+       "step": 60
+     },
+     {
+       "epoch": 0.020716188221367268,
+       "grad_norm": 12.375,
+       "learning_rate": 0.00011200000000000001,
+       "loss": 2.5244,
+       "step": 70
+     },
+     {
+       "epoch": 0.02367564368156259,
+       "grad_norm": 21.875,
+       "learning_rate": 0.00012800000000000002,
+       "loss": 2.605,
+       "step": 80
+     },
+     {
+       "epoch": 0.026635099141757917,
+       "grad_norm": 37.25,
+       "learning_rate": 0.000144,
+       "loss": 2.6757,
+       "step": 90
+     },
+     {
+       "epoch": 0.02959455460195324,
+       "grad_norm": 67.5,
+       "learning_rate": 0.00016,
+       "loss": 3.3479,
+       "step": 100
+     },
+     {
+       "epoch": 0.032554010062148565,
+       "grad_norm": 22.5,
+       "learning_rate": 0.00017600000000000002,
+       "loss": 3.5544,
+       "step": 110
+     },
+     {
+       "epoch": 0.03551346552234389,
+       "grad_norm": 14.9375,
+       "learning_rate": 0.000192,
+       "loss": 3.1455,
+       "step": 120
+     },
+     {
+       "epoch": 0.03847292098253921,
+       "grad_norm": 24.25,
+       "learning_rate": 0.0001999978128380225,
+       "loss": 3.2288,
+       "step": 130
+     },
+     {
+       "epoch": 0.041432376442734536,
+       "grad_norm": 26.5,
+       "learning_rate": 0.0001999803161162393,
+       "loss": 4.085,
+       "step": 140
+     },
+     {
+       "epoch": 0.04439183190292986,
+       "grad_norm": 77.0,
+       "learning_rate": 0.00019994532573409262,
+       "loss": 3.6674,
+       "step": 150
+     },
+     {
+       "epoch": 0.04735128736312518,
+       "grad_norm": 15.0625,
+       "learning_rate": 0.00019989284781388617,
+       "loss": 3.9097,
+       "step": 160
+     },
+     {
+       "epoch": 0.05031074282332051,
+       "grad_norm": 16.0,
+       "learning_rate": 0.00019982289153773646,
+       "loss": 3.5733,
+       "step": 170
+     },
+     {
+       "epoch": 0.053270198283515834,
+       "grad_norm": 17.875,
+       "learning_rate": 0.00019973546914596623,
+       "loss": 4.0335,
+       "step": 180
+     },
+     {
+       "epoch": 0.05622965374371116,
+       "grad_norm": 29.375,
+       "learning_rate": 0.00019963059593496268,
+       "loss": 3.7777,
+       "step": 190
+     },
+     {
+       "epoch": 0.05918910920390648,
+       "grad_norm": 60.0,
+       "learning_rate": 0.00019950829025450114,
+       "loss": 3.8668,
+       "step": 200
+     },
+     {
+       "epoch": 0.05918910920390648,
+       "eval_loss": 4.461722373962402,
+       "eval_runtime": 60.1211,
+       "eval_samples_per_second": 24.983,
+       "eval_steps_per_second": 6.254,
+       "step": 200
+     },
+     {
+       "epoch": 0.062148564664101805,
+       "grad_norm": 9.4375,
+       "learning_rate": 0.0001993685735045343,
+       "loss": 4.3043,
+       "step": 210
+     },
+     {
+       "epoch": 0.06510802012429713,
+       "grad_norm": 60.75,
+       "learning_rate": 0.0001992114701314478,
+       "loss": 3.8791,
+       "step": 220
+     },
+     {
+       "epoch": 0.06806747558449246,
+       "grad_norm": 16.875,
+       "learning_rate": 0.000199037007623783,
+       "loss": 4.1739,
+       "step": 230
+     },
+     {
+       "epoch": 0.07102693104468778,
+       "grad_norm": 19.625,
+       "learning_rate": 0.00019884521650742715,
+       "loss": 3.9914,
+       "step": 240
+     },
+     {
+       "epoch": 0.0739863865048831,
+       "grad_norm": 418.0,
+       "learning_rate": 0.00019863613034027224,
+       "loss": 4.8681,
+       "step": 250
+     },
+     {
+       "epoch": 0.07694584196507842,
+       "grad_norm": 14.0625,
+       "learning_rate": 0.0001984097857063434,
+       "loss": 4.1649,
+       "step": 260
+     },
+     {
+       "epoch": 0.07990529742527375,
+       "grad_norm": 12.8125,
+       "learning_rate": 0.0001981662222093976,
+       "loss": 4.2323,
+       "step": 270
+     },
+     {
+       "epoch": 0.08286475288546907,
+       "grad_norm": 19.125,
+       "learning_rate": 0.00019790548246599447,
+       "loss": 4.2854,
+       "step": 280
+     },
+     {
+       "epoch": 0.0858242083456644,
+       "grad_norm": 20.0,
+       "learning_rate": 0.00019762761209803927,
+       "loss": 3.9894,
+       "step": 290
+     },
+     {
+       "epoch": 0.08878366380585972,
+       "grad_norm": 45.5,
+       "learning_rate": 0.0001973326597248006,
+       "loss": 4.6403,
+       "step": 300
+     },
+     {
+       "epoch": 0.09174311926605505,
+       "grad_norm": 10.125,
+       "learning_rate": 0.00019702067695440332,
+       "loss": 4.5261,
+       "step": 310
+     },
+     {
+       "epoch": 0.09470257472625036,
+       "grad_norm": 17.875,
+       "learning_rate": 0.00019669171837479873,
+       "loss": 3.9252,
+       "step": 320
+     },
+     {
+       "epoch": 0.09766203018644569,
+       "grad_norm": 14.125,
+       "learning_rate": 0.00019634584154421317,
+       "loss": 4.3635,
+       "step": 330
+     },
+     {
+       "epoch": 0.10062148564664102,
+       "grad_norm": 18.125,
+       "learning_rate": 0.00019598310698107702,
+       "loss": 4.3755,
+       "step": 340
+     },
+     {
+       "epoch": 0.10358094110683634,
+       "grad_norm": 176.0,
+       "learning_rate": 0.00019560357815343577,
+       "loss": 4.0719,
+       "step": 350
+     },
+     {
+       "epoch": 0.10654039656703167,
+       "grad_norm": 9.9375,
+       "learning_rate": 0.00019520732146784491,
+       "loss": 4.5,
+       "step": 360
+     },
+     {
+       "epoch": 0.109499852027227,
+       "grad_norm": 23.375,
+       "learning_rate": 0.0001947944062577507,
+       "loss": 3.9226,
+       "step": 370
+     },
+     {
+       "epoch": 0.11245930748742232,
+       "grad_norm": 18.125,
+       "learning_rate": 0.00019436490477135878,
+       "loss": 4.3131,
+       "step": 380
+     },
+     {
+       "epoch": 0.11541876294761765,
+       "grad_norm": 17.5,
+       "learning_rate": 0.00019391889215899299,
+       "loss": 4.3054,
+       "step": 390
+     },
+     {
+       "epoch": 0.11837821840781296,
+       "grad_norm": 49.25,
+       "learning_rate": 0.0001934564464599461,
+       "loss": 4.178,
+       "step": 400
+     },
+     {
+       "epoch": 0.11837821840781296,
+       "eval_loss": 5.044467449188232,
+       "eval_runtime": 59.0352,
+       "eval_samples_per_second": 25.442,
+       "eval_steps_per_second": 6.369,
+       "step": 400
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 2500,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 1,
+   "save_steps": 400,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 6078652634628096.0,
+   "train_batch_size": 4,
+   "trial_name": null,
+   "trial_params": null
+ }
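The logged learning rates trace linear warmup at 1.6e-6 per step up to a 2e-4 peak around step 125, then cosine decay toward max_steps 2500. The warmup length and peak below are inferred from log_history rather than read from training_args.bin, but they reproduce the logged values closely:

# Sketch: a schedule consistent with the log_history above.
import math

def lr(step, peak=2e-4, warmup=125, total=2500):
    if step < warmup:
        return peak * step / warmup          # linear warmup
    progress = (step - warmup) / (total - warmup)
    return peak * 0.5 * (1.0 + math.cos(math.pi * progress))  # cosine decay

print(lr(120))  # 1.92e-04       (logged: 0.000192)
print(lr(400))  # ~1.934564e-04  (logged: 0.0001934564464599461)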
last-checkpoint/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c63701d9c217b1919e42df47130110c22127f119d66a02c851a680ea4abc179
+ size 6904
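training_args.bin is a pickled TrainingArguments object rather than readable JSON, so the optimizer and scheduler details inferred above could be confirmed by loading it. Note that torch.load unpickles arbitrary code, so only do this with files you trust:

# Sketch: recover the full TrainingArguments from the pickled file.
import torch

args = torch.load("last-checkpoint/training_args.bin", weights_only=False)
print(args.optim, args.learning_rate, args.warmup_steps, args.lr_scheduler_type)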
last-checkpoint/vocab.json ADDED
The diff for this file is too large to render. See raw diff