Commit 7f61ad5 (verified) by anasalter · parent: 6639d73

Upload 34 files

checkpoint-2500/config.json ADDED
@@ -0,0 +1,45 @@
+{
+  "_num_labels": 1,
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "id2label": {
+    "0": "LABEL_0"
+  },
+  "initializer_range": 0.02,
+  "label2id": {
+    "LABEL_0": 0
+  },
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 6,
+  "n_positions": 1024,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.52.4",
+  "use_cache": true,
+  "vocab_size": 50257
+}
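
Note (not part of the commit): config.json above describes a 6-layer GPT-2
variant (n_layer=6, n_embd=768, n_head=12, vocab_size=50257), roughly 82M
parameters, which matches the 327,657,928-byte float32 safetensors file
below. A minimal sketch of loading this checkpoint with transformers,
assuming "checkpoint-2500" is a local copy of this directory rather than a
Hub repo id:

    from transformers import AutoModelForCausalLM, AutoTokenizer

    model = AutoModelForCausalLM.from_pretrained("checkpoint-2500")
    tokenizer = AutoTokenizer.from_pretrained("checkpoint-2500")

    print(model.config.n_layer, model.config.n_embd)  # 6 768
    print(model.num_parameters())                     # ~82M parameters
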
checkpoint-2500/generation_config.json ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 50256,
+  "eos_token_id": 50256,
+  "transformers_version": "4.52.4"
+}
checkpoint-2500/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2500/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5859b7acc1ca636b27f3bbd94f890b0a62291f6a69eebfb81bf2b25e20a3d2c2
+size 327657928
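
Note (not part of the commit): model.safetensors, optimizer.pt, rng_state.pth,
scheduler.pt, and training_args.bin are stored as Git LFS pointer files; the
three lines above record only the spec version, the object's sha256, and its
byte size. A minimal sketch of verifying a downloaded copy against this
pointer, path assumed local:

    import hashlib

    h = hashlib.sha256()
    # Stream the ~327 MB file in 1 MiB chunks instead of reading it whole.
    with open("checkpoint-2500/model.safetensors", "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)

    expected = "5859b7acc1ca636b27f3bbd94f890b0a62291f6a69eebfb81bf2b25e20a3d2c2"
    print(h.hexdigest() == expected)
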
checkpoint-2500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fca1331b8724132c51ff119ed4aab428f9ffb5f6f9460df5868a14be783ca359
+size 655364474
checkpoint-2500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e427b8372a46a3ed723d3daf99ac4a5bdabda18516def6ed27db100ff7c7775
+size 14244
checkpoint-2500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d351e4131bf71ee391b33e8fb7c6946b2b7392c762c08220c49d816fbff2b361
+size 1064
checkpoint-2500/special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+{
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "pad_token": "<|endoftext|>",
+  "unk_token": "<|endoftext|>"
+}
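
Note (not part of the commit): GPT-2 has no dedicated pad token, so this map
reuses <|endoftext|> (id 50256) for bos, eos, pad, and unk. A sketch of the
equivalent setup in code, path assumed local:

    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("checkpoint-2500")
    tok.pad_token = tok.eos_token  # mirrors the map above
    print(tok.pad_token_id)        # 50256
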
checkpoint-2500/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2500/tokenizer_config.json ADDED
@@ -0,0 +1,21 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "50256": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|endoftext|>",
+  "extra_special_tokens": {},
+  "model_max_length": 1024,
+  "pad_token": "<|endoftext|>",
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
+}
checkpoint-2500/trainer_state.json ADDED
@@ -0,0 +1,384 @@
+{
+  "best_global_step": null,
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 2.9515938606847696,
+  "eval_steps": 500,
+  "global_step": 2500,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.0590318772136954,
+      "grad_norm": 6.553243637084961,
+      "learning_rate": 2.45e-05,
+      "loss": 4.6761,
+      "step": 50
+    },
+    {
+      "epoch": 0.1180637544273908,
+      "grad_norm": 8.582610130310059,
+      "learning_rate": 4.9500000000000004e-05,
+      "loss": 4.2203,
+      "step": 100
+    },
+    {
+      "epoch": 0.1770956316410862,
+      "grad_norm": 9.231163024902344,
+      "learning_rate": 4.8996312986480954e-05,
+      "loss": 3.9576,
+      "step": 150
+    },
+    {
+      "epoch": 0.2361275088547816,
+      "grad_norm": 7.166695594787598,
+      "learning_rate": 4.7972142564522735e-05,
+      "loss": 3.8839,
+      "step": 200
+    },
+    {
+      "epoch": 0.29515938606847697,
+      "grad_norm": 7.867295265197754,
+      "learning_rate": 4.6947972142564524e-05,
+      "loss": 3.9156,
+      "step": 250
+    },
+    {
+      "epoch": 0.3541912632821724,
+      "grad_norm": 9.70073127746582,
+      "learning_rate": 4.592380172060631e-05,
+      "loss": 3.7892,
+      "step": 300
+    },
+    {
+      "epoch": 0.4132231404958678,
+      "grad_norm": 10.692851066589355,
+      "learning_rate": 4.48996312986481e-05,
+      "loss": 3.7978,
+      "step": 350
+    },
+    {
+      "epoch": 0.4722550177095632,
+      "grad_norm": 8.353034019470215,
+      "learning_rate": 4.387546087668988e-05,
+      "loss": 3.6882,
+      "step": 400
+    },
+    {
+      "epoch": 0.5312868949232585,
+      "grad_norm": 7.379666328430176,
+      "learning_rate": 4.285129045473167e-05,
+      "loss": 3.6928,
+      "step": 450
+    },
+    {
+      "epoch": 0.5903187721369539,
+      "grad_norm": 8.379216194152832,
+      "learning_rate": 4.182712003277345e-05,
+      "loss": 3.697,
+      "step": 500
+    },
+    {
+      "epoch": 0.6493506493506493,
+      "grad_norm": 5.806574821472168,
+      "learning_rate": 4.080294961081524e-05,
+      "loss": 3.6493,
+      "step": 550
+    },
+    {
+      "epoch": 0.7083825265643447,
+      "grad_norm": 5.262754917144775,
+      "learning_rate": 3.977877918885703e-05,
+      "loss": 3.6309,
+      "step": 600
+    },
+    {
+      "epoch": 0.7674144037780402,
+      "grad_norm": 7.273934364318848,
+      "learning_rate": 3.875460876689882e-05,
+      "loss": 3.6006,
+      "step": 650
+    },
+    {
+      "epoch": 0.8264462809917356,
+      "grad_norm": 9.894654273986816,
+      "learning_rate": 3.77304383449406e-05,
+      "loss": 3.5184,
+      "step": 700
+    },
+    {
+      "epoch": 0.885478158205431,
+      "grad_norm": 7.7802510261535645,
+      "learning_rate": 3.670626792298239e-05,
+      "loss": 3.5088,
+      "step": 750
+    },
+    {
+      "epoch": 0.9445100354191264,
+      "grad_norm": 4.538764953613281,
+      "learning_rate": 3.568209750102417e-05,
+      "loss": 3.5163,
+      "step": 800
+    },
+    {
+      "epoch": 1.0035419126328218,
+      "grad_norm": 9.740047454833984,
+      "learning_rate": 3.465792707906596e-05,
+      "loss": 3.4815,
+      "step": 850
+    },
+    {
+      "epoch": 1.062573789846517,
+      "grad_norm": 5.822250843048096,
+      "learning_rate": 3.3633756657107746e-05,
+      "loss": 3.3865,
+      "step": 900
+    },
+    {
+      "epoch": 1.1216056670602126,
+      "grad_norm": 8.263042449951172,
+      "learning_rate": 3.260958623514953e-05,
+      "loss": 3.3305,
+      "step": 950
+    },
+    {
+      "epoch": 1.1806375442739079,
+      "grad_norm": 5.721621513366699,
+      "learning_rate": 3.1585415813191316e-05,
+      "loss": 3.2901,
+      "step": 1000
+    },
+    {
+      "epoch": 1.2396694214876034,
+      "grad_norm": 5.100550651550293,
+      "learning_rate": 3.05612453912331e-05,
+      "loss": 3.2108,
+      "step": 1050
+    },
+    {
+      "epoch": 1.2987012987012987,
+      "grad_norm": 6.78065299987793,
+      "learning_rate": 2.9537074969274887e-05,
+      "loss": 3.2782,
+      "step": 1100
+    },
+    {
+      "epoch": 1.3577331759149942,
+      "grad_norm": 6.02623176574707,
+      "learning_rate": 2.851290454731668e-05,
+      "loss": 3.2726,
+      "step": 1150
+    },
+    {
+      "epoch": 1.4167650531286895,
+      "grad_norm": 6.936172962188721,
+      "learning_rate": 2.7488734125358463e-05,
+      "loss": 3.3236,
+      "step": 1200
+    },
+    {
+      "epoch": 1.4757969303423848,
+      "grad_norm": 6.60649299621582,
+      "learning_rate": 2.646456370340025e-05,
+      "loss": 3.2326,
+      "step": 1250
+    },
+    {
+      "epoch": 1.5348288075560803,
+      "grad_norm": 9.457938194274902,
+      "learning_rate": 2.5440393281442034e-05,
+      "loss": 3.2819,
+      "step": 1300
+    },
+    {
+      "epoch": 1.5938606847697758,
+      "grad_norm": 8.299750328063965,
+      "learning_rate": 2.441622285948382e-05,
+      "loss": 3.1736,
+      "step": 1350
+    },
+    {
+      "epoch": 1.6528925619834711,
+      "grad_norm": 6.856365203857422,
+      "learning_rate": 2.3392052437525604e-05,
+      "loss": 3.2544,
+      "step": 1400
+    },
+    {
+      "epoch": 1.7119244391971664,
+      "grad_norm": 7.17230224609375,
+      "learning_rate": 2.236788201556739e-05,
+      "loss": 3.2322,
+      "step": 1450
+    },
+    {
+      "epoch": 1.770956316410862,
+      "grad_norm": 5.7366814613342285,
+      "learning_rate": 2.1343711593609177e-05,
+      "loss": 3.1887,
+      "step": 1500
+    },
+    {
+      "epoch": 1.8299881936245572,
+      "grad_norm": 8.30285358428955,
+      "learning_rate": 2.0319541171650962e-05,
+      "loss": 3.2267,
+      "step": 1550
+    },
+    {
+      "epoch": 1.8890200708382525,
+      "grad_norm": 9.261168479919434,
+      "learning_rate": 1.929537074969275e-05,
+      "loss": 3.1754,
+      "step": 1600
+    },
+    {
+      "epoch": 1.948051948051948,
+      "grad_norm": 7.7448248863220215,
+      "learning_rate": 1.8271200327734536e-05,
+      "loss": 3.2218,
+      "step": 1650
+    },
+    {
+      "epoch": 2.0070838252656436,
+      "grad_norm": 6.691303730010986,
+      "learning_rate": 1.724702990577632e-05,
+      "loss": 3.1039,
+      "step": 1700
+    },
+    {
+      "epoch": 2.0661157024793386,
+      "grad_norm": 6.595968723297119,
+      "learning_rate": 1.622285948381811e-05,
+      "loss": 3.0747,
+      "step": 1750
+    },
+    {
+      "epoch": 2.125147579693034,
+      "grad_norm": 9.761649131774902,
+      "learning_rate": 1.5198689061859894e-05,
+      "loss": 3.0566,
+      "step": 1800
+    },
+    {
+      "epoch": 2.1841794569067297,
+      "grad_norm": 4.652196407318115,
+      "learning_rate": 1.417451863990168e-05,
+      "loss": 3.1184,
+      "step": 1850
+    },
+    {
+      "epoch": 2.243211334120425,
+      "grad_norm": 9.410402297973633,
+      "learning_rate": 1.3150348217943468e-05,
+      "loss": 3.0613,
+      "step": 1900
+    },
+    {
+      "epoch": 2.3022432113341202,
+      "grad_norm": 6.215067386627197,
+      "learning_rate": 1.2126177795985253e-05,
+      "loss": 3.1054,
+      "step": 1950
+    },
+    {
+      "epoch": 2.3612750885478158,
+      "grad_norm": 7.889769077301025,
+      "learning_rate": 1.110200737402704e-05,
+      "loss": 2.999,
+      "step": 2000
+    },
+    {
+      "epoch": 2.4203069657615113,
+      "grad_norm": 5.15559720993042,
+      "learning_rate": 1.0077836952068826e-05,
+      "loss": 3.0281,
+      "step": 2050
+    },
+    {
+      "epoch": 2.479338842975207,
+      "grad_norm": 5.545195579528809,
+      "learning_rate": 9.053666530110611e-06,
+      "loss": 3.0612,
+      "step": 2100
+    },
+    {
+      "epoch": 2.538370720188902,
+      "grad_norm": 9.813671112060547,
+      "learning_rate": 8.029496108152396e-06,
+      "loss": 3.03,
+      "step": 2150
+    },
+    {
+      "epoch": 2.5974025974025974,
+      "grad_norm": 9.66518783569336,
+      "learning_rate": 7.005325686194184e-06,
+      "loss": 3.0564,
+      "step": 2200
+    },
+    {
+      "epoch": 2.656434474616293,
+      "grad_norm": 7.902320384979248,
+      "learning_rate": 5.981155264235969e-06,
+      "loss": 2.9861,
+      "step": 2250
+    },
+    {
+      "epoch": 2.7154663518299884,
+      "grad_norm": 5.70012092590332,
+      "learning_rate": 4.956984842277755e-06,
+      "loss": 3.0116,
+      "step": 2300
+    },
+    {
+      "epoch": 2.7744982290436835,
+      "grad_norm": 6.998533725738525,
+      "learning_rate": 3.9328144203195416e-06,
+      "loss": 3.008,
+      "step": 2350
+    },
+    {
+      "epoch": 2.833530106257379,
+      "grad_norm": 5.67135763168335,
+      "learning_rate": 2.9086439983613274e-06,
+      "loss": 3.03,
+      "step": 2400
+    },
+    {
+      "epoch": 2.8925619834710745,
+      "grad_norm": 8.523286819458008,
+      "learning_rate": 1.8844735764031136e-06,
+      "loss": 2.9847,
+      "step": 2450
+    },
+    {
+      "epoch": 2.9515938606847696,
+      "grad_norm": 4.999369144439697,
+      "learning_rate": 8.603031544448998e-07,
+      "loss": 3.0083,
+      "step": 2500
+    }
+  ],
+  "logging_steps": 50,
+  "max_steps": 2541,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 3,
+  "save_steps": 100,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": false
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 2612183615668224.0,
+  "train_batch_size": 4,
+  "trial_name": null,
+  "trial_params": null
+}
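
Note (not part of the commit): log_history above is plain JSON, so the
training curve is easy to inspect; loss falls from 4.68 at step 50 to about
3.01 at step 2500 while the schedule decays the learning rate toward zero.
A minimal sketch, path assumed local:

    import json

    with open("checkpoint-2500/trainer_state.json") as f:
        state = json.load(f)

    for e in state["log_history"]:
        print(f"step {e['step']:>4}  loss {e['loss']:.4f}  lr {e['learning_rate']:.2e}")
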
checkpoint-2500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b43a10cbeab2486580e3c97036c9c59de74a5cb622bfbe9990a04d6081af66fb
+size 5240
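
Note (not part of the commit): training_args.bin is a pickled
transformers.TrainingArguments object, and together with optimizer.pt,
scheduler.pt, and rng_state.pth it lets Trainer.train(resume_from_checkpoint=...)
continue the run exactly from this step. A minimal sketch of inspecting it;
recent torch versions default torch.load to weights_only=True, so unpickling
a full object needs weights_only=False (trusted files only):

    import torch

    args = torch.load("checkpoint-2500/training_args.bin", weights_only=False)
    print(args.per_device_train_batch_size, args.num_train_epochs)
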
checkpoint-2500/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2541/config.json ADDED
@@ -0,0 +1,45 @@
+{
+  "_num_labels": 1,
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "id2label": {
+    "0": "LABEL_0"
+  },
+  "initializer_range": 0.02,
+  "label2id": {
+    "LABEL_0": 0
+  },
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 6,
+  "n_positions": 1024,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.52.4",
+  "use_cache": true,
+  "vocab_size": 50257
+}
checkpoint-2541/generation_config.json ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 50256,
+  "eos_token_id": 50256,
+  "transformers_version": "4.52.4"
+}
checkpoint-2541/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2541/model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f76ad76a85bbd7e512dfb8e9c35f101d0c9a51a3f7c44c47e5fe1c8dad6585ff
+size 327657928
checkpoint-2541/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:69692423d117239d6d5781b6cc417d39001f4579d39bd8da10bb08bc9c0724f0
+size 655364474
checkpoint-2541/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d28cf66ab7f99188aec2d491b34e9966ed3cd6b375812454d17615135c0ead3c
+size 14244
checkpoint-2541/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ebcba9f610fab3cc3aea4374ee102158c749fefe8c895c4dd1061aa870c071c9
+size 1064
checkpoint-2541/special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+{
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "pad_token": "<|endoftext|>",
+  "unk_token": "<|endoftext|>"
+}
checkpoint-2541/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-2541/tokenizer_config.json ADDED
@@ -0,0 +1,21 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "50256": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|endoftext|>",
+  "extra_special_tokens": {},
+  "model_max_length": 1024,
+  "pad_token": "<|endoftext|>",
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
+}
checkpoint-2541/trainer_state.json ADDED
@@ -0,0 +1,384 @@
+{
+  "best_global_step": null,
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 3.0,
+  "eval_steps": 500,
+  "global_step": 2541,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.0590318772136954,
+      "grad_norm": 6.553243637084961,
+      "learning_rate": 2.45e-05,
+      "loss": 4.6761,
+      "step": 50
+    },
+    {
+      "epoch": 0.1180637544273908,
+      "grad_norm": 8.582610130310059,
+      "learning_rate": 4.9500000000000004e-05,
+      "loss": 4.2203,
+      "step": 100
+    },
+    {
+      "epoch": 0.1770956316410862,
+      "grad_norm": 9.231163024902344,
+      "learning_rate": 4.8996312986480954e-05,
+      "loss": 3.9576,
+      "step": 150
+    },
+    {
+      "epoch": 0.2361275088547816,
+      "grad_norm": 7.166695594787598,
+      "learning_rate": 4.7972142564522735e-05,
+      "loss": 3.8839,
+      "step": 200
+    },
+    {
+      "epoch": 0.29515938606847697,
+      "grad_norm": 7.867295265197754,
+      "learning_rate": 4.6947972142564524e-05,
+      "loss": 3.9156,
+      "step": 250
+    },
+    {
+      "epoch": 0.3541912632821724,
+      "grad_norm": 9.70073127746582,
+      "learning_rate": 4.592380172060631e-05,
+      "loss": 3.7892,
+      "step": 300
+    },
+    {
+      "epoch": 0.4132231404958678,
+      "grad_norm": 10.692851066589355,
+      "learning_rate": 4.48996312986481e-05,
+      "loss": 3.7978,
+      "step": 350
+    },
+    {
+      "epoch": 0.4722550177095632,
+      "grad_norm": 8.353034019470215,
+      "learning_rate": 4.387546087668988e-05,
+      "loss": 3.6882,
+      "step": 400
+    },
+    {
+      "epoch": 0.5312868949232585,
+      "grad_norm": 7.379666328430176,
+      "learning_rate": 4.285129045473167e-05,
+      "loss": 3.6928,
+      "step": 450
+    },
+    {
+      "epoch": 0.5903187721369539,
+      "grad_norm": 8.379216194152832,
+      "learning_rate": 4.182712003277345e-05,
+      "loss": 3.697,
+      "step": 500
+    },
+    {
+      "epoch": 0.6493506493506493,
+      "grad_norm": 5.806574821472168,
+      "learning_rate": 4.080294961081524e-05,
+      "loss": 3.6493,
+      "step": 550
+    },
+    {
+      "epoch": 0.7083825265643447,
+      "grad_norm": 5.262754917144775,
+      "learning_rate": 3.977877918885703e-05,
+      "loss": 3.6309,
+      "step": 600
+    },
+    {
+      "epoch": 0.7674144037780402,
+      "grad_norm": 7.273934364318848,
+      "learning_rate": 3.875460876689882e-05,
+      "loss": 3.6006,
+      "step": 650
+    },
+    {
+      "epoch": 0.8264462809917356,
+      "grad_norm": 9.894654273986816,
+      "learning_rate": 3.77304383449406e-05,
+      "loss": 3.5184,
+      "step": 700
+    },
+    {
+      "epoch": 0.885478158205431,
+      "grad_norm": 7.7802510261535645,
+      "learning_rate": 3.670626792298239e-05,
+      "loss": 3.5088,
+      "step": 750
+    },
+    {
+      "epoch": 0.9445100354191264,
+      "grad_norm": 4.538764953613281,
+      "learning_rate": 3.568209750102417e-05,
+      "loss": 3.5163,
+      "step": 800
+    },
+    {
+      "epoch": 1.0035419126328218,
+      "grad_norm": 9.740047454833984,
+      "learning_rate": 3.465792707906596e-05,
+      "loss": 3.4815,
+      "step": 850
+    },
+    {
+      "epoch": 1.062573789846517,
+      "grad_norm": 5.822250843048096,
+      "learning_rate": 3.3633756657107746e-05,
+      "loss": 3.3865,
+      "step": 900
+    },
+    {
+      "epoch": 1.1216056670602126,
+      "grad_norm": 8.263042449951172,
+      "learning_rate": 3.260958623514953e-05,
+      "loss": 3.3305,
+      "step": 950
+    },
+    {
+      "epoch": 1.1806375442739079,
+      "grad_norm": 5.721621513366699,
+      "learning_rate": 3.1585415813191316e-05,
+      "loss": 3.2901,
+      "step": 1000
+    },
+    {
+      "epoch": 1.2396694214876034,
+      "grad_norm": 5.100550651550293,
+      "learning_rate": 3.05612453912331e-05,
+      "loss": 3.2108,
+      "step": 1050
+    },
+    {
+      "epoch": 1.2987012987012987,
+      "grad_norm": 6.78065299987793,
+      "learning_rate": 2.9537074969274887e-05,
+      "loss": 3.2782,
+      "step": 1100
+    },
+    {
+      "epoch": 1.3577331759149942,
+      "grad_norm": 6.02623176574707,
+      "learning_rate": 2.851290454731668e-05,
+      "loss": 3.2726,
+      "step": 1150
+    },
+    {
+      "epoch": 1.4167650531286895,
+      "grad_norm": 6.936172962188721,
+      "learning_rate": 2.7488734125358463e-05,
+      "loss": 3.3236,
+      "step": 1200
+    },
+    {
+      "epoch": 1.4757969303423848,
+      "grad_norm": 6.60649299621582,
+      "learning_rate": 2.646456370340025e-05,
+      "loss": 3.2326,
+      "step": 1250
+    },
+    {
+      "epoch": 1.5348288075560803,
+      "grad_norm": 9.457938194274902,
+      "learning_rate": 2.5440393281442034e-05,
+      "loss": 3.2819,
+      "step": 1300
+    },
+    {
+      "epoch": 1.5938606847697758,
+      "grad_norm": 8.299750328063965,
+      "learning_rate": 2.441622285948382e-05,
+      "loss": 3.1736,
+      "step": 1350
+    },
+    {
+      "epoch": 1.6528925619834711,
+      "grad_norm": 6.856365203857422,
+      "learning_rate": 2.3392052437525604e-05,
+      "loss": 3.2544,
+      "step": 1400
+    },
+    {
+      "epoch": 1.7119244391971664,
+      "grad_norm": 7.17230224609375,
+      "learning_rate": 2.236788201556739e-05,
+      "loss": 3.2322,
+      "step": 1450
+    },
+    {
+      "epoch": 1.770956316410862,
+      "grad_norm": 5.7366814613342285,
+      "learning_rate": 2.1343711593609177e-05,
+      "loss": 3.1887,
+      "step": 1500
+    },
+    {
+      "epoch": 1.8299881936245572,
+      "grad_norm": 8.30285358428955,
+      "learning_rate": 2.0319541171650962e-05,
+      "loss": 3.2267,
+      "step": 1550
+    },
+    {
+      "epoch": 1.8890200708382525,
+      "grad_norm": 9.261168479919434,
+      "learning_rate": 1.929537074969275e-05,
+      "loss": 3.1754,
+      "step": 1600
+    },
+    {
+      "epoch": 1.948051948051948,
+      "grad_norm": 7.7448248863220215,
+      "learning_rate": 1.8271200327734536e-05,
+      "loss": 3.2218,
+      "step": 1650
+    },
+    {
+      "epoch": 2.0070838252656436,
+      "grad_norm": 6.691303730010986,
+      "learning_rate": 1.724702990577632e-05,
+      "loss": 3.1039,
+      "step": 1700
+    },
+    {
+      "epoch": 2.0661157024793386,
+      "grad_norm": 6.595968723297119,
+      "learning_rate": 1.622285948381811e-05,
+      "loss": 3.0747,
+      "step": 1750
+    },
+    {
+      "epoch": 2.125147579693034,
+      "grad_norm": 9.761649131774902,
+      "learning_rate": 1.5198689061859894e-05,
+      "loss": 3.0566,
+      "step": 1800
+    },
+    {
+      "epoch": 2.1841794569067297,
+      "grad_norm": 4.652196407318115,
+      "learning_rate": 1.417451863990168e-05,
+      "loss": 3.1184,
+      "step": 1850
+    },
+    {
+      "epoch": 2.243211334120425,
+      "grad_norm": 9.410402297973633,
+      "learning_rate": 1.3150348217943468e-05,
+      "loss": 3.0613,
+      "step": 1900
+    },
+    {
+      "epoch": 2.3022432113341202,
+      "grad_norm": 6.215067386627197,
+      "learning_rate": 1.2126177795985253e-05,
+      "loss": 3.1054,
+      "step": 1950
+    },
+    {
+      "epoch": 2.3612750885478158,
+      "grad_norm": 7.889769077301025,
+      "learning_rate": 1.110200737402704e-05,
+      "loss": 2.999,
+      "step": 2000
+    },
+    {
+      "epoch": 2.4203069657615113,
+      "grad_norm": 5.15559720993042,
+      "learning_rate": 1.0077836952068826e-05,
+      "loss": 3.0281,
+      "step": 2050
+    },
+    {
+      "epoch": 2.479338842975207,
+      "grad_norm": 5.545195579528809,
+      "learning_rate": 9.053666530110611e-06,
+      "loss": 3.0612,
+      "step": 2100
+    },
+    {
+      "epoch": 2.538370720188902,
+      "grad_norm": 9.813671112060547,
+      "learning_rate": 8.029496108152396e-06,
+      "loss": 3.03,
+      "step": 2150
+    },
+    {
+      "epoch": 2.5974025974025974,
+      "grad_norm": 9.66518783569336,
+      "learning_rate": 7.005325686194184e-06,
+      "loss": 3.0564,
+      "step": 2200
+    },
+    {
+      "epoch": 2.656434474616293,
+      "grad_norm": 7.902320384979248,
+      "learning_rate": 5.981155264235969e-06,
+      "loss": 2.9861,
+      "step": 2250
+    },
+    {
+      "epoch": 2.7154663518299884,
+      "grad_norm": 5.70012092590332,
+      "learning_rate": 4.956984842277755e-06,
+      "loss": 3.0116,
+      "step": 2300
+    },
+    {
+      "epoch": 2.7744982290436835,
+      "grad_norm": 6.998533725738525,
+      "learning_rate": 3.9328144203195416e-06,
+      "loss": 3.008,
+      "step": 2350
+    },
+    {
+      "epoch": 2.833530106257379,
+      "grad_norm": 5.67135763168335,
+      "learning_rate": 2.9086439983613274e-06,
+      "loss": 3.03,
+      "step": 2400
+    },
+    {
+      "epoch": 2.8925619834710745,
+      "grad_norm": 8.523286819458008,
+      "learning_rate": 1.8844735764031136e-06,
+      "loss": 2.9847,
+      "step": 2450
+    },
+    {
+      "epoch": 2.9515938606847696,
+      "grad_norm": 4.999369144439697,
+      "learning_rate": 8.603031544448998e-07,
+      "loss": 3.0083,
+      "step": 2500
+    }
+  ],
+  "logging_steps": 50,
+  "max_steps": 2541,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 3,
+  "save_steps": 100,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 2654644337639424.0,
+  "train_batch_size": 4,
+  "trial_name": null,
+  "trial_params": null
+}
checkpoint-2541/training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b43a10cbeab2486580e3c97036c9c59de74a5cb622bfbe9990a04d6081af66fb
+size 5240
checkpoint-2541/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
config.json ADDED
@@ -0,0 +1,45 @@
+{
+  "_num_labels": 1,
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "id2label": {
+    "0": "LABEL_0"
+  },
+  "initializer_range": 0.02,
+  "label2id": {
+    "LABEL_0": 0
+  },
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 6,
+  "n_positions": 1024,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.52.4",
+  "use_cache": true,
+  "vocab_size": 50257
+}
generation_config.json ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 50256,
+  "eos_token_id": 50256,
+  "transformers_version": "4.52.4"
+}
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f76ad76a85bbd7e512dfb8e9c35f101d0c9a51a3f7c44c47e5fe1c8dad6585ff
+size 327657928
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+{
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "pad_token": "<|endoftext|>",
+  "unk_token": "<|endoftext|>"
+}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,21 @@
+{
+  "add_prefix_space": false,
+  "added_tokens_decoder": {
+    "50256": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<|endoftext|>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|endoftext|>",
+  "extra_special_tokens": {},
+  "model_max_length": 1024,
+  "pad_token": "<|endoftext|>",
+  "tokenizer_class": "GPT2Tokenizer",
+  "unk_token": "<|endoftext|>"
+}
vocab.json ADDED
The diff for this file is too large to render. See raw diff
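
Note (not part of the commit): the root-level files mirror checkpoint-2541,
the final state of the run (global_step 2541 of max_steps 2541, with
should_training_stop set). A minimal sketch of generating with the final
model using the sampling defaults recorded in task_specific_params
("do_sample": true, "max_length": 50), assuming "." is a local clone of the
repo:

    from transformers import AutoModelForCausalLM, AutoTokenizer

    model = AutoModelForCausalLM.from_pretrained(".")
    tokenizer = AutoTokenizer.from_pretrained(".")

    inputs = tokenizer("Once upon a time", return_tensors="pt")
    output = model.generate(**inputs, do_sample=True, max_length=50)
    print(tokenizer.decode(output[0], skip_special_tokens=True))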