mllm-dev committed on
Commit
4148993
·
verified ·
1 Parent(s): bc87768

Upload folder using huggingface_hub

Browse files
Files changed (40) hide show
  1. checkpoint-528/config.json +39 -0
  2. checkpoint-528/generation_config.json +5 -0
  3. checkpoint-528/merges.txt +0 -0
  4. checkpoint-528/model.safetensors +3 -0
  5. checkpoint-528/optimizer.pt +3 -0
  6. checkpoint-528/rng_state.pth +3 -0
  7. checkpoint-528/scheduler.pt +3 -0
  8. checkpoint-528/special_tokens_map.json +6 -0
  9. checkpoint-528/tokenizer.json +0 -0
  10. checkpoint-528/tokenizer_config.json +21 -0
  11. checkpoint-528/trainer_state.json +457 -0
  12. checkpoint-528/training_args.bin +3 -0
  13. checkpoint-528/vocab.json +0 -0
  14. checkpoint-544/config.json +39 -0
  15. checkpoint-544/generation_config.json +5 -0
  16. checkpoint-544/merges.txt +0 -0
  17. checkpoint-544/model.safetensors +3 -0
  18. checkpoint-544/optimizer.pt +3 -0
  19. checkpoint-544/rng_state.pth +3 -0
  20. checkpoint-544/scheduler.pt +3 -0
  21. checkpoint-544/special_tokens_map.json +6 -0
  22. checkpoint-544/tokenizer.json +0 -0
  23. checkpoint-544/tokenizer_config.json +21 -0
  24. checkpoint-544/trainer_state.json +470 -0
  25. checkpoint-544/training_args.bin +3 -0
  26. checkpoint-544/vocab.json +0 -0
  27. checkpoint-560/config.json +39 -0
  28. checkpoint-560/generation_config.json +5 -0
  29. checkpoint-560/merges.txt +0 -0
  30. checkpoint-560/model.safetensors +3 -0
  31. checkpoint-560/optimizer.pt +3 -0
  32. checkpoint-560/rng_state.pth +3 -0
  33. checkpoint-560/scheduler.pt +3 -0
  34. checkpoint-560/special_tokens_map.json +6 -0
  35. checkpoint-560/tokenizer.json +0 -0
  36. checkpoint-560/tokenizer_config.json +21 -0
  37. checkpoint-560/trainer_state.json +483 -0
  38. checkpoint-560/training_args.bin +3 -0
  39. checkpoint-560/vocab.json +0 -0
  40. runs/Mar26_17-35-02_lambda-hyperplane05/events.out.tfevents.1711492509.lambda-hyperplane05.53901.0 +2 -2
checkpoint-528/config.json ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "openai-community/gpt2",
3
+ "activation_function": "gelu_new",
4
+ "architectures": [
5
+ "GPT2LMHeadModel"
6
+ ],
7
+ "attn_pdrop": 0.1,
8
+ "bos_token_id": 50256,
9
+ "embd_pdrop": 0.1,
10
+ "eos_token_id": 50256,
11
+ "initializer_range": 0.02,
12
+ "layer_norm_epsilon": 1e-05,
13
+ "model_type": "gpt2",
14
+ "n_ctx": 1024,
15
+ "n_embd": 768,
16
+ "n_head": 12,
17
+ "n_inner": null,
18
+ "n_layer": 12,
19
+ "n_positions": 1024,
20
+ "reorder_and_upcast_attn": false,
21
+ "resid_pdrop": 0.1,
22
+ "scale_attn_by_inverse_layer_idx": false,
23
+ "scale_attn_weights": true,
24
+ "summary_activation": null,
25
+ "summary_first_dropout": 0.1,
26
+ "summary_proj_to_labels": true,
27
+ "summary_type": "cls_index",
28
+ "summary_use_proj": true,
29
+ "task_specific_params": {
30
+ "text-generation": {
31
+ "do_sample": true,
32
+ "max_length": 50
33
+ }
34
+ },
35
+ "torch_dtype": "float32",
36
+ "transformers_version": "4.40.0.dev0",
37
+ "use_cache": true,
38
+ "vocab_size": 50257
39
+ }
checkpoint-528/generation_config.json ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {
2
+ "bos_token_id": 50256,
3
+ "eos_token_id": 50256,
4
+ "transformers_version": "4.40.0.dev0"
5
+ }
checkpoint-528/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-528/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:17d0457bce1a2ccaf9fe07debebb09aa290edbe81a74297e5c7a98171074d887
3
+ size 497774208
checkpoint-528/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:71f3de299538ebdd5547328acfbb9a7fe8cb0ab1d8e477b710bd94d730871f2b
3
+ size 995641861
checkpoint-528/rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:940dbafd9bb1687bf311a17c45712b776955ec0ebbcb5c340a9c9e4346b2423d
3
+ size 14575
checkpoint-528/scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aa1407b3f2d8e4716b5d0e3390421499c4de3b79ee97ce5ff62348b7e01565b4
3
+ size 627
checkpoint-528/special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "<|endoftext|>",
3
+ "eos_token": "<|endoftext|>",
4
+ "pad_token": "<|endoftext|>",
5
+ "unk_token": "<|endoftext|>"
6
+ }
checkpoint-528/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-528/tokenizer_config.json ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "50256": {
5
+ "content": "<|endoftext|>",
6
+ "lstrip": false,
7
+ "normalized": true,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ }
12
+ },
13
+ "bos_token": "<|endoftext|>",
14
+ "clean_up_tokenization_spaces": true,
15
+ "eos_token": "<|endoftext|>",
16
+ "model_max_length": 1024,
17
+ "pad_token": "<|endoftext|>",
18
+ "padding_side": "left",
19
+ "tokenizer_class": "GPT2Tokenizer",
20
+ "unk_token": "<|endoftext|>"
21
+ }
checkpoint-528/trainer_state.json ADDED
@@ -0,0 +1,457 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 6.074151039123535,
3
+ "best_model_checkpoint": "bill_sum_finetune_test_gpt2/checkpoint-528",
4
+ "epoch": 33.0,
5
+ "eval_steps": 500,
6
+ "global_step": 528,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 1.0,
13
+ "eval_gen_len": 600.0,
14
+ "eval_loss": 6.843741416931152,
15
+ "eval_rouge1": 0.4053,
16
+ "eval_rouge2": 0.1708,
17
+ "eval_rougeL": 0.2228,
18
+ "eval_rougeLsum": 0.35,
19
+ "eval_runtime": 21.0375,
20
+ "eval_samples_per_second": 11.788,
21
+ "eval_steps_per_second": 0.19,
22
+ "step": 16
23
+ },
24
+ {
25
+ "epoch": 2.0,
26
+ "eval_gen_len": 600.0,
27
+ "eval_loss": 6.511104583740234,
28
+ "eval_rouge1": 0.3978,
29
+ "eval_rouge2": 0.1673,
30
+ "eval_rougeL": 0.2181,
31
+ "eval_rougeLsum": 0.3434,
32
+ "eval_runtime": 21.5368,
33
+ "eval_samples_per_second": 11.515,
34
+ "eval_steps_per_second": 0.186,
35
+ "step": 32
36
+ },
37
+ {
38
+ "epoch": 3.0,
39
+ "eval_gen_len": 600.0,
40
+ "eval_loss": 6.349050998687744,
41
+ "eval_rouge1": 0.3988,
42
+ "eval_rouge2": 0.1679,
43
+ "eval_rougeL": 0.2188,
44
+ "eval_rougeLsum": 0.3443,
45
+ "eval_runtime": 21.2101,
46
+ "eval_samples_per_second": 11.693,
47
+ "eval_steps_per_second": 0.189,
48
+ "step": 48
49
+ },
50
+ {
51
+ "epoch": 4.0,
52
+ "eval_gen_len": 600.0,
53
+ "eval_loss": 6.258257865905762,
54
+ "eval_rouge1": 0.3996,
55
+ "eval_rouge2": 0.1681,
56
+ "eval_rougeL": 0.2189,
57
+ "eval_rougeLsum": 0.345,
58
+ "eval_runtime": 21.3129,
59
+ "eval_samples_per_second": 11.636,
60
+ "eval_steps_per_second": 0.188,
61
+ "step": 64
62
+ },
63
+ {
64
+ "epoch": 5.0,
65
+ "eval_gen_len": 600.0,
66
+ "eval_loss": 6.200411796569824,
67
+ "eval_rouge1": 0.3986,
68
+ "eval_rouge2": 0.1677,
69
+ "eval_rougeL": 0.2184,
70
+ "eval_rougeLsum": 0.3439,
71
+ "eval_runtime": 21.6445,
72
+ "eval_samples_per_second": 11.458,
73
+ "eval_steps_per_second": 0.185,
74
+ "step": 80
75
+ },
76
+ {
77
+ "epoch": 6.0,
78
+ "eval_gen_len": 600.0,
79
+ "eval_loss": 6.170421600341797,
80
+ "eval_rouge1": 0.3981,
81
+ "eval_rouge2": 0.1674,
82
+ "eval_rougeL": 0.2178,
83
+ "eval_rougeLsum": 0.3432,
84
+ "eval_runtime": 21.242,
85
+ "eval_samples_per_second": 11.675,
86
+ "eval_steps_per_second": 0.188,
87
+ "step": 96
88
+ },
89
+ {
90
+ "epoch": 7.0,
91
+ "eval_gen_len": 600.0,
92
+ "eval_loss": 6.150304317474365,
93
+ "eval_rouge1": 0.3976,
94
+ "eval_rouge2": 0.1672,
95
+ "eval_rougeL": 0.2176,
96
+ "eval_rougeLsum": 0.3428,
97
+ "eval_runtime": 21.3562,
98
+ "eval_samples_per_second": 11.613,
99
+ "eval_steps_per_second": 0.187,
100
+ "step": 112
101
+ },
102
+ {
103
+ "epoch": 8.0,
104
+ "eval_gen_len": 600.0,
105
+ "eval_loss": 6.135751724243164,
106
+ "eval_rouge1": 0.3977,
107
+ "eval_rouge2": 0.1672,
108
+ "eval_rougeL": 0.2175,
109
+ "eval_rougeLsum": 0.3427,
110
+ "eval_runtime": 21.5836,
111
+ "eval_samples_per_second": 11.49,
112
+ "eval_steps_per_second": 0.185,
113
+ "step": 128
114
+ },
115
+ {
116
+ "epoch": 9.0,
117
+ "eval_gen_len": 600.0,
118
+ "eval_loss": 6.122563362121582,
119
+ "eval_rouge1": 0.3977,
120
+ "eval_rouge2": 0.1671,
121
+ "eval_rougeL": 0.2171,
122
+ "eval_rougeLsum": 0.3425,
123
+ "eval_runtime": 21.5829,
124
+ "eval_samples_per_second": 11.491,
125
+ "eval_steps_per_second": 0.185,
126
+ "step": 144
127
+ },
128
+ {
129
+ "epoch": 10.0,
130
+ "eval_gen_len": 600.0,
131
+ "eval_loss": 6.114274501800537,
132
+ "eval_rouge1": 0.397,
133
+ "eval_rouge2": 0.1669,
134
+ "eval_rougeL": 0.2174,
135
+ "eval_rougeLsum": 0.3427,
136
+ "eval_runtime": 21.508,
137
+ "eval_samples_per_second": 11.531,
138
+ "eval_steps_per_second": 0.186,
139
+ "step": 160
140
+ },
141
+ {
142
+ "epoch": 11.0,
143
+ "eval_gen_len": 600.0,
144
+ "eval_loss": 6.108905792236328,
145
+ "eval_rouge1": 0.3973,
146
+ "eval_rouge2": 0.167,
147
+ "eval_rougeL": 0.2173,
148
+ "eval_rougeLsum": 0.3427,
149
+ "eval_runtime": 21.2386,
150
+ "eval_samples_per_second": 11.677,
151
+ "eval_steps_per_second": 0.188,
152
+ "step": 176
153
+ },
154
+ {
155
+ "epoch": 12.0,
156
+ "eval_gen_len": 600.0,
157
+ "eval_loss": 6.107725620269775,
158
+ "eval_rouge1": 0.3974,
159
+ "eval_rouge2": 0.167,
160
+ "eval_rougeL": 0.2173,
161
+ "eval_rougeLsum": 0.3426,
162
+ "eval_runtime": 21.6952,
163
+ "eval_samples_per_second": 11.431,
164
+ "eval_steps_per_second": 0.184,
165
+ "step": 192
166
+ },
167
+ {
168
+ "epoch": 13.0,
169
+ "eval_gen_len": 600.0,
170
+ "eval_loss": 6.099628448486328,
171
+ "eval_rouge1": 0.3976,
172
+ "eval_rouge2": 0.167,
173
+ "eval_rougeL": 0.2172,
174
+ "eval_rougeLsum": 0.3428,
175
+ "eval_runtime": 21.1438,
176
+ "eval_samples_per_second": 11.729,
177
+ "eval_steps_per_second": 0.189,
178
+ "step": 208
179
+ },
180
+ {
181
+ "epoch": 14.0,
182
+ "eval_gen_len": 600.0,
183
+ "eval_loss": 6.096395492553711,
184
+ "eval_rouge1": 0.3975,
185
+ "eval_rouge2": 0.167,
186
+ "eval_rougeL": 0.2171,
187
+ "eval_rougeLsum": 0.3426,
188
+ "eval_runtime": 21.6504,
189
+ "eval_samples_per_second": 11.455,
190
+ "eval_steps_per_second": 0.185,
191
+ "step": 224
192
+ },
193
+ {
194
+ "epoch": 15.0,
195
+ "eval_gen_len": 600.0,
196
+ "eval_loss": 6.0916852951049805,
197
+ "eval_rouge1": 0.3979,
198
+ "eval_rouge2": 0.167,
199
+ "eval_rougeL": 0.2168,
200
+ "eval_rougeLsum": 0.3427,
201
+ "eval_runtime": 21.4782,
202
+ "eval_samples_per_second": 11.547,
203
+ "eval_steps_per_second": 0.186,
204
+ "step": 240
205
+ },
206
+ {
207
+ "epoch": 16.0,
208
+ "eval_gen_len": 600.0,
209
+ "eval_loss": 6.090492248535156,
210
+ "eval_rouge1": 0.3977,
211
+ "eval_rouge2": 0.1672,
212
+ "eval_rougeL": 0.2173,
213
+ "eval_rougeLsum": 0.3428,
214
+ "eval_runtime": 21.7128,
215
+ "eval_samples_per_second": 11.422,
216
+ "eval_steps_per_second": 0.184,
217
+ "step": 256
218
+ },
219
+ {
220
+ "epoch": 17.0,
221
+ "eval_gen_len": 600.0,
222
+ "eval_loss": 6.091054916381836,
223
+ "eval_rouge1": 0.399,
224
+ "eval_rouge2": 0.168,
225
+ "eval_rougeL": 0.2176,
226
+ "eval_rougeLsum": 0.3436,
227
+ "eval_runtime": 21.2583,
228
+ "eval_samples_per_second": 11.666,
229
+ "eval_steps_per_second": 0.188,
230
+ "step": 272
231
+ },
232
+ {
233
+ "epoch": 18.0,
234
+ "eval_gen_len": 600.0,
235
+ "eval_loss": 6.0864386558532715,
236
+ "eval_rouge1": 0.3985,
237
+ "eval_rouge2": 0.1675,
238
+ "eval_rougeL": 0.2172,
239
+ "eval_rougeLsum": 0.3431,
240
+ "eval_runtime": 21.4489,
241
+ "eval_samples_per_second": 11.562,
242
+ "eval_steps_per_second": 0.186,
243
+ "step": 288
244
+ },
245
+ {
246
+ "epoch": 19.0,
247
+ "eval_gen_len": 600.0,
248
+ "eval_loss": 6.082566261291504,
249
+ "eval_rouge1": 0.4004,
250
+ "eval_rouge2": 0.1686,
251
+ "eval_rougeL": 0.2186,
252
+ "eval_rougeLsum": 0.3451,
253
+ "eval_runtime": 21.4779,
254
+ "eval_samples_per_second": 11.547,
255
+ "eval_steps_per_second": 0.186,
256
+ "step": 304
257
+ },
258
+ {
259
+ "epoch": 20.0,
260
+ "eval_gen_len": 600.0,
261
+ "eval_loss": 6.0813798904418945,
262
+ "eval_rouge1": 0.4009,
263
+ "eval_rouge2": 0.1689,
264
+ "eval_rougeL": 0.2189,
265
+ "eval_rougeLsum": 0.3454,
266
+ "eval_runtime": 21.5568,
267
+ "eval_samples_per_second": 11.504,
268
+ "eval_steps_per_second": 0.186,
269
+ "step": 320
270
+ },
271
+ {
272
+ "epoch": 21.0,
273
+ "eval_gen_len": 600.0,
274
+ "eval_loss": 6.082016944885254,
275
+ "eval_rouge1": 0.3999,
276
+ "eval_rouge2": 0.1682,
277
+ "eval_rougeL": 0.218,
278
+ "eval_rougeLsum": 0.3444,
279
+ "eval_runtime": 21.5727,
280
+ "eval_samples_per_second": 11.496,
281
+ "eval_steps_per_second": 0.185,
282
+ "step": 336
283
+ },
284
+ {
285
+ "epoch": 22.0,
286
+ "eval_gen_len": 600.0,
287
+ "eval_loss": 6.082878589630127,
288
+ "eval_rouge1": 0.4076,
289
+ "eval_rouge2": 0.1718,
290
+ "eval_rougeL": 0.2222,
291
+ "eval_rougeLsum": 0.3508,
292
+ "eval_runtime": 20.8434,
293
+ "eval_samples_per_second": 11.898,
294
+ "eval_steps_per_second": 0.192,
295
+ "step": 352
296
+ },
297
+ {
298
+ "epoch": 23.0,
299
+ "eval_gen_len": 600.0,
300
+ "eval_loss": 6.080228805541992,
301
+ "eval_rouge1": 0.405,
302
+ "eval_rouge2": 0.1705,
303
+ "eval_rougeL": 0.221,
304
+ "eval_rougeLsum": 0.3488,
305
+ "eval_runtime": 21.1916,
306
+ "eval_samples_per_second": 11.703,
307
+ "eval_steps_per_second": 0.189,
308
+ "step": 368
309
+ },
310
+ {
311
+ "epoch": 24.0,
312
+ "eval_gen_len": 600.0,
313
+ "eval_loss": 6.07808780670166,
314
+ "eval_rouge1": 0.4052,
315
+ "eval_rouge2": 0.1709,
316
+ "eval_rougeL": 0.2212,
317
+ "eval_rougeLsum": 0.3491,
318
+ "eval_runtime": 21.3026,
319
+ "eval_samples_per_second": 11.642,
320
+ "eval_steps_per_second": 0.188,
321
+ "step": 384
322
+ },
323
+ {
324
+ "epoch": 25.0,
325
+ "eval_gen_len": 600.0,
326
+ "eval_loss": 6.077059268951416,
327
+ "eval_rouge1": 0.4064,
328
+ "eval_rouge2": 0.1711,
329
+ "eval_rougeL": 0.2216,
330
+ "eval_rougeLsum": 0.3498,
331
+ "eval_runtime": 20.9702,
332
+ "eval_samples_per_second": 11.826,
333
+ "eval_steps_per_second": 0.191,
334
+ "step": 400
335
+ },
336
+ {
337
+ "epoch": 26.0,
338
+ "eval_gen_len": 600.0,
339
+ "eval_loss": 6.075596809387207,
340
+ "eval_rouge1": 0.4086,
341
+ "eval_rouge2": 0.1723,
342
+ "eval_rougeL": 0.223,
343
+ "eval_rougeLsum": 0.3517,
344
+ "eval_runtime": 21.1984,
345
+ "eval_samples_per_second": 11.699,
346
+ "eval_steps_per_second": 0.189,
347
+ "step": 416
348
+ },
349
+ {
350
+ "epoch": 27.0,
351
+ "eval_gen_len": 600.0,
352
+ "eval_loss": 6.075705528259277,
353
+ "eval_rouge1": 0.4075,
354
+ "eval_rouge2": 0.1719,
355
+ "eval_rougeL": 0.2224,
356
+ "eval_rougeLsum": 0.3509,
357
+ "eval_runtime": 20.7964,
358
+ "eval_samples_per_second": 11.925,
359
+ "eval_steps_per_second": 0.192,
360
+ "step": 432
361
+ },
362
+ {
363
+ "epoch": 28.0,
364
+ "eval_gen_len": 600.0,
365
+ "eval_loss": 6.075275421142578,
366
+ "eval_rouge1": 0.4081,
367
+ "eval_rouge2": 0.1722,
368
+ "eval_rougeL": 0.2224,
369
+ "eval_rougeLsum": 0.3509,
370
+ "eval_runtime": 20.9972,
371
+ "eval_samples_per_second": 11.811,
372
+ "eval_steps_per_second": 0.191,
373
+ "step": 448
374
+ },
375
+ {
376
+ "epoch": 29.0,
377
+ "eval_gen_len": 600.0,
378
+ "eval_loss": 6.076692581176758,
379
+ "eval_rouge1": 0.4132,
380
+ "eval_rouge2": 0.1751,
381
+ "eval_rougeL": 0.2258,
382
+ "eval_rougeLsum": 0.3553,
383
+ "eval_runtime": 21.0313,
384
+ "eval_samples_per_second": 11.792,
385
+ "eval_steps_per_second": 0.19,
386
+ "step": 464
387
+ },
388
+ {
389
+ "epoch": 30.0,
390
+ "eval_gen_len": 600.0,
391
+ "eval_loss": 6.075990676879883,
392
+ "eval_rouge1": 0.4108,
393
+ "eval_rouge2": 0.1737,
394
+ "eval_rougeL": 0.2242,
395
+ "eval_rougeLsum": 0.3533,
396
+ "eval_runtime": 20.714,
397
+ "eval_samples_per_second": 11.973,
398
+ "eval_steps_per_second": 0.193,
399
+ "step": 480
400
+ },
401
+ {
402
+ "epoch": 31.0,
403
+ "eval_gen_len": 600.0,
404
+ "eval_loss": 6.074672222137451,
405
+ "eval_rouge1": 0.4126,
406
+ "eval_rouge2": 0.1747,
407
+ "eval_rougeL": 0.2253,
408
+ "eval_rougeLsum": 0.3546,
409
+ "eval_runtime": 21.1511,
410
+ "eval_samples_per_second": 11.725,
411
+ "eval_steps_per_second": 0.189,
412
+ "step": 496
413
+ },
414
+ {
415
+ "epoch": 31.25,
416
+ "grad_norm": 237350.25,
417
+ "learning_rate": 2.1428571428571427e-06,
418
+ "loss": 6.1153,
419
+ "step": 500
420
+ },
421
+ {
422
+ "epoch": 32.0,
423
+ "eval_gen_len": 600.0,
424
+ "eval_loss": 6.076193809509277,
425
+ "eval_rouge1": 0.4119,
426
+ "eval_rouge2": 0.1744,
427
+ "eval_rougeL": 0.2248,
428
+ "eval_rougeLsum": 0.3541,
429
+ "eval_runtime": 20.5412,
430
+ "eval_samples_per_second": 12.073,
431
+ "eval_steps_per_second": 0.195,
432
+ "step": 512
433
+ },
434
+ {
435
+ "epoch": 33.0,
436
+ "eval_gen_len": 600.0,
437
+ "eval_loss": 6.074151039123535,
438
+ "eval_rouge1": 0.4123,
439
+ "eval_rouge2": 0.1746,
440
+ "eval_rougeL": 0.2251,
441
+ "eval_rougeLsum": 0.3545,
442
+ "eval_runtime": 21.0056,
443
+ "eval_samples_per_second": 11.806,
444
+ "eval_steps_per_second": 0.19,
445
+ "step": 528
446
+ }
447
+ ],
448
+ "logging_steps": 500,
449
+ "max_steps": 560,
450
+ "num_input_tokens_seen": 0,
451
+ "num_train_epochs": 35,
452
+ "save_steps": 500,
453
+ "total_flos": 8527788048384000.0,
454
+ "train_batch_size": 64,
455
+ "trial_name": null,
456
+ "trial_params": null
457
+ }
checkpoint-528/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8ecd0faf5f9d90a3838ab34a2691a497d77a695ec8c1fb0490440a6eb10f007c
3
+ size 4795
checkpoint-528/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-544/config.json ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "openai-community/gpt2",
3
+ "activation_function": "gelu_new",
4
+ "architectures": [
5
+ "GPT2LMHeadModel"
6
+ ],
7
+ "attn_pdrop": 0.1,
8
+ "bos_token_id": 50256,
9
+ "embd_pdrop": 0.1,
10
+ "eos_token_id": 50256,
11
+ "initializer_range": 0.02,
12
+ "layer_norm_epsilon": 1e-05,
13
+ "model_type": "gpt2",
14
+ "n_ctx": 1024,
15
+ "n_embd": 768,
16
+ "n_head": 12,
17
+ "n_inner": null,
18
+ "n_layer": 12,
19
+ "n_positions": 1024,
20
+ "reorder_and_upcast_attn": false,
21
+ "resid_pdrop": 0.1,
22
+ "scale_attn_by_inverse_layer_idx": false,
23
+ "scale_attn_weights": true,
24
+ "summary_activation": null,
25
+ "summary_first_dropout": 0.1,
26
+ "summary_proj_to_labels": true,
27
+ "summary_type": "cls_index",
28
+ "summary_use_proj": true,
29
+ "task_specific_params": {
30
+ "text-generation": {
31
+ "do_sample": true,
32
+ "max_length": 50
33
+ }
34
+ },
35
+ "torch_dtype": "float32",
36
+ "transformers_version": "4.40.0.dev0",
37
+ "use_cache": true,
38
+ "vocab_size": 50257
39
+ }
checkpoint-544/generation_config.json ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {
2
+ "bos_token_id": 50256,
3
+ "eos_token_id": 50256,
4
+ "transformers_version": "4.40.0.dev0"
5
+ }
checkpoint-544/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-544/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:faf579eee3ce11c8b12aba51f440a331393b029b7b94703dff8140ede641d87e
3
+ size 497774208
checkpoint-544/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aa752cc67c8c59d82220acd435c034b6dc4b5cedd955950ba26ab0503e1b7b31
3
+ size 995641861
checkpoint-544/rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4c1ea103eb6972517ba8c52e55d21f248d7aeff1ebbfbbf8116fde962f0e5152
3
+ size 14575
checkpoint-544/scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d192934b0afbbe525b055f4e7cd703bb0997d69fba957e3ac903fe5d22b9d18f
3
+ size 627
checkpoint-544/special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "<|endoftext|>",
3
+ "eos_token": "<|endoftext|>",
4
+ "pad_token": "<|endoftext|>",
5
+ "unk_token": "<|endoftext|>"
6
+ }
checkpoint-544/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-544/tokenizer_config.json ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "50256": {
5
+ "content": "<|endoftext|>",
6
+ "lstrip": false,
7
+ "normalized": true,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ }
12
+ },
13
+ "bos_token": "<|endoftext|>",
14
+ "clean_up_tokenization_spaces": true,
15
+ "eos_token": "<|endoftext|>",
16
+ "model_max_length": 1024,
17
+ "pad_token": "<|endoftext|>",
18
+ "padding_side": "left",
19
+ "tokenizer_class": "GPT2Tokenizer",
20
+ "unk_token": "<|endoftext|>"
21
+ }
checkpoint-544/trainer_state.json ADDED
@@ -0,0 +1,470 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 6.074151039123535,
3
+ "best_model_checkpoint": "bill_sum_finetune_test_gpt2/checkpoint-528",
4
+ "epoch": 34.0,
5
+ "eval_steps": 500,
6
+ "global_step": 544,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 1.0,
13
+ "eval_gen_len": 600.0,
14
+ "eval_loss": 6.843741416931152,
15
+ "eval_rouge1": 0.4053,
16
+ "eval_rouge2": 0.1708,
17
+ "eval_rougeL": 0.2228,
18
+ "eval_rougeLsum": 0.35,
19
+ "eval_runtime": 21.0375,
20
+ "eval_samples_per_second": 11.788,
21
+ "eval_steps_per_second": 0.19,
22
+ "step": 16
23
+ },
24
+ {
25
+ "epoch": 2.0,
26
+ "eval_gen_len": 600.0,
27
+ "eval_loss": 6.511104583740234,
28
+ "eval_rouge1": 0.3978,
29
+ "eval_rouge2": 0.1673,
30
+ "eval_rougeL": 0.2181,
31
+ "eval_rougeLsum": 0.3434,
32
+ "eval_runtime": 21.5368,
33
+ "eval_samples_per_second": 11.515,
34
+ "eval_steps_per_second": 0.186,
35
+ "step": 32
36
+ },
37
+ {
38
+ "epoch": 3.0,
39
+ "eval_gen_len": 600.0,
40
+ "eval_loss": 6.349050998687744,
41
+ "eval_rouge1": 0.3988,
42
+ "eval_rouge2": 0.1679,
43
+ "eval_rougeL": 0.2188,
44
+ "eval_rougeLsum": 0.3443,
45
+ "eval_runtime": 21.2101,
46
+ "eval_samples_per_second": 11.693,
47
+ "eval_steps_per_second": 0.189,
48
+ "step": 48
49
+ },
50
+ {
51
+ "epoch": 4.0,
52
+ "eval_gen_len": 600.0,
53
+ "eval_loss": 6.258257865905762,
54
+ "eval_rouge1": 0.3996,
55
+ "eval_rouge2": 0.1681,
56
+ "eval_rougeL": 0.2189,
57
+ "eval_rougeLsum": 0.345,
58
+ "eval_runtime": 21.3129,
59
+ "eval_samples_per_second": 11.636,
60
+ "eval_steps_per_second": 0.188,
61
+ "step": 64
62
+ },
63
+ {
64
+ "epoch": 5.0,
65
+ "eval_gen_len": 600.0,
66
+ "eval_loss": 6.200411796569824,
67
+ "eval_rouge1": 0.3986,
68
+ "eval_rouge2": 0.1677,
69
+ "eval_rougeL": 0.2184,
70
+ "eval_rougeLsum": 0.3439,
71
+ "eval_runtime": 21.6445,
72
+ "eval_samples_per_second": 11.458,
73
+ "eval_steps_per_second": 0.185,
74
+ "step": 80
75
+ },
76
+ {
77
+ "epoch": 6.0,
78
+ "eval_gen_len": 600.0,
79
+ "eval_loss": 6.170421600341797,
80
+ "eval_rouge1": 0.3981,
81
+ "eval_rouge2": 0.1674,
82
+ "eval_rougeL": 0.2178,
83
+ "eval_rougeLsum": 0.3432,
84
+ "eval_runtime": 21.242,
85
+ "eval_samples_per_second": 11.675,
86
+ "eval_steps_per_second": 0.188,
87
+ "step": 96
88
+ },
89
+ {
90
+ "epoch": 7.0,
91
+ "eval_gen_len": 600.0,
92
+ "eval_loss": 6.150304317474365,
93
+ "eval_rouge1": 0.3976,
94
+ "eval_rouge2": 0.1672,
95
+ "eval_rougeL": 0.2176,
96
+ "eval_rougeLsum": 0.3428,
97
+ "eval_runtime": 21.3562,
98
+ "eval_samples_per_second": 11.613,
99
+ "eval_steps_per_second": 0.187,
100
+ "step": 112
101
+ },
102
+ {
103
+ "epoch": 8.0,
104
+ "eval_gen_len": 600.0,
105
+ "eval_loss": 6.135751724243164,
106
+ "eval_rouge1": 0.3977,
107
+ "eval_rouge2": 0.1672,
108
+ "eval_rougeL": 0.2175,
109
+ "eval_rougeLsum": 0.3427,
110
+ "eval_runtime": 21.5836,
111
+ "eval_samples_per_second": 11.49,
112
+ "eval_steps_per_second": 0.185,
113
+ "step": 128
114
+ },
115
+ {
116
+ "epoch": 9.0,
117
+ "eval_gen_len": 600.0,
118
+ "eval_loss": 6.122563362121582,
119
+ "eval_rouge1": 0.3977,
120
+ "eval_rouge2": 0.1671,
121
+ "eval_rougeL": 0.2171,
122
+ "eval_rougeLsum": 0.3425,
123
+ "eval_runtime": 21.5829,
124
+ "eval_samples_per_second": 11.491,
125
+ "eval_steps_per_second": 0.185,
126
+ "step": 144
127
+ },
128
+ {
129
+ "epoch": 10.0,
130
+ "eval_gen_len": 600.0,
131
+ "eval_loss": 6.114274501800537,
132
+ "eval_rouge1": 0.397,
133
+ "eval_rouge2": 0.1669,
134
+ "eval_rougeL": 0.2174,
135
+ "eval_rougeLsum": 0.3427,
136
+ "eval_runtime": 21.508,
137
+ "eval_samples_per_second": 11.531,
138
+ "eval_steps_per_second": 0.186,
139
+ "step": 160
140
+ },
141
+ {
142
+ "epoch": 11.0,
143
+ "eval_gen_len": 600.0,
144
+ "eval_loss": 6.108905792236328,
145
+ "eval_rouge1": 0.3973,
146
+ "eval_rouge2": 0.167,
147
+ "eval_rougeL": 0.2173,
148
+ "eval_rougeLsum": 0.3427,
149
+ "eval_runtime": 21.2386,
150
+ "eval_samples_per_second": 11.677,
151
+ "eval_steps_per_second": 0.188,
152
+ "step": 176
153
+ },
154
+ {
155
+ "epoch": 12.0,
156
+ "eval_gen_len": 600.0,
157
+ "eval_loss": 6.107725620269775,
158
+ "eval_rouge1": 0.3974,
159
+ "eval_rouge2": 0.167,
160
+ "eval_rougeL": 0.2173,
161
+ "eval_rougeLsum": 0.3426,
162
+ "eval_runtime": 21.6952,
163
+ "eval_samples_per_second": 11.431,
164
+ "eval_steps_per_second": 0.184,
165
+ "step": 192
166
+ },
167
+ {
168
+ "epoch": 13.0,
169
+ "eval_gen_len": 600.0,
170
+ "eval_loss": 6.099628448486328,
171
+ "eval_rouge1": 0.3976,
172
+ "eval_rouge2": 0.167,
173
+ "eval_rougeL": 0.2172,
174
+ "eval_rougeLsum": 0.3428,
175
+ "eval_runtime": 21.1438,
176
+ "eval_samples_per_second": 11.729,
177
+ "eval_steps_per_second": 0.189,
178
+ "step": 208
179
+ },
180
+ {
181
+ "epoch": 14.0,
182
+ "eval_gen_len": 600.0,
183
+ "eval_loss": 6.096395492553711,
184
+ "eval_rouge1": 0.3975,
185
+ "eval_rouge2": 0.167,
186
+ "eval_rougeL": 0.2171,
187
+ "eval_rougeLsum": 0.3426,
188
+ "eval_runtime": 21.6504,
189
+ "eval_samples_per_second": 11.455,
190
+ "eval_steps_per_second": 0.185,
191
+ "step": 224
192
+ },
193
+ {
194
+ "epoch": 15.0,
195
+ "eval_gen_len": 600.0,
196
+ "eval_loss": 6.0916852951049805,
197
+ "eval_rouge1": 0.3979,
198
+ "eval_rouge2": 0.167,
199
+ "eval_rougeL": 0.2168,
200
+ "eval_rougeLsum": 0.3427,
201
+ "eval_runtime": 21.4782,
202
+ "eval_samples_per_second": 11.547,
203
+ "eval_steps_per_second": 0.186,
204
+ "step": 240
205
+ },
206
+ {
207
+ "epoch": 16.0,
208
+ "eval_gen_len": 600.0,
209
+ "eval_loss": 6.090492248535156,
210
+ "eval_rouge1": 0.3977,
211
+ "eval_rouge2": 0.1672,
212
+ "eval_rougeL": 0.2173,
213
+ "eval_rougeLsum": 0.3428,
214
+ "eval_runtime": 21.7128,
215
+ "eval_samples_per_second": 11.422,
216
+ "eval_steps_per_second": 0.184,
217
+ "step": 256
218
+ },
219
+ {
220
+ "epoch": 17.0,
221
+ "eval_gen_len": 600.0,
222
+ "eval_loss": 6.091054916381836,
223
+ "eval_rouge1": 0.399,
224
+ "eval_rouge2": 0.168,
225
+ "eval_rougeL": 0.2176,
226
+ "eval_rougeLsum": 0.3436,
227
+ "eval_runtime": 21.2583,
228
+ "eval_samples_per_second": 11.666,
229
+ "eval_steps_per_second": 0.188,
230
+ "step": 272
231
+ },
232
+ {
233
+ "epoch": 18.0,
234
+ "eval_gen_len": 600.0,
235
+ "eval_loss": 6.0864386558532715,
236
+ "eval_rouge1": 0.3985,
237
+ "eval_rouge2": 0.1675,
238
+ "eval_rougeL": 0.2172,
239
+ "eval_rougeLsum": 0.3431,
240
+ "eval_runtime": 21.4489,
241
+ "eval_samples_per_second": 11.562,
242
+ "eval_steps_per_second": 0.186,
243
+ "step": 288
244
+ },
245
+ {
246
+ "epoch": 19.0,
247
+ "eval_gen_len": 600.0,
248
+ "eval_loss": 6.082566261291504,
249
+ "eval_rouge1": 0.4004,
250
+ "eval_rouge2": 0.1686,
251
+ "eval_rougeL": 0.2186,
252
+ "eval_rougeLsum": 0.3451,
253
+ "eval_runtime": 21.4779,
254
+ "eval_samples_per_second": 11.547,
255
+ "eval_steps_per_second": 0.186,
256
+ "step": 304
257
+ },
258
+ {
259
+ "epoch": 20.0,
260
+ "eval_gen_len": 600.0,
261
+ "eval_loss": 6.0813798904418945,
262
+ "eval_rouge1": 0.4009,
263
+ "eval_rouge2": 0.1689,
264
+ "eval_rougeL": 0.2189,
265
+ "eval_rougeLsum": 0.3454,
266
+ "eval_runtime": 21.5568,
267
+ "eval_samples_per_second": 11.504,
268
+ "eval_steps_per_second": 0.186,
269
+ "step": 320
270
+ },
271
+ {
272
+ "epoch": 21.0,
273
+ "eval_gen_len": 600.0,
274
+ "eval_loss": 6.082016944885254,
275
+ "eval_rouge1": 0.3999,
276
+ "eval_rouge2": 0.1682,
277
+ "eval_rougeL": 0.218,
278
+ "eval_rougeLsum": 0.3444,
279
+ "eval_runtime": 21.5727,
280
+ "eval_samples_per_second": 11.496,
281
+ "eval_steps_per_second": 0.185,
282
+ "step": 336
283
+ },
284
+ {
285
+ "epoch": 22.0,
286
+ "eval_gen_len": 600.0,
287
+ "eval_loss": 6.082878589630127,
288
+ "eval_rouge1": 0.4076,
289
+ "eval_rouge2": 0.1718,
290
+ "eval_rougeL": 0.2222,
291
+ "eval_rougeLsum": 0.3508,
292
+ "eval_runtime": 20.8434,
293
+ "eval_samples_per_second": 11.898,
294
+ "eval_steps_per_second": 0.192,
295
+ "step": 352
296
+ },
297
+ {
298
+ "epoch": 23.0,
299
+ "eval_gen_len": 600.0,
300
+ "eval_loss": 6.080228805541992,
301
+ "eval_rouge1": 0.405,
302
+ "eval_rouge2": 0.1705,
303
+ "eval_rougeL": 0.221,
304
+ "eval_rougeLsum": 0.3488,
305
+ "eval_runtime": 21.1916,
306
+ "eval_samples_per_second": 11.703,
307
+ "eval_steps_per_second": 0.189,
308
+ "step": 368
309
+ },
310
+ {
311
+ "epoch": 24.0,
312
+ "eval_gen_len": 600.0,
313
+ "eval_loss": 6.07808780670166,
314
+ "eval_rouge1": 0.4052,
315
+ "eval_rouge2": 0.1709,
316
+ "eval_rougeL": 0.2212,
317
+ "eval_rougeLsum": 0.3491,
318
+ "eval_runtime": 21.3026,
319
+ "eval_samples_per_second": 11.642,
320
+ "eval_steps_per_second": 0.188,
321
+ "step": 384
322
+ },
323
+ {
324
+ "epoch": 25.0,
325
+ "eval_gen_len": 600.0,
326
+ "eval_loss": 6.077059268951416,
327
+ "eval_rouge1": 0.4064,
328
+ "eval_rouge2": 0.1711,
329
+ "eval_rougeL": 0.2216,
330
+ "eval_rougeLsum": 0.3498,
331
+ "eval_runtime": 20.9702,
332
+ "eval_samples_per_second": 11.826,
333
+ "eval_steps_per_second": 0.191,
334
+ "step": 400
335
+ },
336
+ {
337
+ "epoch": 26.0,
338
+ "eval_gen_len": 600.0,
339
+ "eval_loss": 6.075596809387207,
340
+ "eval_rouge1": 0.4086,
341
+ "eval_rouge2": 0.1723,
342
+ "eval_rougeL": 0.223,
343
+ "eval_rougeLsum": 0.3517,
344
+ "eval_runtime": 21.1984,
345
+ "eval_samples_per_second": 11.699,
346
+ "eval_steps_per_second": 0.189,
347
+ "step": 416
348
+ },
349
+ {
350
+ "epoch": 27.0,
351
+ "eval_gen_len": 600.0,
352
+ "eval_loss": 6.075705528259277,
353
+ "eval_rouge1": 0.4075,
354
+ "eval_rouge2": 0.1719,
355
+ "eval_rougeL": 0.2224,
356
+ "eval_rougeLsum": 0.3509,
357
+ "eval_runtime": 20.7964,
358
+ "eval_samples_per_second": 11.925,
359
+ "eval_steps_per_second": 0.192,
360
+ "step": 432
361
+ },
362
+ {
363
+ "epoch": 28.0,
364
+ "eval_gen_len": 600.0,
365
+ "eval_loss": 6.075275421142578,
366
+ "eval_rouge1": 0.4081,
367
+ "eval_rouge2": 0.1722,
368
+ "eval_rougeL": 0.2224,
369
+ "eval_rougeLsum": 0.3509,
370
+ "eval_runtime": 20.9972,
371
+ "eval_samples_per_second": 11.811,
372
+ "eval_steps_per_second": 0.191,
373
+ "step": 448
374
+ },
375
+ {
376
+ "epoch": 29.0,
377
+ "eval_gen_len": 600.0,
378
+ "eval_loss": 6.076692581176758,
379
+ "eval_rouge1": 0.4132,
380
+ "eval_rouge2": 0.1751,
381
+ "eval_rougeL": 0.2258,
382
+ "eval_rougeLsum": 0.3553,
383
+ "eval_runtime": 21.0313,
384
+ "eval_samples_per_second": 11.792,
385
+ "eval_steps_per_second": 0.19,
386
+ "step": 464
387
+ },
388
+ {
389
+ "epoch": 30.0,
390
+ "eval_gen_len": 600.0,
391
+ "eval_loss": 6.075990676879883,
392
+ "eval_rouge1": 0.4108,
393
+ "eval_rouge2": 0.1737,
394
+ "eval_rougeL": 0.2242,
395
+ "eval_rougeLsum": 0.3533,
396
+ "eval_runtime": 20.714,
397
+ "eval_samples_per_second": 11.973,
398
+ "eval_steps_per_second": 0.193,
399
+ "step": 480
400
+ },
401
+ {
402
+ "epoch": 31.0,
403
+ "eval_gen_len": 600.0,
404
+ "eval_loss": 6.074672222137451,
405
+ "eval_rouge1": 0.4126,
406
+ "eval_rouge2": 0.1747,
407
+ "eval_rougeL": 0.2253,
408
+ "eval_rougeLsum": 0.3546,
409
+ "eval_runtime": 21.1511,
410
+ "eval_samples_per_second": 11.725,
411
+ "eval_steps_per_second": 0.189,
412
+ "step": 496
413
+ },
414
+ {
415
+ "epoch": 31.25,
416
+ "grad_norm": 237350.25,
417
+ "learning_rate": 2.1428571428571427e-06,
418
+ "loss": 6.1153,
419
+ "step": 500
420
+ },
421
+ {
422
+ "epoch": 32.0,
423
+ "eval_gen_len": 600.0,
424
+ "eval_loss": 6.076193809509277,
425
+ "eval_rouge1": 0.4119,
426
+ "eval_rouge2": 0.1744,
427
+ "eval_rougeL": 0.2248,
428
+ "eval_rougeLsum": 0.3541,
429
+ "eval_runtime": 20.5412,
430
+ "eval_samples_per_second": 12.073,
431
+ "eval_steps_per_second": 0.195,
432
+ "step": 512
433
+ },
434
+ {
435
+ "epoch": 33.0,
436
+ "eval_gen_len": 600.0,
437
+ "eval_loss": 6.074151039123535,
438
+ "eval_rouge1": 0.4123,
439
+ "eval_rouge2": 0.1746,
440
+ "eval_rougeL": 0.2251,
441
+ "eval_rougeLsum": 0.3545,
442
+ "eval_runtime": 21.0056,
443
+ "eval_samples_per_second": 11.806,
444
+ "eval_steps_per_second": 0.19,
445
+ "step": 528
446
+ },
447
+ {
448
+ "epoch": 34.0,
449
+ "eval_gen_len": 600.0,
450
+ "eval_loss": 6.076315879821777,
451
+ "eval_rouge1": 0.4114,
452
+ "eval_rouge2": 0.1741,
453
+ "eval_rougeL": 0.2246,
454
+ "eval_rougeLsum": 0.3537,
455
+ "eval_runtime": 20.6937,
456
+ "eval_samples_per_second": 11.984,
457
+ "eval_steps_per_second": 0.193,
458
+ "step": 544
459
+ }
460
+ ],
461
+ "logging_steps": 500,
462
+ "max_steps": 560,
463
+ "num_input_tokens_seen": 0,
464
+ "num_train_epochs": 35,
465
+ "save_steps": 500,
466
+ "total_flos": 8786205868032000.0,
467
+ "train_batch_size": 64,
468
+ "trial_name": null,
469
+ "trial_params": null
470
+ }
checkpoint-544/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8ecd0faf5f9d90a3838ab34a2691a497d77a695ec8c1fb0490440a6eb10f007c
3
+ size 4795
checkpoint-544/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-560/config.json ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "openai-community/gpt2",
3
+ "activation_function": "gelu_new",
4
+ "architectures": [
5
+ "GPT2LMHeadModel"
6
+ ],
7
+ "attn_pdrop": 0.1,
8
+ "bos_token_id": 50256,
9
+ "embd_pdrop": 0.1,
10
+ "eos_token_id": 50256,
11
+ "initializer_range": 0.02,
12
+ "layer_norm_epsilon": 1e-05,
13
+ "model_type": "gpt2",
14
+ "n_ctx": 1024,
15
+ "n_embd": 768,
16
+ "n_head": 12,
17
+ "n_inner": null,
18
+ "n_layer": 12,
19
+ "n_positions": 1024,
20
+ "reorder_and_upcast_attn": false,
21
+ "resid_pdrop": 0.1,
22
+ "scale_attn_by_inverse_layer_idx": false,
23
+ "scale_attn_weights": true,
24
+ "summary_activation": null,
25
+ "summary_first_dropout": 0.1,
26
+ "summary_proj_to_labels": true,
27
+ "summary_type": "cls_index",
28
+ "summary_use_proj": true,
29
+ "task_specific_params": {
30
+ "text-generation": {
31
+ "do_sample": true,
32
+ "max_length": 50
33
+ }
34
+ },
35
+ "torch_dtype": "float32",
36
+ "transformers_version": "4.40.0.dev0",
37
+ "use_cache": true,
38
+ "vocab_size": 50257
39
+ }
checkpoint-560/generation_config.json ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ {
2
+ "bos_token_id": 50256,
3
+ "eos_token_id": 50256,
4
+ "transformers_version": "4.40.0.dev0"
5
+ }
checkpoint-560/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-560/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f13e2b11db45448096ef22fbb68eeeba1a9450325f651cf1d4f592a1aee0d5c4
3
+ size 497774208
checkpoint-560/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fc4b4598182332697e0371a2fbbc8099f137187918fb752a836ada14176b3794
3
+ size 995641861
checkpoint-560/rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e8aed69ae9b01a6ecd5908d165e977a5245c45b420008ee85a2430fc1e9f8865
3
+ size 14575
checkpoint-560/scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9043307235da66398d0c2724b2c58e4c82ce5dd47fb8a33bb762c35c95b7d938
3
+ size 627
checkpoint-560/special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": "<|endoftext|>",
3
+ "eos_token": "<|endoftext|>",
4
+ "pad_token": "<|endoftext|>",
5
+ "unk_token": "<|endoftext|>"
6
+ }
checkpoint-560/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
checkpoint-560/tokenizer_config.json ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_prefix_space": false,
3
+ "added_tokens_decoder": {
4
+ "50256": {
5
+ "content": "<|endoftext|>",
6
+ "lstrip": false,
7
+ "normalized": true,
8
+ "rstrip": false,
9
+ "single_word": false,
10
+ "special": true
11
+ }
12
+ },
13
+ "bos_token": "<|endoftext|>",
14
+ "clean_up_tokenization_spaces": true,
15
+ "eos_token": "<|endoftext|>",
16
+ "model_max_length": 1024,
17
+ "pad_token": "<|endoftext|>",
18
+ "padding_side": "left",
19
+ "tokenizer_class": "GPT2Tokenizer",
20
+ "unk_token": "<|endoftext|>"
21
+ }
checkpoint-560/trainer_state.json ADDED
@@ -0,0 +1,483 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 6.074151039123535,
3
+ "best_model_checkpoint": "bill_sum_finetune_test_gpt2/checkpoint-528",
4
+ "epoch": 35.0,
5
+ "eval_steps": 500,
6
+ "global_step": 560,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 1.0,
13
+ "eval_gen_len": 600.0,
14
+ "eval_loss": 6.843741416931152,
15
+ "eval_rouge1": 0.4053,
16
+ "eval_rouge2": 0.1708,
17
+ "eval_rougeL": 0.2228,
18
+ "eval_rougeLsum": 0.35,
19
+ "eval_runtime": 21.0375,
20
+ "eval_samples_per_second": 11.788,
21
+ "eval_steps_per_second": 0.19,
22
+ "step": 16
23
+ },
24
+ {
25
+ "epoch": 2.0,
26
+ "eval_gen_len": 600.0,
27
+ "eval_loss": 6.511104583740234,
28
+ "eval_rouge1": 0.3978,
29
+ "eval_rouge2": 0.1673,
30
+ "eval_rougeL": 0.2181,
31
+ "eval_rougeLsum": 0.3434,
32
+ "eval_runtime": 21.5368,
33
+ "eval_samples_per_second": 11.515,
34
+ "eval_steps_per_second": 0.186,
35
+ "step": 32
36
+ },
37
+ {
38
+ "epoch": 3.0,
39
+ "eval_gen_len": 600.0,
40
+ "eval_loss": 6.349050998687744,
41
+ "eval_rouge1": 0.3988,
42
+ "eval_rouge2": 0.1679,
43
+ "eval_rougeL": 0.2188,
44
+ "eval_rougeLsum": 0.3443,
45
+ "eval_runtime": 21.2101,
46
+ "eval_samples_per_second": 11.693,
47
+ "eval_steps_per_second": 0.189,
48
+ "step": 48
49
+ },
50
+ {
51
+ "epoch": 4.0,
52
+ "eval_gen_len": 600.0,
53
+ "eval_loss": 6.258257865905762,
54
+ "eval_rouge1": 0.3996,
55
+ "eval_rouge2": 0.1681,
56
+ "eval_rougeL": 0.2189,
57
+ "eval_rougeLsum": 0.345,
58
+ "eval_runtime": 21.3129,
59
+ "eval_samples_per_second": 11.636,
60
+ "eval_steps_per_second": 0.188,
61
+ "step": 64
62
+ },
63
+ {
64
+ "epoch": 5.0,
65
+ "eval_gen_len": 600.0,
66
+ "eval_loss": 6.200411796569824,
67
+ "eval_rouge1": 0.3986,
68
+ "eval_rouge2": 0.1677,
69
+ "eval_rougeL": 0.2184,
70
+ "eval_rougeLsum": 0.3439,
71
+ "eval_runtime": 21.6445,
72
+ "eval_samples_per_second": 11.458,
73
+ "eval_steps_per_second": 0.185,
74
+ "step": 80
75
+ },
76
+ {
77
+ "epoch": 6.0,
78
+ "eval_gen_len": 600.0,
79
+ "eval_loss": 6.170421600341797,
80
+ "eval_rouge1": 0.3981,
81
+ "eval_rouge2": 0.1674,
82
+ "eval_rougeL": 0.2178,
83
+ "eval_rougeLsum": 0.3432,
84
+ "eval_runtime": 21.242,
85
+ "eval_samples_per_second": 11.675,
86
+ "eval_steps_per_second": 0.188,
87
+ "step": 96
88
+ },
89
+ {
90
+ "epoch": 7.0,
91
+ "eval_gen_len": 600.0,
92
+ "eval_loss": 6.150304317474365,
93
+ "eval_rouge1": 0.3976,
94
+ "eval_rouge2": 0.1672,
95
+ "eval_rougeL": 0.2176,
96
+ "eval_rougeLsum": 0.3428,
97
+ "eval_runtime": 21.3562,
98
+ "eval_samples_per_second": 11.613,
99
+ "eval_steps_per_second": 0.187,
100
+ "step": 112
101
+ },
102
+ {
103
+ "epoch": 8.0,
104
+ "eval_gen_len": 600.0,
105
+ "eval_loss": 6.135751724243164,
106
+ "eval_rouge1": 0.3977,
107
+ "eval_rouge2": 0.1672,
108
+ "eval_rougeL": 0.2175,
109
+ "eval_rougeLsum": 0.3427,
110
+ "eval_runtime": 21.5836,
111
+ "eval_samples_per_second": 11.49,
112
+ "eval_steps_per_second": 0.185,
113
+ "step": 128
114
+ },
115
+ {
116
+ "epoch": 9.0,
117
+ "eval_gen_len": 600.0,
118
+ "eval_loss": 6.122563362121582,
119
+ "eval_rouge1": 0.3977,
120
+ "eval_rouge2": 0.1671,
121
+ "eval_rougeL": 0.2171,
122
+ "eval_rougeLsum": 0.3425,
123
+ "eval_runtime": 21.5829,
124
+ "eval_samples_per_second": 11.491,
125
+ "eval_steps_per_second": 0.185,
126
+ "step": 144
127
+ },
128
+ {
129
+ "epoch": 10.0,
130
+ "eval_gen_len": 600.0,
131
+ "eval_loss": 6.114274501800537,
132
+ "eval_rouge1": 0.397,
133
+ "eval_rouge2": 0.1669,
134
+ "eval_rougeL": 0.2174,
135
+ "eval_rougeLsum": 0.3427,
136
+ "eval_runtime": 21.508,
137
+ "eval_samples_per_second": 11.531,
138
+ "eval_steps_per_second": 0.186,
139
+ "step": 160
140
+ },
141
+ {
142
+ "epoch": 11.0,
143
+ "eval_gen_len": 600.0,
144
+ "eval_loss": 6.108905792236328,
145
+ "eval_rouge1": 0.3973,
146
+ "eval_rouge2": 0.167,
147
+ "eval_rougeL": 0.2173,
148
+ "eval_rougeLsum": 0.3427,
149
+ "eval_runtime": 21.2386,
150
+ "eval_samples_per_second": 11.677,
151
+ "eval_steps_per_second": 0.188,
152
+ "step": 176
153
+ },
154
+ {
155
+ "epoch": 12.0,
156
+ "eval_gen_len": 600.0,
157
+ "eval_loss": 6.107725620269775,
158
+ "eval_rouge1": 0.3974,
159
+ "eval_rouge2": 0.167,
160
+ "eval_rougeL": 0.2173,
161
+ "eval_rougeLsum": 0.3426,
162
+ "eval_runtime": 21.6952,
163
+ "eval_samples_per_second": 11.431,
164
+ "eval_steps_per_second": 0.184,
165
+ "step": 192
166
+ },
167
+ {
168
+ "epoch": 13.0,
169
+ "eval_gen_len": 600.0,
170
+ "eval_loss": 6.099628448486328,
171
+ "eval_rouge1": 0.3976,
172
+ "eval_rouge2": 0.167,
173
+ "eval_rougeL": 0.2172,
174
+ "eval_rougeLsum": 0.3428,
175
+ "eval_runtime": 21.1438,
176
+ "eval_samples_per_second": 11.729,
177
+ "eval_steps_per_second": 0.189,
178
+ "step": 208
179
+ },
180
+ {
181
+ "epoch": 14.0,
182
+ "eval_gen_len": 600.0,
183
+ "eval_loss": 6.096395492553711,
184
+ "eval_rouge1": 0.3975,
185
+ "eval_rouge2": 0.167,
186
+ "eval_rougeL": 0.2171,
187
+ "eval_rougeLsum": 0.3426,
188
+ "eval_runtime": 21.6504,
189
+ "eval_samples_per_second": 11.455,
190
+ "eval_steps_per_second": 0.185,
191
+ "step": 224
192
+ },
193
+ {
194
+ "epoch": 15.0,
195
+ "eval_gen_len": 600.0,
196
+ "eval_loss": 6.0916852951049805,
197
+ "eval_rouge1": 0.3979,
198
+ "eval_rouge2": 0.167,
199
+ "eval_rougeL": 0.2168,
200
+ "eval_rougeLsum": 0.3427,
201
+ "eval_runtime": 21.4782,
202
+ "eval_samples_per_second": 11.547,
203
+ "eval_steps_per_second": 0.186,
204
+ "step": 240
205
+ },
206
+ {
207
+ "epoch": 16.0,
208
+ "eval_gen_len": 600.0,
209
+ "eval_loss": 6.090492248535156,
210
+ "eval_rouge1": 0.3977,
211
+ "eval_rouge2": 0.1672,
212
+ "eval_rougeL": 0.2173,
213
+ "eval_rougeLsum": 0.3428,
214
+ "eval_runtime": 21.7128,
215
+ "eval_samples_per_second": 11.422,
216
+ "eval_steps_per_second": 0.184,
217
+ "step": 256
218
+ },
219
+ {
220
+ "epoch": 17.0,
221
+ "eval_gen_len": 600.0,
222
+ "eval_loss": 6.091054916381836,
223
+ "eval_rouge1": 0.399,
224
+ "eval_rouge2": 0.168,
225
+ "eval_rougeL": 0.2176,
226
+ "eval_rougeLsum": 0.3436,
227
+ "eval_runtime": 21.2583,
228
+ "eval_samples_per_second": 11.666,
229
+ "eval_steps_per_second": 0.188,
230
+ "step": 272
231
+ },
232
+ {
233
+ "epoch": 18.0,
234
+ "eval_gen_len": 600.0,
235
+ "eval_loss": 6.0864386558532715,
236
+ "eval_rouge1": 0.3985,
237
+ "eval_rouge2": 0.1675,
238
+ "eval_rougeL": 0.2172,
239
+ "eval_rougeLsum": 0.3431,
240
+ "eval_runtime": 21.4489,
241
+ "eval_samples_per_second": 11.562,
242
+ "eval_steps_per_second": 0.186,
243
+ "step": 288
244
+ },
245
+ {
246
+ "epoch": 19.0,
247
+ "eval_gen_len": 600.0,
248
+ "eval_loss": 6.082566261291504,
249
+ "eval_rouge1": 0.4004,
250
+ "eval_rouge2": 0.1686,
251
+ "eval_rougeL": 0.2186,
252
+ "eval_rougeLsum": 0.3451,
253
+ "eval_runtime": 21.4779,
254
+ "eval_samples_per_second": 11.547,
255
+ "eval_steps_per_second": 0.186,
256
+ "step": 304
257
+ },
258
+ {
259
+ "epoch": 20.0,
260
+ "eval_gen_len": 600.0,
261
+ "eval_loss": 6.0813798904418945,
262
+ "eval_rouge1": 0.4009,
263
+ "eval_rouge2": 0.1689,
264
+ "eval_rougeL": 0.2189,
265
+ "eval_rougeLsum": 0.3454,
266
+ "eval_runtime": 21.5568,
267
+ "eval_samples_per_second": 11.504,
268
+ "eval_steps_per_second": 0.186,
269
+ "step": 320
270
+ },
271
+ {
272
+ "epoch": 21.0,
273
+ "eval_gen_len": 600.0,
274
+ "eval_loss": 6.082016944885254,
275
+ "eval_rouge1": 0.3999,
276
+ "eval_rouge2": 0.1682,
277
+ "eval_rougeL": 0.218,
278
+ "eval_rougeLsum": 0.3444,
279
+ "eval_runtime": 21.5727,
280
+ "eval_samples_per_second": 11.496,
281
+ "eval_steps_per_second": 0.185,
282
+ "step": 336
283
+ },
284
+ {
285
+ "epoch": 22.0,
286
+ "eval_gen_len": 600.0,
287
+ "eval_loss": 6.082878589630127,
288
+ "eval_rouge1": 0.4076,
289
+ "eval_rouge2": 0.1718,
290
+ "eval_rougeL": 0.2222,
291
+ "eval_rougeLsum": 0.3508,
292
+ "eval_runtime": 20.8434,
293
+ "eval_samples_per_second": 11.898,
294
+ "eval_steps_per_second": 0.192,
295
+ "step": 352
296
+ },
297
+ {
298
+ "epoch": 23.0,
299
+ "eval_gen_len": 600.0,
300
+ "eval_loss": 6.080228805541992,
301
+ "eval_rouge1": 0.405,
302
+ "eval_rouge2": 0.1705,
303
+ "eval_rougeL": 0.221,
304
+ "eval_rougeLsum": 0.3488,
305
+ "eval_runtime": 21.1916,
306
+ "eval_samples_per_second": 11.703,
307
+ "eval_steps_per_second": 0.189,
308
+ "step": 368
309
+ },
310
+ {
311
+ "epoch": 24.0,
312
+ "eval_gen_len": 600.0,
313
+ "eval_loss": 6.07808780670166,
314
+ "eval_rouge1": 0.4052,
315
+ "eval_rouge2": 0.1709,
316
+ "eval_rougeL": 0.2212,
317
+ "eval_rougeLsum": 0.3491,
318
+ "eval_runtime": 21.3026,
319
+ "eval_samples_per_second": 11.642,
320
+ "eval_steps_per_second": 0.188,
321
+ "step": 384
322
+ },
323
+ {
324
+ "epoch": 25.0,
325
+ "eval_gen_len": 600.0,
326
+ "eval_loss": 6.077059268951416,
327
+ "eval_rouge1": 0.4064,
328
+ "eval_rouge2": 0.1711,
329
+ "eval_rougeL": 0.2216,
330
+ "eval_rougeLsum": 0.3498,
331
+ "eval_runtime": 20.9702,
332
+ "eval_samples_per_second": 11.826,
333
+ "eval_steps_per_second": 0.191,
334
+ "step": 400
335
+ },
336
+ {
337
+ "epoch": 26.0,
338
+ "eval_gen_len": 600.0,
339
+ "eval_loss": 6.075596809387207,
340
+ "eval_rouge1": 0.4086,
341
+ "eval_rouge2": 0.1723,
342
+ "eval_rougeL": 0.223,
343
+ "eval_rougeLsum": 0.3517,
344
+ "eval_runtime": 21.1984,
345
+ "eval_samples_per_second": 11.699,
346
+ "eval_steps_per_second": 0.189,
347
+ "step": 416
348
+ },
349
+ {
350
+ "epoch": 27.0,
351
+ "eval_gen_len": 600.0,
352
+ "eval_loss": 6.075705528259277,
353
+ "eval_rouge1": 0.4075,
354
+ "eval_rouge2": 0.1719,
355
+ "eval_rougeL": 0.2224,
356
+ "eval_rougeLsum": 0.3509,
357
+ "eval_runtime": 20.7964,
358
+ "eval_samples_per_second": 11.925,
359
+ "eval_steps_per_second": 0.192,
360
+ "step": 432
361
+ },
362
+ {
363
+ "epoch": 28.0,
364
+ "eval_gen_len": 600.0,
365
+ "eval_loss": 6.075275421142578,
366
+ "eval_rouge1": 0.4081,
367
+ "eval_rouge2": 0.1722,
368
+ "eval_rougeL": 0.2224,
369
+ "eval_rougeLsum": 0.3509,
370
+ "eval_runtime": 20.9972,
371
+ "eval_samples_per_second": 11.811,
372
+ "eval_steps_per_second": 0.191,
373
+ "step": 448
374
+ },
375
+ {
376
+ "epoch": 29.0,
377
+ "eval_gen_len": 600.0,
378
+ "eval_loss": 6.076692581176758,
379
+ "eval_rouge1": 0.4132,
380
+ "eval_rouge2": 0.1751,
381
+ "eval_rougeL": 0.2258,
382
+ "eval_rougeLsum": 0.3553,
383
+ "eval_runtime": 21.0313,
384
+ "eval_samples_per_second": 11.792,
385
+ "eval_steps_per_second": 0.19,
386
+ "step": 464
387
+ },
388
+ {
389
+ "epoch": 30.0,
390
+ "eval_gen_len": 600.0,
391
+ "eval_loss": 6.075990676879883,
392
+ "eval_rouge1": 0.4108,
393
+ "eval_rouge2": 0.1737,
394
+ "eval_rougeL": 0.2242,
395
+ "eval_rougeLsum": 0.3533,
396
+ "eval_runtime": 20.714,
397
+ "eval_samples_per_second": 11.973,
398
+ "eval_steps_per_second": 0.193,
399
+ "step": 480
400
+ },
401
+ {
402
+ "epoch": 31.0,
403
+ "eval_gen_len": 600.0,
404
+ "eval_loss": 6.074672222137451,
405
+ "eval_rouge1": 0.4126,
406
+ "eval_rouge2": 0.1747,
407
+ "eval_rougeL": 0.2253,
408
+ "eval_rougeLsum": 0.3546,
409
+ "eval_runtime": 21.1511,
410
+ "eval_samples_per_second": 11.725,
411
+ "eval_steps_per_second": 0.189,
412
+ "step": 496
413
+ },
414
+ {
415
+ "epoch": 31.25,
416
+ "grad_norm": 237350.25,
417
+ "learning_rate": 2.1428571428571427e-06,
418
+ "loss": 6.1153,
419
+ "step": 500
420
+ },
421
+ {
422
+ "epoch": 32.0,
423
+ "eval_gen_len": 600.0,
424
+ "eval_loss": 6.076193809509277,
425
+ "eval_rouge1": 0.4119,
426
+ "eval_rouge2": 0.1744,
427
+ "eval_rougeL": 0.2248,
428
+ "eval_rougeLsum": 0.3541,
429
+ "eval_runtime": 20.5412,
430
+ "eval_samples_per_second": 12.073,
431
+ "eval_steps_per_second": 0.195,
432
+ "step": 512
433
+ },
434
+ {
435
+ "epoch": 33.0,
436
+ "eval_gen_len": 600.0,
437
+ "eval_loss": 6.074151039123535,
438
+ "eval_rouge1": 0.4123,
439
+ "eval_rouge2": 0.1746,
440
+ "eval_rougeL": 0.2251,
441
+ "eval_rougeLsum": 0.3545,
442
+ "eval_runtime": 21.0056,
443
+ "eval_samples_per_second": 11.806,
444
+ "eval_steps_per_second": 0.19,
445
+ "step": 528
446
+ },
447
+ {
448
+ "epoch": 34.0,
449
+ "eval_gen_len": 600.0,
450
+ "eval_loss": 6.076315879821777,
451
+ "eval_rouge1": 0.4114,
452
+ "eval_rouge2": 0.1741,
453
+ "eval_rougeL": 0.2246,
454
+ "eval_rougeLsum": 0.3537,
455
+ "eval_runtime": 20.6937,
456
+ "eval_samples_per_second": 11.984,
457
+ "eval_steps_per_second": 0.193,
458
+ "step": 544
459
+ },
460
+ {
461
+ "epoch": 35.0,
462
+ "eval_gen_len": 600.0,
463
+ "eval_loss": 6.0760498046875,
464
+ "eval_rouge1": 0.4119,
465
+ "eval_rouge2": 0.1744,
466
+ "eval_rougeL": 0.2249,
467
+ "eval_rougeLsum": 0.3541,
468
+ "eval_runtime": 21.0084,
469
+ "eval_samples_per_second": 11.805,
470
+ "eval_steps_per_second": 0.19,
471
+ "step": 560
472
+ }
473
+ ],
474
+ "logging_steps": 500,
475
+ "max_steps": 560,
476
+ "num_input_tokens_seen": 0,
477
+ "num_train_epochs": 35,
478
+ "save_steps": 500,
479
+ "total_flos": 9044623687680000.0,
480
+ "train_batch_size": 64,
481
+ "trial_name": null,
482
+ "trial_params": null
483
+ }
checkpoint-560/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8ecd0faf5f9d90a3838ab34a2691a497d77a695ec8c1fb0490440a6eb10f007c
3
+ size 4795
checkpoint-560/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
runs/Mar26_17-35-02_lambda-hyperplane05/events.out.tfevents.1711492509.lambda-hyperplane05.53901.0 CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:91921cbad614919bc3773363d05d2ba63368ac908d7a6ad7ec92fb62f0f1234f
3
- size 23062
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f056b542e8fde81694d3cc9f11c67a1abf413a1515fa480e87d6ee135fd23bfe
3
+ size 23941