Padlex committed
Commit 3f70cd7 · 1 Parent(s): bf6f19e

Upload 30 files

adapter_config.json ADDED
@@ -0,0 +1,19 @@
+ {
+   "base_model_name_or_path": "decapoda-research/llama-7b-hf",
+   "bias": "none",
+   "enable_lora": null,
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "lora_alpha": 16,
+   "lora_dropout": 0.05,
+   "merge_weights": false,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 8,
+   "target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }

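For orientation (not part of the commit itself): adapter_config.json is what the peft library reads when the adapter is attached back onto its base model. A minimal loading sketch, assuming the adapter files have been downloaded to a local directory and the base checkpoint named in "base_model_name_or_path" is still obtainable:

# Minimal sketch, not part of this repo; the local adapter path is hypothetical.
import torch
from transformers import LlamaForCausalLM
from peft import PeftModel

base = LlamaForCausalLM.from_pretrained(
    "decapoda-research/llama-7b-hf",  # "base_model_name_or_path" above
    torch_dtype=torch.float16,
)
# Directory containing adapter_config.json and adapter_model.bin:
model = PeftModel.from_pretrained(base, "./lora_adapter")
model.eval()
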
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:64ff77e111d1f45fdc703693a5f15b246d40843847a42012084a0a93cfb8313b
+ size 16822989

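The three lines above are a Git LFS pointer, not the weights themselves: per the git-lfs spec referenced on the first line, `oid sha256:` is the SHA-256 digest of the real file and `size` is its byte count. That makes integrity easy to check after fetching the object; a minimal sketch:

# Minimal sketch: verify a fetched LFS object against the pointer's oid above.
import hashlib

def sha256_of(path: str) -> str:
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "64ff77e111d1f45fdc703693a5f15b246d40843847a42012084a0a93cfb8313b"
assert sha256_of("adapter_model.bin") == expected

The same check applies to every `version https://git-lfs.github.com/spec/v1` pointer in this commit.
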
checkpoint-1000/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:78e180c5106dc4637c6e06bd3f6962dd7985c947741124c93fb60e3117950214
+ size 33661637

checkpoint-1000/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:95f39502a58c7bb52c66dbd78964733626525e111f25b19157447678e4fd8489
+ size 16822989

checkpoint-1000/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f4d3a419e3565914fb7d23e3a197bab0d29ab2c376b64718bd8d2652b6add445
+ size 14575

checkpoint-1000/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:636a1ea3193a7963286c6a67f3befe6fd39638b0c707fe4288acba215be4adc9
+ size 557

checkpoint-1000/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d631f2519aa4a7efbc127756b3d2412768d0d7f5c21107b521243789e389c0b2
+ size 627

checkpoint-1000/special_tokens_map.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "bos_token": "<s>",
+   "eos_token": "</s>",
+   "pad_token": "<unk>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }

checkpoint-1000/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723

checkpoint-1000/tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "clean_up_tokenization_spaces": false,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": null,
+   "sp_model_kwargs": {},
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }

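A note on the odd-looking `model_max_length`: 1000000000000000019884624838656 is `int(1e30)` after float rounding, the sentinel that transformers writes when a tokenizer is saved without an explicit maximum length, so callers should set their own limit. A minimal sketch, assuming the checkpoint directory is available locally:

# Minimal sketch, hypothetical local path: load the saved tokenizer and cap
# its length explicitly instead of trusting the 1e30 sentinel.
from transformers import LlamaTokenizer

tok = LlamaTokenizer.from_pretrained("checkpoint-1000")
tok.model_max_length = 2048  # matches "cutoff_len" in finetune_args.json below
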
checkpoint-1000/trainer_state.json ADDED
@@ -0,0 +1,632 @@
+ {
+   "best_metric": 0.50592440366745,
+   "best_model_checkpoint": "/content/My Drive/LLaMA-LoRA Tuner/lora_models/earthshine-nondecorative-2023-06-27-17-40-07/checkpoint-1000",
+   "epoch": 2.808988764044944,
+   "global_step": 1000,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.03,
+       "learning_rate": 2.3999999999999997e-05,
+       "loss": 2.0693,
+       "step": 10
+     },
+     {
+       "epoch": 0.06,
+       "learning_rate": 5.399999999999999e-05,
+       "loss": 1.9946,
+       "step": 20
+     },
+     {
+       "epoch": 0.08,
+       "learning_rate": 8.1e-05,
+       "loss": 1.8837,
+       "step": 30
+     },
+     {
+       "epoch": 0.11,
+       "learning_rate": 0.00011099999999999999,
+       "loss": 1.6655,
+       "step": 40
+     },
+     {
+       "epoch": 0.14,
+       "learning_rate": 0.00014099999999999998,
+       "loss": 1.6095,
+       "step": 50
+     },
+     {
+       "epoch": 0.17,
+       "learning_rate": 0.00017099999999999998,
+       "loss": 1.3838,
+       "step": 60
+     },
+     {
+       "epoch": 0.2,
+       "learning_rate": 0.000201,
+       "loss": 1.3548,
+       "step": 70
+     },
+     {
+       "epoch": 0.22,
+       "learning_rate": 0.00023099999999999998,
+       "loss": 1.0789,
+       "step": 80
+     },
+     {
+       "epoch": 0.25,
+       "learning_rate": 0.000261,
+       "loss": 1.0589,
+       "step": 90
+     },
+     {
+       "epoch": 0.28,
+       "learning_rate": 0.00029099999999999997,
+       "loss": 1.0259,
+       "step": 100
+     },
+     {
+       "epoch": 0.31,
+       "learning_rate": 0.000299393063583815,
+       "loss": 0.9936,
+       "step": 110
+     },
+     {
+       "epoch": 0.34,
+       "learning_rate": 0.0002985260115606936,
+       "loss": 0.9633,
+       "step": 120
+     },
+     {
+       "epoch": 0.37,
+       "learning_rate": 0.00029765895953757224,
+       "loss": 1.0018,
+       "step": 130
+     },
+     {
+       "epoch": 0.39,
+       "learning_rate": 0.0002967919075144509,
+       "loss": 0.893,
+       "step": 140
+     },
+     {
+       "epoch": 0.42,
+       "learning_rate": 0.00029592485549132945,
+       "loss": 0.8079,
+       "step": 150
+     },
+     {
+       "epoch": 0.45,
+       "learning_rate": 0.0002950578034682081,
+       "loss": 0.8668,
+       "step": 160
+     },
+     {
+       "epoch": 0.48,
+       "learning_rate": 0.00029419075144508666,
+       "loss": 0.773,
+       "step": 170
+     },
+     {
+       "epoch": 0.51,
+       "learning_rate": 0.0002933236994219653,
+       "loss": 0.8139,
+       "step": 180
+     },
+     {
+       "epoch": 0.53,
+       "learning_rate": 0.0002924566473988439,
+       "loss": 0.9139,
+       "step": 190
+     },
+     {
+       "epoch": 0.56,
+       "learning_rate": 0.0002915895953757225,
+       "loss": 0.7371,
+       "step": 200
+     },
+     {
+       "epoch": 0.59,
+       "learning_rate": 0.0002907225433526011,
+       "loss": 0.8604,
+       "step": 210
+     },
+     {
+       "epoch": 0.62,
+       "learning_rate": 0.0002898554913294797,
+       "loss": 0.8597,
+       "step": 220
+     },
+     {
+       "epoch": 0.65,
+       "learning_rate": 0.0002889884393063584,
+       "loss": 0.7954,
+       "step": 230
+     },
+     {
+       "epoch": 0.67,
+       "learning_rate": 0.00028812138728323696,
+       "loss": 0.7559,
+       "step": 240
+     },
+     {
+       "epoch": 0.7,
+       "learning_rate": 0.0002872543352601156,
+       "loss": 0.8488,
+       "step": 250
+     },
+     {
+       "epoch": 0.73,
+       "learning_rate": 0.00028638728323699417,
+       "loss": 0.7252,
+       "step": 260
+     },
+     {
+       "epoch": 0.76,
+       "learning_rate": 0.0002855202312138728,
+       "loss": 0.6751,
+       "step": 270
+     },
+     {
+       "epoch": 0.79,
+       "learning_rate": 0.00028465317919075143,
+       "loss": 0.7852,
+       "step": 280
+     },
+     {
+       "epoch": 0.81,
+       "learning_rate": 0.00028378612716763,
+       "loss": 0.7843,
+       "step": 290
+     },
+     {
+       "epoch": 0.84,
+       "learning_rate": 0.00028291907514450864,
+       "loss": 0.7886,
+       "step": 300
+     },
+     {
+       "epoch": 0.87,
+       "learning_rate": 0.00028205202312138727,
+       "loss": 0.7218,
+       "step": 310
+     },
+     {
+       "epoch": 0.9,
+       "learning_rate": 0.0002811849710982659,
+       "loss": 0.7919,
+       "step": 320
+     },
+     {
+       "epoch": 0.93,
+       "learning_rate": 0.0002803179190751445,
+       "loss": 0.7248,
+       "step": 330
+     },
+     {
+       "epoch": 0.96,
+       "learning_rate": 0.0002794508670520231,
+       "loss": 0.7699,
+       "step": 340
+     },
+     {
+       "epoch": 0.98,
+       "learning_rate": 0.00027858381502890174,
+       "loss": 0.6909,
+       "step": 350
+     },
+     {
+       "epoch": 1.01,
+       "learning_rate": 0.0002777167630057803,
+       "loss": 0.6992,
+       "step": 360
+     },
+     {
+       "epoch": 1.04,
+       "learning_rate": 0.00027684971098265894,
+       "loss": 0.6747,
+       "step": 370
+     },
+     {
+       "epoch": 1.07,
+       "learning_rate": 0.0002759826589595375,
+       "loss": 0.672,
+       "step": 380
+     },
+     {
+       "epoch": 1.1,
+       "learning_rate": 0.0002751156069364162,
+       "loss": 0.5997,
+       "step": 390
+     },
+     {
+       "epoch": 1.12,
+       "learning_rate": 0.0002742485549132948,
+       "loss": 0.747,
+       "step": 400
+     },
+     {
+       "epoch": 1.15,
+       "learning_rate": 0.0002733815028901734,
+       "loss": 0.7217,
+       "step": 410
+     },
+     {
+       "epoch": 1.18,
+       "learning_rate": 0.000272514450867052,
+       "loss": 0.683,
+       "step": 420
+     },
+     {
+       "epoch": 1.21,
+       "learning_rate": 0.0002716473988439306,
+       "loss": 0.63,
+       "step": 430
+     },
+     {
+       "epoch": 1.24,
+       "learning_rate": 0.00027078034682080925,
+       "loss": 0.6889,
+       "step": 440
+     },
+     {
+       "epoch": 1.26,
+       "learning_rate": 0.0002699132947976878,
+       "loss": 0.6582,
+       "step": 450
+     },
+     {
+       "epoch": 1.29,
+       "learning_rate": 0.00026904624277456645,
+       "loss": 0.6366,
+       "step": 460
+     },
+     {
+       "epoch": 1.32,
+       "learning_rate": 0.00026817919075144503,
+       "loss": 0.7249,
+       "step": 470
+     },
+     {
+       "epoch": 1.35,
+       "learning_rate": 0.0002673121387283237,
+       "loss": 0.53,
+       "step": 480
+     },
+     {
+       "epoch": 1.38,
+       "learning_rate": 0.0002664450867052023,
+       "loss": 0.6172,
+       "step": 490
+     },
+     {
+       "epoch": 1.4,
+       "learning_rate": 0.0002655780346820809,
+       "loss": 0.6033,
+       "step": 500
+     },
+     {
+       "epoch": 1.4,
+       "eval_loss": 0.5882205367088318,
+       "eval_runtime": 14.2208,
+       "eval_samples_per_second": 3.516,
+       "eval_steps_per_second": 0.492,
+       "step": 500
+     },
+     {
+       "epoch": 1.43,
+       "learning_rate": 0.0002647109826589595,
+       "loss": 0.7452,
+       "step": 510
+     },
+     {
+       "epoch": 1.46,
+       "learning_rate": 0.00026384393063583813,
+       "loss": 0.6689,
+       "step": 520
+     },
+     {
+       "epoch": 1.49,
+       "learning_rate": 0.00026297687861271676,
+       "loss": 0.6339,
+       "step": 530
+     },
+     {
+       "epoch": 1.52,
+       "learning_rate": 0.00026210982658959533,
+       "loss": 0.6538,
+       "step": 540
+     },
+     {
+       "epoch": 1.54,
+       "learning_rate": 0.00026124277456647397,
+       "loss": 0.6864,
+       "step": 550
+     },
+     {
+       "epoch": 1.57,
+       "learning_rate": 0.0002603757225433526,
+       "loss": 0.6466,
+       "step": 560
+     },
+     {
+       "epoch": 1.6,
+       "learning_rate": 0.00025950867052023117,
+       "loss": 0.6186,
+       "step": 570
+     },
+     {
+       "epoch": 1.63,
+       "learning_rate": 0.0002586416184971098,
+       "loss": 0.6872,
+       "step": 580
+     },
+     {
+       "epoch": 1.66,
+       "learning_rate": 0.00025777456647398843,
+       "loss": 0.7206,
+       "step": 590
+     },
+     {
+       "epoch": 1.69,
+       "learning_rate": 0.000256907514450867,
+       "loss": 0.5848,
+       "step": 600
+     },
+     {
+       "epoch": 1.71,
+       "learning_rate": 0.00025604046242774564,
+       "loss": 0.711,
+       "step": 610
+     },
+     {
+       "epoch": 1.74,
+       "learning_rate": 0.00025517341040462427,
+       "loss": 0.628,
+       "step": 620
+     },
+     {
+       "epoch": 1.77,
+       "learning_rate": 0.00025430635838150285,
+       "loss": 0.7035,
+       "step": 630
+     },
+     {
+       "epoch": 1.8,
+       "learning_rate": 0.0002534393063583815,
+       "loss": 0.5965,
+       "step": 640
+     },
+     {
+       "epoch": 1.83,
+       "learning_rate": 0.0002525722543352601,
+       "loss": 0.6486,
+       "step": 650
+     },
+     {
+       "epoch": 1.85,
+       "learning_rate": 0.0002517052023121387,
+       "loss": 0.6264,
+       "step": 660
+     },
+     {
+       "epoch": 1.88,
+       "learning_rate": 0.0002508381502890173,
+       "loss": 0.6469,
+       "step": 670
+     },
+     {
+       "epoch": 1.91,
+       "learning_rate": 0.00024997109826589595,
+       "loss": 0.61,
+       "step": 680
+     },
+     {
+       "epoch": 1.94,
+       "learning_rate": 0.0002491040462427746,
+       "loss": 0.6664,
+       "step": 690
+     },
+     {
+       "epoch": 1.97,
+       "learning_rate": 0.00024823699421965315,
+       "loss": 0.5312,
+       "step": 700
+     },
+     {
+       "epoch": 1.99,
+       "learning_rate": 0.0002473699421965318,
+       "loss": 0.4703,
+       "step": 710
+     },
+     {
+       "epoch": 2.02,
+       "learning_rate": 0.00024650289017341036,
+       "loss": 0.535,
+       "step": 720
+     },
+     {
+       "epoch": 2.05,
+       "learning_rate": 0.000245635838150289,
+       "loss": 0.6568,
+       "step": 730
+     },
+     {
+       "epoch": 2.08,
+       "learning_rate": 0.0002447687861271676,
+       "loss": 0.6303,
+       "step": 740
+     },
+     {
+       "epoch": 2.11,
+       "learning_rate": 0.00024390173410404622,
+       "loss": 0.4911,
+       "step": 750
+     },
+     {
+       "epoch": 2.13,
+       "learning_rate": 0.00024303468208092483,
+       "loss": 0.5043,
+       "step": 760
+     },
+     {
+       "epoch": 2.16,
+       "learning_rate": 0.00024216763005780346,
+       "loss": 0.5248,
+       "step": 770
+     },
+     {
+       "epoch": 2.19,
+       "learning_rate": 0.00024130057803468206,
+       "loss": 0.6274,
+       "step": 780
+     },
+     {
+       "epoch": 2.22,
+       "learning_rate": 0.00024043352601156066,
+       "loss": 0.5617,
+       "step": 790
+     },
+     {
+       "epoch": 2.25,
+       "learning_rate": 0.00023956647398843927,
+       "loss": 0.5978,
+       "step": 800
+     },
+     {
+       "epoch": 2.28,
+       "learning_rate": 0.00023869942196531787,
+       "loss": 0.6027,
+       "step": 810
+     },
+     {
+       "epoch": 2.3,
+       "learning_rate": 0.00023783236994219653,
+       "loss": 0.5925,
+       "step": 820
+     },
+     {
+       "epoch": 2.33,
+       "learning_rate": 0.00023696531791907513,
+       "loss": 0.593,
+       "step": 830
+     },
+     {
+       "epoch": 2.36,
+       "learning_rate": 0.00023609826589595373,
+       "loss": 0.6181,
+       "step": 840
+     },
+     {
+       "epoch": 2.39,
+       "learning_rate": 0.00023523121387283234,
+       "loss": 0.4752,
+       "step": 850
+     },
+     {
+       "epoch": 2.42,
+       "learning_rate": 0.00023436416184971097,
+       "loss": 0.655,
+       "step": 860
+     },
+     {
+       "epoch": 2.44,
+       "learning_rate": 0.00023349710982658957,
+       "loss": 0.5577,
+       "step": 870
+     },
+     {
+       "epoch": 2.47,
+       "learning_rate": 0.00023263005780346818,
+       "loss": 0.6132,
+       "step": 880
+     },
+     {
+       "epoch": 2.5,
+       "learning_rate": 0.00023176300578034678,
+       "loss": 0.5003,
+       "step": 890
+     },
+     {
+       "epoch": 2.53,
+       "learning_rate": 0.00023089595375722544,
+       "loss": 0.5323,
+       "step": 900
+     },
+     {
+       "epoch": 2.56,
+       "learning_rate": 0.00023002890173410404,
+       "loss": 0.5908,
+       "step": 910
+     },
+     {
+       "epoch": 2.58,
+       "learning_rate": 0.00022916184971098264,
+       "loss": 0.5911,
+       "step": 920
+     },
+     {
+       "epoch": 2.61,
+       "learning_rate": 0.00022829479768786125,
+       "loss": 0.565,
+       "step": 930
+     },
+     {
+       "epoch": 2.64,
+       "learning_rate": 0.00022742774566473988,
+       "loss": 0.5789,
+       "step": 940
+     },
+     {
+       "epoch": 2.67,
+       "learning_rate": 0.00022656069364161848,
+       "loss": 0.5242,
+       "step": 950
+     },
+     {
+       "epoch": 2.7,
+       "learning_rate": 0.00022569364161849708,
+       "loss": 0.5082,
+       "step": 960
+     },
+     {
+       "epoch": 2.72,
+       "learning_rate": 0.0002248265895953757,
+       "loss": 0.5184,
+       "step": 970
+     },
+     {
+       "epoch": 2.75,
+       "learning_rate": 0.0002239595375722543,
+       "loss": 0.6131,
+       "step": 980
+     },
+     {
+       "epoch": 2.78,
+       "learning_rate": 0.00022309248554913295,
+       "loss": 0.5129,
+       "step": 990
+     },
+     {
+       "epoch": 2.81,
+       "learning_rate": 0.00022222543352601155,
+       "loss": 0.6016,
+       "step": 1000
+     },
+     {
+       "epoch": 2.81,
+       "eval_loss": 0.50592440366745,
+       "eval_runtime": 14.2225,
+       "eval_samples_per_second": 3.516,
+       "eval_steps_per_second": 0.492,
+       "step": 1000
+     }
+   ],
+   "max_steps": 3560,
+   "num_train_epochs": 10,
+   "total_flos": 2.1070675154141184e+17,
+   "trial_name": null,
+   "trial_params": null
+ }

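Since `log_history` above is plain JSON, the training curve is easy to pull out for plotting or comparison; a minimal sketch (not part of the commit), assuming the checkpoint directory is local:

# Minimal sketch: extract train/eval losses from the trainer state above
# (training loss is logged every 10 steps, eval loss every 500).
import json

with open("checkpoint-1000/trainer_state.json") as f:
    state = json.load(f)

train = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in state["log_history"] if "eval_loss" in e]
print(evals)  # [(500, 0.5882205367088318), (1000, 0.50592440366745)]
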
checkpoint-1000/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:192bb8c013ae662fb0d4863dbc3254df0fbc2d1dc1eda37c4f9a2ac04b123a99
+ size 3771

checkpoint-500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aa46925987f162d4d15ec6a1a7894299219eeb165ab66207453775a0e251a2ea
+ size 33661637

checkpoint-500/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:661bba0eef7d213a66caca9ed4d6c3a10d5ac4f849ec6fd0d298602f4fc52a71
+ size 16822989

checkpoint-500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:80374c523339cbada5653606c3f391bc8649fd8866d5fea9a3587097101c165c
+ size 14575

checkpoint-500/scaler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c0d73e960e0c59a3ccac82210aaa3abcd9e00f7d9ab5c7ba39ef8751e5c0ee4b
+ size 557

checkpoint-500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f5c26271f5f8a5d30d9a855ee399a8c85768b7490a42865ed931b650844878c5
+ size 627

checkpoint-500/special_tokens_map.json ADDED
@@ -0,0 +1,12 @@
+ {
+   "bos_token": "<s>",
+   "eos_token": "</s>",
+   "pad_token": "<unk>",
+   "unk_token": {
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }

checkpoint-500/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723

checkpoint-500/tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "add_bos_token": true,
+   "add_eos_token": false,
+   "bos_token": {
+     "__type": "AddedToken",
+     "content": "<s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "clean_up_tokenization_spaces": false,
+   "eos_token": {
+     "__type": "AddedToken",
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": null,
+   "sp_model_kwargs": {},
+   "tokenizer_class": "LlamaTokenizer",
+   "unk_token": {
+     "__type": "AddedToken",
+     "content": "<unk>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }

checkpoint-500/trainer_state.json ADDED
@@ -0,0 +1,324 @@
+ {
+   "best_metric": 0.5882205367088318,
+   "best_model_checkpoint": "/content/My Drive/LLaMA-LoRA Tuner/lora_models/earthshine-nondecorative-2023-06-27-17-40-07/checkpoint-500",
+   "epoch": 1.404494382022472,
+   "global_step": 500,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.03,
+       "learning_rate": 2.3999999999999997e-05,
+       "loss": 2.0693,
+       "step": 10
+     },
+     {
+       "epoch": 0.06,
+       "learning_rate": 5.399999999999999e-05,
+       "loss": 1.9946,
+       "step": 20
+     },
+     {
+       "epoch": 0.08,
+       "learning_rate": 8.1e-05,
+       "loss": 1.8837,
+       "step": 30
+     },
+     {
+       "epoch": 0.11,
+       "learning_rate": 0.00011099999999999999,
+       "loss": 1.6655,
+       "step": 40
+     },
+     {
+       "epoch": 0.14,
+       "learning_rate": 0.00014099999999999998,
+       "loss": 1.6095,
+       "step": 50
+     },
+     {
+       "epoch": 0.17,
+       "learning_rate": 0.00017099999999999998,
+       "loss": 1.3838,
+       "step": 60
+     },
+     {
+       "epoch": 0.2,
+       "learning_rate": 0.000201,
+       "loss": 1.3548,
+       "step": 70
+     },
+     {
+       "epoch": 0.22,
+       "learning_rate": 0.00023099999999999998,
+       "loss": 1.0789,
+       "step": 80
+     },
+     {
+       "epoch": 0.25,
+       "learning_rate": 0.000261,
+       "loss": 1.0589,
+       "step": 90
+     },
+     {
+       "epoch": 0.28,
+       "learning_rate": 0.00029099999999999997,
+       "loss": 1.0259,
+       "step": 100
+     },
+     {
+       "epoch": 0.31,
+       "learning_rate": 0.000299393063583815,
+       "loss": 0.9936,
+       "step": 110
+     },
+     {
+       "epoch": 0.34,
+       "learning_rate": 0.0002985260115606936,
+       "loss": 0.9633,
+       "step": 120
+     },
+     {
+       "epoch": 0.37,
+       "learning_rate": 0.00029765895953757224,
+       "loss": 1.0018,
+       "step": 130
+     },
+     {
+       "epoch": 0.39,
+       "learning_rate": 0.0002967919075144509,
+       "loss": 0.893,
+       "step": 140
+     },
+     {
+       "epoch": 0.42,
+       "learning_rate": 0.00029592485549132945,
+       "loss": 0.8079,
+       "step": 150
+     },
+     {
+       "epoch": 0.45,
+       "learning_rate": 0.0002950578034682081,
+       "loss": 0.8668,
+       "step": 160
+     },
+     {
+       "epoch": 0.48,
+       "learning_rate": 0.00029419075144508666,
+       "loss": 0.773,
+       "step": 170
+     },
+     {
+       "epoch": 0.51,
+       "learning_rate": 0.0002933236994219653,
+       "loss": 0.8139,
+       "step": 180
+     },
+     {
+       "epoch": 0.53,
+       "learning_rate": 0.0002924566473988439,
+       "loss": 0.9139,
+       "step": 190
+     },
+     {
+       "epoch": 0.56,
+       "learning_rate": 0.0002915895953757225,
+       "loss": 0.7371,
+       "step": 200
+     },
+     {
+       "epoch": 0.59,
+       "learning_rate": 0.0002907225433526011,
+       "loss": 0.8604,
+       "step": 210
+     },
+     {
+       "epoch": 0.62,
+       "learning_rate": 0.0002898554913294797,
+       "loss": 0.8597,
+       "step": 220
+     },
+     {
+       "epoch": 0.65,
+       "learning_rate": 0.0002889884393063584,
+       "loss": 0.7954,
+       "step": 230
+     },
+     {
+       "epoch": 0.67,
+       "learning_rate": 0.00028812138728323696,
+       "loss": 0.7559,
+       "step": 240
+     },
+     {
+       "epoch": 0.7,
+       "learning_rate": 0.0002872543352601156,
+       "loss": 0.8488,
+       "step": 250
+     },
+     {
+       "epoch": 0.73,
+       "learning_rate": 0.00028638728323699417,
+       "loss": 0.7252,
+       "step": 260
+     },
+     {
+       "epoch": 0.76,
+       "learning_rate": 0.0002855202312138728,
+       "loss": 0.6751,
+       "step": 270
+     },
+     {
+       "epoch": 0.79,
+       "learning_rate": 0.00028465317919075143,
+       "loss": 0.7852,
+       "step": 280
+     },
+     {
+       "epoch": 0.81,
+       "learning_rate": 0.00028378612716763,
+       "loss": 0.7843,
+       "step": 290
+     },
+     {
+       "epoch": 0.84,
+       "learning_rate": 0.00028291907514450864,
+       "loss": 0.7886,
+       "step": 300
+     },
+     {
+       "epoch": 0.87,
+       "learning_rate": 0.00028205202312138727,
+       "loss": 0.7218,
+       "step": 310
+     },
+     {
+       "epoch": 0.9,
+       "learning_rate": 0.0002811849710982659,
+       "loss": 0.7919,
+       "step": 320
+     },
+     {
+       "epoch": 0.93,
+       "learning_rate": 0.0002803179190751445,
+       "loss": 0.7248,
+       "step": 330
+     },
+     {
+       "epoch": 0.96,
+       "learning_rate": 0.0002794508670520231,
+       "loss": 0.7699,
+       "step": 340
+     },
+     {
+       "epoch": 0.98,
+       "learning_rate": 0.00027858381502890174,
+       "loss": 0.6909,
+       "step": 350
+     },
+     {
+       "epoch": 1.01,
+       "learning_rate": 0.0002777167630057803,
+       "loss": 0.6992,
+       "step": 360
+     },
+     {
+       "epoch": 1.04,
+       "learning_rate": 0.00027684971098265894,
+       "loss": 0.6747,
+       "step": 370
+     },
+     {
+       "epoch": 1.07,
+       "learning_rate": 0.0002759826589595375,
+       "loss": 0.672,
+       "step": 380
+     },
+     {
+       "epoch": 1.1,
+       "learning_rate": 0.0002751156069364162,
+       "loss": 0.5997,
+       "step": 390
+     },
+     {
+       "epoch": 1.12,
+       "learning_rate": 0.0002742485549132948,
+       "loss": 0.747,
+       "step": 400
+     },
+     {
+       "epoch": 1.15,
+       "learning_rate": 0.0002733815028901734,
+       "loss": 0.7217,
+       "step": 410
+     },
+     {
+       "epoch": 1.18,
+       "learning_rate": 0.000272514450867052,
+       "loss": 0.683,
+       "step": 420
+     },
+     {
+       "epoch": 1.21,
+       "learning_rate": 0.0002716473988439306,
+       "loss": 0.63,
+       "step": 430
+     },
+     {
+       "epoch": 1.24,
+       "learning_rate": 0.00027078034682080925,
+       "loss": 0.6889,
+       "step": 440
+     },
+     {
+       "epoch": 1.26,
+       "learning_rate": 0.0002699132947976878,
+       "loss": 0.6582,
+       "step": 450
+     },
+     {
+       "epoch": 1.29,
+       "learning_rate": 0.00026904624277456645,
+       "loss": 0.6366,
+       "step": 460
+     },
+     {
+       "epoch": 1.32,
+       "learning_rate": 0.00026817919075144503,
+       "loss": 0.7249,
+       "step": 470
+     },
+     {
+       "epoch": 1.35,
+       "learning_rate": 0.0002673121387283237,
+       "loss": 0.53,
+       "step": 480
+     },
+     {
+       "epoch": 1.38,
+       "learning_rate": 0.0002664450867052023,
+       "loss": 0.6172,
+       "step": 490
+     },
+     {
+       "epoch": 1.4,
+       "learning_rate": 0.0002655780346820809,
+       "loss": 0.6033,
+       "step": 500
+     },
+     {
+       "epoch": 1.4,
+       "eval_loss": 0.5882205367088318,
+       "eval_runtime": 14.2208,
+       "eval_samples_per_second": 3.516,
+       "eval_steps_per_second": 0.492,
+       "step": 500
+     }
+   ],
+   "max_steps": 3560,
+   "num_train_epochs": 10,
+   "total_flos": 1.0535163032469504e+17,
+   "trial_name": null,
+   "trial_params": null
+ }

checkpoint-500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:192bb8c013ae662fb0d4863dbc3254df0fbc2d1dc1eda37c4f9a2ac04b123a99
+ size 3771

finetune_args.json ADDED
@@ -0,0 +1,27 @@
+ {
+   "micro_batch_size": 4,
+   "gradient_accumulation_steps": 1,
+   "num_train_epochs": 10,
+   "learning_rate": 0.0003,
+   "cutoff_len": 2048,
+   "val_set_size": 50,
+   "lora_r": 8,
+   "lora_alpha": 16,
+   "lora_dropout": 0.05,
+   "lora_target_modules": [
+     "q_proj",
+     "v_proj"
+   ],
+   "lora_modules_to_save": [],
+   "train_on_inputs": true,
+   "group_by_length": false,
+   "load_in_8bit": true,
+   "fp16": true,
+   "bf16": false,
+   "gradient_checkpointing": false,
+   "save_steps": 500,
+   "save_total_limit": 5,
+   "logging_steps": 10,
+   "additional_training_arguments": null,
+   "additional_lora_config": null
+ }

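These tuner-level arguments map almost one-to-one onto the peft and transformers objects used for training (the full TrainingArguments dump appears in trainer_args.json below). A minimal sketch of the LoRA side, using only values shown above:

# Minimal sketch: the LoRA hyperparameters above expressed as a peft config.
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,  # lora_r
    lora_alpha=16,
    lora_dropout=0.05,
    target_modules=["q_proj", "v_proj"],
    bias="none",
    task_type="CAUSAL_LM",
)
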
info.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "base_model": "decapoda-research/llama-7b-hf",
+   "prompt_template": "alpaca",
+   "dataset_name": "N/A (from text input)",
+   "dataset_rows": 1473,
+   "trained_on_machine": "04e3f719f875",
+   "timestamp": 1687887738.143719,
+   "tuner_version": "f4254216"
+ }

runs/Jun27_17-42-45_04e3f719f875/1687887765.9930556/events.out.tfevents.1687887765.04e3f719f875.4175.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59cd0c7b15630c24e21d8875a07f94feb3f09a7c7a2dce48987feb914d9c2def
+ size 6109

runs/Jun27_17-42-45_04e3f719f875/events.out.tfevents.1687887765.04e3f719f875.4175.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d8cfa583b47c9b31f7c856b7919355df0e4cab0434410bbec2a99ea8d839cd53
+ size 21356

train_data_samples.json ADDED
The diff for this file is too large to render.
 
train_output.json ADDED
@@ -0,0 +1,12 @@
+ [
+   1041,
+   0.7449809693580173,
+   {
+     "train_runtime": 3194.7197,
+     "train_samples_per_second": 4.454,
+     "train_steps_per_second": 1.114,
+     "total_flos": 2.2017658953498624e+17,
+     "train_loss": 0.7449809693580173,
+     "epoch": 2.92
+   }
+ ]

trainer_args.json ADDED
@@ -0,0 +1,115 @@
+ {
+   "output_dir": "/content/My Drive/LLaMA-LoRA Tuner/lora_models/earthshine-nondecorative-2023-06-27-17-40-07",
+   "overwrite_output_dir": false,
+   "do_train": false,
+   "do_eval": true,
+   "do_predict": false,
+   "evaluation_strategy": "steps",
+   "prediction_loss_only": false,
+   "per_device_train_batch_size": 4,
+   "per_device_eval_batch_size": 8,
+   "per_gpu_train_batch_size": null,
+   "per_gpu_eval_batch_size": null,
+   "gradient_accumulation_steps": 1,
+   "eval_accumulation_steps": null,
+   "eval_delay": 0,
+   "learning_rate": 0.0003,
+   "weight_decay": 0.0,
+   "adam_beta1": 0.9,
+   "adam_beta2": 0.999,
+   "adam_epsilon": 1e-08,
+   "max_grad_norm": 1.0,
+   "num_train_epochs": 10,
+   "max_steps": -1,
+   "lr_scheduler_type": "linear",
+   "warmup_ratio": 0.0,
+   "warmup_steps": 100,
+   "log_level": "passive",
+   "log_level_replica": "warning",
+   "log_on_each_node": true,
+   "logging_dir": "/content/My Drive/LLaMA-LoRA Tuner/lora_models/earthshine-nondecorative-2023-06-27-17-40-07/runs/Jun27_17-42-45_04e3f719f875",
+   "logging_strategy": "steps",
+   "logging_first_step": false,
+   "logging_steps": 10,
+   "logging_nan_inf_filter": true,
+   "save_strategy": "steps",
+   "save_steps": 500,
+   "save_total_limit": 5,
+   "save_safetensors": false,
+   "save_on_each_node": false,
+   "no_cuda": false,
+   "use_mps_device": false,
+   "seed": 42,
+   "data_seed": null,
+   "jit_mode_eval": false,
+   "use_ipex": false,
+   "bf16": false,
+   "fp16": true,
+   "fp16_opt_level": "O1",
+   "half_precision_backend": "cuda_amp",
+   "bf16_full_eval": false,
+   "fp16_full_eval": false,
+   "tf32": null,
+   "local_rank": -1,
+   "xpu_backend": null,
+   "tpu_num_cores": null,
+   "tpu_metrics_debug": false,
+   "debug": [],
+   "dataloader_drop_last": false,
+   "eval_steps": 500,
+   "dataloader_num_workers": 0,
+   "past_index": -1,
+   "run_name": "/content/My Drive/LLaMA-LoRA Tuner/lora_models/earthshine-nondecorative-2023-06-27-17-40-07",
+   "disable_tqdm": false,
+   "remove_unused_columns": true,
+   "label_names": null,
+   "load_best_model_at_end": true,
+   "metric_for_best_model": "loss",
+   "greater_is_better": false,
+   "ignore_data_skip": false,
+   "sharded_ddp": [],
+   "fsdp": [],
+   "fsdp_min_num_params": 0,
+   "fsdp_config": {
+     "fsdp_min_num_params": 0,
+     "xla": false,
+     "xla_fsdp_grad_ckpt": false
+   },
+   "fsdp_transformer_layer_cls_to_wrap": null,
+   "deepspeed": null,
+   "label_smoothing_factor": 0.0,
+   "optim": "adamw_torch",
+   "optim_args": null,
+   "adafactor": false,
+   "group_by_length": false,
+   "length_column_name": "length",
+   "report_to": [
+     "tensorboard"
+   ],
+   "ddp_find_unused_parameters": null,
+   "ddp_bucket_cap_mb": null,
+   "dataloader_pin_memory": true,
+   "skip_memory_metrics": true,
+   "use_legacy_prediction_loop": false,
+   "push_to_hub": false,
+   "resume_from_checkpoint": null,
+   "hub_model_id": null,
+   "hub_strategy": "every_save",
+   "hub_token": "<HUB_TOKEN>",
+   "hub_private_repo": false,
+   "gradient_checkpointing": false,
+   "include_inputs_for_metrics": false,
+   "fp16_backend": "auto",
+   "push_to_hub_model_id": null,
+   "push_to_hub_organization": null,
+   "push_to_hub_token": "<PUSH_TO_HUB_TOKEN>",
+   "mp_parameters": "",
+   "auto_find_batch_size": false,
+   "full_determinism": false,
+   "torchdynamo": null,
+   "ray_scope": "last",
+   "ddp_timeout": 1800,
+   "torch_compile": false,
+   "torch_compile_backend": null,
+   "torch_compile_mode": null
+ }

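The dump above is the serialized form of a transformers.TrainingArguments. A minimal sketch reconstructing just the values that differ from the library defaults (output path shortened here for readability):

# Minimal sketch: the non-default trainer settings above, reconstructed.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="lora_models/earthshine-nondecorative-2023-06-27-17-40-07",  # shortened
    per_device_train_batch_size=4,
    per_device_eval_batch_size=8,
    learning_rate=3e-4,
    num_train_epochs=10,
    warmup_steps=100,
    fp16=True,
    evaluation_strategy="steps",
    eval_steps=500,
    save_steps=500,
    save_total_limit=5,
    logging_steps=10,
    load_best_model_at_end=True,
    metric_for_best_model="loss",
    report_to=["tensorboard"],
)
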
trainer_log_history.jsonl ADDED
@@ -0,0 +1,107 @@
+ {"loss": 2.0693, "learning_rate": 2.3999999999999997e-05, "epoch": 0.03, "step": 10}
+ {"loss": 1.9946, "learning_rate": 5.399999999999999e-05, "epoch": 0.06, "step": 20}
+ {"loss": 1.8837, "learning_rate": 8.1e-05, "epoch": 0.08, "step": 30}
+ {"loss": 1.6655, "learning_rate": 0.00011099999999999999, "epoch": 0.11, "step": 40}
+ {"loss": 1.6095, "learning_rate": 0.00014099999999999998, "epoch": 0.14, "step": 50}
+ {"loss": 1.3838, "learning_rate": 0.00017099999999999998, "epoch": 0.17, "step": 60}
+ {"loss": 1.3548, "learning_rate": 0.000201, "epoch": 0.2, "step": 70}
+ {"loss": 1.0789, "learning_rate": 0.00023099999999999998, "epoch": 0.22, "step": 80}
+ {"loss": 1.0589, "learning_rate": 0.000261, "epoch": 0.25, "step": 90}
+ {"loss": 1.0259, "learning_rate": 0.00029099999999999997, "epoch": 0.28, "step": 100}
+ {"loss": 0.9936, "learning_rate": 0.000299393063583815, "epoch": 0.31, "step": 110}
+ {"loss": 0.9633, "learning_rate": 0.0002985260115606936, "epoch": 0.34, "step": 120}
+ {"loss": 1.0018, "learning_rate": 0.00029765895953757224, "epoch": 0.37, "step": 130}
+ {"loss": 0.893, "learning_rate": 0.0002967919075144509, "epoch": 0.39, "step": 140}
+ {"loss": 0.8079, "learning_rate": 0.00029592485549132945, "epoch": 0.42, "step": 150}
+ {"loss": 0.8668, "learning_rate": 0.0002950578034682081, "epoch": 0.45, "step": 160}
+ {"loss": 0.773, "learning_rate": 0.00029419075144508666, "epoch": 0.48, "step": 170}
+ {"loss": 0.8139, "learning_rate": 0.0002933236994219653, "epoch": 0.51, "step": 180}
+ {"loss": 0.9139, "learning_rate": 0.0002924566473988439, "epoch": 0.53, "step": 190}
+ {"loss": 0.7371, "learning_rate": 0.0002915895953757225, "epoch": 0.56, "step": 200}
+ {"loss": 0.8604, "learning_rate": 0.0002907225433526011, "epoch": 0.59, "step": 210}
+ {"loss": 0.8597, "learning_rate": 0.0002898554913294797, "epoch": 0.62, "step": 220}
+ {"loss": 0.7954, "learning_rate": 0.0002889884393063584, "epoch": 0.65, "step": 230}
+ {"loss": 0.7559, "learning_rate": 0.00028812138728323696, "epoch": 0.67, "step": 240}
+ {"loss": 0.8488, "learning_rate": 0.0002872543352601156, "epoch": 0.7, "step": 250}
+ {"loss": 0.7252, "learning_rate": 0.00028638728323699417, "epoch": 0.73, "step": 260}
+ {"loss": 0.6751, "learning_rate": 0.0002855202312138728, "epoch": 0.76, "step": 270}
+ {"loss": 0.7852, "learning_rate": 0.00028465317919075143, "epoch": 0.79, "step": 280}
+ {"loss": 0.7843, "learning_rate": 0.00028378612716763, "epoch": 0.81, "step": 290}
+ {"loss": 0.7886, "learning_rate": 0.00028291907514450864, "epoch": 0.84, "step": 300}
+ {"loss": 0.7218, "learning_rate": 0.00028205202312138727, "epoch": 0.87, "step": 310}
+ {"loss": 0.7919, "learning_rate": 0.0002811849710982659, "epoch": 0.9, "step": 320}
+ {"loss": 0.7248, "learning_rate": 0.0002803179190751445, "epoch": 0.93, "step": 330}
+ {"loss": 0.7699, "learning_rate": 0.0002794508670520231, "epoch": 0.96, "step": 340}
+ {"loss": 0.6909, "learning_rate": 0.00027858381502890174, "epoch": 0.98, "step": 350}
+ {"loss": 0.6992, "learning_rate": 0.0002777167630057803, "epoch": 1.01, "step": 360}
+ {"loss": 0.6747, "learning_rate": 0.00027684971098265894, "epoch": 1.04, "step": 370}
+ {"loss": 0.672, "learning_rate": 0.0002759826589595375, "epoch": 1.07, "step": 380}
+ {"loss": 0.5997, "learning_rate": 0.0002751156069364162, "epoch": 1.1, "step": 390}
+ {"loss": 0.747, "learning_rate": 0.0002742485549132948, "epoch": 1.12, "step": 400}
+ {"loss": 0.7217, "learning_rate": 0.0002733815028901734, "epoch": 1.15, "step": 410}
+ {"loss": 0.683, "learning_rate": 0.000272514450867052, "epoch": 1.18, "step": 420}
+ {"loss": 0.63, "learning_rate": 0.0002716473988439306, "epoch": 1.21, "step": 430}
+ {"loss": 0.6889, "learning_rate": 0.00027078034682080925, "epoch": 1.24, "step": 440}
+ {"loss": 0.6582, "learning_rate": 0.0002699132947976878, "epoch": 1.26, "step": 450}
+ {"loss": 0.6366, "learning_rate": 0.00026904624277456645, "epoch": 1.29, "step": 460}
+ {"loss": 0.7249, "learning_rate": 0.00026817919075144503, "epoch": 1.32, "step": 470}
+ {"loss": 0.53, "learning_rate": 0.0002673121387283237, "epoch": 1.35, "step": 480}
+ {"loss": 0.6172, "learning_rate": 0.0002664450867052023, "epoch": 1.38, "step": 490}
+ {"loss": 0.6033, "learning_rate": 0.0002655780346820809, "epoch": 1.4, "step": 500}
+ {"eval_loss": 0.5882205367088318, "eval_runtime": 14.2208, "eval_samples_per_second": 3.516, "eval_steps_per_second": 0.492, "epoch": 1.4, "step": 500}
+ {"loss": 0.7452, "learning_rate": 0.0002647109826589595, "epoch": 1.43, "step": 510}
+ {"loss": 0.6689, "learning_rate": 0.00026384393063583813, "epoch": 1.46, "step": 520}
+ {"loss": 0.6339, "learning_rate": 0.00026297687861271676, "epoch": 1.49, "step": 530}
+ {"loss": 0.6538, "learning_rate": 0.00026210982658959533, "epoch": 1.52, "step": 540}
+ {"loss": 0.6864, "learning_rate": 0.00026124277456647397, "epoch": 1.54, "step": 550}
+ {"loss": 0.6466, "learning_rate": 0.0002603757225433526, "epoch": 1.57, "step": 560}
+ {"loss": 0.6186, "learning_rate": 0.00025950867052023117, "epoch": 1.6, "step": 570}
+ {"loss": 0.6872, "learning_rate": 0.0002586416184971098, "epoch": 1.63, "step": 580}
+ {"loss": 0.7206, "learning_rate": 0.00025777456647398843, "epoch": 1.66, "step": 590}
+ {"loss": 0.5848, "learning_rate": 0.000256907514450867, "epoch": 1.69, "step": 600}
+ {"loss": 0.711, "learning_rate": 0.00025604046242774564, "epoch": 1.71, "step": 610}
+ {"loss": 0.628, "learning_rate": 0.00025517341040462427, "epoch": 1.74, "step": 620}
+ {"loss": 0.7035, "learning_rate": 0.00025430635838150285, "epoch": 1.77, "step": 630}
+ {"loss": 0.5965, "learning_rate": 0.0002534393063583815, "epoch": 1.8, "step": 640}
+ {"loss": 0.6486, "learning_rate": 0.0002525722543352601, "epoch": 1.83, "step": 650}
+ {"loss": 0.6264, "learning_rate": 0.0002517052023121387, "epoch": 1.85, "step": 660}
+ {"loss": 0.6469, "learning_rate": 0.0002508381502890173, "epoch": 1.88, "step": 670}
+ {"loss": 0.61, "learning_rate": 0.00024997109826589595, "epoch": 1.91, "step": 680}
+ {"loss": 0.6664, "learning_rate": 0.0002491040462427746, "epoch": 1.94, "step": 690}
+ {"loss": 0.5312, "learning_rate": 0.00024823699421965315, "epoch": 1.97, "step": 700}
+ {"loss": 0.4703, "learning_rate": 0.0002473699421965318, "epoch": 1.99, "step": 710}
+ {"loss": 0.535, "learning_rate": 0.00024650289017341036, "epoch": 2.02, "step": 720}
+ {"loss": 0.6568, "learning_rate": 0.000245635838150289, "epoch": 2.05, "step": 730}
+ {"loss": 0.6303, "learning_rate": 0.0002447687861271676, "epoch": 2.08, "step": 740}
+ {"loss": 0.4911, "learning_rate": 0.00024390173410404622, "epoch": 2.11, "step": 750}
+ {"loss": 0.5043, "learning_rate": 0.00024303468208092483, "epoch": 2.13, "step": 760}
+ {"loss": 0.5248, "learning_rate": 0.00024216763005780346, "epoch": 2.16, "step": 770}
+ {"loss": 0.6274, "learning_rate": 0.00024130057803468206, "epoch": 2.19, "step": 780}
+ {"loss": 0.5617, "learning_rate": 0.00024043352601156066, "epoch": 2.22, "step": 790}
+ {"loss": 0.5978, "learning_rate": 0.00023956647398843927, "epoch": 2.25, "step": 800}
+ {"loss": 0.6027, "learning_rate": 0.00023869942196531787, "epoch": 2.28, "step": 810}
+ {"loss": 0.5925, "learning_rate": 0.00023783236994219653, "epoch": 2.3, "step": 820}
+ {"loss": 0.593, "learning_rate": 0.00023696531791907513, "epoch": 2.33, "step": 830}
+ {"loss": 0.6181, "learning_rate": 0.00023609826589595373, "epoch": 2.36, "step": 840}
+ {"loss": 0.4752, "learning_rate": 0.00023523121387283234, "epoch": 2.39, "step": 850}
+ {"loss": 0.655, "learning_rate": 0.00023436416184971097, "epoch": 2.42, "step": 860}
+ {"loss": 0.5577, "learning_rate": 0.00023349710982658957, "epoch": 2.44, "step": 870}
+ {"loss": 0.6132, "learning_rate": 0.00023263005780346818, "epoch": 2.47, "step": 880}
+ {"loss": 0.5003, "learning_rate": 0.00023176300578034678, "epoch": 2.5, "step": 890}
+ {"loss": 0.5323, "learning_rate": 0.00023089595375722544, "epoch": 2.53, "step": 900}
+ {"loss": 0.5908, "learning_rate": 0.00023002890173410404, "epoch": 2.56, "step": 910}
+ {"loss": 0.5911, "learning_rate": 0.00022916184971098264, "epoch": 2.58, "step": 920}
+ {"loss": 0.565, "learning_rate": 0.00022829479768786125, "epoch": 2.61, "step": 930}
+ {"loss": 0.5789, "learning_rate": 0.00022742774566473988, "epoch": 2.64, "step": 940}
+ {"loss": 0.5242, "learning_rate": 0.00022656069364161848, "epoch": 2.67, "step": 950}
+ {"loss": 0.5082, "learning_rate": 0.00022569364161849708, "epoch": 2.7, "step": 960}
+ {"loss": 0.5184, "learning_rate": 0.0002248265895953757, "epoch": 2.72, "step": 970}
+ {"loss": 0.6131, "learning_rate": 0.0002239595375722543, "epoch": 2.75, "step": 980}
+ {"loss": 0.5129, "learning_rate": 0.00022309248554913295, "epoch": 2.78, "step": 990}
+ {"loss": 0.6016, "learning_rate": 0.00022222543352601155, "epoch": 2.81, "step": 1000}
+ {"eval_loss": 0.50592440366745, "eval_runtime": 14.2225, "eval_samples_per_second": 3.516, "eval_steps_per_second": 0.492, "epoch": 2.81, "step": 1000}
+ {"loss": 0.5728, "learning_rate": 0.00022135838150289016, "epoch": 2.84, "step": 1010}
+ {"loss": 0.5155, "learning_rate": 0.00022049132947976876, "epoch": 2.87, "step": 1020}
+ {"loss": 0.5522, "learning_rate": 0.0002196242774566474, "epoch": 2.89, "step": 1030}
+ {"loss": 0.5176, "learning_rate": 0.000218757225433526, "epoch": 2.92, "step": 1040}
+ {"train_runtime": 3194.7197, "train_samples_per_second": 4.454, "train_steps_per_second": 1.114, "total_flos": 2.2017658953498624e+17, "train_loss": 0.7449809693580173, "epoch": 2.92, "step": 1041}