{
"best_global_step": 399,
"best_metric": 0.13073493540287018,
"best_model_checkpoint": "saves_multiple/p-tuning/llama-3-8b-instruct/train_cb_123_1760637639/checkpoint-399",
"epoch": 20.0,
"eval_steps": 57,
"global_step": 1140,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.08771929824561403,
"grad_norm": 128.95028686523438,
"learning_rate": 3.508771929824561e-05,
"loss": 8.5412,
"num_input_tokens_seen": 3552,
"step": 5
},
{
"epoch": 0.17543859649122806,
"grad_norm": 61.61833190917969,
"learning_rate": 7.894736842105263e-05,
"loss": 2.8916,
"num_input_tokens_seen": 7264,
"step": 10
},
{
"epoch": 0.2631578947368421,
"grad_norm": 59.73617935180664,
"learning_rate": 0.00012280701754385965,
"loss": 0.6364,
"num_input_tokens_seen": 10528,
"step": 15
},
{
"epoch": 0.3508771929824561,
"grad_norm": 37.573612213134766,
"learning_rate": 0.00016666666666666666,
"loss": 0.8324,
"num_input_tokens_seen": 14784,
"step": 20
},
{
"epoch": 0.43859649122807015,
"grad_norm": 35.550758361816406,
"learning_rate": 0.00021052631578947367,
"loss": 0.4937,
"num_input_tokens_seen": 18112,
"step": 25
},
{
"epoch": 0.5263157894736842,
"grad_norm": 28.5069580078125,
"learning_rate": 0.0002543859649122807,
"loss": 1.8405,
"num_input_tokens_seen": 20736,
"step": 30
},
{
"epoch": 0.6140350877192983,
"grad_norm": 37.91823196411133,
"learning_rate": 0.0002982456140350877,
"loss": 0.5002,
"num_input_tokens_seen": 24896,
"step": 35
},
{
"epoch": 0.7017543859649122,
"grad_norm": 4.9007887840271,
"learning_rate": 0.00034210526315789477,
"loss": 0.2443,
"num_input_tokens_seen": 28160,
"step": 40
},
{
"epoch": 0.7894736842105263,
"grad_norm": 2.743189811706543,
"learning_rate": 0.00038596491228070175,
"loss": 0.2807,
"num_input_tokens_seen": 31040,
"step": 45
},
{
"epoch": 0.8771929824561403,
"grad_norm": 3.108569860458374,
"learning_rate": 0.0004298245614035088,
"loss": 0.3373,
"num_input_tokens_seen": 33760,
"step": 50
},
{
"epoch": 0.9649122807017544,
"grad_norm": 1.4290847778320312,
"learning_rate": 0.00047368421052631577,
"loss": 0.2819,
"num_input_tokens_seen": 36416,
"step": 55
},
{
"epoch": 1.0,
"eval_loss": 0.758574903011322,
"eval_runtime": 0.7377,
"eval_samples_per_second": 33.889,
"eval_steps_per_second": 9.489,
"num_input_tokens_seen": 37160,
"step": 57
},
{
"epoch": 1.0526315789473684,
"grad_norm": 7.512516975402832,
"learning_rate": 0.0005175438596491229,
"loss": 0.221,
"num_input_tokens_seen": 39176,
"step": 60
},
{
"epoch": 1.1403508771929824,
"grad_norm": 16.071565628051758,
"learning_rate": 0.0005614035087719298,
"loss": 0.358,
"num_input_tokens_seen": 42632,
"step": 65
},
{
"epoch": 1.2280701754385965,
"grad_norm": 2.7750377655029297,
"learning_rate": 0.0006052631578947369,
"loss": 0.3661,
"num_input_tokens_seen": 45704,
"step": 70
},
{
"epoch": 1.3157894736842106,
"grad_norm": 93.76712036132812,
"learning_rate": 0.0006491228070175439,
"loss": 0.3337,
"num_input_tokens_seen": 49448,
"step": 75
},
{
"epoch": 1.4035087719298245,
"grad_norm": 8.643919944763184,
"learning_rate": 0.0006929824561403509,
"loss": 0.7437,
"num_input_tokens_seen": 52456,
"step": 80
},
{
"epoch": 1.4912280701754386,
"grad_norm": 1.2434449195861816,
"learning_rate": 0.0007368421052631579,
"loss": 0.2926,
"num_input_tokens_seen": 56488,
"step": 85
},
{
"epoch": 1.5789473684210527,
"grad_norm": 2.895885944366455,
"learning_rate": 0.0007807017543859649,
"loss": 0.4001,
"num_input_tokens_seen": 59208,
"step": 90
},
{
"epoch": 1.6666666666666665,
"grad_norm": 1.0412471294403076,
"learning_rate": 0.000824561403508772,
"loss": 0.1042,
"num_input_tokens_seen": 62696,
"step": 95
},
{
"epoch": 1.7543859649122808,
"grad_norm": 2.8564751148223877,
"learning_rate": 0.000868421052631579,
"loss": 0.1886,
"num_input_tokens_seen": 66024,
"step": 100
},
{
"epoch": 1.8421052631578947,
"grad_norm": 1.2726372480392456,
"learning_rate": 0.000912280701754386,
"loss": 0.3836,
"num_input_tokens_seen": 69000,
"step": 105
},
{
"epoch": 1.9298245614035088,
"grad_norm": 0.24507200717926025,
"learning_rate": 0.0009561403508771929,
"loss": 0.1652,
"num_input_tokens_seen": 72072,
"step": 110
},
{
"epoch": 2.0,
"eval_loss": 0.21064288914203644,
"eval_runtime": 0.744,
"eval_samples_per_second": 33.604,
"eval_steps_per_second": 9.409,
"num_input_tokens_seen": 73720,
"step": 114
},
{
"epoch": 2.017543859649123,
"grad_norm": 0.7454385161399841,
"learning_rate": 0.001,
"loss": 0.0736,
"num_input_tokens_seen": 74264,
"step": 115
},
{
"epoch": 2.1052631578947367,
"grad_norm": 0.21397805213928223,
"learning_rate": 0.000999941402841295,
"loss": 0.0278,
"num_input_tokens_seen": 77528,
"step": 120
},
{
"epoch": 2.192982456140351,
"grad_norm": 0.030098894611001015,
"learning_rate": 0.0009997656250996883,
"loss": 0.0966,
"num_input_tokens_seen": 80600,
"step": 125
},
{
"epoch": 2.280701754385965,
"grad_norm": 1.9476398229599,
"learning_rate": 0.0009994727079754844,
"loss": 0.2501,
"num_input_tokens_seen": 83800,
"step": 130
},
{
"epoch": 2.3684210526315788,
"grad_norm": 0.6966570019721985,
"learning_rate": 0.0009990627201251284,
"loss": 0.3148,
"num_input_tokens_seen": 86552,
"step": 135
},
{
"epoch": 2.456140350877193,
"grad_norm": 0.9255178570747375,
"learning_rate": 0.0009985357576451127,
"loss": 0.0624,
"num_input_tokens_seen": 90008,
"step": 140
},
{
"epoch": 2.543859649122807,
"grad_norm": 0.1348254233598709,
"learning_rate": 0.0009978919440494537,
"loss": 0.4121,
"num_input_tokens_seen": 93496,
"step": 145
},
{
"epoch": 2.6315789473684212,
"grad_norm": 0.49739935994148254,
"learning_rate": 0.0009971314302407413,
"loss": 0.0957,
"num_input_tokens_seen": 96888,
"step": 150
},
{
"epoch": 2.719298245614035,
"grad_norm": 0.5351743102073669,
"learning_rate": 0.0009962543944747686,
"loss": 0.0688,
"num_input_tokens_seen": 100728,
"step": 155
},
{
"epoch": 2.807017543859649,
"grad_norm": 0.04349172115325928,
"learning_rate": 0.0009952610423187517,
"loss": 0.1149,
"num_input_tokens_seen": 103960,
"step": 160
},
{
"epoch": 2.8947368421052633,
"grad_norm": 0.695615828037262,
"learning_rate": 0.0009941516066031462,
"loss": 0.159,
"num_input_tokens_seen": 106648,
"step": 165
},
{
"epoch": 2.982456140350877,
"grad_norm": 0.04241182282567024,
"learning_rate": 0.0009929263473670749,
"loss": 0.1091,
"num_input_tokens_seen": 110136,
"step": 170
},
{
"epoch": 3.0,
"eval_loss": 0.16814041137695312,
"eval_runtime": 0.7418,
"eval_samples_per_second": 33.701,
"eval_steps_per_second": 9.436,
"num_input_tokens_seen": 110296,
"step": 171
},
{
"epoch": 3.0701754385964914,
"grad_norm": 0.3171384930610657,
"learning_rate": 0.0009915855517973774,
"loss": 0.0703,
"num_input_tokens_seen": 112984,
"step": 175
},
{
"epoch": 3.1578947368421053,
"grad_norm": 0.40988221764564514,
"learning_rate": 0.0009901295341612972,
"loss": 0.0453,
"num_input_tokens_seen": 116120,
"step": 180
},
{
"epoch": 3.245614035087719,
"grad_norm": 0.5177841782569885,
"learning_rate": 0.00098855863573282,
"loss": 0.1497,
"num_input_tokens_seen": 119576,
"step": 185
},
{
"epoch": 3.3333333333333335,
"grad_norm": 0.495584100484848,
"learning_rate": 0.0009868732247126839,
"loss": 0.0167,
"num_input_tokens_seen": 122968,
"step": 190
},
{
"epoch": 3.4210526315789473,
"grad_norm": 0.038633063435554504,
"learning_rate": 0.000985073696142077,
"loss": 0.1827,
"num_input_tokens_seen": 126104,
"step": 195
},
{
"epoch": 3.5087719298245617,
"grad_norm": 0.3946753442287445,
"learning_rate": 0.0009831604718100442,
"loss": 0.2164,
"num_input_tokens_seen": 129368,
"step": 200
},
{
"epoch": 3.5964912280701755,
"grad_norm": 0.38737550377845764,
"learning_rate": 0.0009811340001546253,
"loss": 0.0977,
"num_input_tokens_seen": 132888,
"step": 205
},
{
"epoch": 3.6842105263157894,
"grad_norm": 0.36464640498161316,
"learning_rate": 0.0009789947561577445,
"loss": 0.06,
"num_input_tokens_seen": 135832,
"step": 210
},
{
"epoch": 3.7719298245614032,
"grad_norm": 0.11651939898729324,
"learning_rate": 0.000976743241233882,
"loss": 0.1588,
"num_input_tokens_seen": 138936,
"step": 215
},
{
"epoch": 3.8596491228070176,
"grad_norm": 0.17754873633384705,
"learning_rate": 0.0009743799831125471,
"loss": 0.0407,
"num_input_tokens_seen": 142488,
"step": 220
},
{
"epoch": 3.9473684210526314,
"grad_norm": 0.3502120077610016,
"learning_rate": 0.0009719055357145847,
"loss": 0.0807,
"num_input_tokens_seen": 146808,
"step": 225
},
{
"epoch": 4.0,
"eval_loss": 0.18574421107769012,
"eval_runtime": 0.7462,
"eval_samples_per_second": 33.501,
"eval_steps_per_second": 9.38,
"num_input_tokens_seen": 147784,
"step": 228
},
{
"epoch": 4.035087719298246,
"grad_norm": 0.21568936109542847,
"learning_rate": 0.0009693204790223423,
"loss": 0.0834,
"num_input_tokens_seen": 149448,
"step": 230
},
{
"epoch": 4.12280701754386,
"grad_norm": 0.4902511239051819,
"learning_rate": 0.0009666254189437286,
"loss": 0.0547,
"num_input_tokens_seen": 152488,
"step": 235
},
{
"epoch": 4.2105263157894735,
"grad_norm": 1.5583466291427612,
"learning_rate": 0.0009638209871701966,
"loss": 0.1172,
"num_input_tokens_seen": 155208,
"step": 240
},
{
"epoch": 4.298245614035087,
"grad_norm": 0.3295286297798157,
"learning_rate": 0.0009609078410286809,
"loss": 0.0856,
"num_input_tokens_seen": 158088,
"step": 245
},
{
"epoch": 4.385964912280702,
"grad_norm": 0.984485924243927,
"learning_rate": 0.0009578866633275287,
"loss": 0.1089,
"num_input_tokens_seen": 161032,
"step": 250
},
{
"epoch": 4.473684210526316,
"grad_norm": 0.12086866050958633,
"learning_rate": 0.0009547581621964571,
"loss": 0.0217,
"num_input_tokens_seen": 164744,
"step": 255
},
{
"epoch": 4.56140350877193,
"grad_norm": 0.07994663715362549,
"learning_rate": 0.0009515230709205749,
"loss": 0.0154,
"num_input_tokens_seen": 167592,
"step": 260
},
{
"epoch": 4.649122807017544,
"grad_norm": 0.022505775094032288,
"learning_rate": 0.0009481821477685101,
"loss": 0.0668,
"num_input_tokens_seen": 170696,
"step": 265
},
{
"epoch": 4.7368421052631575,
"grad_norm": 0.6926897764205933,
"learning_rate": 0.0009447361758146791,
"loss": 0.0471,
"num_input_tokens_seen": 174024,
"step": 270
},
{
"epoch": 4.824561403508772,
"grad_norm": 0.02440646104514599,
"learning_rate": 0.0009411859627557439,
"loss": 0.1644,
"num_input_tokens_seen": 178152,
"step": 275
},
{
"epoch": 4.912280701754386,
"grad_norm": 0.37806436419487,
"learning_rate": 0.0009375323407212969,
"loss": 0.0431,
"num_input_tokens_seen": 181768,
"step": 280
},
{
"epoch": 5.0,
"grad_norm": 0.04274319112300873,
"learning_rate": 0.0009337761660788185,
"loss": 0.0258,
"num_input_tokens_seen": 184368,
"step": 285
},
{
"epoch": 5.0,
"eval_loss": 0.16147072613239288,
"eval_runtime": 0.7468,
"eval_samples_per_second": 33.476,
"eval_steps_per_second": 9.373,
"num_input_tokens_seen": 184368,
"step": 285
},
{
"epoch": 5.087719298245614,
"grad_norm": 0.32277238368988037,
"learning_rate": 0.0009299183192329556,
"loss": 0.0808,
"num_input_tokens_seen": 187312,
"step": 290
},
{
"epoch": 5.175438596491228,
"grad_norm": 0.4225824177265167,
"learning_rate": 0.0009259597044191636,
"loss": 0.0546,
"num_input_tokens_seen": 190736,
"step": 295
},
{
"epoch": 5.2631578947368425,
"grad_norm": 0.3628942668437958,
"learning_rate": 0.0009219012494917644,
"loss": 0.0442,
"num_input_tokens_seen": 194480,
"step": 300
},
{
"epoch": 5.350877192982456,
"grad_norm": 1.066184163093567,
"learning_rate": 0.0009177439057064682,
"loss": 0.0565,
"num_input_tokens_seen": 198576,
"step": 305
},
{
"epoch": 5.43859649122807,
"grad_norm": 0.0820930004119873,
"learning_rate": 0.0009134886474974092,
"loss": 0.0208,
"num_input_tokens_seen": 201808,
"step": 310
},
{
"epoch": 5.526315789473684,
"grad_norm": 0.011820897459983826,
"learning_rate": 0.0009091364722487496,
"loss": 0.0125,
"num_input_tokens_seen": 204912,
"step": 315
},
{
"epoch": 5.614035087719298,
"grad_norm": 0.4663783609867096,
"learning_rate": 0.0009046884000609047,
"loss": 0.1538,
"num_input_tokens_seen": 207760,
"step": 320
},
{
"epoch": 5.701754385964913,
"grad_norm": 0.2975216209888458,
"learning_rate": 0.0009001454735114421,
"loss": 0.0776,
"num_input_tokens_seen": 210544,
"step": 325
},
{
"epoch": 5.7894736842105265,
"grad_norm": 0.3130631148815155,
"learning_rate": 0.0008955087574107137,
"loss": 0.0804,
"num_input_tokens_seen": 214480,
"step": 330
},
{
"epoch": 5.87719298245614,
"grad_norm": 0.6384274363517761,
"learning_rate": 0.0008907793385522767,
"loss": 0.0508,
"num_input_tokens_seen": 218064,
"step": 335
},
{
"epoch": 5.964912280701754,
"grad_norm": 0.5133721232414246,
"learning_rate": 0.0008859583254581605,
"loss": 0.0447,
"num_input_tokens_seen": 220848,
"step": 340
},
{
"epoch": 6.0,
"eval_loss": 0.2889024615287781,
"eval_runtime": 0.8611,
"eval_samples_per_second": 29.032,
"eval_steps_per_second": 8.129,
"num_input_tokens_seen": 221536,
"step": 342
},
{
"epoch": 6.052631578947368,
"grad_norm": 0.2570275664329529,
"learning_rate": 0.0008810468481190428,
"loss": 0.0271,
"num_input_tokens_seen": 223616,
"step": 345
},
{
"epoch": 6.140350877192983,
"grad_norm": 0.581085741519928,
"learning_rate": 0.000876046057729392,
"loss": 0.1896,
"num_input_tokens_seen": 227008,
"step": 350
},
{
"epoch": 6.228070175438597,
"grad_norm": 0.4784376621246338,
"learning_rate": 0.0008709571264176408,
"loss": 0.0695,
"num_input_tokens_seen": 230752,
"step": 355
},
{
"epoch": 6.315789473684211,
"grad_norm": 0.12153349071741104,
"learning_rate": 0.0008657812469714519,
"loss": 0.113,
"num_input_tokens_seen": 234208,
"step": 360
},
{
"epoch": 6.4035087719298245,
"grad_norm": 0.22292231023311615,
"learning_rate": 0.0008605196325581425,
"loss": 0.0643,
"num_input_tokens_seen": 237664,
"step": 365
},
{
"epoch": 6.491228070175438,
"grad_norm": 0.36083993315696716,
"learning_rate": 0.000855173516440332,
"loss": 0.0325,
"num_input_tokens_seen": 240704,
"step": 370
},
{
"epoch": 6.578947368421053,
"grad_norm": 0.6128056049346924,
"learning_rate": 0.000849744151686879,
"loss": 0.1093,
"num_input_tokens_seen": 244256,
"step": 375
},
{
"epoch": 6.666666666666667,
"grad_norm": 0.007612931076437235,
"learning_rate": 0.000844232810879176,
"loss": 0.0406,
"num_input_tokens_seen": 246816,
"step": 380
},
{
"epoch": 6.754385964912281,
"grad_norm": 0.006274615880101919,
"learning_rate": 0.0008386407858128706,
"loss": 0.0096,
"num_input_tokens_seen": 249728,
"step": 385
},
{
"epoch": 6.842105263157895,
"grad_norm": 0.02568778023123741,
"learning_rate": 0.0008329693871950843,
"loss": 0.0167,
"num_input_tokens_seen": 252480,
"step": 390
},
{
"epoch": 6.9298245614035086,
"grad_norm": 1.2652301788330078,
"learning_rate": 0.0008272199443371966,
"loss": 0.0144,
"num_input_tokens_seen": 256480,
"step": 395
},
{
"epoch": 7.0,
"eval_loss": 0.13073493540287018,
"eval_runtime": 0.7527,
"eval_samples_per_second": 33.214,
"eval_steps_per_second": 9.3,
"num_input_tokens_seen": 258720,
"step": 399
},
{
"epoch": 7.017543859649122,
"grad_norm": 0.002408112632110715,
"learning_rate": 0.0008213938048432696,
"loss": 0.0027,
"num_input_tokens_seen": 259424,
"step": 400
},
{
"epoch": 7.105263157894737,
"grad_norm": 0.003994388971477747,
"learning_rate": 0.0008154923342941862,
"loss": 0.0015,
"num_input_tokens_seen": 262656,
"step": 405
},
{
"epoch": 7.192982456140351,
"grad_norm": 0.017323745414614677,
"learning_rate": 0.0008095169159275712,
"loss": 0.033,
"num_input_tokens_seen": 265408,
"step": 410
},
{
"epoch": 7.280701754385965,
"grad_norm": 1.1956137418746948,
"learning_rate": 0.0008034689503135784,
"loss": 0.0709,
"num_input_tokens_seen": 268992,
"step": 415
},
{
"epoch": 7.368421052631579,
"grad_norm": 0.2515873312950134,
"learning_rate": 0.0007973498550266114,
"loss": 0.0945,
"num_input_tokens_seen": 272288,
"step": 420
},
{
"epoch": 7.456140350877193,
"grad_norm": 0.11299803107976913,
"learning_rate": 0.0007911610643130608,
"loss": 0.0127,
"num_input_tokens_seen": 275104,
"step": 425
},
{
"epoch": 7.543859649122807,
"grad_norm": 0.057308122515678406,
"learning_rate": 0.0007849040287551332,
"loss": 0.072,
"num_input_tokens_seen": 278272,
"step": 430
},
{
"epoch": 7.631578947368421,
"grad_norm": 0.02781853824853897,
"learning_rate": 0.000778580214930851,
"loss": 0.0749,
"num_input_tokens_seen": 282048,
"step": 435
},
{
"epoch": 7.719298245614035,
"grad_norm": 1.6715880632400513,
"learning_rate": 0.0007721911050703032,
"loss": 0.032,
"num_input_tokens_seen": 285536,
"step": 440
},
{
"epoch": 7.807017543859649,
"grad_norm": 0.3265489637851715,
"learning_rate": 0.000765738196708228,
"loss": 0.0259,
"num_input_tokens_seen": 288416,
"step": 445
},
{
"epoch": 7.894736842105263,
"grad_norm": 0.21435128152370453,
"learning_rate": 0.0007592230023330069,
"loss": 0.0222,
"num_input_tokens_seen": 291968,
"step": 450
},
{
"epoch": 7.982456140350877,
"grad_norm": 0.022913899272680283,
"learning_rate": 0.000752647049032155,
"loss": 0.0113,
"num_input_tokens_seen": 295296,
"step": 455
},
{
"epoch": 8.0,
"eval_loss": 0.15292111039161682,
"eval_runtime": 0.7464,
"eval_samples_per_second": 33.494,
"eval_steps_per_second": 9.378,
"num_input_tokens_seen": 295408,
"step": 456
},
{
"epoch": 8.070175438596491,
"grad_norm": 0.02062864601612091,
"learning_rate": 0.0007460118781343892,
"loss": 0.0045,
"num_input_tokens_seen": 298480,
"step": 460
},
{
"epoch": 8.157894736842104,
"grad_norm": 0.006704544648528099,
"learning_rate": 0.000739319044848358,
"loss": 0.078,
"num_input_tokens_seen": 301904,
"step": 465
},
{
"epoch": 8.24561403508772,
"grad_norm": 0.0441327840089798,
"learning_rate": 0.0007325701178981183,
"loss": 0.0203,
"num_input_tokens_seen": 305488,
"step": 470
},
{
"epoch": 8.333333333333334,
"grad_norm": 0.1119450256228447,
"learning_rate": 0.0007257666791554447,
"loss": 0.0179,
"num_input_tokens_seen": 308336,
"step": 475
},
{
"epoch": 8.421052631578947,
"grad_norm": 0.37555059790611267,
"learning_rate": 0.0007189103232690561,
"loss": 0.0517,
"num_input_tokens_seen": 311600,
"step": 480
},
{
"epoch": 8.508771929824562,
"grad_norm": 0.09913496673107147,
"learning_rate": 0.0007120026572908484,
"loss": 0.0194,
"num_input_tokens_seen": 315216,
"step": 485
},
{
"epoch": 8.596491228070175,
"grad_norm": 0.0433107353746891,
"learning_rate": 0.0007050453002992201,
"loss": 0.0256,
"num_input_tokens_seen": 318640,
"step": 490
},
{
"epoch": 8.68421052631579,
"grad_norm": 0.016323000192642212,
"learning_rate": 0.0006980398830195785,
"loss": 0.0051,
"num_input_tokens_seen": 321680,
"step": 495
},
{
"epoch": 8.771929824561404,
"grad_norm": 0.006579800043255091,
"learning_rate": 0.000690988047442116,
"loss": 0.0025,
"num_input_tokens_seen": 324464,
"step": 500
},
{
"epoch": 8.859649122807017,
"grad_norm": 0.00924243126064539,
"learning_rate": 0.0006838914464369467,
"loss": 0.0031,
"num_input_tokens_seen": 327888,
"step": 505
},
{
"epoch": 8.947368421052632,
"grad_norm": 0.007734250742942095,
"learning_rate": 0.0006767517433666918,
"loss": 0.0066,
"num_input_tokens_seen": 331312,
"step": 510
},
{
"epoch": 9.0,
"eval_loss": 0.15188561379909515,
"eval_runtime": 0.7442,
"eval_samples_per_second": 33.592,
"eval_steps_per_second": 9.406,
"num_input_tokens_seen": 332648,
"step": 513
},
{
"epoch": 9.035087719298245,
"grad_norm": 0.005904178135097027,
"learning_rate": 0.0006695706116966074,
"loss": 0.0913,
"num_input_tokens_seen": 334120,
"step": 515
},
{
"epoch": 9.12280701754386,
"grad_norm": 0.0130391800776124,
"learning_rate": 0.0006623497346023419,
"loss": 0.0081,
"num_input_tokens_seen": 338280,
"step": 520
},
{
"epoch": 9.210526315789474,
"grad_norm": 0.030641527846455574,
"learning_rate": 0.0006550908045754194,
"loss": 0.0075,
"num_input_tokens_seen": 341064,
"step": 525
},
{
"epoch": 9.298245614035087,
"grad_norm": 0.018244441598653793,
"learning_rate": 0.0006477955230265393,
"loss": 0.0032,
"num_input_tokens_seen": 344552,
"step": 530
},
{
"epoch": 9.385964912280702,
"grad_norm": 0.006423134822398424,
"learning_rate": 0.0006404655998867848,
"loss": 0.0044,
"num_input_tokens_seen": 347592,
"step": 535
},
{
"epoch": 9.473684210526315,
"grad_norm": 0.0025237384252250195,
"learning_rate": 0.0006331027532068335,
"loss": 0.0032,
"num_input_tokens_seen": 350440,
"step": 540
},
{
"epoch": 9.56140350877193,
"grad_norm": 0.07135269790887833,
"learning_rate": 0.0006257087087542672,
"loss": 0.0025,
"num_input_tokens_seen": 353992,
"step": 545
},
{
"epoch": 9.649122807017545,
"grad_norm": 0.007862250320613384,
"learning_rate": 0.0006182851996090712,
"loss": 0.0442,
"num_input_tokens_seen": 357544,
"step": 550
},
{
"epoch": 9.736842105263158,
"grad_norm": 0.09623616188764572,
"learning_rate": 0.0006108339657574193,
"loss": 0.0439,
"num_input_tokens_seen": 360872,
"step": 555
},
{
"epoch": 9.824561403508772,
"grad_norm": 0.10430023074150085,
"learning_rate": 0.000603356753683842,
"loss": 0.0262,
"num_input_tokens_seen": 364200,
"step": 560
},
{
"epoch": 9.912280701754385,
"grad_norm": 0.0760573297739029,
"learning_rate": 0.0005958553159618693,
"loss": 0.0104,
"num_input_tokens_seen": 367656,
"step": 565
},
{
"epoch": 10.0,
"grad_norm": 0.046638406813144684,
"learning_rate": 0.0005883314108432481,
"loss": 0.0137,
"num_input_tokens_seen": 369976,
"step": 570
},
{
"epoch": 10.0,
"eval_loss": 0.17922309041023254,
"eval_runtime": 0.7446,
"eval_samples_per_second": 33.575,
"eval_steps_per_second": 9.401,
"num_input_tokens_seen": 369976,
"step": 570
},
{
"epoch": 10.087719298245615,
"grad_norm": 0.042038898915052414,
"learning_rate": 0.0005807868018458274,
"loss": 0.0118,
"num_input_tokens_seen": 373304,
"step": 575
},
{
"epoch": 10.175438596491228,
"grad_norm": 0.5674363374710083,
"learning_rate": 0.0005732232573402109,
"loss": 0.0469,
"num_input_tokens_seen": 376632,
"step": 580
},
{
"epoch": 10.263157894736842,
"grad_norm": 0.04679185897111893,
"learning_rate": 0.0005656425501352691,
"loss": 0.0324,
"num_input_tokens_seen": 379608,
"step": 585
},
{
"epoch": 10.350877192982455,
"grad_norm": 0.044311270117759705,
"learning_rate": 0.0005580464570626152,
"loss": 0.0076,
"num_input_tokens_seen": 382296,
"step": 590
},
{
"epoch": 10.43859649122807,
"grad_norm": 0.010670201852917671,
"learning_rate": 0.0005504367585601342,
"loss": 0.0058,
"num_input_tokens_seen": 385880,
"step": 595
},
{
"epoch": 10.526315789473685,
"grad_norm": 0.22319240868091583,
"learning_rate": 0.0005428152382546695,
"loss": 0.018,
"num_input_tokens_seen": 388824,
"step": 600
},
{
"epoch": 10.614035087719298,
"grad_norm": 0.19307509064674377,
"learning_rate": 0.0005351836825439609,
"loss": 0.0099,
"num_input_tokens_seen": 391640,
"step": 605
},
{
"epoch": 10.701754385964913,
"grad_norm": 0.20500296354293823,
"learning_rate": 0.0005275438801779327,
"loss": 0.0048,
"num_input_tokens_seen": 394968,
"step": 610
},
{
"epoch": 10.789473684210526,
"grad_norm": 0.028505077585577965,
"learning_rate": 0.0005198976218394321,
"loss": 0.0016,
"num_input_tokens_seen": 398520,
"step": 615
},
{
"epoch": 10.87719298245614,
"grad_norm": 0.04338080435991287,
"learning_rate": 0.0005122466997245124,
"loss": 0.0015,
"num_input_tokens_seen": 402328,
"step": 620
},
{
"epoch": 10.964912280701755,
"grad_norm": 0.021871905773878098,
"learning_rate": 0.0005045929071223632,
"loss": 0.0018,
"num_input_tokens_seen": 405880,
"step": 625
},
{
"epoch": 11.0,
"eval_loss": 0.2538582682609558,
"eval_runtime": 0.7471,
"eval_samples_per_second": 33.463,
"eval_steps_per_second": 9.37,
"num_input_tokens_seen": 406840,
"step": 627
},
{
"epoch": 11.052631578947368,
"grad_norm": 0.002679206430912018,
"learning_rate": 0.0004969380379949836,
"loss": 0.001,
"num_input_tokens_seen": 408632,
"step": 630
},
{
"epoch": 11.140350877192983,
"grad_norm": 0.6808245182037354,
"learning_rate": 0.0004892838865566986,
"loss": 0.0326,
"num_input_tokens_seen": 411544,
"step": 635
},
{
"epoch": 11.228070175438596,
"grad_norm": 0.010487693361938,
"learning_rate": 0.00048163224685361384,
"loss": 0.0014,
"num_input_tokens_seen": 414648,
"step": 640
},
{
"epoch": 11.31578947368421,
"grad_norm": 0.01465911790728569,
"learning_rate": 0.0004739849123431138,
"loss": 0.0017,
"num_input_tokens_seen": 418968,
"step": 645
},
{
"epoch": 11.403508771929825,
"grad_norm": 0.015837009996175766,
"learning_rate": 0.00046634367547349433,
"loss": 0.0011,
"num_input_tokens_seen": 422584,
"step": 650
},
{
"epoch": 11.491228070175438,
"grad_norm": 0.02893809974193573,
"learning_rate": 0.0004587103272638339,
"loss": 0.0035,
"num_input_tokens_seen": 426136,
"step": 655
},
{
"epoch": 11.578947368421053,
"grad_norm": 0.0007099352078512311,
"learning_rate": 0.0004510866568841981,
"loss": 0.0047,
"num_input_tokens_seen": 429752,
"step": 660
},
{
"epoch": 11.666666666666666,
"grad_norm": 0.10449596494436264,
"learning_rate": 0.0004434744512362797,
"loss": 0.0034,
"num_input_tokens_seen": 432920,
"step": 665
},
{
"epoch": 11.75438596491228,
"grad_norm": 0.03190397843718529,
"learning_rate": 0.00043587549453456836,
"loss": 0.0018,
"num_input_tokens_seen": 435608,
"step": 670
},
{
"epoch": 11.842105263157894,
"grad_norm": 0.0037696484941989183,
"learning_rate": 0.00042829156788815195,
"loss": 0.006,
"num_input_tokens_seen": 438744,
"step": 675
},
{
"epoch": 11.929824561403509,
"grad_norm": 0.005847765598446131,
"learning_rate": 0.0004207244488832429,
"loss": 0.0023,
"num_input_tokens_seen": 442200,
"step": 680
},
{
"epoch": 12.0,
"eval_loss": 0.2909103035926819,
"eval_runtime": 0.7451,
"eval_samples_per_second": 33.553,
"eval_steps_per_second": 9.395,
"num_input_tokens_seen": 444728,
"step": 684
},
{
"epoch": 12.017543859649123,
"grad_norm": 0.014109612442553043,
"learning_rate": 0.00041317591116653486,
"loss": 0.0007,
"num_input_tokens_seen": 445624,
"step": 685
},
{
"epoch": 12.105263157894736,
"grad_norm": 0.0011010151356458664,
"learning_rate": 0.00040564772402947784,
"loss": 0.0005,
"num_input_tokens_seen": 448632,
"step": 690
},
{
"epoch": 12.192982456140351,
"grad_norm": 0.000885249231941998,
"learning_rate": 0.00039814165199357807,
"loss": 0.0006,
"num_input_tokens_seen": 452248,
"step": 695
},
{
"epoch": 12.280701754385966,
"grad_norm": 0.38688522577285767,
"learning_rate": 0.00039065945439681213,
"loss": 0.0053,
"num_input_tokens_seen": 455544,
"step": 700
},
{
"epoch": 12.368421052631579,
"grad_norm": 0.0028335454408079386,
"learning_rate": 0.0003832028849812607,
"loss": 0.0007,
"num_input_tokens_seen": 458520,
"step": 705
},
{
"epoch": 12.456140350877194,
"grad_norm": 0.0012163643259555101,
"learning_rate": 0.00037577369148204934,
"loss": 0.0021,
"num_input_tokens_seen": 462264,
"step": 710
},
{
"epoch": 12.543859649122806,
"grad_norm": 0.001764231943525374,
"learning_rate": 0.00036837361521770053,
"loss": 0.0009,
"num_input_tokens_seen": 465624,
"step": 715
},
{
"epoch": 12.631578947368421,
"grad_norm": 0.018389353528618813,
"learning_rate": 0.00036100439068198676,
"loss": 0.0016,
"num_input_tokens_seen": 468536,
"step": 720
},
{
"epoch": 12.719298245614034,
"grad_norm": 0.014804023317992687,
"learning_rate": 0.00035366774513738707,
"loss": 0.0006,
"num_input_tokens_seen": 472408,
"step": 725
},
{
"epoch": 12.807017543859649,
"grad_norm": 0.0032388227991759777,
"learning_rate": 0.0003463653982102347,
"loss": 0.0006,
"num_input_tokens_seen": 475416,
"step": 730
},
{
"epoch": 12.894736842105264,
"grad_norm": 0.0006264116382226348,
"learning_rate": 0.00033909906148765724,
"loss": 0.0004,
"num_input_tokens_seen": 478712,
"step": 735
},
{
"epoch": 12.982456140350877,
"grad_norm": 0.013625812716782093,
"learning_rate": 0.00033187043811639863,
"loss": 0.0006,
"num_input_tokens_seen": 481592,
"step": 740
},
{
"epoch": 13.0,
"eval_loss": 0.3705059587955475,
"eval_runtime": 0.7446,
"eval_samples_per_second": 33.577,
"eval_steps_per_second": 9.401,
"num_input_tokens_seen": 481720,
"step": 741
},
{
"epoch": 13.070175438596491,
"grad_norm": 0.0008147696498781443,
"learning_rate": 0.00032468122240362287,
"loss": 0.0003,
"num_input_tokens_seen": 484856,
"step": 745
},
{
"epoch": 13.157894736842104,
"grad_norm": 0.005378706846386194,
"learning_rate": 0.00031753309941978615,
"loss": 0.0003,
"num_input_tokens_seen": 487480,
"step": 750
},
{
"epoch": 13.24561403508772,
"grad_norm": 0.002744952216744423,
"learning_rate": 0.0003104277446036764,
"loss": 0.0003,
"num_input_tokens_seen": 490520,
"step": 755
},
{
"epoch": 13.333333333333334,
"grad_norm": 0.0012103226035833359,
"learning_rate": 0.00030336682336970847,
"loss": 0.0002,
"num_input_tokens_seen": 493976,
"step": 760
},
{
"epoch": 13.421052631578947,
"grad_norm": 0.0006653820746578276,
"learning_rate": 0.0002963519907175713,
"loss": 0.0005,
"num_input_tokens_seen": 497080,
"step": 765
},
{
"epoch": 13.508771929824562,
"grad_norm": 0.0035558794625103474,
"learning_rate": 0.00028938489084431363,
"loss": 0.0003,
"num_input_tokens_seen": 500792,
"step": 770
},
{
"epoch": 13.596491228070175,
"grad_norm": 0.013601898215711117,
"learning_rate": 0.0002824671567589635,
"loss": 0.0004,
"num_input_tokens_seen": 503832,
"step": 775
},
{
"epoch": 13.68421052631579,
"grad_norm": 0.0014382227091118693,
"learning_rate": 0.00027560040989976894,
"loss": 0.0004,
"num_input_tokens_seen": 507256,
"step": 780
},
{
"epoch": 13.771929824561404,
"grad_norm": 0.0005540886195376515,
"learning_rate": 0.0002687862597541523,
"loss": 0.0002,
"num_input_tokens_seen": 510616,
"step": 785
},
{
"epoch": 13.859649122807017,
"grad_norm": 0.0010458007454872131,
"learning_rate": 0.0002620263034814632,
"loss": 0.0002,
"num_input_tokens_seen": 514200,
"step": 790
},
{
"epoch": 13.947368421052632,
"grad_norm": 0.06604114919900894,
"learning_rate": 0.00025532212553862446,
"loss": 0.0008,
"num_input_tokens_seen": 517400,
"step": 795
},
{
"epoch": 14.0,
"eval_loss": 0.36595970392227173,
"eval_runtime": 0.746,
"eval_samples_per_second": 33.512,
"eval_steps_per_second": 9.383,
"num_input_tokens_seen": 518664,
"step": 798
},
{
"epoch": 14.035087719298245,
"grad_norm": 0.0014471168396994472,
"learning_rate": 0.000248675297308751,
"loss": 0.0003,
"num_input_tokens_seen": 519848,
"step": 800
},
{
"epoch": 14.12280701754386,
"grad_norm": 0.002391919493675232,
"learning_rate": 0.00024208737673283814,
"loss": 0.0002,
"num_input_tokens_seen": 523176,
"step": 805
},
{
"epoch": 14.210526315789474,
"grad_norm": 0.007587758358567953,
"learning_rate": 0.00023555990794459542,
"loss": 0.0003,
"num_input_tokens_seen": 526280,
"step": 810
},
{
"epoch": 14.298245614035087,
"grad_norm": 0.0007305729086510837,
"learning_rate": 0.00022909442090852144,
"loss": 0.0003,
"num_input_tokens_seen": 529160,
"step": 815
},
{
"epoch": 14.385964912280702,
"grad_norm": 0.005135450512170792,
"learning_rate": 0.0002226924310612956,
"loss": 0.0002,
"num_input_tokens_seen": 533256,
"step": 820
},
{
"epoch": 14.473684210526315,
"grad_norm": 0.0027909493073821068,
"learning_rate": 0.00021635543895657866,
"loss": 0.0002,
"num_input_tokens_seen": 536712,
"step": 825
},
{
"epoch": 14.56140350877193,
"grad_norm": 0.0009187912801280618,
"learning_rate": 0.00021008492991329863,
"loss": 0.0002,
"num_input_tokens_seen": 539912,
"step": 830
},
{
"epoch": 14.649122807017545,
"grad_norm": 0.0006335495854727924,
"learning_rate": 0.00020388237366751006,
"loss": 0.0002,
"num_input_tokens_seen": 543336,
"step": 835
},
{
"epoch": 14.736842105263158,
"grad_norm": 0.003865489037707448,
"learning_rate": 0.0001977492240279035,
"loss": 0.0002,
"num_input_tokens_seen": 547496,
"step": 840
},
{
"epoch": 14.824561403508772,
"grad_norm": 0.0015492694219574332,
"learning_rate": 0.0001916869185350505,
"loss": 0.0004,
"num_input_tokens_seen": 550280,
"step": 845
},
{
"epoch": 14.912280701754385,
"grad_norm": 0.0075766416266560555,
"learning_rate": 0.00018569687812445895,
"loss": 0.0004,
"num_input_tokens_seen": 553672,
"step": 850
},
{
"epoch": 15.0,
"grad_norm": 0.0077131628058850765,
"learning_rate": 0.00017978050679352359,
"loss": 0.0004,
"num_input_tokens_seen": 555728,
"step": 855
},
{
"epoch": 15.0,
"eval_loss": 0.36777299642562866,
"eval_runtime": 0.7431,
"eval_samples_per_second": 33.641,
"eval_steps_per_second": 9.419,
"num_input_tokens_seen": 555728,
"step": 855
},
{
"epoch": 15.087719298245615,
"grad_norm": 0.0028925647493451834,
"learning_rate": 0.00017393919127244346,
"loss": 0.0002,
"num_input_tokens_seen": 559312,
"step": 860
},
{
"epoch": 15.175438596491228,
"grad_norm": 0.000598029000684619,
"learning_rate": 0.00016817430069918936,
"loss": 0.0002,
"num_input_tokens_seen": 563056,
"step": 865
},
{
"epoch": 15.263157894736842,
"grad_norm": 0.0024406970478594303,
"learning_rate": 0.00016248718629859244,
"loss": 0.0002,
"num_input_tokens_seen": 566224,
"step": 870
},
{
"epoch": 15.350877192982455,
"grad_norm": 0.0006085984059609473,
"learning_rate": 0.00015687918106563326,
"loss": 0.0004,
"num_input_tokens_seen": 569776,
"step": 875
},
{
"epoch": 15.43859649122807,
"grad_norm": 0.0007883381913416088,
"learning_rate": 0.0001513515994530023,
"loss": 0.0002,
"num_input_tokens_seen": 573360,
"step": 880
},
{
"epoch": 15.526315789473685,
"grad_norm": 0.002710202243179083,
"learning_rate": 0.00014590573706300782,
"loss": 0.0003,
"num_input_tokens_seen": 576144,
"step": 885
},
{
"epoch": 15.614035087719298,
"grad_norm": 0.010271117091178894,
"learning_rate": 0.00014054287034390045,
"loss": 0.0003,
"num_input_tokens_seen": 579088,
"step": 890
},
{
"epoch": 15.701754385964913,
"grad_norm": 0.003705398179590702,
"learning_rate": 0.00013526425629068966,
"loss": 0.0003,
"num_input_tokens_seen": 582032,
"step": 895
},
{
"epoch": 15.789473684210526,
"grad_norm": 0.0013794214464724064,
"learning_rate": 0.00013007113215051673,
"loss": 0.0002,
"num_input_tokens_seen": 585584,
"step": 900
},
{
"epoch": 15.87719298245614,
"grad_norm": 0.00568090844899416,
"learning_rate": 0.00012496471513265967,
"loss": 0.0003,
"num_input_tokens_seen": 588944,
"step": 905
},
{
"epoch": 15.964912280701755,
"grad_norm": 0.001053197542205453,
"learning_rate": 0.00011994620212323176,
"loss": 0.0002,
"num_input_tokens_seen": 592112,
"step": 910
},
{
"epoch": 16.0,
"eval_loss": 0.3890913426876068,
"eval_runtime": 0.7441,
"eval_samples_per_second": 33.596,
"eval_steps_per_second": 9.407,
"num_input_tokens_seen": 593096,
"step": 912
},
{
"epoch": 16.05263157894737,
"grad_norm": 0.001349198049865663,
"learning_rate": 0.00011501676940464645,
"loss": 0.0002,
"num_input_tokens_seen": 594440,
"step": 915
},
{
"epoch": 16.140350877192983,
"grad_norm": 0.0005845442065037787,
"learning_rate": 0.00011017757237990877,
"loss": 0.0002,
"num_input_tokens_seen": 598440,
"step": 920
},
{
"epoch": 16.228070175438596,
"grad_norm": 0.007813757285475731,
"learning_rate": 0.00010542974530180327,
"loss": 0.0002,
"num_input_tokens_seen": 602152,
"step": 925
},
{
"epoch": 16.31578947368421,
"grad_norm": 0.001801942940801382,
"learning_rate": 0.00010077440100703683,
"loss": 0.0003,
"num_input_tokens_seen": 605736,
"step": 930
},
{
"epoch": 16.403508771929825,
"grad_norm": 0.00401125755161047,
"learning_rate": 9.621263065540364e-05,
"loss": 0.0002,
"num_input_tokens_seen": 608520,
"step": 935
},
{
"epoch": 16.49122807017544,
"grad_norm": 0.0013953890884295106,
"learning_rate": 9.174550347402855e-05,
"loss": 0.0001,
"num_input_tokens_seen": 612040,
"step": 940
},
{
"epoch": 16.57894736842105,
"grad_norm": 0.0009140498586930335,
"learning_rate": 8.737406650675333e-05,
"loss": 0.0002,
"num_input_tokens_seen": 614920,
"step": 945
},
{
"epoch": 16.666666666666668,
"grad_norm": 0.0005205461056903005,
"learning_rate": 8.309934436872074e-05,
"loss": 0.0001,
"num_input_tokens_seen": 618440,
"step": 950
},
{
"epoch": 16.75438596491228,
"grad_norm": 0.0019901059567928314,
"learning_rate": 7.89223390062172e-05,
"loss": 0.0006,
"num_input_tokens_seen": 621320,
"step": 955
},
{
"epoch": 16.842105263157894,
"grad_norm": 0.0006933985278010368,
"learning_rate": 7.4844029461827e-05,
"loss": 0.0001,
"num_input_tokens_seen": 624648,
"step": 960
},
{
"epoch": 16.92982456140351,
"grad_norm": 0.006941209081560373,
"learning_rate": 7.086537164495688e-05,
"loss": 0.0002,
"num_input_tokens_seen": 628232,
"step": 965
},
{
"epoch": 17.0,
"eval_loss": 0.37933316826820374,
"eval_runtime": 0.7447,
"eval_samples_per_second": 33.571,
"eval_steps_per_second": 9.4,
"num_input_tokens_seen": 629760,
"step": 969
},
{
"epoch": 17.017543859649123,
"grad_norm": 0.0028358723502606153,
"learning_rate": 6.698729810778065e-05,
"loss": 0.0002,
"num_input_tokens_seen": 630528,
"step": 970
},
{
"epoch": 17.105263157894736,
"grad_norm": 0.0003263541730120778,
"learning_rate": 6.321071782666077e-05,
"loss": 0.0002,
"num_input_tokens_seen": 633600,
"step": 975
},
{
"epoch": 17.19298245614035,
"grad_norm": 0.0036093771923333406,
"learning_rate": 5.953651598909332e-05,
"loss": 0.0002,
"num_input_tokens_seen": 637024,
"step": 980
},
{
"epoch": 17.280701754385966,
"grad_norm": 0.002093312796205282,
"learning_rate": 5.596555378623125e-05,
"loss": 0.0004,
"num_input_tokens_seen": 640160,
"step": 985
},
{
"epoch": 17.36842105263158,
"grad_norm": 0.002227015094831586,
"learning_rate": 5.2498668211030166e-05,
"loss": 0.0002,
"num_input_tokens_seen": 643040,
"step": 990
},
{
"epoch": 17.45614035087719,
"grad_norm": 0.001337834750302136,
"learning_rate": 4.913667186206722e-05,
"loss": 0.0001,
"num_input_tokens_seen": 646336,
"step": 995
},
{
"epoch": 17.54385964912281,
"grad_norm": 0.0036892376374453306,
"learning_rate": 4.588035275307689e-05,
"loss": 0.0002,
"num_input_tokens_seen": 649984,
"step": 1000
},
{
"epoch": 17.63157894736842,
"grad_norm": 0.0012848501792177558,
"learning_rate": 4.273047412824954e-05,
"loss": 0.0001,
"num_input_tokens_seen": 653632,
"step": 1005
},
{
"epoch": 17.719298245614034,
"grad_norm": 0.0038990762550383806,
"learning_rate": 3.9687774283335975e-05,
"loss": 0.0002,
"num_input_tokens_seen": 656960,
"step": 1010
},
{
"epoch": 17.80701754385965,
"grad_norm": 0.00040850613731890917,
"learning_rate": 3.675296639259912e-05,
"loss": 0.0003,
"num_input_tokens_seen": 659840,
"step": 1015
},
{
"epoch": 17.894736842105264,
"grad_norm": 0.01073791366070509,
"learning_rate": 3.392673834165388e-05,
"loss": 0.0002,
"num_input_tokens_seen": 663872,
"step": 1020
},
{
"epoch": 17.982456140350877,
"grad_norm": 0.004404230508953333,
"learning_rate": 3.120975256623465e-05,
"loss": 0.0002,
"num_input_tokens_seen": 667264,
"step": 1025
},
{
"epoch": 18.0,
"eval_loss": 0.38870540261268616,
"eval_runtime": 0.7479,
"eval_samples_per_second": 33.429,
"eval_steps_per_second": 9.36,
"num_input_tokens_seen": 667432,
"step": 1026
},
{
"epoch": 18.07017543859649,
"grad_norm": 0.0011177717242389917,
"learning_rate": 2.8602645896928293e-05,
"loss": 0.0002,
"num_input_tokens_seen": 669544,
"step": 1030
},
{
"epoch": 18.157894736842106,
"grad_norm": 0.0034124741796404123,
"learning_rate": 2.610602940990797e-05,
"loss": 0.0003,
"num_input_tokens_seen": 672776,
"step": 1035
},
{
"epoch": 18.24561403508772,
"grad_norm": 0.004636778496205807,
"learning_rate": 2.3720488283703547e-05,
"loss": 0.0003,
"num_input_tokens_seen": 676008,
"step": 1040
},
{
"epoch": 18.333333333333332,
"grad_norm": 0.002139877527952194,
"learning_rate": 2.144658166204294e-05,
"loss": 0.0002,
"num_input_tokens_seen": 679848,
"step": 1045
},
{
"epoch": 18.42105263157895,
"grad_norm": 0.0005989308701828122,
"learning_rate": 1.9284842522794942e-05,
"loss": 0.0001,
"num_input_tokens_seen": 683144,
"step": 1050
},
{
"epoch": 18.50877192982456,
"grad_norm": 0.0007390376995317638,
"learning_rate": 1.7235777553045283e-05,
"loss": 0.0003,
"num_input_tokens_seen": 686280,
"step": 1055
},
{
"epoch": 18.596491228070175,
"grad_norm": 0.0010978280333802104,
"learning_rate": 1.5299867030334813e-05,
"loss": 0.0002,
"num_input_tokens_seen": 689480,
"step": 1060
},
{
"epoch": 18.68421052631579,
"grad_norm": 0.0012614470906555653,
"learning_rate": 1.3477564710088097e-05,
"loss": 0.0001,
"num_input_tokens_seen": 692296,
"step": 1065
},
{
"epoch": 18.771929824561404,
"grad_norm": 0.0010993927717208862,
"learning_rate": 1.1769297719258221e-05,
"loss": 0.0002,
"num_input_tokens_seen": 695912,
"step": 1070
},
{
"epoch": 18.859649122807017,
"grad_norm": 0.0016077933833003044,
"learning_rate": 1.0175466456213034e-05,
"loss": 0.0002,
"num_input_tokens_seen": 699432,
"step": 1075
},
{
"epoch": 18.94736842105263,
"grad_norm": 0.013521168380975723,
"learning_rate": 8.696444496886502e-06,
"loss": 0.0003,
"num_input_tokens_seen": 703080,
"step": 1080
},
{
"epoch": 19.0,
"eval_loss": 0.39144256711006165,
"eval_runtime": 0.7477,
"eval_samples_per_second": 33.437,
"eval_steps_per_second": 9.362,
"num_input_tokens_seen": 704816,
"step": 1083
},
{
"epoch": 19.035087719298247,
"grad_norm": 0.004061270505189896,
"learning_rate": 7.332578507216469e-06,
"loss": 0.0003,
"num_input_tokens_seen": 706192,
"step": 1085
},
{
"epoch": 19.12280701754386,
"grad_norm": 0.005511495750397444,
"learning_rate": 6.084188161890325e-06,
"loss": 0.0004,
"num_input_tokens_seen": 709072,
"step": 1090
},
{
"epoch": 19.210526315789473,
"grad_norm": 0.0010893200524151325,
"learning_rate": 4.95156606941688e-06,
"loss": 0.0001,
"num_input_tokens_seen": 712496,
"step": 1095
},
{
"epoch": 19.29824561403509,
"grad_norm": 0.0010816626017913222,
"learning_rate": 3.9349777035421194e-06,
"loss": 0.0002,
"num_input_tokens_seen": 715056,
"step": 1100
},
{
"epoch": 19.385964912280702,
"grad_norm": 0.00045030307956039906,
"learning_rate": 3.034661341025258e-06,
"loss": 0.0002,
"num_input_tokens_seen": 718416,
"step": 1105
},
{
"epoch": 19.473684210526315,
"grad_norm": 0.0028827146161347628,
"learning_rate": 2.250828005789518e-06,
"loss": 0.0002,
"num_input_tokens_seen": 721744,
"step": 1110
},
{
"epoch": 19.56140350877193,
"grad_norm": 0.001782647450454533,
"learning_rate": 1.5836614194602028e-06,
"loss": 0.0001,
"num_input_tokens_seen": 725328,
"step": 1115
},
{
"epoch": 19.649122807017545,
"grad_norm": 0.0003462164895609021,
"learning_rate": 1.033317958302693e-06,
"loss": 0.0002,
"num_input_tokens_seen": 728976,
"step": 1120
},
{
"epoch": 19.736842105263158,
"grad_norm": 0.010955123230814934,
"learning_rate": 5.999266165694906e-07,
"loss": 0.0003,
"num_input_tokens_seen": 732368,
"step": 1125
},
{
"epoch": 19.82456140350877,
"grad_norm": 0.001071834471076727,
"learning_rate": 2.8358897626556966e-07,
"loss": 0.0002,
"num_input_tokens_seen": 735440,
"step": 1130
},
{
"epoch": 19.912280701754387,
"grad_norm": 0.0024311819579452276,
"learning_rate": 8.437918333864537e-08,
"loss": 0.0002,
"num_input_tokens_seen": 739120,
"step": 1135
},
{
"epoch": 20.0,
"grad_norm": 0.0004392106784507632,
"learning_rate": 2.343930299963937e-09,
"loss": 0.0001,
"num_input_tokens_seen": 742296,
"step": 1140
},
{
"epoch": 20.0,
"eval_loss": 0.39334043860435486,
"eval_runtime": 0.7462,
"eval_samples_per_second": 33.501,
"eval_steps_per_second": 9.38,
"num_input_tokens_seen": 742296,
"step": 1140
},
{
"epoch": 20.0,
"num_input_tokens_seen": 742296,
"step": 1140,
"total_flos": 3.342525338596147e+16,
"train_loss": 0.11720111153829892,
"train_runtime": 292.3442,
"train_samples_per_second": 15.393,
"train_steps_per_second": 3.9
}
],
"logging_steps": 5,
"max_steps": 1140,
"num_input_tokens_seen": 742296,
"num_train_epochs": 20,
"save_steps": 57,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.342525338596147e+16,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}