{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.991304347826087,
"eval_steps": 500,
"global_step": 458,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.021739130434782608,
"grad_norm": 0.2065464166118803,
"learning_rate": 9.237540571428572e-06,
"loss": 1.3456,
"step": 5
},
{
"epoch": 0.043478260869565216,
"grad_norm": 0.1480933204239956,
"learning_rate": 2.0784466285714287e-05,
"loss": 1.2885,
"step": 10
},
{
"epoch": 0.06521739130434782,
"grad_norm": 0.150974056451755,
"learning_rate": 3.2331392000000005e-05,
"loss": 1.3042,
"step": 15
},
{
"epoch": 0.08695652173913043,
"grad_norm": 0.17178907051327394,
"learning_rate": 4.3878317714285716e-05,
"loss": 1.2698,
"step": 20
},
{
"epoch": 0.10869565217391304,
"grad_norm": 0.1565313504705507,
"learning_rate": 5.542524342857144e-05,
"loss": 1.3194,
"step": 25
},
{
"epoch": 0.13043478260869565,
"grad_norm": 0.16154165023636324,
"learning_rate": 6.697216914285716e-05,
"loss": 1.3126,
"step": 30
},
{
"epoch": 0.15217391304347827,
"grad_norm": 0.1508862078278226,
"learning_rate": 7.851909485714286e-05,
"loss": 1.2444,
"step": 35
},
{
"epoch": 0.17391304347826086,
"grad_norm": 0.13942726185321958,
"learning_rate": 8.082290185896143e-05,
"loss": 1.359,
"step": 40
},
{
"epoch": 0.1956521739130435,
"grad_norm": 0.16794065891151863,
"learning_rate": 8.08002441797129e-05,
"loss": 1.2772,
"step": 45
},
{
"epoch": 0.21739130434782608,
"grad_norm": 0.13583828632003314,
"learning_rate": 8.076017134863456e-05,
"loss": 1.3199,
"step": 50
},
{
"epoch": 0.2391304347826087,
"grad_norm": 0.12058729414284189,
"learning_rate": 8.07027064112386e-05,
"loss": 1.1545,
"step": 55
},
{
"epoch": 0.2608695652173913,
"grad_norm": 0.1580929470393744,
"learning_rate": 8.062788241507575e-05,
"loss": 1.2062,
"step": 60
},
{
"epoch": 0.2826086956521739,
"grad_norm": 0.14344490834313062,
"learning_rate": 8.053574239072986e-05,
"loss": 1.241,
"step": 65
},
{
"epoch": 0.30434782608695654,
"grad_norm": 0.15510628341722088,
"learning_rate": 8.042633932707147e-05,
"loss": 1.2569,
"step": 70
},
{
"epoch": 0.32608695652173914,
"grad_norm": 0.15855414118402722,
"learning_rate": 8.029973614078431e-05,
"loss": 1.323,
"step": 75
},
{
"epoch": 0.34782608695652173,
"grad_norm": 0.17952542628856666,
"learning_rate": 8.015600564018263e-05,
"loss": 1.4028,
"step": 80
},
{
"epoch": 0.3695652173913043,
"grad_norm": 0.15172040801101977,
"learning_rate": 7.999523048333973e-05,
"loss": 1.3163,
"step": 85
},
{
"epoch": 0.391304347826087,
"grad_norm": 0.15965559075617186,
"learning_rate": 7.981750313055204e-05,
"loss": 1.2999,
"step": 90
},
{
"epoch": 0.41304347826086957,
"grad_norm": 0.15306603709633343,
"learning_rate": 7.962292579116623e-05,
"loss": 1.3225,
"step": 95
},
{
"epoch": 0.43478260869565216,
"grad_norm": 0.1389965548962345,
"learning_rate": 7.941161036479934e-05,
"loss": 1.2768,
"step": 100
},
{
"epoch": 0.45652173913043476,
"grad_norm": 0.16277701740229086,
"learning_rate": 7.918367837698652e-05,
"loss": 1.2641,
"step": 105
},
{
"epoch": 0.4782608695652174,
"grad_norm": 2.167084486878609,
"learning_rate": 7.893926090929274e-05,
"loss": 1.2767,
"step": 110
},
{
"epoch": 0.5,
"grad_norm": 0.18721740530412184,
"learning_rate": 7.867849852392907e-05,
"loss": 1.3353,
"step": 115
},
{
"epoch": 0.5217391304347826,
"grad_norm": 0.1595853671566002,
"learning_rate": 7.840154118291658e-05,
"loss": 1.3192,
"step": 120
},
{
"epoch": 0.5434782608695652,
"grad_norm": 0.15538820768411876,
"learning_rate": 7.810854816184464e-05,
"loss": 1.3333,
"step": 125
},
{
"epoch": 0.5652173913043478,
"grad_norm": 0.16998123505161036,
"learning_rate": 7.779968795827297e-05,
"loss": 1.3044,
"step": 130
},
{
"epoch": 0.5869565217391305,
"grad_norm": 0.15621775760411818,
"learning_rate": 7.747513819483031e-05,
"loss": 1.3067,
"step": 135
},
{
"epoch": 0.6086956521739131,
"grad_norm": 0.16293023551932742,
"learning_rate": 7.713508551706524e-05,
"loss": 1.4349,
"step": 140
},
{
"epoch": 0.6304347826086957,
"grad_norm": 0.16734455190765907,
"learning_rate": 7.677972548610812e-05,
"loss": 1.31,
"step": 145
},
{
"epoch": 0.6521739130434783,
"grad_norm": 0.15739717348744878,
"learning_rate": 7.640926246620566e-05,
"loss": 1.2386,
"step": 150
},
{
"epoch": 0.6739130434782609,
"grad_norm": 0.1931909414435857,
"learning_rate": 7.602390950719296e-05,
"loss": 1.2572,
"step": 155
},
{
"epoch": 0.6956521739130435,
"grad_norm": 0.15051641328484738,
"learning_rate": 7.562388822197052e-05,
"loss": 1.2769,
"step": 160
},
{
"epoch": 0.717391304347826,
"grad_norm": 0.16776500409127318,
"learning_rate": 7.520942865905675e-05,
"loss": 1.2971,
"step": 165
},
{
"epoch": 0.7391304347826086,
"grad_norm": 0.1558669075485943,
"learning_rate": 7.478076917028921e-05,
"loss": 1.2169,
"step": 170
},
{
"epoch": 0.7608695652173914,
"grad_norm": 0.16119001773393687,
"learning_rate": 7.433815627375065e-05,
"loss": 1.3132,
"step": 175
},
{
"epoch": 0.782608695652174,
"grad_norm": 0.15949576336588536,
"learning_rate": 7.388184451199874e-05,
"loss": 1.2098,
"step": 180
},
{
"epoch": 0.8043478260869565,
"grad_norm": 0.16762909113707056,
"learning_rate": 7.341209630568113e-05,
"loss": 1.2119,
"step": 185
},
{
"epoch": 0.8260869565217391,
"grad_norm": 0.15938491024669238,
"learning_rate": 7.292918180261961e-05,
"loss": 1.214,
"step": 190
},
{
"epoch": 0.8478260869565217,
"grad_norm": 0.1565363254612612,
"learning_rate": 7.243337872245076e-05,
"loss": 1.3229,
"step": 195
},
{
"epoch": 0.8695652173913043,
"grad_norm": 0.1837054421065538,
"learning_rate": 7.192497219691187e-05,
"loss": 1.2572,
"step": 200
},
{
"epoch": 0.8913043478260869,
"grad_norm": 0.17510726258023546,
"learning_rate": 7.14042546058644e-05,
"loss": 1.2429,
"step": 205
},
{
"epoch": 0.9130434782608695,
"grad_norm": 0.17918443573588427,
"learning_rate": 7.087152540914899e-05,
"loss": 1.27,
"step": 210
},
{
"epoch": 0.9347826086956522,
"grad_norm": 0.16181464986135835,
"learning_rate": 7.032709097436893e-05,
"loss": 1.3244,
"step": 215
},
{
"epoch": 0.9565217391304348,
"grad_norm": 0.18983732250278182,
"learning_rate": 6.977126440070099e-05,
"loss": 1.2607,
"step": 220
},
{
"epoch": 0.9782608695652174,
"grad_norm": 0.16413532578219092,
"learning_rate": 6.9204365338835e-05,
"loss": 1.3419,
"step": 225
},
{
"epoch": 0.9956521739130435,
"eval_loss": 1.2522636651992798,
"eval_runtime": 88.1417,
"eval_samples_per_second": 0.783,
"eval_steps_per_second": 0.102,
"step": 229
},
{
"epoch": 1.0,
"grad_norm": 0.46992013172628483,
"learning_rate": 6.862671980714567e-05,
"loss": 1.2573,
"step": 230
},
{
"epoch": 1.0217391304347827,
"grad_norm": 0.17964246635807615,
"learning_rate": 6.803866000420255e-05,
"loss": 1.1699,
"step": 235
},
{
"epoch": 1.0434782608695652,
"grad_norm": 0.17711791749777692,
"learning_rate": 6.744052411772561e-05,
"loss": 1.1813,
"step": 240
},
{
"epoch": 1.065217391304348,
"grad_norm": 0.20800740462129375,
"learning_rate": 6.683265613009676e-05,
"loss": 1.2145,
"step": 245
},
{
"epoch": 1.0869565217391304,
"grad_norm": 0.1790435581426176,
"learning_rate": 6.621540562053868e-05,
"loss": 1.1687,
"step": 250
},
{
"epoch": 1.108695652173913,
"grad_norm": 0.19206023313050768,
"learning_rate": 6.558912756407504e-05,
"loss": 1.1168,
"step": 255
},
{
"epoch": 1.1304347826086956,
"grad_norm": 0.19351056033989472,
"learning_rate": 6.495418212738781e-05,
"loss": 1.1031,
"step": 260
},
{
"epoch": 1.1521739130434783,
"grad_norm": 0.1929035962432216,
"learning_rate": 6.431093446168862e-05,
"loss": 1.1046,
"step": 265
},
{
"epoch": 1.1739130434782608,
"grad_norm": 0.18031873301953555,
"learning_rate": 6.365975449272376e-05,
"loss": 1.0313,
"step": 270
},
{
"epoch": 1.1956521739130435,
"grad_norm": 0.19072557482852942,
"learning_rate": 6.300101670803327e-05,
"loss": 1.0155,
"step": 275
},
{
"epoch": 1.2173913043478262,
"grad_norm": 0.19222722891199284,
"learning_rate": 6.233509994158672e-05,
"loss": 1.0188,
"step": 280
},
{
"epoch": 1.2391304347826086,
"grad_norm": 0.1977105629184088,
"learning_rate": 6.166238715591927e-05,
"loss": 1.149,
"step": 285
},
{
"epoch": 1.2608695652173914,
"grad_norm": 0.17886902159518417,
"learning_rate": 6.0983265221893474e-05,
"loss": 1.0339,
"step": 290
},
{
"epoch": 1.2826086956521738,
"grad_norm": 0.1938241832022054,
"learning_rate": 6.0298124696213476e-05,
"loss": 1.0982,
"step": 295
},
{
"epoch": 1.3043478260869565,
"grad_norm": 0.19758944910159232,
"learning_rate": 5.960735959681945e-05,
"loss": 1.123,
"step": 300
},
{
"epoch": 1.3260869565217392,
"grad_norm": 0.1877646524241858,
"learning_rate": 5.891136717629159e-05,
"loss": 1.0153,
"step": 305
},
{
"epoch": 1.3478260869565217,
"grad_norm": 0.2053796629015727,
"learning_rate": 5.82105476933938e-05,
"loss": 1.1374,
"step": 310
},
{
"epoch": 1.3695652173913042,
"grad_norm": 0.20656589720047988,
"learning_rate": 5.7505304182888675e-05,
"loss": 1.0031,
"step": 315
},
{
"epoch": 1.391304347826087,
"grad_norm": 0.20517612870523771,
"learning_rate": 5.6796042223755874e-05,
"loss": 1.1024,
"step": 320
},
{
"epoch": 1.4130434782608696,
"grad_norm": 0.2059235805386809,
"learning_rate": 5.608316970594749e-05,
"loss": 1.071,
"step": 325
},
{
"epoch": 1.434782608695652,
"grad_norm": 0.20924273619816075,
"learning_rate": 5.5367096595814274e-05,
"loss": 1.1296,
"step": 330
},
{
"epoch": 1.4565217391304348,
"grad_norm": 0.19136863234304072,
"learning_rate": 5.464823470033789e-05,
"loss": 1.0511,
"step": 335
},
{
"epoch": 1.4782608695652173,
"grad_norm": 0.20700709646972654,
"learning_rate": 5.392699743030447e-05,
"loss": 1.1051,
"step": 340
},
{
"epoch": 1.5,
"grad_norm": 0.20931900716899277,
"learning_rate": 5.320379956255593e-05,
"loss": 1.0226,
"step": 345
},
{
"epoch": 1.5217391304347827,
"grad_norm": 0.20298044586118655,
"learning_rate": 5.247905700145572e-05,
"loss": 1.0848,
"step": 350
},
{
"epoch": 1.5434782608695652,
"grad_norm": 0.20694877907868348,
"learning_rate": 5.175318653970605e-05,
"loss": 1.095,
"step": 355
},
{
"epoch": 1.5652173913043477,
"grad_norm": 0.19516755215038753,
"learning_rate": 5.102660561865429e-05,
"loss": 1.0546,
"step": 360
},
{
"epoch": 1.5869565217391304,
"grad_norm": 0.1964975411046726,
"learning_rate": 5.029973208822634e-05,
"loss": 1.0689,
"step": 365
},
{
"epoch": 1.608695652173913,
"grad_norm": 0.2056348548121518,
"learning_rate": 4.9572983966625056e-05,
"loss": 1.087,
"step": 370
},
{
"epoch": 1.6304347826086958,
"grad_norm": 0.20756916326417496,
"learning_rate": 4.8846779199931804e-05,
"loss": 1.0973,
"step": 375
},
{
"epoch": 1.6521739130434783,
"grad_norm": 0.2201415808276959,
"learning_rate": 4.812153542174962e-05,
"loss": 1.0988,
"step": 380
},
{
"epoch": 1.6739130434782608,
"grad_norm": 0.2029740340446862,
"learning_rate": 4.7397669713025996e-05,
"loss": 1.0824,
"step": 385
},
{
"epoch": 1.6956521739130435,
"grad_norm": 0.21082370579952373,
"learning_rate": 4.667559836219353e-05,
"loss": 1.0526,
"step": 390
},
{
"epoch": 1.7173913043478262,
"grad_norm": 0.22827766879708888,
"learning_rate": 4.595573662576621e-05,
"loss": 1.153,
"step": 395
},
{
"epoch": 1.7391304347826086,
"grad_norm": 0.21594690562138477,
"learning_rate": 4.5238498489529416e-05,
"loss": 1.0416,
"step": 400
},
{
"epoch": 1.7608695652173914,
"grad_norm": 0.2234495033400394,
"learning_rate": 4.452429643046047e-05,
"loss": 1.1061,
"step": 405
},
{
"epoch": 1.7826086956521738,
"grad_norm": 0.20429899070533641,
"learning_rate": 4.381354117951693e-05,
"loss": 1.0673,
"step": 410
},
{
"epoch": 1.8043478260869565,
"grad_norm": 0.20175980259236084,
"learning_rate": 4.310664148542911e-05,
"loss": 1.1085,
"step": 415
},
{
"epoch": 1.8260869565217392,
"grad_norm": 0.22972932269987778,
"learning_rate": 4.2404003879632465e-05,
"loss": 1.1106,
"step": 420
},
{
"epoch": 1.8478260869565217,
"grad_norm": 0.2069219549317397,
"learning_rate": 4.170603244247511e-05,
"loss": 1.0134,
"step": 425
},
{
"epoch": 1.8695652173913042,
"grad_norm": 0.28862471197127954,
"learning_rate": 4.101312857083501e-05,
"loss": 1.062,
"step": 430
},
{
"epoch": 1.891304347826087,
"grad_norm": 0.22289565470086473,
"learning_rate": 4.032569074728038e-05,
"loss": 1.2017,
"step": 435
},
{
"epoch": 1.9130434782608696,
"grad_norm": 0.24443473389994466,
"learning_rate": 3.964411431090602e-05,
"loss": 1.1038,
"step": 440
},
{
"epoch": 1.9347826086956523,
"grad_norm": 0.22270022150824992,
"learning_rate": 3.896879122997752e-05,
"loss": 1.0885,
"step": 445
},
{
"epoch": 1.9565217391304348,
"grad_norm": 0.2148088888391178,
"learning_rate": 3.8300109876513964e-05,
"loss": 1.1251,
"step": 450
},
{
"epoch": 1.9782608695652173,
"grad_norm": 0.21318145678486677,
"learning_rate": 3.763845480293877e-05,
"loss": 1.1119,
"step": 455
},
{
"epoch": 1.991304347826087,
"eval_loss": 1.2423216104507446,
"eval_runtime": 88.1188,
"eval_samples_per_second": 0.783,
"eval_steps_per_second": 0.102,
"step": 458
}
],
"logging_steps": 5,
"max_steps": 690,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 475053447708672.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}