{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 100,
"global_step": 326,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.030745580322828592,
"grad_norm": 6.350613102744608,
"learning_rate": 1.0204081632653061e-07,
"loss": 6.5284,
"step": 5
},
{
"epoch": 0.061491160645657184,
"grad_norm": 5.249984394687515,
"learning_rate": 2.0408163265306121e-07,
"loss": 6.4945,
"step": 10
},
{
"epoch": 0.09223674096848578,
"grad_norm": 5.352306724137368,
"learning_rate": 3.0612244897959183e-07,
"loss": 6.6828,
"step": 15
},
{
"epoch": 0.12298232129131437,
"grad_norm": 5.486794769339902,
"learning_rate": 4.0816326530612243e-07,
"loss": 6.467,
"step": 20
},
{
"epoch": 0.15372790161414296,
"grad_norm": 4.077117426584361,
"learning_rate": 5.10204081632653e-07,
"loss": 6.1826,
"step": 25
},
{
"epoch": 0.18447348193697155,
"grad_norm": 3.119167144675083,
"learning_rate": 6.122448979591837e-07,
"loss": 5.949,
"step": 30
},
{
"epoch": 0.21521906225980014,
"grad_norm": 2.6950432819564356,
"learning_rate": 7.142857142857143e-07,
"loss": 5.4738,
"step": 35
},
{
"epoch": 0.24596464258262873,
"grad_norm": 1.5382799926978608,
"learning_rate": 8.163265306122449e-07,
"loss": 5.2285,
"step": 40
},
{
"epoch": 0.2767102229054573,
"grad_norm": 1.2825119089431523,
"learning_rate": 9.183673469387755e-07,
"loss": 5.0174,
"step": 45
},
{
"epoch": 0.3074558032282859,
"grad_norm": 1.14912166210836,
"learning_rate": 9.99987079628245e-07,
"loss": 4.7716,
"step": 50
},
{
"epoch": 0.3382013835511145,
"grad_norm": 1.285686948508955,
"learning_rate": 9.995349367260807e-07,
"loss": 4.5585,
"step": 55
},
{
"epoch": 0.3689469638739431,
"grad_norm": 1.054957564577766,
"learning_rate": 9.984374428250894e-07,
"loss": 4.3855,
"step": 60
},
{
"epoch": 0.3996925441967717,
"grad_norm": 0.9925990274913604,
"learning_rate": 9.966960157816278e-07,
"loss": 4.1541,
"step": 65
},
{
"epoch": 0.4304381245196003,
"grad_norm": 0.8744404881743455,
"learning_rate": 9.943129053516174e-07,
"loss": 4.1257,
"step": 70
},
{
"epoch": 0.4611837048424289,
"grad_norm": 0.7463667798296578,
"learning_rate": 9.91291190284077e-07,
"loss": 3.8852,
"step": 75
},
{
"epoch": 0.49192928516525747,
"grad_norm": 0.7902691532243193,
"learning_rate": 9.876347743436758e-07,
"loss": 3.8846,
"step": 80
},
{
"epoch": 0.5226748654880861,
"grad_norm": 0.926879835692504,
"learning_rate": 9.833483812674452e-07,
"loss": 3.9166,
"step": 85
},
{
"epoch": 0.5534204458109147,
"grad_norm": 0.803407155130827,
"learning_rate": 9.784375486621668e-07,
"loss": 3.7194,
"step": 90
},
{
"epoch": 0.5841660261337432,
"grad_norm": 0.8117888501469026,
"learning_rate": 9.729086208503173e-07,
"loss": 3.7475,
"step": 95
},
{
"epoch": 0.6149116064565718,
"grad_norm": 0.7821755674358282,
"learning_rate": 9.66768740673815e-07,
"loss": 3.6407,
"step": 100
},
{
"epoch": 0.6149116064565718,
"eval_loss": null,
"eval_runtime": 349.4965,
"eval_samples_per_second": 15.285,
"eval_steps_per_second": 0.956,
"step": 100
},
{
"epoch": 0.6456571867794004,
"grad_norm": 0.6822137175921249,
"learning_rate": 9.600258402661569e-07,
"loss": 3.6531,
"step": 105
},
{
"epoch": 0.676402767102229,
"grad_norm": 0.7500359822402426,
"learning_rate": 9.526886308048668e-07,
"loss": 3.6282,
"step": 110
},
{
"epoch": 0.7071483474250576,
"grad_norm": 0.8622924385168957,
"learning_rate": 9.447665912574929e-07,
"loss": 3.5622,
"step": 115
},
{
"epoch": 0.7378939277478862,
"grad_norm": 0.7766220833707687,
"learning_rate": 9.362699561356956e-07,
"loss": 3.4787,
"step": 120
},
{
"epoch": 0.7686395080707148,
"grad_norm": 0.7590912417940764,
"learning_rate": 9.272097022732443e-07,
"loss": 3.4078,
"step": 125
},
{
"epoch": 0.7993850883935434,
"grad_norm": 0.822433668441714,
"learning_rate": 9.175975346450062e-07,
"loss": 3.3673,
"step": 130
},
{
"epoch": 0.830130668716372,
"grad_norm": 0.7129904612641743,
"learning_rate": 9.074458712452475e-07,
"loss": 3.4122,
"step": 135
},
{
"epoch": 0.8608762490392006,
"grad_norm": 0.8733565752917316,
"learning_rate": 8.967678270447798e-07,
"loss": 3.3817,
"step": 140
},
{
"epoch": 0.8916218293620292,
"grad_norm": 0.7851319189409072,
"learning_rate": 8.855771970476833e-07,
"loss": 3.2653,
"step": 145
},
{
"epoch": 0.9223674096848578,
"grad_norm": 0.8962287259777225,
"learning_rate": 8.738884384694905e-07,
"loss": 3.3029,
"step": 150
},
{
"epoch": 0.9531129900076863,
"grad_norm": 0.9466863901587774,
"learning_rate": 8.617166520598562e-07,
"loss": 3.2964,
"step": 155
},
{
"epoch": 0.9838585703305149,
"grad_norm": 0.8018084383307482,
"learning_rate": 8.490775625938451e-07,
"loss": 3.2516,
"step": 160
},
{
"epoch": 1.0122982321291314,
"grad_norm": 0.7973445057492382,
"learning_rate": 8.359874985570377e-07,
"loss": 3.0663,
"step": 165
},
{
"epoch": 1.04304381245196,
"grad_norm": 0.901771344389446,
"learning_rate": 8.224633710506997e-07,
"loss": 3.2066,
"step": 170
},
{
"epoch": 1.0737893927747886,
"grad_norm": 0.7790569731296355,
"learning_rate": 8.085226519442697e-07,
"loss": 3.1987,
"step": 175
},
{
"epoch": 1.1045349730976173,
"grad_norm": 0.8027091819730047,
"learning_rate": 7.941833513033872e-07,
"loss": 3.1081,
"step": 180
},
{
"epoch": 1.1352805534204458,
"grad_norm": 0.836190595252302,
"learning_rate": 7.794639941226237e-07,
"loss": 3.2306,
"step": 185
},
{
"epoch": 1.1660261337432745,
"grad_norm": 0.8184681552542381,
"learning_rate": 7.643835963929746e-07,
"loss": 3.071,
"step": 190
},
{
"epoch": 1.196771714066103,
"grad_norm": 0.7396008036718764,
"learning_rate": 7.489616405350318e-07,
"loss": 3.1746,
"step": 195
},
{
"epoch": 1.2275172943889316,
"grad_norm": 0.8695385291013205,
"learning_rate": 7.332180502295728e-07,
"loss": 3.1306,
"step": 200
},
{
"epoch": 1.2275172943889316,
"eval_loss": null,
"eval_runtime": 344.6921,
"eval_samples_per_second": 15.498,
"eval_steps_per_second": 0.969,
"step": 200
},
{
"epoch": 1.2582628747117601,
"grad_norm": 0.9685883470665286,
"learning_rate": 7.171731646780867e-07,
"loss": 3.0465,
"step": 205
},
{
"epoch": 1.2890084550345888,
"grad_norm": 0.9706097655856817,
"learning_rate": 7.008477123264847e-07,
"loss": 2.9686,
"step": 210
},
{
"epoch": 1.3197540353574173,
"grad_norm": 0.8219747020280328,
"learning_rate": 6.84262784085946e-07,
"loss": 3.0241,
"step": 215
},
{
"epoch": 1.350499615680246,
"grad_norm": 0.960997631829513,
"learning_rate": 6.67439806085493e-07,
"loss": 3.0101,
"step": 220
},
{
"epoch": 1.3812451960030745,
"grad_norm": 0.9241788986351824,
"learning_rate": 6.504005119914975e-07,
"loss": 2.9648,
"step": 225
},
{
"epoch": 1.4119907763259032,
"grad_norm": 0.8461980858410534,
"learning_rate": 6.33166914929878e-07,
"loss": 3.0516,
"step": 230
},
{
"epoch": 1.4427363566487317,
"grad_norm": 0.788283378335606,
"learning_rate": 6.157612790472625e-07,
"loss": 3.0047,
"step": 235
},
{
"epoch": 1.4734819369715604,
"grad_norm": 0.7867127269455163,
"learning_rate": 5.982060907478567e-07,
"loss": 2.9992,
"step": 240
},
{
"epoch": 1.5042275172943889,
"grad_norm": 0.9174037687493481,
"learning_rate": 5.805240296431765e-07,
"loss": 2.9248,
"step": 245
},
{
"epoch": 1.5349730976172176,
"grad_norm": 1.0136136957988287,
"learning_rate": 5.627379392521757e-07,
"loss": 3.1472,
"step": 250
},
{
"epoch": 1.5657186779400463,
"grad_norm": 0.7666439492867059,
"learning_rate": 5.448707974896213e-07,
"loss": 2.8998,
"step": 255
},
{
"epoch": 1.5964642582628747,
"grad_norm": 0.866265723726005,
"learning_rate": 5.269456869808408e-07,
"loss": 3.0281,
"step": 260
},
{
"epoch": 1.6272098385857032,
"grad_norm": 0.9455130272746435,
"learning_rate": 5.08985765241196e-07,
"loss": 3.0122,
"step": 265
},
{
"epoch": 1.657955418908532,
"grad_norm": 0.8976450298583974,
"learning_rate": 4.910142347588041e-07,
"loss": 2.9808,
"step": 270
},
{
"epoch": 1.6887009992313606,
"grad_norm": 0.7984117514993636,
"learning_rate": 4.7305431301915935e-07,
"loss": 2.9423,
"step": 275
},
{
"epoch": 1.7194465795541891,
"grad_norm": 0.9717048570700036,
"learning_rate": 4.5512920251037885e-07,
"loss": 2.9908,
"step": 280
},
{
"epoch": 1.7501921598770176,
"grad_norm": 0.9953832113865739,
"learning_rate": 4.372620607478241e-07,
"loss": 2.9601,
"step": 285
},
{
"epoch": 1.7809377401998463,
"grad_norm": 0.8785516378851284,
"learning_rate": 4.1947597035682347e-07,
"loss": 2.9295,
"step": 290
},
{
"epoch": 1.811683320522675,
"grad_norm": 1.0560496189183655,
"learning_rate": 4.0179390925214333e-07,
"loss": 2.9191,
"step": 295
},
{
"epoch": 1.8424289008455035,
"grad_norm": 0.9516031356529069,
"learning_rate": 3.8423872095273733e-07,
"loss": 2.9151,
"step": 300
},
{
"epoch": 1.8424289008455035,
"eval_loss": null,
"eval_runtime": 344.0766,
"eval_samples_per_second": 15.526,
"eval_steps_per_second": 0.971,
"step": 300
},
{
"epoch": 1.873174481168332,
"grad_norm": 0.909396889323808,
"learning_rate": 3.668330850701219e-07,
"loss": 2.9435,
"step": 305
},
{
"epoch": 1.9039200614911607,
"grad_norm": 0.9293115834232327,
"learning_rate": 3.4959948800850247e-07,
"loss": 2.9465,
"step": 310
},
{
"epoch": 1.9346656418139894,
"grad_norm": 1.067559496517317,
"learning_rate": 3.325601939145069e-07,
"loss": 2.9686,
"step": 315
},
{
"epoch": 1.9654112221368178,
"grad_norm": 1.1704277043479576,
"learning_rate": 3.15737215914054e-07,
"loss": 3.022,
"step": 320
},
{
"epoch": 1.9961568024596463,
"grad_norm": 0.9427205447892176,
"learning_rate": 2.9915228767351535e-07,
"loss": 2.9455,
"step": 325
}
],
"logging_steps": 5,
"max_steps": 486,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 102476595888128.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}