{
"best_metric": 1.6996692419052124,
"best_model_checkpoint": "./python_and_text_gpt2/checkpoint-200",
"epoch": 3.0,
"eval_steps": 50,
"global_step": 237,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0,
"eval_accuracy": 0.11267605633802817,
"eval_loss": 8.402484893798828,
"eval_runtime": 1.1735,
"eval_samples_per_second": 60.505,
"eval_steps_per_second": 7.67,
"num_input_tokens_seen": 0,
"step": 0
},
{
"epoch": 0.012658227848101266,
"grad_norm": null,
"learning_rate": 0.0,
"loss": 9.7173,
"num_input_tokens_seen": 8192,
"step": 1
},
{
"epoch": 0.06329113924050633,
"grad_norm": null,
"learning_rate": 0.0,
"loss": 7.5816,
"num_input_tokens_seen": 40960,
"step": 5
},
{
"epoch": 0.12658227848101267,
"grad_norm": null,
"learning_rate": 2.5e-06,
"loss": 10.0592,
"num_input_tokens_seen": 81920,
"step": 10
},
{
"epoch": 0.189873417721519,
"grad_norm": 218.1217803955078,
"learning_rate": 1.5000000000000002e-05,
"loss": 8.4642,
"num_input_tokens_seen": 122880,
"step": 15
},
{
"epoch": 0.25316455696202533,
"grad_norm": 296.8869934082031,
"learning_rate": 1.999153201672344e-05,
"loss": 5.9304,
"num_input_tokens_seen": 163840,
"step": 20
},
{
"epoch": 0.31645569620253167,
"grad_norm": 61.51658630371094,
"learning_rate": 1.9939835156657616e-05,
"loss": 2.9233,
"num_input_tokens_seen": 204800,
"step": 25
},
{
"epoch": 0.379746835443038,
"grad_norm": 40.88030242919922,
"learning_rate": 1.9841388720031727e-05,
"loss": 2.6465,
"num_input_tokens_seen": 245760,
"step": 30
},
{
"epoch": 0.4430379746835443,
"grad_norm": 47.827491760253906,
"learning_rate": 1.9696655725512933e-05,
"loss": 2.7127,
"num_input_tokens_seen": 286720,
"step": 35
},
{
"epoch": 0.5063291139240507,
"grad_norm": 20.431304931640625,
"learning_rate": 1.9506316889240027e-05,
"loss": 1.8677,
"num_input_tokens_seen": 327680,
"step": 40
},
{
"epoch": 0.569620253164557,
"grad_norm": 29.551557540893555,
"learning_rate": 1.9271267423242028e-05,
"loss": 2.5811,
"num_input_tokens_seen": 368640,
"step": 45
},
{
"epoch": 0.6329113924050633,
"grad_norm": 16.11282730102539,
"learning_rate": 1.8992612825027978e-05,
"loss": 2.0715,
"num_input_tokens_seen": 409600,
"step": 50
},
{
"epoch": 0.6329113924050633,
"eval_accuracy": 0.18309859154929578,
"eval_loss": 1.8793463706970215,
"eval_runtime": 0.9197,
"eval_samples_per_second": 77.198,
"eval_steps_per_second": 9.786,
"num_input_tokens_seen": 409600,
"step": 50
},
{
"epoch": 0.6962025316455697,
"grad_norm": 19.049699783325195,
"learning_rate": 1.8671663678150605e-05,
"loss": 1.8742,
"num_input_tokens_seen": 450560,
"step": 55
},
{
"epoch": 0.759493670886076,
"grad_norm": 13.142491340637207,
"learning_rate": 1.8309929488198012e-05,
"loss": 1.5956,
"num_input_tokens_seen": 491520,
"step": 60
},
{
"epoch": 0.8227848101265823,
"grad_norm": 19.499195098876953,
"learning_rate": 1.790911158320442e-05,
"loss": 2.0677,
"num_input_tokens_seen": 532480,
"step": 65
},
{
"epoch": 0.8860759493670886,
"grad_norm": 13.254035949707031,
"learning_rate": 1.7471095111871076e-05,
"loss": 1.8408,
"num_input_tokens_seen": 573440,
"step": 70
},
{
"epoch": 0.9493670886075949,
"grad_norm": 13.480071067810059,
"learning_rate": 1.6997940177231722e-05,
"loss": 1.73,
"num_input_tokens_seen": 614400,
"step": 75
},
{
"epoch": 1.0126582278481013,
"grad_norm": 16.7222900390625,
"learning_rate": 1.6491872147463307e-05,
"loss": 2.0164,
"num_input_tokens_seen": 655360,
"step": 80
},
{
"epoch": 1.0759493670886076,
"grad_norm": 20.90951156616211,
"learning_rate": 1.5955271189412596e-05,
"loss": 1.838,
"num_input_tokens_seen": 696320,
"step": 85
},
{
"epoch": 1.139240506329114,
"grad_norm": 20.80415153503418,
"learning_rate": 1.5390661074065257e-05,
"loss": 1.8573,
"num_input_tokens_seen": 737280,
"step": 90
},
{
"epoch": 1.2025316455696202,
"grad_norm": 21.243846893310547,
"learning_rate": 1.4800697306608043e-05,
"loss": 1.7605,
"num_input_tokens_seen": 778240,
"step": 95
},
{
"epoch": 1.2658227848101267,
"grad_norm": 9.170429229736328,
"learning_rate": 1.4188154636911524e-05,
"loss": 1.5544,
"num_input_tokens_seen": 819200,
"step": 100
},
{
"epoch": 1.2658227848101267,
"eval_accuracy": 0.2112676056338028,
"eval_loss": 1.8253157138824463,
"eval_runtime": 0.9307,
"eval_samples_per_second": 76.288,
"eval_steps_per_second": 9.67,
"num_input_tokens_seen": 819200,
"step": 100
},
{
"epoch": 1.3291139240506329,
"grad_norm": 11.778152465820312,
"learning_rate": 1.3555914009174665e-05,
"loss": 1.7069,
"num_input_tokens_seen": 860160,
"step": 105
},
{
"epoch": 1.3924050632911391,
"grad_norm": 17.081523895263672,
"learning_rate": 1.2906949012110456e-05,
"loss": 1.5678,
"num_input_tokens_seen": 901120,
"step": 110
},
{
"epoch": 1.4556962025316456,
"grad_norm": 8.573921203613281,
"learning_rate": 1.2244311893400761e-05,
"loss": 1.6158,
"num_input_tokens_seen": 942080,
"step": 115
},
{
"epoch": 1.518987341772152,
"grad_norm": 10.426651000976562,
"learning_rate": 1.1571119204198038e-05,
"loss": 1.6879,
"num_input_tokens_seen": 983040,
"step": 120
},
{
"epoch": 1.5822784810126582,
"grad_norm": 16.324264526367188,
"learning_rate": 1.0890537141191417e-05,
"loss": 1.7929,
"num_input_tokens_seen": 1024000,
"step": 125
},
{
"epoch": 1.6455696202531644,
"grad_norm": 16.601455688476562,
"learning_rate": 1.0205766655177217e-05,
"loss": 1.5528,
"num_input_tokens_seen": 1064960,
"step": 130
},
{
"epoch": 1.7088607594936709,
"grad_norm": 8.302882194519043,
"learning_rate": 9.520028396172002e-06,
"loss": 1.6046,
"num_input_tokens_seen": 1105920,
"step": 135
},
{
"epoch": 1.7721518987341773,
"grad_norm": 14.192557334899902,
"learning_rate": 8.836547565875227e-06,
"loss": 1.5904,
"num_input_tokens_seen": 1146880,
"step": 140
},
{
"epoch": 1.8354430379746836,
"grad_norm": 11.201437950134277,
"learning_rate": 8.158538748724139e-06,
"loss": 1.5891,
"num_input_tokens_seen": 1187840,
"step": 145
},
{
"epoch": 1.8987341772151898,
"grad_norm": 20.305402755737305,
"learning_rate": 7.489190792884338e-06,
"loss": 1.7587,
"num_input_tokens_seen": 1228800,
"step": 150
},
{
"epoch": 1.8987341772151898,
"eval_accuracy": 0.14084507042253522,
"eval_loss": 1.7602745294570923,
"eval_runtime": 0.938,
"eval_samples_per_second": 75.69,
"eval_steps_per_second": 9.594,
"num_input_tokens_seen": 1228800,
"step": 150
},
{
"epoch": 1.9620253164556962,
"grad_norm": 8.459460258483887,
"learning_rate": 6.831651812284652e-06,
"loss": 1.5431,
"num_input_tokens_seen": 1269760,
"step": 155
},
{
"epoch": 2.0253164556962027,
"grad_norm": 15.566550254821777,
"learning_rate": 6.18901438023543e-06,
"loss": 1.6141,
"num_input_tokens_seen": 1310720,
"step": 160
},
{
"epoch": 2.088607594936709,
"grad_norm": 15.074378967285156,
"learning_rate": 5.564300984268556e-06,
"loss": 1.7919,
"num_input_tokens_seen": 1351680,
"step": 165
},
{
"epoch": 2.151898734177215,
"grad_norm": 12.48886775970459,
"learning_rate": 4.960449810608705e-06,
"loss": 1.6343,
"num_input_tokens_seen": 1392640,
"step": 170
},
{
"epoch": 2.2151898734177213,
"grad_norm": 13.26822566986084,
"learning_rate": 4.380300925135138e-06,
"loss": 1.6183,
"num_input_tokens_seen": 1433600,
"step": 175
},
{
"epoch": 2.278481012658228,
"grad_norm": 16.358903884887695,
"learning_rate": 3.826582915828468e-06,
"loss": 1.5828,
"num_input_tokens_seen": 1474560,
"step": 180
},
{
"epoch": 2.3417721518987342,
"grad_norm": 9.541784286499023,
"learning_rate": 3.3019000595263573e-06,
"loss": 1.6376,
"num_input_tokens_seen": 1515520,
"step": 185
},
{
"epoch": 2.4050632911392404,
"grad_norm": 7.097592830657959,
"learning_rate": 2.8087200733462427e-06,
"loss": 1.6106,
"num_input_tokens_seen": 1556480,
"step": 190
},
{
"epoch": 2.4683544303797467,
"grad_norm": 21.592899322509766,
"learning_rate": 2.3493625083831217e-06,
"loss": 1.571,
"num_input_tokens_seen": 1597440,
"step": 195
},
{
"epoch": 2.5316455696202533,
"grad_norm": 11.701766967773438,
"learning_rate": 1.9259878402699704e-06,
"loss": 1.5982,
"num_input_tokens_seen": 1638400,
"step": 200
},
{
"epoch": 2.5316455696202533,
"eval_accuracy": 0.2112676056338028,
"eval_loss": 1.6996692419052124,
"eval_runtime": 0.9416,
"eval_samples_per_second": 75.403,
"eval_steps_per_second": 9.558,
"num_input_tokens_seen": 1638400,
"step": 200
},
{
"epoch": 2.5949367088607596,
"grad_norm": 11.591459274291992,
"learning_rate": 1.5405873079105083e-06,
"loss": 1.5655,
"num_input_tokens_seen": 1679360,
"step": 205
},
{
"epoch": 2.6582278481012658,
"grad_norm": 7.575652599334717,
"learning_rate": 1.1949735481754565e-06,
"loss": 1.5902,
"num_input_tokens_seen": 1720320,
"step": 210
},
{
"epoch": 2.721518987341772,
"grad_norm": 11.8199462890625,
"learning_rate": 8.907720706096223e-07,
"loss": 1.5801,
"num_input_tokens_seen": 1761280,
"step": 215
},
{
"epoch": 2.7848101265822782,
"grad_norm": 8.641218185424805,
"learning_rate": 6.294136122464701e-07,
"loss": 1.5755,
"num_input_tokens_seen": 1802240,
"step": 220
},
{
"epoch": 2.848101265822785,
"grad_norm": 13.54507064819336,
"learning_rate": 4.121274084874194e-07,
"loss": 1.6523,
"num_input_tokens_seen": 1843200,
"step": 225
},
{
"epoch": 2.911392405063291,
"grad_norm": 8.669917106628418,
"learning_rate": 2.399354116946584e-07,
"loss": 1.5707,
"num_input_tokens_seen": 1884160,
"step": 230
},
{
"epoch": 2.9746835443037973,
"grad_norm": 15.19518756866455,
"learning_rate": 1.1364748468886688e-07,
"loss": 1.5997,
"num_input_tokens_seen": 1925120,
"step": 235
},
{
"epoch": 3.0,
"num_input_tokens_seen": 1941504,
"step": 237,
"total_flos": 3521692676653056.0,
"train_loss": 2.3182458032535602,
"train_runtime": 206.8614,
"train_samples_per_second": 9.151,
"train_steps_per_second": 1.146
}
],
"logging_steps": 5,
"max_steps": 237,
"num_input_tokens_seen": 1941504,
"num_train_epochs": 3,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3521692676653056.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}