{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9953917050691244,
"eval_steps": 500,
"global_step": 72,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.013824884792626729,
"grad_norm": 34.963134765625,
"learning_rate": 5.0000000000000004e-08,
"loss": 2.5476,
"step": 1
},
{
"epoch": 0.027649769585253458,
"grad_norm": 35.32600021362305,
"learning_rate": 1.0000000000000001e-07,
"loss": 2.6058,
"step": 2
},
{
"epoch": 0.041474654377880185,
"grad_norm": 34.955448150634766,
"learning_rate": 1.5000000000000002e-07,
"loss": 2.5871,
"step": 3
},
{
"epoch": 0.055299539170506916,
"grad_norm": 35.09806442260742,
"learning_rate": 2.0000000000000002e-07,
"loss": 2.5912,
"step": 4
},
{
"epoch": 0.06912442396313365,
"grad_norm": 34.88739776611328,
"learning_rate": 2.5000000000000004e-07,
"loss": 2.592,
"step": 5
},
{
"epoch": 0.08294930875576037,
"grad_norm": 34.84288024902344,
"learning_rate": 3.0000000000000004e-07,
"loss": 2.5609,
"step": 6
},
{
"epoch": 0.0967741935483871,
"grad_norm": 35.0090217590332,
"learning_rate": 3.5000000000000004e-07,
"loss": 2.5651,
"step": 7
},
{
"epoch": 0.11059907834101383,
"grad_norm": 35.03983688354492,
"learning_rate": 4.0000000000000003e-07,
"loss": 2.5437,
"step": 8
},
{
"epoch": 0.12442396313364056,
"grad_norm": 34.802833557128906,
"learning_rate": 4.5000000000000003e-07,
"loss": 2.5448,
"step": 9
},
{
"epoch": 0.1382488479262673,
"grad_norm": 34.5220947265625,
"learning_rate": 5.000000000000001e-07,
"loss": 2.504,
"step": 10
},
{
"epoch": 0.15207373271889402,
"grad_norm": 34.401580810546875,
"learning_rate": 5.5e-07,
"loss": 2.4814,
"step": 11
},
{
"epoch": 0.16589861751152074,
"grad_norm": 33.76997375488281,
"learning_rate": 6.000000000000001e-07,
"loss": 2.4282,
"step": 12
},
{
"epoch": 0.17972350230414746,
"grad_norm": 33.53415298461914,
"learning_rate": 6.5e-07,
"loss": 2.4216,
"step": 13
},
{
"epoch": 0.1935483870967742,
"grad_norm": 32.401580810546875,
"learning_rate": 7.000000000000001e-07,
"loss": 2.3362,
"step": 14
},
{
"epoch": 0.2073732718894009,
"grad_norm": 33.636661529541016,
"learning_rate": 7.5e-07,
"loss": 2.2978,
"step": 15
},
{
"epoch": 0.22119815668202766,
"grad_norm": 31.3782901763916,
"learning_rate": 8.000000000000001e-07,
"loss": 2.1358,
"step": 16
},
{
"epoch": 0.2350230414746544,
"grad_norm": 30.72391700744629,
"learning_rate": 8.500000000000001e-07,
"loss": 2.0652,
"step": 17
},
{
"epoch": 0.2488479262672811,
"grad_norm": 30.817584991455078,
"learning_rate": 9.000000000000001e-07,
"loss": 2.0115,
"step": 18
},
{
"epoch": 0.2626728110599078,
"grad_norm": 29.683996200561523,
"learning_rate": 9.500000000000001e-07,
"loss": 1.8668,
"step": 19
},
{
"epoch": 0.2764976958525346,
"grad_norm": 29.506683349609375,
"learning_rate": 1.0000000000000002e-06,
"loss": 1.7796,
"step": 20
},
{
"epoch": 0.2903225806451613,
"grad_norm": 27.55340003967285,
"learning_rate": 1.0500000000000001e-06,
"loss": 1.5656,
"step": 21
},
{
"epoch": 0.30414746543778803,
"grad_norm": 27.78036880493164,
"learning_rate": 1.1e-06,
"loss": 1.5112,
"step": 22
},
{
"epoch": 0.31797235023041476,
"grad_norm": 26.36115264892578,
"learning_rate": 1.1500000000000002e-06,
"loss": 1.3283,
"step": 23
},
{
"epoch": 0.3317972350230415,
"grad_norm": 25.388761520385742,
"learning_rate": 1.2000000000000002e-06,
"loss": 1.137,
"step": 24
},
{
"epoch": 0.3456221198156682,
"grad_norm": 25.21432876586914,
"learning_rate": 1.25e-06,
"loss": 0.9867,
"step": 25
},
{
"epoch": 0.35944700460829493,
"grad_norm": 24.924489974975586,
"learning_rate": 1.3e-06,
"loss": 0.7122,
"step": 26
},
{
"epoch": 0.37327188940092165,
"grad_norm": 21.881420135498047,
"learning_rate": 1.3500000000000002e-06,
"loss": 0.4952,
"step": 27
},
{
"epoch": 0.3870967741935484,
"grad_norm": 17.67154884338379,
"learning_rate": 1.4000000000000001e-06,
"loss": 0.3602,
"step": 28
},
{
"epoch": 0.4009216589861751,
"grad_norm": 11.489490509033203,
"learning_rate": 1.45e-06,
"loss": 0.2432,
"step": 29
},
{
"epoch": 0.4147465437788018,
"grad_norm": 7.622438907623291,
"learning_rate": 1.5e-06,
"loss": 0.189,
"step": 30
},
{
"epoch": 0.42857142857142855,
"grad_norm": 4.340638637542725,
"learning_rate": 1.5500000000000002e-06,
"loss": 0.1302,
"step": 31
},
{
"epoch": 0.4423963133640553,
"grad_norm": 3.079514980316162,
"learning_rate": 1.6000000000000001e-06,
"loss": 0.1075,
"step": 32
},
{
"epoch": 0.45622119815668205,
"grad_norm": 2.355943441390991,
"learning_rate": 1.6500000000000003e-06,
"loss": 0.0998,
"step": 33
},
{
"epoch": 0.4700460829493088,
"grad_norm": 1.9480725526809692,
"learning_rate": 1.7000000000000002e-06,
"loss": 0.0926,
"step": 34
},
{
"epoch": 0.4838709677419355,
"grad_norm": 1.8598166704177856,
"learning_rate": 1.75e-06,
"loss": 0.0733,
"step": 35
},
{
"epoch": 0.4976958525345622,
"grad_norm": 0.9892730712890625,
"learning_rate": 1.8000000000000001e-06,
"loss": 0.0664,
"step": 36
},
{
"epoch": 0.511520737327189,
"grad_norm": 0.8992418050765991,
"learning_rate": 1.85e-06,
"loss": 0.0709,
"step": 37
},
{
"epoch": 0.5253456221198156,
"grad_norm": 0.7340101599693298,
"learning_rate": 1.9000000000000002e-06,
"loss": 0.0535,
"step": 38
},
{
"epoch": 0.5391705069124424,
"grad_norm": 0.7032178044319153,
"learning_rate": 1.9500000000000004e-06,
"loss": 0.0573,
"step": 39
},
{
"epoch": 0.5529953917050692,
"grad_norm": 0.6449429392814636,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.0576,
"step": 40
},
{
"epoch": 0.5668202764976958,
"grad_norm": 0.6358592510223389,
"learning_rate": 2.05e-06,
"loss": 0.0502,
"step": 41
},
{
"epoch": 0.5806451612903226,
"grad_norm": 0.572036623954773,
"learning_rate": 2.1000000000000002e-06,
"loss": 0.0556,
"step": 42
},
{
"epoch": 0.5944700460829493,
"grad_norm": 0.6538863778114319,
"learning_rate": 2.15e-06,
"loss": 0.0556,
"step": 43
},
{
"epoch": 0.6082949308755761,
"grad_norm": 0.3532159626483917,
"learning_rate": 2.2e-06,
"loss": 0.0452,
"step": 44
},
{
"epoch": 0.6221198156682027,
"grad_norm": 0.4853012263774872,
"learning_rate": 2.25e-06,
"loss": 0.0471,
"step": 45
},
{
"epoch": 0.6359447004608295,
"grad_norm": 0.4761648178100586,
"learning_rate": 2.3000000000000004e-06,
"loss": 0.0469,
"step": 46
},
{
"epoch": 0.6497695852534562,
"grad_norm": 0.6094638109207153,
"learning_rate": 2.35e-06,
"loss": 0.047,
"step": 47
},
{
"epoch": 0.663594470046083,
"grad_norm": 0.5211306214332581,
"learning_rate": 2.4000000000000003e-06,
"loss": 0.0402,
"step": 48
},
{
"epoch": 0.6774193548387096,
"grad_norm": 0.2997778356075287,
"learning_rate": 2.4500000000000003e-06,
"loss": 0.0425,
"step": 49
},
{
"epoch": 0.6912442396313364,
"grad_norm": 0.37834689021110535,
"learning_rate": 2.5e-06,
"loss": 0.0446,
"step": 50
},
{
"epoch": 0.7050691244239631,
"grad_norm": 0.31011995673179626,
"learning_rate": 2.55e-06,
"loss": 0.0406,
"step": 51
},
{
"epoch": 0.7188940092165899,
"grad_norm": 0.3113131523132324,
"learning_rate": 2.6e-06,
"loss": 0.0368,
"step": 52
},
{
"epoch": 0.7327188940092166,
"grad_norm": 0.5685846209526062,
"learning_rate": 2.6500000000000005e-06,
"loss": 0.0389,
"step": 53
},
{
"epoch": 0.7465437788018433,
"grad_norm": 0.29334983229637146,
"learning_rate": 2.7000000000000004e-06,
"loss": 0.0423,
"step": 54
},
{
"epoch": 0.7603686635944701,
"grad_norm": 0.5776861906051636,
"learning_rate": 2.7500000000000004e-06,
"loss": 0.0399,
"step": 55
},
{
"epoch": 0.7741935483870968,
"grad_norm": 0.35423165559768677,
"learning_rate": 2.8000000000000003e-06,
"loss": 0.0357,
"step": 56
},
{
"epoch": 0.7880184331797235,
"grad_norm": 0.37902742624282837,
"learning_rate": 2.85e-06,
"loss": 0.0407,
"step": 57
},
{
"epoch": 0.8018433179723502,
"grad_norm": 0.26948878169059753,
"learning_rate": 2.9e-06,
"loss": 0.0351,
"step": 58
},
{
"epoch": 0.815668202764977,
"grad_norm": 0.35688117146492004,
"learning_rate": 2.95e-06,
"loss": 0.0377,
"step": 59
},
{
"epoch": 0.8294930875576036,
"grad_norm": 0.5287911891937256,
"learning_rate": 3e-06,
"loss": 0.0377,
"step": 60
},
{
"epoch": 0.8433179723502304,
"grad_norm": 0.2950785756111145,
"learning_rate": 3.05e-06,
"loss": 0.0361,
"step": 61
},
{
"epoch": 0.8571428571428571,
"grad_norm": 0.2789723575115204,
"learning_rate": 3.1000000000000004e-06,
"loss": 0.032,
"step": 62
},
{
"epoch": 0.8709677419354839,
"grad_norm": 0.2802198529243469,
"learning_rate": 3.1500000000000003e-06,
"loss": 0.0394,
"step": 63
},
{
"epoch": 0.8847926267281107,
"grad_norm": 0.286981463432312,
"learning_rate": 3.2000000000000003e-06,
"loss": 0.033,
"step": 64
},
{
"epoch": 0.8986175115207373,
"grad_norm": 0.37392762303352356,
"learning_rate": 3.2500000000000002e-06,
"loss": 0.0335,
"step": 65
},
{
"epoch": 0.9124423963133641,
"grad_norm": 0.25025588274002075,
"learning_rate": 3.3000000000000006e-06,
"loss": 0.0311,
"step": 66
},
{
"epoch": 0.9262672811059908,
"grad_norm": 0.4292861521244049,
"learning_rate": 3.3500000000000005e-06,
"loss": 0.0362,
"step": 67
},
{
"epoch": 0.9400921658986175,
"grad_norm": 0.4717651307582855,
"learning_rate": 3.4000000000000005e-06,
"loss": 0.0303,
"step": 68
},
{
"epoch": 0.9539170506912442,
"grad_norm": 0.49291253089904785,
"learning_rate": 3.45e-06,
"loss": 0.0352,
"step": 69
},
{
"epoch": 0.967741935483871,
"grad_norm": 0.3729935586452484,
"learning_rate": 3.5e-06,
"loss": 0.0297,
"step": 70
},
{
"epoch": 0.9815668202764977,
"grad_norm": 0.27150583267211914,
"learning_rate": 3.5500000000000003e-06,
"loss": 0.0326,
"step": 71
},
{
"epoch": 0.9953917050691244,
"grad_norm": 0.34516096115112305,
"learning_rate": 3.6000000000000003e-06,
"loss": 0.0336,
"step": 72
}
],
"logging_steps": 1,
"max_steps": 432,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 72,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 4.631891949769458e+18,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}