{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9945652173913043,
"eval_steps": 500,
"global_step": 61,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.016304347826086956,
"grad_norm": 45.78702163696289,
"learning_rate": 5.0000000000000004e-08,
"loss": 4.8845,
"step": 1
},
{
"epoch": 0.03260869565217391,
"grad_norm": 45.26464080810547,
"learning_rate": 1.0000000000000001e-07,
"loss": 4.8307,
"step": 2
},
{
"epoch": 0.04891304347826087,
"grad_norm": 45.96015167236328,
"learning_rate": 1.5000000000000002e-07,
"loss": 4.8322,
"step": 3
},
{
"epoch": 0.06521739130434782,
"grad_norm": 45.86792755126953,
"learning_rate": 2.0000000000000002e-07,
"loss": 4.8841,
"step": 4
},
{
"epoch": 0.08152173913043478,
"grad_norm": 44.19926071166992,
"learning_rate": 2.5000000000000004e-07,
"loss": 4.8013,
"step": 5
},
{
"epoch": 0.09782608695652174,
"grad_norm": 45.76038360595703,
"learning_rate": 3.0000000000000004e-07,
"loss": 4.8112,
"step": 6
},
{
"epoch": 0.11413043478260869,
"grad_norm": 45.73483657836914,
"learning_rate": 3.5000000000000004e-07,
"loss": 4.8235,
"step": 7
},
{
"epoch": 0.13043478260869565,
"grad_norm": 46.2863655090332,
"learning_rate": 4.0000000000000003e-07,
"loss": 4.895,
"step": 8
},
{
"epoch": 0.14673913043478262,
"grad_norm": 44.06720733642578,
"learning_rate": 4.5000000000000003e-07,
"loss": 4.7696,
"step": 9
},
{
"epoch": 0.16304347826086957,
"grad_norm": 43.8859977722168,
"learning_rate": 5.000000000000001e-07,
"loss": 4.7098,
"step": 10
},
{
"epoch": 0.1793478260869565,
"grad_norm": 44.059043884277344,
"learning_rate": 5.5e-07,
"loss": 4.7519,
"step": 11
},
{
"epoch": 0.1956521739130435,
"grad_norm": 43.980201721191406,
"learning_rate": 6.000000000000001e-07,
"loss": 4.7465,
"step": 12
},
{
"epoch": 0.21195652173913043,
"grad_norm": 40.88735580444336,
"learning_rate": 6.5e-07,
"loss": 4.5416,
"step": 13
},
{
"epoch": 0.22826086956521738,
"grad_norm": 39.40138244628906,
"learning_rate": 7.000000000000001e-07,
"loss": 4.5273,
"step": 14
},
{
"epoch": 0.24456521739130435,
"grad_norm": 37.21616744995117,
"learning_rate": 7.5e-07,
"loss": 4.4206,
"step": 15
},
{
"epoch": 0.2608695652173913,
"grad_norm": 35.060447692871094,
"learning_rate": 8.000000000000001e-07,
"loss": 4.3318,
"step": 16
},
{
"epoch": 0.27717391304347827,
"grad_norm": 30.492183685302734,
"learning_rate": 8.500000000000001e-07,
"loss": 4.0841,
"step": 17
},
{
"epoch": 0.29347826086956524,
"grad_norm": 28.49239730834961,
"learning_rate": 9.000000000000001e-07,
"loss": 4.0979,
"step": 18
},
{
"epoch": 0.30978260869565216,
"grad_norm": 24.898632049560547,
"learning_rate": 9.500000000000001e-07,
"loss": 3.8752,
"step": 19
},
{
"epoch": 0.32608695652173914,
"grad_norm": 22.521434783935547,
"learning_rate": 1.0000000000000002e-06,
"loss": 3.77,
"step": 20
},
{
"epoch": 0.3423913043478261,
"grad_norm": 20.311031341552734,
"learning_rate": 1.0500000000000001e-06,
"loss": 3.5973,
"step": 21
},
{
"epoch": 0.358695652173913,
"grad_norm": 19.707839965820312,
"learning_rate": 1.1e-06,
"loss": 3.6102,
"step": 22
},
{
"epoch": 0.375,
"grad_norm": 18.431489944458008,
"learning_rate": 1.1500000000000002e-06,
"loss": 3.4147,
"step": 23
},
{
"epoch": 0.391304347826087,
"grad_norm": 17.78904151916504,
"learning_rate": 1.2000000000000002e-06,
"loss": 3.2665,
"step": 24
},
{
"epoch": 0.4076086956521739,
"grad_norm": 18.014617919921875,
"learning_rate": 1.25e-06,
"loss": 3.1941,
"step": 25
},
{
"epoch": 0.42391304347826086,
"grad_norm": 17.8990478515625,
"learning_rate": 1.3e-06,
"loss": 3.0377,
"step": 26
},
{
"epoch": 0.44021739130434784,
"grad_norm": 18.60847282409668,
"learning_rate": 1.3500000000000002e-06,
"loss": 2.8489,
"step": 27
},
{
"epoch": 0.45652173913043476,
"grad_norm": 18.562536239624023,
"learning_rate": 1.4000000000000001e-06,
"loss": 2.6229,
"step": 28
},
{
"epoch": 0.47282608695652173,
"grad_norm": 19.00884246826172,
"learning_rate": 1.45e-06,
"loss": 2.4521,
"step": 29
},
{
"epoch": 0.4891304347826087,
"grad_norm": 20.419940948486328,
"learning_rate": 1.5e-06,
"loss": 2.3907,
"step": 30
},
{
"epoch": 0.5054347826086957,
"grad_norm": 21.181713104248047,
"learning_rate": 1.5500000000000002e-06,
"loss": 2.2032,
"step": 31
},
{
"epoch": 0.5217391304347826,
"grad_norm": 20.592416763305664,
"learning_rate": 1.6000000000000001e-06,
"loss": 2.004,
"step": 32
},
{
"epoch": 0.5380434782608695,
"grad_norm": 20.40924835205078,
"learning_rate": 1.6500000000000003e-06,
"loss": 1.8373,
"step": 33
},
{
"epoch": 0.5543478260869565,
"grad_norm": 18.38524627685547,
"learning_rate": 1.7000000000000002e-06,
"loss": 1.6218,
"step": 34
},
{
"epoch": 0.5706521739130435,
"grad_norm": 18.922391891479492,
"learning_rate": 1.75e-06,
"loss": 1.4708,
"step": 35
},
{
"epoch": 0.5869565217391305,
"grad_norm": 20.200698852539062,
"learning_rate": 1.8000000000000001e-06,
"loss": 1.3044,
"step": 36
},
{
"epoch": 0.6032608695652174,
"grad_norm": 20.70162582397461,
"learning_rate": 1.85e-06,
"loss": 1.1445,
"step": 37
},
{
"epoch": 0.6195652173913043,
"grad_norm": 18.18869400024414,
"learning_rate": 1.9000000000000002e-06,
"loss": 0.9504,
"step": 38
},
{
"epoch": 0.6358695652173914,
"grad_norm": 15.211593627929688,
"learning_rate": 1.9500000000000004e-06,
"loss": 0.8083,
"step": 39
},
{
"epoch": 0.6521739130434783,
"grad_norm": 12.71890640258789,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.6981,
"step": 40
},
{
"epoch": 0.6684782608695652,
"grad_norm": 10.869053840637207,
"learning_rate": 2.05e-06,
"loss": 0.6018,
"step": 41
},
{
"epoch": 0.6847826086956522,
"grad_norm": 9.5787353515625,
"learning_rate": 2.1000000000000002e-06,
"loss": 0.4899,
"step": 42
},
{
"epoch": 0.7010869565217391,
"grad_norm": 9.319293975830078,
"learning_rate": 2.15e-06,
"loss": 0.3986,
"step": 43
},
{
"epoch": 0.717391304347826,
"grad_norm": 9.404044151306152,
"learning_rate": 2.2e-06,
"loss": 0.3084,
"step": 44
},
{
"epoch": 0.7336956521739131,
"grad_norm": 7.995025634765625,
"learning_rate": 2.25e-06,
"loss": 0.2373,
"step": 45
},
{
"epoch": 0.75,
"grad_norm": 4.505397319793701,
"learning_rate": 2.3000000000000004e-06,
"loss": 0.1527,
"step": 46
},
{
"epoch": 0.7663043478260869,
"grad_norm": 2.5063579082489014,
"learning_rate": 2.35e-06,
"loss": 0.1097,
"step": 47
},
{
"epoch": 0.782608695652174,
"grad_norm": 1.5846028327941895,
"learning_rate": 2.4000000000000003e-06,
"loss": 0.102,
"step": 48
},
{
"epoch": 0.7989130434782609,
"grad_norm": 1.1286852359771729,
"learning_rate": 2.4500000000000003e-06,
"loss": 0.093,
"step": 49
},
{
"epoch": 0.8152173913043478,
"grad_norm": 1.0311343669891357,
"learning_rate": 2.5e-06,
"loss": 0.0778,
"step": 50
},
{
"epoch": 0.8315217391304348,
"grad_norm": 0.6458576917648315,
"learning_rate": 2.55e-06,
"loss": 0.0883,
"step": 51
},
{
"epoch": 0.8478260869565217,
"grad_norm": 0.727554202079773,
"learning_rate": 2.6e-06,
"loss": 0.0852,
"step": 52
},
{
"epoch": 0.8641304347826086,
"grad_norm": 0.619137167930603,
"learning_rate": 2.6500000000000005e-06,
"loss": 0.0697,
"step": 53
},
{
"epoch": 0.8804347826086957,
"grad_norm": 0.38241881132125854,
"learning_rate": 2.7000000000000004e-06,
"loss": 0.0665,
"step": 54
},
{
"epoch": 0.8967391304347826,
"grad_norm": 0.541621744632721,
"learning_rate": 2.7500000000000004e-06,
"loss": 0.0658,
"step": 55
},
{
"epoch": 0.9130434782608695,
"grad_norm": 0.3869657516479492,
"learning_rate": 2.8000000000000003e-06,
"loss": 0.0728,
"step": 56
},
{
"epoch": 0.9293478260869565,
"grad_norm": 0.3768727481365204,
"learning_rate": 2.85e-06,
"loss": 0.0741,
"step": 57
},
{
"epoch": 0.9456521739130435,
"grad_norm": 0.31400591135025024,
"learning_rate": 2.9e-06,
"loss": 0.0682,
"step": 58
},
{
"epoch": 0.9619565217391305,
"grad_norm": 0.3604981303215027,
"learning_rate": 2.95e-06,
"loss": 0.0652,
"step": 59
},
{
"epoch": 0.9782608695652174,
"grad_norm": 0.4383264482021332,
"learning_rate": 3e-06,
"loss": 0.0703,
"step": 60
},
{
"epoch": 0.9945652173913043,
"grad_norm": 0.310332328081131,
"learning_rate": 3.05e-06,
"loss": 0.0664,
"step": 61
}
],
"logging_steps": 1,
"max_steps": 366,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 61,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.3664872324451533e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}