{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9782608695652173,
"eval_steps": 500,
"global_step": 122,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.016304347826086956,
"grad_norm": 45.78702163696289,
"learning_rate": 5.0000000000000004e-08,
"loss": 4.8845,
"step": 1
},
{
"epoch": 0.03260869565217391,
"grad_norm": 45.26464080810547,
"learning_rate": 1.0000000000000001e-07,
"loss": 4.8307,
"step": 2
},
{
"epoch": 0.04891304347826087,
"grad_norm": 45.96015167236328,
"learning_rate": 1.5000000000000002e-07,
"loss": 4.8322,
"step": 3
},
{
"epoch": 0.06521739130434782,
"grad_norm": 45.86792755126953,
"learning_rate": 2.0000000000000002e-07,
"loss": 4.8841,
"step": 4
},
{
"epoch": 0.08152173913043478,
"grad_norm": 44.19926071166992,
"learning_rate": 2.5000000000000004e-07,
"loss": 4.8013,
"step": 5
},
{
"epoch": 0.09782608695652174,
"grad_norm": 45.76038360595703,
"learning_rate": 3.0000000000000004e-07,
"loss": 4.8112,
"step": 6
},
{
"epoch": 0.11413043478260869,
"grad_norm": 45.73483657836914,
"learning_rate": 3.5000000000000004e-07,
"loss": 4.8235,
"step": 7
},
{
"epoch": 0.13043478260869565,
"grad_norm": 46.2863655090332,
"learning_rate": 4.0000000000000003e-07,
"loss": 4.895,
"step": 8
},
{
"epoch": 0.14673913043478262,
"grad_norm": 44.06720733642578,
"learning_rate": 4.5000000000000003e-07,
"loss": 4.7696,
"step": 9
},
{
"epoch": 0.16304347826086957,
"grad_norm": 43.8859977722168,
"learning_rate": 5.000000000000001e-07,
"loss": 4.7098,
"step": 10
},
{
"epoch": 0.1793478260869565,
"grad_norm": 44.059043884277344,
"learning_rate": 5.5e-07,
"loss": 4.7519,
"step": 11
},
{
"epoch": 0.1956521739130435,
"grad_norm": 43.980201721191406,
"learning_rate": 6.000000000000001e-07,
"loss": 4.7465,
"step": 12
},
{
"epoch": 0.21195652173913043,
"grad_norm": 40.88735580444336,
"learning_rate": 6.5e-07,
"loss": 4.5416,
"step": 13
},
{
"epoch": 0.22826086956521738,
"grad_norm": 39.40138244628906,
"learning_rate": 7.000000000000001e-07,
"loss": 4.5273,
"step": 14
},
{
"epoch": 0.24456521739130435,
"grad_norm": 37.21616744995117,
"learning_rate": 7.5e-07,
"loss": 4.4206,
"step": 15
},
{
"epoch": 0.2608695652173913,
"grad_norm": 35.060447692871094,
"learning_rate": 8.000000000000001e-07,
"loss": 4.3318,
"step": 16
},
{
"epoch": 0.27717391304347827,
"grad_norm": 30.492183685302734,
"learning_rate": 8.500000000000001e-07,
"loss": 4.0841,
"step": 17
},
{
"epoch": 0.29347826086956524,
"grad_norm": 28.49239730834961,
"learning_rate": 9.000000000000001e-07,
"loss": 4.0979,
"step": 18
},
{
"epoch": 0.30978260869565216,
"grad_norm": 24.898632049560547,
"learning_rate": 9.500000000000001e-07,
"loss": 3.8752,
"step": 19
},
{
"epoch": 0.32608695652173914,
"grad_norm": 22.521434783935547,
"learning_rate": 1.0000000000000002e-06,
"loss": 3.77,
"step": 20
},
{
"epoch": 0.3423913043478261,
"grad_norm": 20.311031341552734,
"learning_rate": 1.0500000000000001e-06,
"loss": 3.5973,
"step": 21
},
{
"epoch": 0.358695652173913,
"grad_norm": 19.707839965820312,
"learning_rate": 1.1e-06,
"loss": 3.6102,
"step": 22
},
{
"epoch": 0.375,
"grad_norm": 18.431489944458008,
"learning_rate": 1.1500000000000002e-06,
"loss": 3.4147,
"step": 23
},
{
"epoch": 0.391304347826087,
"grad_norm": 17.78904151916504,
"learning_rate": 1.2000000000000002e-06,
"loss": 3.2665,
"step": 24
},
{
"epoch": 0.4076086956521739,
"grad_norm": 18.014617919921875,
"learning_rate": 1.25e-06,
"loss": 3.1941,
"step": 25
},
{
"epoch": 0.42391304347826086,
"grad_norm": 17.8990478515625,
"learning_rate": 1.3e-06,
"loss": 3.0377,
"step": 26
},
{
"epoch": 0.44021739130434784,
"grad_norm": 18.60847282409668,
"learning_rate": 1.3500000000000002e-06,
"loss": 2.8489,
"step": 27
},
{
"epoch": 0.45652173913043476,
"grad_norm": 18.562536239624023,
"learning_rate": 1.4000000000000001e-06,
"loss": 2.6229,
"step": 28
},
{
"epoch": 0.47282608695652173,
"grad_norm": 19.00884246826172,
"learning_rate": 1.45e-06,
"loss": 2.4521,
"step": 29
},
{
"epoch": 0.4891304347826087,
"grad_norm": 20.419940948486328,
"learning_rate": 1.5e-06,
"loss": 2.3907,
"step": 30
},
{
"epoch": 0.5054347826086957,
"grad_norm": 21.181713104248047,
"learning_rate": 1.5500000000000002e-06,
"loss": 2.2032,
"step": 31
},
{
"epoch": 0.5217391304347826,
"grad_norm": 20.592416763305664,
"learning_rate": 1.6000000000000001e-06,
"loss": 2.004,
"step": 32
},
{
"epoch": 0.5380434782608695,
"grad_norm": 20.40924835205078,
"learning_rate": 1.6500000000000003e-06,
"loss": 1.8373,
"step": 33
},
{
"epoch": 0.5543478260869565,
"grad_norm": 18.38524627685547,
"learning_rate": 1.7000000000000002e-06,
"loss": 1.6218,
"step": 34
},
{
"epoch": 0.5706521739130435,
"grad_norm": 18.922391891479492,
"learning_rate": 1.75e-06,
"loss": 1.4708,
"step": 35
},
{
"epoch": 0.5869565217391305,
"grad_norm": 20.200698852539062,
"learning_rate": 1.8000000000000001e-06,
"loss": 1.3044,
"step": 36
},
{
"epoch": 0.6032608695652174,
"grad_norm": 20.70162582397461,
"learning_rate": 1.85e-06,
"loss": 1.1445,
"step": 37
},
{
"epoch": 0.6195652173913043,
"grad_norm": 18.18869400024414,
"learning_rate": 1.9000000000000002e-06,
"loss": 0.9504,
"step": 38
},
{
"epoch": 0.6358695652173914,
"grad_norm": 15.211593627929688,
"learning_rate": 1.9500000000000004e-06,
"loss": 0.8083,
"step": 39
},
{
"epoch": 0.6521739130434783,
"grad_norm": 12.71890640258789,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.6981,
"step": 40
},
{
"epoch": 0.6684782608695652,
"grad_norm": 10.869053840637207,
"learning_rate": 2.05e-06,
"loss": 0.6018,
"step": 41
},
{
"epoch": 0.6847826086956522,
"grad_norm": 9.5787353515625,
"learning_rate": 2.1000000000000002e-06,
"loss": 0.4899,
"step": 42
},
{
"epoch": 0.7010869565217391,
"grad_norm": 9.319293975830078,
"learning_rate": 2.15e-06,
"loss": 0.3986,
"step": 43
},
{
"epoch": 0.717391304347826,
"grad_norm": 9.404044151306152,
"learning_rate": 2.2e-06,
"loss": 0.3084,
"step": 44
},
{
"epoch": 0.7336956521739131,
"grad_norm": 7.995025634765625,
"learning_rate": 2.25e-06,
"loss": 0.2373,
"step": 45
},
{
"epoch": 0.75,
"grad_norm": 4.505397319793701,
"learning_rate": 2.3000000000000004e-06,
"loss": 0.1527,
"step": 46
},
{
"epoch": 0.7663043478260869,
"grad_norm": 2.5063579082489014,
"learning_rate": 2.35e-06,
"loss": 0.1097,
"step": 47
},
{
"epoch": 0.782608695652174,
"grad_norm": 1.5846028327941895,
"learning_rate": 2.4000000000000003e-06,
"loss": 0.102,
"step": 48
},
{
"epoch": 0.7989130434782609,
"grad_norm": 1.1286852359771729,
"learning_rate": 2.4500000000000003e-06,
"loss": 0.093,
"step": 49
},
{
"epoch": 0.8152173913043478,
"grad_norm": 1.0311343669891357,
"learning_rate": 2.5e-06,
"loss": 0.0778,
"step": 50
},
{
"epoch": 0.8315217391304348,
"grad_norm": 0.6458576917648315,
"learning_rate": 2.55e-06,
"loss": 0.0883,
"step": 51
},
{
"epoch": 0.8478260869565217,
"grad_norm": 0.727554202079773,
"learning_rate": 2.6e-06,
"loss": 0.0852,
"step": 52
},
{
"epoch": 0.8641304347826086,
"grad_norm": 0.619137167930603,
"learning_rate": 2.6500000000000005e-06,
"loss": 0.0697,
"step": 53
},
{
"epoch": 0.8804347826086957,
"grad_norm": 0.38241881132125854,
"learning_rate": 2.7000000000000004e-06,
"loss": 0.0665,
"step": 54
},
{
"epoch": 0.8967391304347826,
"grad_norm": 0.541621744632721,
"learning_rate": 2.7500000000000004e-06,
"loss": 0.0658,
"step": 55
},
{
"epoch": 0.9130434782608695,
"grad_norm": 0.3869657516479492,
"learning_rate": 2.8000000000000003e-06,
"loss": 0.0728,
"step": 56
},
{
"epoch": 0.9293478260869565,
"grad_norm": 0.3768727481365204,
"learning_rate": 2.85e-06,
"loss": 0.0741,
"step": 57
},
{
"epoch": 0.9456521739130435,
"grad_norm": 0.31400591135025024,
"learning_rate": 2.9e-06,
"loss": 0.0682,
"step": 58
},
{
"epoch": 0.9619565217391305,
"grad_norm": 0.3604981303215027,
"learning_rate": 2.95e-06,
"loss": 0.0652,
"step": 59
},
{
"epoch": 0.9782608695652174,
"grad_norm": 0.4383264482021332,
"learning_rate": 3e-06,
"loss": 0.0703,
"step": 60
},
{
"epoch": 0.9945652173913043,
"grad_norm": 0.310332328081131,
"learning_rate": 3.05e-06,
"loss": 0.0664,
"step": 61
},
{
"epoch": 1.0,
"grad_norm": 0.310332328081131,
"learning_rate": 3.1000000000000004e-06,
"loss": 0.0509,
"step": 62
},
{
"epoch": 1.016304347826087,
"grad_norm": 0.665212869644165,
"learning_rate": 3.1500000000000003e-06,
"loss": 0.0633,
"step": 63
},
{
"epoch": 1.0326086956521738,
"grad_norm": 0.3108278512954712,
"learning_rate": 3.2000000000000003e-06,
"loss": 0.0648,
"step": 64
},
{
"epoch": 1.048913043478261,
"grad_norm": 0.5986258387565613,
"learning_rate": 3.2500000000000002e-06,
"loss": 0.0769,
"step": 65
},
{
"epoch": 1.065217391304348,
"grad_norm": 0.6987417936325073,
"learning_rate": 3.3000000000000006e-06,
"loss": 0.0728,
"step": 66
},
{
"epoch": 1.0815217391304348,
"grad_norm": 0.36069774627685547,
"learning_rate": 3.3500000000000005e-06,
"loss": 0.0647,
"step": 67
},
{
"epoch": 1.0978260869565217,
"grad_norm": 0.37604954838752747,
"learning_rate": 3.4000000000000005e-06,
"loss": 0.0571,
"step": 68
},
{
"epoch": 1.1141304347826086,
"grad_norm": 0.2985791862010956,
"learning_rate": 3.45e-06,
"loss": 0.0629,
"step": 69
},
{
"epoch": 1.1304347826086956,
"grad_norm": 0.3454388380050659,
"learning_rate": 3.5e-06,
"loss": 0.0644,
"step": 70
},
{
"epoch": 1.1467391304347827,
"grad_norm": 0.3371462821960449,
"learning_rate": 3.5500000000000003e-06,
"loss": 0.0645,
"step": 71
},
{
"epoch": 1.1630434782608696,
"grad_norm": 0.27834194898605347,
"learning_rate": 3.6000000000000003e-06,
"loss": 0.0678,
"step": 72
},
{
"epoch": 1.1793478260869565,
"grad_norm": 0.26285555958747864,
"learning_rate": 3.65e-06,
"loss": 0.0586,
"step": 73
},
{
"epoch": 1.1956521739130435,
"grad_norm": 0.34152188897132874,
"learning_rate": 3.7e-06,
"loss": 0.061,
"step": 74
},
{
"epoch": 1.2119565217391304,
"grad_norm": 0.2939279079437256,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.0643,
"step": 75
},
{
"epoch": 1.2282608695652173,
"grad_norm": 0.395220547914505,
"learning_rate": 3.8000000000000005e-06,
"loss": 0.0644,
"step": 76
},
{
"epoch": 1.2445652173913044,
"grad_norm": 0.29400259256362915,
"learning_rate": 3.85e-06,
"loss": 0.0562,
"step": 77
},
{
"epoch": 1.2608695652173914,
"grad_norm": 0.25938117504119873,
"learning_rate": 3.900000000000001e-06,
"loss": 0.0658,
"step": 78
},
{
"epoch": 1.2771739130434783,
"grad_norm": 0.36941587924957275,
"learning_rate": 3.95e-06,
"loss": 0.0678,
"step": 79
},
{
"epoch": 1.2934782608695652,
"grad_norm": 0.26572781801223755,
"learning_rate": 4.000000000000001e-06,
"loss": 0.0588,
"step": 80
},
{
"epoch": 1.309782608695652,
"grad_norm": 0.22964634001255035,
"learning_rate": 4.05e-06,
"loss": 0.0612,
"step": 81
},
{
"epoch": 1.3260869565217392,
"grad_norm": 0.24455289542675018,
"learning_rate": 4.1e-06,
"loss": 0.059,
"step": 82
},
{
"epoch": 1.3423913043478262,
"grad_norm": 0.3925253748893738,
"learning_rate": 4.15e-06,
"loss": 0.0648,
"step": 83
},
{
"epoch": 1.358695652173913,
"grad_norm": 0.24822917580604553,
"learning_rate": 4.2000000000000004e-06,
"loss": 0.0618,
"step": 84
},
{
"epoch": 1.375,
"grad_norm": 0.2522635757923126,
"learning_rate": 4.25e-06,
"loss": 0.0568,
"step": 85
},
{
"epoch": 1.391304347826087,
"grad_norm": 0.2465311586856842,
"learning_rate": 4.3e-06,
"loss": 0.0613,
"step": 86
},
{
"epoch": 1.4076086956521738,
"grad_norm": 0.2514893412590027,
"learning_rate": 4.350000000000001e-06,
"loss": 0.0574,
"step": 87
},
{
"epoch": 1.4239130434782608,
"grad_norm": 0.2320777177810669,
"learning_rate": 4.4e-06,
"loss": 0.0502,
"step": 88
},
{
"epoch": 1.440217391304348,
"grad_norm": 0.2494516372680664,
"learning_rate": 4.450000000000001e-06,
"loss": 0.0685,
"step": 89
},
{
"epoch": 1.4565217391304348,
"grad_norm": 0.244571715593338,
"learning_rate": 4.5e-06,
"loss": 0.0588,
"step": 90
},
{
"epoch": 1.4728260869565217,
"grad_norm": 0.22765810787677765,
"learning_rate": 4.5500000000000005e-06,
"loss": 0.0591,
"step": 91
},
{
"epoch": 1.4891304347826086,
"grad_norm": 0.2349582016468048,
"learning_rate": 4.600000000000001e-06,
"loss": 0.0572,
"step": 92
},
{
"epoch": 1.5054347826086958,
"grad_norm": 0.228654682636261,
"learning_rate": 4.65e-06,
"loss": 0.0593,
"step": 93
},
{
"epoch": 1.5217391304347827,
"grad_norm": 0.3090372681617737,
"learning_rate": 4.7e-06,
"loss": 0.0592,
"step": 94
},
{
"epoch": 1.5380434782608696,
"grad_norm": 0.2116968333721161,
"learning_rate": 4.75e-06,
"loss": 0.0575,
"step": 95
},
{
"epoch": 1.5543478260869565,
"grad_norm": 0.32294484972953796,
"learning_rate": 4.800000000000001e-06,
"loss": 0.062,
"step": 96
},
{
"epoch": 1.5706521739130435,
"grad_norm": 0.35240596532821655,
"learning_rate": 4.85e-06,
"loss": 0.0561,
"step": 97
},
{
"epoch": 1.5869565217391304,
"grad_norm": 0.24272935092449188,
"learning_rate": 4.9000000000000005e-06,
"loss": 0.0591,
"step": 98
},
{
"epoch": 1.6032608695652173,
"grad_norm": 0.26952850818634033,
"learning_rate": 4.95e-06,
"loss": 0.0557,
"step": 99
},
{
"epoch": 1.6195652173913042,
"grad_norm": 0.2775789201259613,
"learning_rate": 5e-06,
"loss": 0.0624,
"step": 100
},
{
"epoch": 1.6358695652173914,
"grad_norm": 0.2162039428949356,
"learning_rate": 4.999825642177387e-06,
"loss": 0.0582,
"step": 101
},
{
"epoch": 1.6521739130434783,
"grad_norm": 0.38436195254325867,
"learning_rate": 4.999302593030069e-06,
"loss": 0.0618,
"step": 102
},
{
"epoch": 1.6684782608695652,
"grad_norm": 0.2860764265060425,
"learning_rate": 4.998430925516213e-06,
"loss": 0.0637,
"step": 103
},
{
"epoch": 1.6847826086956523,
"grad_norm": 0.2180222123861313,
"learning_rate": 4.99721076122146e-06,
"loss": 0.0565,
"step": 104
},
{
"epoch": 1.7010869565217392,
"grad_norm": 0.30026087164878845,
"learning_rate": 4.995642270341961e-06,
"loss": 0.0615,
"step": 105
},
{
"epoch": 1.7173913043478262,
"grad_norm": 0.27657821774482727,
"learning_rate": 4.99372567166064e-06,
"loss": 0.0661,
"step": 106
},
{
"epoch": 1.733695652173913,
"grad_norm": 0.23324432969093323,
"learning_rate": 4.991461232516675e-06,
"loss": 0.0563,
"step": 107
},
{
"epoch": 1.75,
"grad_norm": 0.2670609951019287,
"learning_rate": 4.98884926876821e-06,
"loss": 0.0671,
"step": 108
},
{
"epoch": 1.766304347826087,
"grad_norm": 0.2405894696712494,
"learning_rate": 4.9858901447482924e-06,
"loss": 0.0571,
"step": 109
},
{
"epoch": 1.7826086956521738,
"grad_norm": 0.23271754384040833,
"learning_rate": 4.982584273214061e-06,
"loss": 0.0598,
"step": 110
},
{
"epoch": 1.7989130434782608,
"grad_norm": 0.21662941575050354,
"learning_rate": 4.978932115289165e-06,
"loss": 0.0562,
"step": 111
},
{
"epoch": 1.8152173913043477,
"grad_norm": 0.27656564116477966,
"learning_rate": 4.974934180399447e-06,
"loss": 0.0531,
"step": 112
},
{
"epoch": 1.8315217391304348,
"grad_norm": 0.3287908434867859,
"learning_rate": 4.970591026201884e-06,
"loss": 0.0522,
"step": 113
},
{
"epoch": 1.8478260869565217,
"grad_norm": 0.1879669576883316,
"learning_rate": 4.965903258506806e-06,
"loss": 0.0625,
"step": 114
},
{
"epoch": 1.8641304347826086,
"grad_norm": 0.2798942029476166,
"learning_rate": 4.9608715311933865e-06,
"loss": 0.0546,
"step": 115
},
{
"epoch": 1.8804347826086958,
"grad_norm": 0.31130167841911316,
"learning_rate": 4.955496546118439e-06,
"loss": 0.0576,
"step": 116
},
{
"epoch": 1.8967391304347827,
"grad_norm": 0.23934200406074524,
"learning_rate": 4.949779053018519e-06,
"loss": 0.0522,
"step": 117
},
{
"epoch": 1.9130434782608696,
"grad_norm": 0.2684226632118225,
"learning_rate": 4.943719849405347e-06,
"loss": 0.0596,
"step": 118
},
{
"epoch": 1.9293478260869565,
"grad_norm": 0.31276896595954895,
"learning_rate": 4.937319780454559e-06,
"loss": 0.0609,
"step": 119
},
{
"epoch": 1.9456521739130435,
"grad_norm": 0.2928497791290283,
"learning_rate": 4.930579738887827e-06,
"loss": 0.0515,
"step": 120
},
{
"epoch": 1.9619565217391304,
"grad_norm": 0.25487199425697327,
"learning_rate": 4.923500664848327e-06,
"loss": 0.0565,
"step": 121
},
{
"epoch": 1.9782608695652173,
"grad_norm": 0.19076956808567047,
"learning_rate": 4.9160835457696075e-06,
"loss": 0.0541,
"step": 122
}
],
"logging_steps": 1,
"max_steps": 366,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 61,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.7161733923602432e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}