{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.9456521739130435,
"eval_steps": 500,
"global_step": 244,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.016304347826086956,
"grad_norm": 45.78702163696289,
"learning_rate": 5.0000000000000004e-08,
"loss": 4.8845,
"step": 1
},
{
"epoch": 0.03260869565217391,
"grad_norm": 45.26464080810547,
"learning_rate": 1.0000000000000001e-07,
"loss": 4.8307,
"step": 2
},
{
"epoch": 0.04891304347826087,
"grad_norm": 45.96015167236328,
"learning_rate": 1.5000000000000002e-07,
"loss": 4.8322,
"step": 3
},
{
"epoch": 0.06521739130434782,
"grad_norm": 45.86792755126953,
"learning_rate": 2.0000000000000002e-07,
"loss": 4.8841,
"step": 4
},
{
"epoch": 0.08152173913043478,
"grad_norm": 44.19926071166992,
"learning_rate": 2.5000000000000004e-07,
"loss": 4.8013,
"step": 5
},
{
"epoch": 0.09782608695652174,
"grad_norm": 45.76038360595703,
"learning_rate": 3.0000000000000004e-07,
"loss": 4.8112,
"step": 6
},
{
"epoch": 0.11413043478260869,
"grad_norm": 45.73483657836914,
"learning_rate": 3.5000000000000004e-07,
"loss": 4.8235,
"step": 7
},
{
"epoch": 0.13043478260869565,
"grad_norm": 46.2863655090332,
"learning_rate": 4.0000000000000003e-07,
"loss": 4.895,
"step": 8
},
{
"epoch": 0.14673913043478262,
"grad_norm": 44.06720733642578,
"learning_rate": 4.5000000000000003e-07,
"loss": 4.7696,
"step": 9
},
{
"epoch": 0.16304347826086957,
"grad_norm": 43.8859977722168,
"learning_rate": 5.000000000000001e-07,
"loss": 4.7098,
"step": 10
},
{
"epoch": 0.1793478260869565,
"grad_norm": 44.059043884277344,
"learning_rate": 5.5e-07,
"loss": 4.7519,
"step": 11
},
{
"epoch": 0.1956521739130435,
"grad_norm": 43.980201721191406,
"learning_rate": 6.000000000000001e-07,
"loss": 4.7465,
"step": 12
},
{
"epoch": 0.21195652173913043,
"grad_norm": 40.88735580444336,
"learning_rate": 6.5e-07,
"loss": 4.5416,
"step": 13
},
{
"epoch": 0.22826086956521738,
"grad_norm": 39.40138244628906,
"learning_rate": 7.000000000000001e-07,
"loss": 4.5273,
"step": 14
},
{
"epoch": 0.24456521739130435,
"grad_norm": 37.21616744995117,
"learning_rate": 7.5e-07,
"loss": 4.4206,
"step": 15
},
{
"epoch": 0.2608695652173913,
"grad_norm": 35.060447692871094,
"learning_rate": 8.000000000000001e-07,
"loss": 4.3318,
"step": 16
},
{
"epoch": 0.27717391304347827,
"grad_norm": 30.492183685302734,
"learning_rate": 8.500000000000001e-07,
"loss": 4.0841,
"step": 17
},
{
"epoch": 0.29347826086956524,
"grad_norm": 28.49239730834961,
"learning_rate": 9.000000000000001e-07,
"loss": 4.0979,
"step": 18
},
{
"epoch": 0.30978260869565216,
"grad_norm": 24.898632049560547,
"learning_rate": 9.500000000000001e-07,
"loss": 3.8752,
"step": 19
},
{
"epoch": 0.32608695652173914,
"grad_norm": 22.521434783935547,
"learning_rate": 1.0000000000000002e-06,
"loss": 3.77,
"step": 20
},
{
"epoch": 0.3423913043478261,
"grad_norm": 20.311031341552734,
"learning_rate": 1.0500000000000001e-06,
"loss": 3.5973,
"step": 21
},
{
"epoch": 0.358695652173913,
"grad_norm": 19.707839965820312,
"learning_rate": 1.1e-06,
"loss": 3.6102,
"step": 22
},
{
"epoch": 0.375,
"grad_norm": 18.431489944458008,
"learning_rate": 1.1500000000000002e-06,
"loss": 3.4147,
"step": 23
},
{
"epoch": 0.391304347826087,
"grad_norm": 17.78904151916504,
"learning_rate": 1.2000000000000002e-06,
"loss": 3.2665,
"step": 24
},
{
"epoch": 0.4076086956521739,
"grad_norm": 18.014617919921875,
"learning_rate": 1.25e-06,
"loss": 3.1941,
"step": 25
},
{
"epoch": 0.42391304347826086,
"grad_norm": 17.8990478515625,
"learning_rate": 1.3e-06,
"loss": 3.0377,
"step": 26
},
{
"epoch": 0.44021739130434784,
"grad_norm": 18.60847282409668,
"learning_rate": 1.3500000000000002e-06,
"loss": 2.8489,
"step": 27
},
{
"epoch": 0.45652173913043476,
"grad_norm": 18.562536239624023,
"learning_rate": 1.4000000000000001e-06,
"loss": 2.6229,
"step": 28
},
{
"epoch": 0.47282608695652173,
"grad_norm": 19.00884246826172,
"learning_rate": 1.45e-06,
"loss": 2.4521,
"step": 29
},
{
"epoch": 0.4891304347826087,
"grad_norm": 20.419940948486328,
"learning_rate": 1.5e-06,
"loss": 2.3907,
"step": 30
},
{
"epoch": 0.5054347826086957,
"grad_norm": 21.181713104248047,
"learning_rate": 1.5500000000000002e-06,
"loss": 2.2032,
"step": 31
},
{
"epoch": 0.5217391304347826,
"grad_norm": 20.592416763305664,
"learning_rate": 1.6000000000000001e-06,
"loss": 2.004,
"step": 32
},
{
"epoch": 0.5380434782608695,
"grad_norm": 20.40924835205078,
"learning_rate": 1.6500000000000003e-06,
"loss": 1.8373,
"step": 33
},
{
"epoch": 0.5543478260869565,
"grad_norm": 18.38524627685547,
"learning_rate": 1.7000000000000002e-06,
"loss": 1.6218,
"step": 34
},
{
"epoch": 0.5706521739130435,
"grad_norm": 18.922391891479492,
"learning_rate": 1.75e-06,
"loss": 1.4708,
"step": 35
},
{
"epoch": 0.5869565217391305,
"grad_norm": 20.200698852539062,
"learning_rate": 1.8000000000000001e-06,
"loss": 1.3044,
"step": 36
},
{
"epoch": 0.6032608695652174,
"grad_norm": 20.70162582397461,
"learning_rate": 1.85e-06,
"loss": 1.1445,
"step": 37
},
{
"epoch": 0.6195652173913043,
"grad_norm": 18.18869400024414,
"learning_rate": 1.9000000000000002e-06,
"loss": 0.9504,
"step": 38
},
{
"epoch": 0.6358695652173914,
"grad_norm": 15.211593627929688,
"learning_rate": 1.9500000000000004e-06,
"loss": 0.8083,
"step": 39
},
{
"epoch": 0.6521739130434783,
"grad_norm": 12.71890640258789,
"learning_rate": 2.0000000000000003e-06,
"loss": 0.6981,
"step": 40
},
{
"epoch": 0.6684782608695652,
"grad_norm": 10.869053840637207,
"learning_rate": 2.05e-06,
"loss": 0.6018,
"step": 41
},
{
"epoch": 0.6847826086956522,
"grad_norm": 9.5787353515625,
"learning_rate": 2.1000000000000002e-06,
"loss": 0.4899,
"step": 42
},
{
"epoch": 0.7010869565217391,
"grad_norm": 9.319293975830078,
"learning_rate": 2.15e-06,
"loss": 0.3986,
"step": 43
},
{
"epoch": 0.717391304347826,
"grad_norm": 9.404044151306152,
"learning_rate": 2.2e-06,
"loss": 0.3084,
"step": 44
},
{
"epoch": 0.7336956521739131,
"grad_norm": 7.995025634765625,
"learning_rate": 2.25e-06,
"loss": 0.2373,
"step": 45
},
{
"epoch": 0.75,
"grad_norm": 4.505397319793701,
"learning_rate": 2.3000000000000004e-06,
"loss": 0.1527,
"step": 46
},
{
"epoch": 0.7663043478260869,
"grad_norm": 2.5063579082489014,
"learning_rate": 2.35e-06,
"loss": 0.1097,
"step": 47
},
{
"epoch": 0.782608695652174,
"grad_norm": 1.5846028327941895,
"learning_rate": 2.4000000000000003e-06,
"loss": 0.102,
"step": 48
},
{
"epoch": 0.7989130434782609,
"grad_norm": 1.1286852359771729,
"learning_rate": 2.4500000000000003e-06,
"loss": 0.093,
"step": 49
},
{
"epoch": 0.8152173913043478,
"grad_norm": 1.0311343669891357,
"learning_rate": 2.5e-06,
"loss": 0.0778,
"step": 50
},
{
"epoch": 0.8315217391304348,
"grad_norm": 0.6458576917648315,
"learning_rate": 2.55e-06,
"loss": 0.0883,
"step": 51
},
{
"epoch": 0.8478260869565217,
"grad_norm": 0.727554202079773,
"learning_rate": 2.6e-06,
"loss": 0.0852,
"step": 52
},
{
"epoch": 0.8641304347826086,
"grad_norm": 0.619137167930603,
"learning_rate": 2.6500000000000005e-06,
"loss": 0.0697,
"step": 53
},
{
"epoch": 0.8804347826086957,
"grad_norm": 0.38241881132125854,
"learning_rate": 2.7000000000000004e-06,
"loss": 0.0665,
"step": 54
},
{
"epoch": 0.8967391304347826,
"grad_norm": 0.541621744632721,
"learning_rate": 2.7500000000000004e-06,
"loss": 0.0658,
"step": 55
},
{
"epoch": 0.9130434782608695,
"grad_norm": 0.3869657516479492,
"learning_rate": 2.8000000000000003e-06,
"loss": 0.0728,
"step": 56
},
{
"epoch": 0.9293478260869565,
"grad_norm": 0.3768727481365204,
"learning_rate": 2.85e-06,
"loss": 0.0741,
"step": 57
},
{
"epoch": 0.9456521739130435,
"grad_norm": 0.31400591135025024,
"learning_rate": 2.9e-06,
"loss": 0.0682,
"step": 58
},
{
"epoch": 0.9619565217391305,
"grad_norm": 0.3604981303215027,
"learning_rate": 2.95e-06,
"loss": 0.0652,
"step": 59
},
{
"epoch": 0.9782608695652174,
"grad_norm": 0.4383264482021332,
"learning_rate": 3e-06,
"loss": 0.0703,
"step": 60
},
{
"epoch": 0.9945652173913043,
"grad_norm": 0.310332328081131,
"learning_rate": 3.05e-06,
"loss": 0.0664,
"step": 61
},
{
"epoch": 1.0,
"grad_norm": 0.310332328081131,
"learning_rate": 3.1000000000000004e-06,
"loss": 0.0509,
"step": 62
},
{
"epoch": 1.016304347826087,
"grad_norm": 0.665212869644165,
"learning_rate": 3.1500000000000003e-06,
"loss": 0.0633,
"step": 63
},
{
"epoch": 1.0326086956521738,
"grad_norm": 0.3108278512954712,
"learning_rate": 3.2000000000000003e-06,
"loss": 0.0648,
"step": 64
},
{
"epoch": 1.048913043478261,
"grad_norm": 0.5986258387565613,
"learning_rate": 3.2500000000000002e-06,
"loss": 0.0769,
"step": 65
},
{
"epoch": 1.065217391304348,
"grad_norm": 0.6987417936325073,
"learning_rate": 3.3000000000000006e-06,
"loss": 0.0728,
"step": 66
},
{
"epoch": 1.0815217391304348,
"grad_norm": 0.36069774627685547,
"learning_rate": 3.3500000000000005e-06,
"loss": 0.0647,
"step": 67
},
{
"epoch": 1.0978260869565217,
"grad_norm": 0.37604954838752747,
"learning_rate": 3.4000000000000005e-06,
"loss": 0.0571,
"step": 68
},
{
"epoch": 1.1141304347826086,
"grad_norm": 0.2985791862010956,
"learning_rate": 3.45e-06,
"loss": 0.0629,
"step": 69
},
{
"epoch": 1.1304347826086956,
"grad_norm": 0.3454388380050659,
"learning_rate": 3.5e-06,
"loss": 0.0644,
"step": 70
},
{
"epoch": 1.1467391304347827,
"grad_norm": 0.3371462821960449,
"learning_rate": 3.5500000000000003e-06,
"loss": 0.0645,
"step": 71
},
{
"epoch": 1.1630434782608696,
"grad_norm": 0.27834194898605347,
"learning_rate": 3.6000000000000003e-06,
"loss": 0.0678,
"step": 72
},
{
"epoch": 1.1793478260869565,
"grad_norm": 0.26285555958747864,
"learning_rate": 3.65e-06,
"loss": 0.0586,
"step": 73
},
{
"epoch": 1.1956521739130435,
"grad_norm": 0.34152188897132874,
"learning_rate": 3.7e-06,
"loss": 0.061,
"step": 74
},
{
"epoch": 1.2119565217391304,
"grad_norm": 0.2939279079437256,
"learning_rate": 3.7500000000000005e-06,
"loss": 0.0643,
"step": 75
},
{
"epoch": 1.2282608695652173,
"grad_norm": 0.395220547914505,
"learning_rate": 3.8000000000000005e-06,
"loss": 0.0644,
"step": 76
},
{
"epoch": 1.2445652173913044,
"grad_norm": 0.29400259256362915,
"learning_rate": 3.85e-06,
"loss": 0.0562,
"step": 77
},
{
"epoch": 1.2608695652173914,
"grad_norm": 0.25938117504119873,
"learning_rate": 3.900000000000001e-06,
"loss": 0.0658,
"step": 78
},
{
"epoch": 1.2771739130434783,
"grad_norm": 0.36941587924957275,
"learning_rate": 3.95e-06,
"loss": 0.0678,
"step": 79
},
{
"epoch": 1.2934782608695652,
"grad_norm": 0.26572781801223755,
"learning_rate": 4.000000000000001e-06,
"loss": 0.0588,
"step": 80
},
{
"epoch": 1.309782608695652,
"grad_norm": 0.22964634001255035,
"learning_rate": 4.05e-06,
"loss": 0.0612,
"step": 81
},
{
"epoch": 1.3260869565217392,
"grad_norm": 0.24455289542675018,
"learning_rate": 4.1e-06,
"loss": 0.059,
"step": 82
},
{
"epoch": 1.3423913043478262,
"grad_norm": 0.3925253748893738,
"learning_rate": 4.15e-06,
"loss": 0.0648,
"step": 83
},
{
"epoch": 1.358695652173913,
"grad_norm": 0.24822917580604553,
"learning_rate": 4.2000000000000004e-06,
"loss": 0.0618,
"step": 84
},
{
"epoch": 1.375,
"grad_norm": 0.2522635757923126,
"learning_rate": 4.25e-06,
"loss": 0.0568,
"step": 85
},
{
"epoch": 1.391304347826087,
"grad_norm": 0.2465311586856842,
"learning_rate": 4.3e-06,
"loss": 0.0613,
"step": 86
},
{
"epoch": 1.4076086956521738,
"grad_norm": 0.2514893412590027,
"learning_rate": 4.350000000000001e-06,
"loss": 0.0574,
"step": 87
},
{
"epoch": 1.4239130434782608,
"grad_norm": 0.2320777177810669,
"learning_rate": 4.4e-06,
"loss": 0.0502,
"step": 88
},
{
"epoch": 1.440217391304348,
"grad_norm": 0.2494516372680664,
"learning_rate": 4.450000000000001e-06,
"loss": 0.0685,
"step": 89
},
{
"epoch": 1.4565217391304348,
"grad_norm": 0.244571715593338,
"learning_rate": 4.5e-06,
"loss": 0.0588,
"step": 90
},
{
"epoch": 1.4728260869565217,
"grad_norm": 0.22765810787677765,
"learning_rate": 4.5500000000000005e-06,
"loss": 0.0591,
"step": 91
},
{
"epoch": 1.4891304347826086,
"grad_norm": 0.2349582016468048,
"learning_rate": 4.600000000000001e-06,
"loss": 0.0572,
"step": 92
},
{
"epoch": 1.5054347826086958,
"grad_norm": 0.228654682636261,
"learning_rate": 4.65e-06,
"loss": 0.0593,
"step": 93
},
{
"epoch": 1.5217391304347827,
"grad_norm": 0.3090372681617737,
"learning_rate": 4.7e-06,
"loss": 0.0592,
"step": 94
},
{
"epoch": 1.5380434782608696,
"grad_norm": 0.2116968333721161,
"learning_rate": 4.75e-06,
"loss": 0.0575,
"step": 95
},
{
"epoch": 1.5543478260869565,
"grad_norm": 0.32294484972953796,
"learning_rate": 4.800000000000001e-06,
"loss": 0.062,
"step": 96
},
{
"epoch": 1.5706521739130435,
"grad_norm": 0.35240596532821655,
"learning_rate": 4.85e-06,
"loss": 0.0561,
"step": 97
},
{
"epoch": 1.5869565217391304,
"grad_norm": 0.24272935092449188,
"learning_rate": 4.9000000000000005e-06,
"loss": 0.0591,
"step": 98
},
{
"epoch": 1.6032608695652173,
"grad_norm": 0.26952850818634033,
"learning_rate": 4.95e-06,
"loss": 0.0557,
"step": 99
},
{
"epoch": 1.6195652173913042,
"grad_norm": 0.2775789201259613,
"learning_rate": 5e-06,
"loss": 0.0624,
"step": 100
},
{
"epoch": 1.6358695652173914,
"grad_norm": 0.2162039428949356,
"learning_rate": 4.999825642177387e-06,
"loss": 0.0582,
"step": 101
},
{
"epoch": 1.6521739130434783,
"grad_norm": 0.38436195254325867,
"learning_rate": 4.999302593030069e-06,
"loss": 0.0618,
"step": 102
},
{
"epoch": 1.6684782608695652,
"grad_norm": 0.2860764265060425,
"learning_rate": 4.998430925516213e-06,
"loss": 0.0637,
"step": 103
},
{
"epoch": 1.6847826086956523,
"grad_norm": 0.2180222123861313,
"learning_rate": 4.99721076122146e-06,
"loss": 0.0565,
"step": 104
},
{
"epoch": 1.7010869565217392,
"grad_norm": 0.30026087164878845,
"learning_rate": 4.995642270341961e-06,
"loss": 0.0615,
"step": 105
},
{
"epoch": 1.7173913043478262,
"grad_norm": 0.27657821774482727,
"learning_rate": 4.99372567166064e-06,
"loss": 0.0661,
"step": 106
},
{
"epoch": 1.733695652173913,
"grad_norm": 0.23324432969093323,
"learning_rate": 4.991461232516675e-06,
"loss": 0.0563,
"step": 107
},
{
"epoch": 1.75,
"grad_norm": 0.2670609951019287,
"learning_rate": 4.98884926876821e-06,
"loss": 0.0671,
"step": 108
},
{
"epoch": 1.766304347826087,
"grad_norm": 0.2405894696712494,
"learning_rate": 4.9858901447482924e-06,
"loss": 0.0571,
"step": 109
},
{
"epoch": 1.7826086956521738,
"grad_norm": 0.23271754384040833,
"learning_rate": 4.982584273214061e-06,
"loss": 0.0598,
"step": 110
},
{
"epoch": 1.7989130434782608,
"grad_norm": 0.21662941575050354,
"learning_rate": 4.978932115289165e-06,
"loss": 0.0562,
"step": 111
},
{
"epoch": 1.8152173913043477,
"grad_norm": 0.27656564116477966,
"learning_rate": 4.974934180399447e-06,
"loss": 0.0531,
"step": 112
},
{
"epoch": 1.8315217391304348,
"grad_norm": 0.3287908434867859,
"learning_rate": 4.970591026201884e-06,
"loss": 0.0522,
"step": 113
},
{
"epoch": 1.8478260869565217,
"grad_norm": 0.1879669576883316,
"learning_rate": 4.965903258506806e-06,
"loss": 0.0625,
"step": 114
},
{
"epoch": 1.8641304347826086,
"grad_norm": 0.2798942029476166,
"learning_rate": 4.9608715311933865e-06,
"loss": 0.0546,
"step": 115
},
{
"epoch": 1.8804347826086958,
"grad_norm": 0.31130167841911316,
"learning_rate": 4.955496546118439e-06,
"loss": 0.0576,
"step": 116
},
{
"epoch": 1.8967391304347827,
"grad_norm": 0.23934200406074524,
"learning_rate": 4.949779053018519e-06,
"loss": 0.0522,
"step": 117
},
{
"epoch": 1.9130434782608696,
"grad_norm": 0.2684226632118225,
"learning_rate": 4.943719849405347e-06,
"loss": 0.0596,
"step": 118
},
{
"epoch": 1.9293478260869565,
"grad_norm": 0.31276896595954895,
"learning_rate": 4.937319780454559e-06,
"loss": 0.0609,
"step": 119
},
{
"epoch": 1.9456521739130435,
"grad_norm": 0.2928497791290283,
"learning_rate": 4.930579738887827e-06,
"loss": 0.0515,
"step": 120
},
{
"epoch": 1.9619565217391304,
"grad_norm": 0.25487199425697327,
"learning_rate": 4.923500664848327e-06,
"loss": 0.0565,
"step": 121
},
{
"epoch": 1.9782608695652173,
"grad_norm": 0.19076956808567047,
"learning_rate": 4.9160835457696075e-06,
"loss": 0.0541,
"step": 122
},
{
"epoch": 1.9945652173913042,
"grad_norm": 0.36202409863471985,
"learning_rate": 4.9083294162378545e-06,
"loss": 0.0572,
"step": 123
},
{
"epoch": 2.0,
"grad_norm": 0.36202409863471985,
"learning_rate": 4.900239357847582e-06,
"loss": 0.0578,
"step": 124
},
{
"epoch": 2.016304347826087,
"grad_norm": 0.5418670177459717,
"learning_rate": 4.891814499050762e-06,
"loss": 0.0504,
"step": 125
},
{
"epoch": 2.032608695652174,
"grad_norm": 0.24317026138305664,
"learning_rate": 4.883056014999423e-06,
"loss": 0.0557,
"step": 126
},
{
"epoch": 2.0489130434782608,
"grad_norm": 0.23591496050357819,
"learning_rate": 4.873965127381734e-06,
"loss": 0.0567,
"step": 127
},
{
"epoch": 2.0652173913043477,
"grad_norm": 0.26839157938957214,
"learning_rate": 4.864543104251587e-06,
"loss": 0.0489,
"step": 128
},
{
"epoch": 2.0815217391304346,
"grad_norm": 0.23912706971168518,
"learning_rate": 4.854791259851735e-06,
"loss": 0.0519,
"step": 129
},
{
"epoch": 2.097826086956522,
"grad_norm": 0.22615790367126465,
"learning_rate": 4.844710954430464e-06,
"loss": 0.0533,
"step": 130
},
{
"epoch": 2.114130434782609,
"grad_norm": 0.18881212174892426,
"learning_rate": 4.834303594051854e-06,
"loss": 0.0509,
"step": 131
},
{
"epoch": 2.130434782608696,
"grad_norm": 0.20288890600204468,
"learning_rate": 4.823570630399665e-06,
"loss": 0.0515,
"step": 132
},
{
"epoch": 2.1467391304347827,
"grad_norm": 0.2495969533920288,
"learning_rate": 4.812513560574832e-06,
"loss": 0.0593,
"step": 133
},
{
"epoch": 2.1630434782608696,
"grad_norm": 0.25076112151145935,
"learning_rate": 4.8011339268866505e-06,
"loss": 0.0546,
"step": 134
},
{
"epoch": 2.1793478260869565,
"grad_norm": 0.26761433482170105,
"learning_rate": 4.789433316637644e-06,
"loss": 0.0494,
"step": 135
},
{
"epoch": 2.1956521739130435,
"grad_norm": 0.22908158600330353,
"learning_rate": 4.777413361902152e-06,
"loss": 0.0556,
"step": 136
},
{
"epoch": 2.2119565217391304,
"grad_norm": 0.24989894032478333,
"learning_rate": 4.765075739298683e-06,
"loss": 0.0457,
"step": 137
},
{
"epoch": 2.2282608695652173,
"grad_norm": 0.19970546662807465,
"learning_rate": 4.752422169756048e-06,
"loss": 0.0483,
"step": 138
},
{
"epoch": 2.244565217391304,
"grad_norm": 0.2547377347946167,
"learning_rate": 4.739454418273314e-06,
"loss": 0.0562,
"step": 139
},
{
"epoch": 2.260869565217391,
"grad_norm": 0.30050888657569885,
"learning_rate": 4.726174293673612e-06,
"loss": 0.0474,
"step": 140
},
{
"epoch": 2.2771739130434785,
"grad_norm": 0.20995593070983887,
"learning_rate": 4.712583648351827e-06,
"loss": 0.0486,
"step": 141
},
{
"epoch": 2.2934782608695654,
"grad_norm": 0.24649159610271454,
"learning_rate": 4.698684378016223e-06,
"loss": 0.0521,
"step": 142
},
{
"epoch": 2.3097826086956523,
"grad_norm": 0.23422546684741974,
"learning_rate": 4.684478421424007e-06,
"loss": 0.0521,
"step": 143
},
{
"epoch": 2.3260869565217392,
"grad_norm": 0.21018289029598236,
"learning_rate": 4.669967760110908e-06,
"loss": 0.0503,
"step": 144
},
{
"epoch": 2.342391304347826,
"grad_norm": 0.20515775680541992,
"learning_rate": 4.655154418114774e-06,
"loss": 0.0545,
"step": 145
},
{
"epoch": 2.358695652173913,
"grad_norm": 0.2237028032541275,
"learning_rate": 4.6400404616932505e-06,
"loss": 0.052,
"step": 146
},
{
"epoch": 2.375,
"grad_norm": 0.24619129300117493,
"learning_rate": 4.624627999035564e-06,
"loss": 0.059,
"step": 147
},
{
"epoch": 2.391304347826087,
"grad_norm": 0.20323814451694489,
"learning_rate": 4.608919179968457e-06,
"loss": 0.0534,
"step": 148
},
{
"epoch": 2.407608695652174,
"grad_norm": 0.21665000915527344,
"learning_rate": 4.592916195656322e-06,
"loss": 0.0505,
"step": 149
},
{
"epoch": 2.4239130434782608,
"grad_norm": 0.21209590137004852,
"learning_rate": 4.576621278295558e-06,
"loss": 0.055,
"step": 150
},
{
"epoch": 2.4402173913043477,
"grad_norm": 0.19232606887817383,
"learning_rate": 4.5600367008032135e-06,
"loss": 0.0533,
"step": 151
},
{
"epoch": 2.4565217391304346,
"grad_norm": 0.22477513551712036,
"learning_rate": 4.543164776499945e-06,
"loss": 0.0544,
"step": 152
},
{
"epoch": 2.4728260869565215,
"grad_norm": 0.21310777962207794,
"learning_rate": 4.5260078587873416e-06,
"loss": 0.0534,
"step": 153
},
{
"epoch": 2.489130434782609,
"grad_norm": 0.2207050621509552,
"learning_rate": 4.508568340819654e-06,
"loss": 0.0516,
"step": 154
},
{
"epoch": 2.505434782608696,
"grad_norm": 0.22430899739265442,
"learning_rate": 4.490848655169986e-06,
"loss": 0.0552,
"step": 155
},
{
"epoch": 2.5217391304347827,
"grad_norm": 0.2665068805217743,
"learning_rate": 4.472851273490985e-06,
"loss": 0.0517,
"step": 156
},
{
"epoch": 2.5380434782608696,
"grad_norm": 0.20904089510440826,
"learning_rate": 4.454578706170075e-06,
"loss": 0.0477,
"step": 157
},
{
"epoch": 2.5543478260869565,
"grad_norm": 0.21974575519561768,
"learning_rate": 4.436033501979299e-06,
"loss": 0.0465,
"step": 158
},
{
"epoch": 2.5706521739130435,
"grad_norm": 0.2737596333026886,
"learning_rate": 4.417218247719794e-06,
"loss": 0.0443,
"step": 159
},
{
"epoch": 2.5869565217391304,
"grad_norm": 0.19713006913661957,
"learning_rate": 4.398135567860972e-06,
"loss": 0.0565,
"step": 160
},
{
"epoch": 2.6032608695652173,
"grad_norm": 0.24970802664756775,
"learning_rate": 4.378788124174441e-06,
"loss": 0.0473,
"step": 161
},
{
"epoch": 2.619565217391304,
"grad_norm": 0.20846787095069885,
"learning_rate": 4.359178615362725e-06,
"loss": 0.0511,
"step": 162
},
{
"epoch": 2.6358695652173916,
"grad_norm": 0.33354219794273376,
"learning_rate": 4.33930977668283e-06,
"loss": 0.0541,
"step": 163
},
{
"epoch": 2.6521739130434785,
"grad_norm": 0.23585955798625946,
"learning_rate": 4.319184379564716e-06,
"loss": 0.0534,
"step": 164
},
{
"epoch": 2.6684782608695654,
"grad_norm": 0.26923519372940063,
"learning_rate": 4.298805231224721e-06,
"loss": 0.052,
"step": 165
},
{
"epoch": 2.6847826086956523,
"grad_norm": 0.20466569066047668,
"learning_rate": 4.278175174273989e-06,
"loss": 0.0532,
"step": 166
},
{
"epoch": 2.7010869565217392,
"grad_norm": 0.27908194065093994,
"learning_rate": 4.257297086321967e-06,
"loss": 0.0497,
"step": 167
},
{
"epoch": 2.717391304347826,
"grad_norm": 0.29970937967300415,
"learning_rate": 4.236173879575022e-06,
"loss": 0.0502,
"step": 168
},
{
"epoch": 2.733695652173913,
"grad_norm": 0.2322796732187271,
"learning_rate": 4.2148085004302205e-06,
"loss": 0.054,
"step": 169
},
{
"epoch": 2.75,
"grad_norm": 0.1984764039516449,
"learning_rate": 4.1932039290643534e-06,
"loss": 0.0491,
"step": 170
},
{
"epoch": 2.766304347826087,
"grad_norm": 0.20822829008102417,
"learning_rate": 4.1713631790182366e-06,
"loss": 0.0555,
"step": 171
},
{
"epoch": 2.782608695652174,
"grad_norm": 0.2537862956523895,
"learning_rate": 4.149289296776369e-06,
"loss": 0.0479,
"step": 172
},
{
"epoch": 2.7989130434782608,
"grad_norm": 0.20386171340942383,
"learning_rate": 4.126985361341984e-06,
"loss": 0.049,
"step": 173
},
{
"epoch": 2.8152173913043477,
"grad_norm": 0.28424644470214844,
"learning_rate": 4.104454483807579e-06,
"loss": 0.0481,
"step": 174
},
{
"epoch": 2.8315217391304346,
"grad_norm": 0.2668056786060333,
"learning_rate": 4.0816998069209516e-06,
"loss": 0.0528,
"step": 175
},
{
"epoch": 2.8478260869565215,
"grad_norm": 0.2771640419960022,
"learning_rate": 4.058724504646834e-06,
"loss": 0.0673,
"step": 176
},
{
"epoch": 2.8641304347826084,
"grad_norm": 0.26568353176116943,
"learning_rate": 4.0355317817241705e-06,
"loss": 0.0438,
"step": 177
},
{
"epoch": 2.880434782608696,
"grad_norm": 0.21058987081050873,
"learning_rate": 4.012124873219094e-06,
"loss": 0.0499,
"step": 178
},
{
"epoch": 2.8967391304347827,
"grad_norm": 0.27742889523506165,
"learning_rate": 3.988507044073687e-06,
"loss": 0.047,
"step": 179
},
{
"epoch": 2.9130434782608696,
"grad_norm": 0.21633900701999664,
"learning_rate": 3.964681588650562e-06,
"loss": 0.0485,
"step": 180
},
{
"epoch": 2.9293478260869565,
"grad_norm": 0.21723268926143646,
"learning_rate": 3.940651830273342e-06,
"loss": 0.0535,
"step": 181
},
{
"epoch": 2.9456521739130435,
"grad_norm": 0.21648381650447845,
"learning_rate": 3.916421120763106e-06,
"loss": 0.0469,
"step": 182
},
{
"epoch": 2.9619565217391304,
"grad_norm": 0.2256888449192047,
"learning_rate": 3.891992839970855e-06,
"loss": 0.0469,
"step": 183
},
{
"epoch": 2.9782608695652173,
"grad_norm": 0.3136005699634552,
"learning_rate": 3.8673703953060685e-06,
"loss": 0.0517,
"step": 184
},
{
"epoch": 2.994565217391304,
"grad_norm": 0.2358432561159134,
"learning_rate": 3.8425572212614155e-06,
"loss": 0.0511,
"step": 185
},
{
"epoch": 3.0,
"grad_norm": 0.4989979565143585,
"learning_rate": 3.817556778933697e-06,
"loss": 0.0502,
"step": 186
},
{
"epoch": 3.016304347826087,
"grad_norm": 0.24074898660182953,
"learning_rate": 3.792372555541064e-06,
"loss": 0.0446,
"step": 187
},
{
"epoch": 3.032608695652174,
"grad_norm": 0.23359227180480957,
"learning_rate": 3.7670080639366e-06,
"loss": 0.0444,
"step": 188
},
{
"epoch": 3.0489130434782608,
"grad_norm": 0.21096405386924744,
"learning_rate": 3.741466842118327e-06,
"loss": 0.0492,
"step": 189
},
{
"epoch": 3.0652173913043477,
"grad_norm": 0.18678264319896698,
"learning_rate": 3.7157524527357036e-06,
"loss": 0.0458,
"step": 190
},
{
"epoch": 3.0815217391304346,
"grad_norm": 0.21836838126182556,
"learning_rate": 3.6898684825926845e-06,
"loss": 0.0471,
"step": 191
},
{
"epoch": 3.097826086956522,
"grad_norm": 0.21061812341213226,
"learning_rate": 3.663818542147409e-06,
"loss": 0.0469,
"step": 192
},
{
"epoch": 3.114130434782609,
"grad_norm": 0.2022838145494461,
"learning_rate": 3.6376062650085918e-06,
"loss": 0.0457,
"step": 193
},
{
"epoch": 3.130434782608696,
"grad_norm": 0.21767382323741913,
"learning_rate": 3.61123530742869e-06,
"loss": 0.0468,
"step": 194
},
{
"epoch": 3.1467391304347827,
"grad_norm": 0.2330484390258789,
"learning_rate": 3.5847093477938955e-06,
"loss": 0.0437,
"step": 195
},
{
"epoch": 3.1630434782608696,
"grad_norm": 0.21127860248088837,
"learning_rate": 3.5580320861110627e-06,
"loss": 0.0459,
"step": 196
},
{
"epoch": 3.1793478260869565,
"grad_norm": 0.20847401022911072,
"learning_rate": 3.5312072434915983e-06,
"loss": 0.0451,
"step": 197
},
{
"epoch": 3.1956521739130435,
"grad_norm": 0.20767691731452942,
"learning_rate": 3.5042385616324243e-06,
"loss": 0.0434,
"step": 198
},
{
"epoch": 3.2119565217391304,
"grad_norm": 0.21209655702114105,
"learning_rate": 3.477129802294057e-06,
"loss": 0.0418,
"step": 199
},
{
"epoch": 3.2282608695652173,
"grad_norm": 0.21463370323181152,
"learning_rate": 3.4498847467759e-06,
"loss": 0.0408,
"step": 200
},
{
"epoch": 3.244565217391304,
"grad_norm": 0.19667920470237732,
"learning_rate": 3.4225071953887977e-06,
"loss": 0.0476,
"step": 201
},
{
"epoch": 3.260869565217391,
"grad_norm": 0.2126225084066391,
"learning_rate": 3.3950009669249502e-06,
"loss": 0.0438,
"step": 202
},
{
"epoch": 3.2771739130434785,
"grad_norm": 0.21141058206558228,
"learning_rate": 3.3673698981252385e-06,
"loss": 0.0443,
"step": 203
},
{
"epoch": 3.2934782608695654,
"grad_norm": 0.3029347360134125,
"learning_rate": 3.3396178431440572e-06,
"loss": 0.0549,
"step": 204
},
{
"epoch": 3.3097826086956523,
"grad_norm": 0.20408247411251068,
"learning_rate": 3.3117486730117092e-06,
"loss": 0.0441,
"step": 205
},
{
"epoch": 3.3260869565217392,
"grad_norm": 0.19485828280448914,
"learning_rate": 3.283766275094454e-06,
"loss": 0.0484,
"step": 206
},
{
"epoch": 3.342391304347826,
"grad_norm": 0.23771652579307556,
"learning_rate": 3.255674552552267e-06,
"loss": 0.0478,
"step": 207
},
{
"epoch": 3.358695652173913,
"grad_norm": 0.21850208938121796,
"learning_rate": 3.227477423794412e-06,
"loss": 0.0487,
"step": 208
},
{
"epoch": 3.375,
"grad_norm": 0.23703360557556152,
"learning_rate": 3.1991788219328657e-06,
"loss": 0.0505,
"step": 209
},
{
"epoch": 3.391304347826087,
"grad_norm": 0.21323414146900177,
"learning_rate": 3.1707826942337124e-06,
"loss": 0.0398,
"step": 210
},
{
"epoch": 3.407608695652174,
"grad_norm": 0.28237420320510864,
"learning_rate": 3.142293001566548e-06,
"loss": 0.0429,
"step": 211
},
{
"epoch": 3.4239130434782608,
"grad_norm": 0.24118061363697052,
"learning_rate": 3.1137137178519983e-06,
"loss": 0.0441,
"step": 212
},
{
"epoch": 3.4402173913043477,
"grad_norm": 0.20298974215984344,
"learning_rate": 3.085048829507406e-06,
"loss": 0.0429,
"step": 213
},
{
"epoch": 3.4565217391304346,
"grad_norm": 0.2397722750902176,
"learning_rate": 3.056302334890786e-06,
"loss": 0.0487,
"step": 214
},
{
"epoch": 3.4728260869565215,
"grad_norm": 0.24425837397575378,
"learning_rate": 3.027478243743106e-06,
"loss": 0.0465,
"step": 215
},
{
"epoch": 3.489130434782609,
"grad_norm": 0.2465757578611374,
"learning_rate": 2.9985805766289815e-06,
"loss": 0.0415,
"step": 216
},
{
"epoch": 3.505434782608696,
"grad_norm": 0.2629953920841217,
"learning_rate": 2.9696133643758663e-06,
"loss": 0.0456,
"step": 217
},
{
"epoch": 3.5217391304347827,
"grad_norm": 0.23994584381580353,
"learning_rate": 2.940580647511805e-06,
"loss": 0.0467,
"step": 218
},
{
"epoch": 3.5380434782608696,
"grad_norm": 0.29615387320518494,
"learning_rate": 2.911486475701835e-06,
"loss": 0.0468,
"step": 219
},
{
"epoch": 3.5543478260869565,
"grad_norm": 0.28419968485832214,
"learning_rate": 2.8823349071831154e-06,
"loss": 0.0479,
"step": 220
},
{
"epoch": 3.5706521739130435,
"grad_norm": 0.2224627584218979,
"learning_rate": 2.853130008198855e-06,
"loss": 0.0436,
"step": 221
},
{
"epoch": 3.5869565217391304,
"grad_norm": 0.2682191729545593,
"learning_rate": 2.8238758524311316e-06,
"loss": 0.0439,
"step": 222
},
{
"epoch": 3.6032608695652173,
"grad_norm": 0.2094065397977829,
"learning_rate": 2.7945765204326664e-06,
"loss": 0.0472,
"step": 223
},
{
"epoch": 3.619565217391304,
"grad_norm": 0.22561469674110413,
"learning_rate": 2.7652360990576457e-06,
"loss": 0.0426,
"step": 224
},
{
"epoch": 3.6358695652173916,
"grad_norm": 0.1920589804649353,
"learning_rate": 2.735858680891656e-06,
"loss": 0.0465,
"step": 225
},
{
"epoch": 3.6521739130434785,
"grad_norm": 0.2274886816740036,
"learning_rate": 2.7064483636808314e-06,
"loss": 0.0471,
"step": 226
},
{
"epoch": 3.6684782608695654,
"grad_norm": 0.27154678106307983,
"learning_rate": 2.677009249760268e-06,
"loss": 0.0537,
"step": 227
},
{
"epoch": 3.6847826086956523,
"grad_norm": 0.19993992149829865,
"learning_rate": 2.6475454454818072e-06,
"loss": 0.0423,
"step": 228
},
{
"epoch": 3.7010869565217392,
"grad_norm": 0.26778337359428406,
"learning_rate": 2.6180610606412587e-06,
"loss": 0.0502,
"step": 229
},
{
"epoch": 3.717391304347826,
"grad_norm": 0.22767065465450287,
"learning_rate": 2.5885602079051354e-06,
"loss": 0.0447,
"step": 230
},
{
"epoch": 3.733695652173913,
"grad_norm": 0.2405000478029251,
"learning_rate": 2.559047002236995e-06,
"loss": 0.0463,
"step": 231
},
{
"epoch": 3.75,
"grad_norm": 0.23877452313899994,
"learning_rate": 2.529525560323462e-06,
"loss": 0.0472,
"step": 232
},
{
"epoch": 3.766304347826087,
"grad_norm": 0.2590119540691376,
"learning_rate": 2.5e-06,
"loss": 0.0474,
"step": 233
},
{
"epoch": 3.782608695652174,
"grad_norm": 0.2477138489484787,
"learning_rate": 2.470474439676539e-06,
"loss": 0.0461,
"step": 234
},
{
"epoch": 3.7989130434782608,
"grad_norm": 0.23399893939495087,
"learning_rate": 2.4409529977630052e-06,
"loss": 0.0446,
"step": 235
},
{
"epoch": 3.8152173913043477,
"grad_norm": 0.24198178946971893,
"learning_rate": 2.411439792094866e-06,
"loss": 0.0482,
"step": 236
},
{
"epoch": 3.8315217391304346,
"grad_norm": 0.22425131499767303,
"learning_rate": 2.381938939358742e-06,
"loss": 0.0477,
"step": 237
},
{
"epoch": 3.8478260869565215,
"grad_norm": 0.21231728792190552,
"learning_rate": 2.3524545545181936e-06,
"loss": 0.0437,
"step": 238
},
{
"epoch": 3.8641304347826084,
"grad_norm": 0.24554376304149628,
"learning_rate": 2.322990750239733e-06,
"loss": 0.0408,
"step": 239
},
{
"epoch": 3.880434782608696,
"grad_norm": 0.20181581377983093,
"learning_rate": 2.2935516363191695e-06,
"loss": 0.0465,
"step": 240
},
{
"epoch": 3.8967391304347827,
"grad_norm": 0.23197294771671295,
"learning_rate": 2.2641413191083445e-06,
"loss": 0.0437,
"step": 241
},
{
"epoch": 3.9130434782608696,
"grad_norm": 0.18796613812446594,
"learning_rate": 2.234763900942355e-06,
"loss": 0.0427,
"step": 242
},
{
"epoch": 3.9293478260869565,
"grad_norm": 0.2553354799747467,
"learning_rate": 2.2054234795673336e-06,
"loss": 0.0526,
"step": 243
},
{
"epoch": 3.9456521739130435,
"grad_norm": 0.20340357720851898,
"learning_rate": 2.1761241475688697e-06,
"loss": 0.0405,
"step": 244
}
],
"logging_steps": 1,
"max_steps": 366,
"num_input_tokens_seen": 0,
"num_train_epochs": 6,
"save_steps": 61,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.415545712190423e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}