{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.34584125886218225,
"eval_steps": 500,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0006916825177243646,
"grad_norm": 0.21880632638931274,
"learning_rate": 0.0,
"loss": 2.5104,
"step": 1
},
{
"epoch": 0.0013833650354487291,
"grad_norm": 0.2225637435913086,
"learning_rate": 1.36986301369863e-05,
"loss": 2.7879,
"step": 2
},
{
"epoch": 0.0020750475531730937,
"grad_norm": 0.21454782783985138,
"learning_rate": 2.73972602739726e-05,
"loss": 2.8515,
"step": 3
},
{
"epoch": 0.0027667300708974583,
"grad_norm": 0.285408616065979,
"learning_rate": 4.1095890410958905e-05,
"loss": 2.8123,
"step": 4
},
{
"epoch": 0.0034584125886218224,
"grad_norm": 0.231473907828331,
"learning_rate": 5.47945205479452e-05,
"loss": 2.5181,
"step": 5
},
{
"epoch": 0.004150095106346187,
"grad_norm": 0.20668023824691772,
"learning_rate": 6.84931506849315e-05,
"loss": 2.066,
"step": 6
},
{
"epoch": 0.0048417776240705515,
"grad_norm": 0.3103766441345215,
"learning_rate": 8.219178082191781e-05,
"loss": 2.0647,
"step": 7
},
{
"epoch": 0.0055334601417949165,
"grad_norm": 0.3536206781864166,
"learning_rate": 9.58904109589041e-05,
"loss": 2.1267,
"step": 8
},
{
"epoch": 0.006225142659519281,
"grad_norm": 0.2078174203634262,
"learning_rate": 0.0001095890410958904,
"loss": 2.5859,
"step": 9
},
{
"epoch": 0.006916825177243645,
"grad_norm": 0.2702614367008209,
"learning_rate": 0.0001232876712328767,
"loss": 2.7732,
"step": 10
},
{
"epoch": 0.00760850769496801,
"grad_norm": 0.348145067691803,
"learning_rate": 0.000136986301369863,
"loss": 2.6776,
"step": 11
},
{
"epoch": 0.008300190212692375,
"grad_norm": 0.32872459292411804,
"learning_rate": 0.0001506849315068493,
"loss": 1.5802,
"step": 12
},
{
"epoch": 0.008991872730416739,
"grad_norm": 0.7062669992446899,
"learning_rate": 0.00016438356164383562,
"loss": 1.9955,
"step": 13
},
{
"epoch": 0.009683555248141103,
"grad_norm": 0.252165287733078,
"learning_rate": 0.00017808219178082192,
"loss": 2.9662,
"step": 14
},
{
"epoch": 0.010375237765865467,
"grad_norm": 0.29550454020500183,
"learning_rate": 0.0001917808219178082,
"loss": 2.3048,
"step": 15
},
{
"epoch": 0.011066920283589833,
"grad_norm": 0.3947546184062958,
"learning_rate": 0.0002054794520547945,
"loss": 2.8061,
"step": 16
},
{
"epoch": 0.011758602801314197,
"grad_norm": 0.26285308599472046,
"learning_rate": 0.0002191780821917808,
"loss": 2.7137,
"step": 17
},
{
"epoch": 0.012450285319038561,
"grad_norm": 0.3446462154388428,
"learning_rate": 0.00023287671232876712,
"loss": 2.3531,
"step": 18
},
{
"epoch": 0.013141967836762926,
"grad_norm": 0.2948848307132721,
"learning_rate": 0.0002465753424657534,
"loss": 2.2846,
"step": 19
},
{
"epoch": 0.01383365035448729,
"grad_norm": 0.3657473027706146,
"learning_rate": 0.0002602739726027397,
"loss": 2.4455,
"step": 20
},
{
"epoch": 0.014525332872211656,
"grad_norm": 0.2797200381755829,
"learning_rate": 0.000273972602739726,
"loss": 2.6308,
"step": 21
},
{
"epoch": 0.01521701538993602,
"grad_norm": 0.32914993166923523,
"learning_rate": 0.0002876712328767123,
"loss": 2.3918,
"step": 22
},
{
"epoch": 0.015908697907660384,
"grad_norm": 0.3232629895210266,
"learning_rate": 0.0003013698630136986,
"loss": 1.714,
"step": 23
},
{
"epoch": 0.01660038042538475,
"grad_norm": 0.3496573269367218,
"learning_rate": 0.00031506849315068495,
"loss": 2.634,
"step": 24
},
{
"epoch": 0.017292062943109112,
"grad_norm": 0.34902870655059814,
"learning_rate": 0.00032876712328767124,
"loss": 2.6269,
"step": 25
},
{
"epoch": 0.017983745460833478,
"grad_norm": 0.3312171399593353,
"learning_rate": 0.00034246575342465754,
"loss": 2.5056,
"step": 26
},
{
"epoch": 0.01867542797855784,
"grad_norm": 0.35412952303886414,
"learning_rate": 0.00035616438356164383,
"loss": 2.0546,
"step": 27
},
{
"epoch": 0.019367110496282206,
"grad_norm": 0.3497133255004883,
"learning_rate": 0.0003698630136986301,
"loss": 2.5338,
"step": 28
},
{
"epoch": 0.020058793014006572,
"grad_norm": 0.4878860116004944,
"learning_rate": 0.0003835616438356164,
"loss": 2.4302,
"step": 29
},
{
"epoch": 0.020750475531730934,
"grad_norm": 0.6170843243598938,
"learning_rate": 0.0003972602739726027,
"loss": 2.4316,
"step": 30
},
{
"epoch": 0.0214421580494553,
"grad_norm": 0.5822828412055969,
"learning_rate": 0.000410958904109589,
"loss": 2.2286,
"step": 31
},
{
"epoch": 0.022133840567179666,
"grad_norm": 0.3742135167121887,
"learning_rate": 0.0004246575342465753,
"loss": 2.145,
"step": 32
},
{
"epoch": 0.02282552308490403,
"grad_norm": 0.9055424332618713,
"learning_rate": 0.0004383561643835616,
"loss": 1.6547,
"step": 33
},
{
"epoch": 0.023517205602628394,
"grad_norm": 0.5974112153053284,
"learning_rate": 0.00045205479452054795,
"loss": 1.3416,
"step": 34
},
{
"epoch": 0.024208888120352757,
"grad_norm": 0.5048322677612305,
"learning_rate": 0.00046575342465753425,
"loss": 1.9358,
"step": 35
},
{
"epoch": 0.024900570638077123,
"grad_norm": 0.6585083603858948,
"learning_rate": 0.00047945205479452054,
"loss": 2.5502,
"step": 36
},
{
"epoch": 0.02559225315580149,
"grad_norm": 0.5043433904647827,
"learning_rate": 0.0004931506849315068,
"loss": 1.9789,
"step": 37
},
{
"epoch": 0.02628393567352585,
"grad_norm": 0.7396597266197205,
"learning_rate": 0.0005068493150684932,
"loss": 1.228,
"step": 38
},
{
"epoch": 0.026975618191250217,
"grad_norm": 0.6552051901817322,
"learning_rate": 0.0005205479452054794,
"loss": 2.2174,
"step": 39
},
{
"epoch": 0.02766730070897458,
"grad_norm": 0.5566487908363342,
"learning_rate": 0.0005342465753424658,
"loss": 1.4923,
"step": 40
},
{
"epoch": 0.028358983226698945,
"grad_norm": 0.8825723528862,
"learning_rate": 0.000547945205479452,
"loss": 1.1997,
"step": 41
},
{
"epoch": 0.02905066574442331,
"grad_norm": 0.6939167380332947,
"learning_rate": 0.0005616438356164384,
"loss": 0.9761,
"step": 42
},
{
"epoch": 0.029742348262147673,
"grad_norm": 0.6930441856384277,
"learning_rate": 0.0005753424657534246,
"loss": 1.859,
"step": 43
},
{
"epoch": 0.03043403077987204,
"grad_norm": 0.5989459753036499,
"learning_rate": 0.000589041095890411,
"loss": 2.1215,
"step": 44
},
{
"epoch": 0.0311257132975964,
"grad_norm": 0.5096371173858643,
"learning_rate": 0.0006027397260273972,
"loss": 1.6978,
"step": 45
},
{
"epoch": 0.03181739581532077,
"grad_norm": 0.505210816860199,
"learning_rate": 0.0006164383561643835,
"loss": 2.2416,
"step": 46
},
{
"epoch": 0.03250907833304513,
"grad_norm": 0.4391973316669464,
"learning_rate": 0.0006301369863013699,
"loss": 2.0748,
"step": 47
},
{
"epoch": 0.0332007608507695,
"grad_norm": 0.5823452472686768,
"learning_rate": 0.0006438356164383562,
"loss": 1.9801,
"step": 48
},
{
"epoch": 0.03389244336849386,
"grad_norm": 0.4575086236000061,
"learning_rate": 0.0006575342465753425,
"loss": 1.7658,
"step": 49
},
{
"epoch": 0.034584125886218224,
"grad_norm": 1.1978178024291992,
"learning_rate": 0.0006712328767123288,
"loss": 2.0922,
"step": 50
},
{
"epoch": 0.03527580840394259,
"grad_norm": 0.8242403864860535,
"learning_rate": 0.0006849315068493151,
"loss": 1.4001,
"step": 51
},
{
"epoch": 0.035967490921666956,
"grad_norm": 0.7775748372077942,
"learning_rate": 0.0006986301369863014,
"loss": 2.2013,
"step": 52
},
{
"epoch": 0.03665917343939132,
"grad_norm": 0.6356838941574097,
"learning_rate": 0.0007123287671232877,
"loss": 2.1857,
"step": 53
},
{
"epoch": 0.03735085595711568,
"grad_norm": 0.5681482553482056,
"learning_rate": 0.000726027397260274,
"loss": 1.8066,
"step": 54
},
{
"epoch": 0.03804253847484005,
"grad_norm": 0.5019308924674988,
"learning_rate": 0.0007397260273972603,
"loss": 2.2206,
"step": 55
},
{
"epoch": 0.03873422099256441,
"grad_norm": 0.5633329749107361,
"learning_rate": 0.0007534246575342466,
"loss": 1.8777,
"step": 56
},
{
"epoch": 0.039425903510288775,
"grad_norm": 0.7095340490341187,
"learning_rate": 0.0007671232876712328,
"loss": 1.547,
"step": 57
},
{
"epoch": 0.040117586028013144,
"grad_norm": 0.5671369433403015,
"learning_rate": 0.0007808219178082192,
"loss": 1.621,
"step": 58
},
{
"epoch": 0.040809268545737507,
"grad_norm": 0.6320775747299194,
"learning_rate": 0.0007945205479452054,
"loss": 1.6643,
"step": 59
},
{
"epoch": 0.04150095106346187,
"grad_norm": 0.5812399387359619,
"learning_rate": 0.0008082191780821918,
"loss": 2.1796,
"step": 60
},
{
"epoch": 0.04219263358118624,
"grad_norm": 0.6970055103302002,
"learning_rate": 0.000821917808219178,
"loss": 1.3168,
"step": 61
},
{
"epoch": 0.0428843160989106,
"grad_norm": 0.5469310879707336,
"learning_rate": 0.0008356164383561644,
"loss": 1.9548,
"step": 62
},
{
"epoch": 0.04357599861663496,
"grad_norm": 0.6035028100013733,
"learning_rate": 0.0008493150684931506,
"loss": 2.0096,
"step": 63
},
{
"epoch": 0.04426768113435933,
"grad_norm": 0.5294916033744812,
"learning_rate": 0.000863013698630137,
"loss": 1.9096,
"step": 64
},
{
"epoch": 0.044959363652083695,
"grad_norm": 0.5908515453338623,
"learning_rate": 0.0008767123287671232,
"loss": 1.7588,
"step": 65
},
{
"epoch": 0.04565104616980806,
"grad_norm": 0.6100364327430725,
"learning_rate": 0.0008904109589041097,
"loss": 2.354,
"step": 66
},
{
"epoch": 0.04634272868753242,
"grad_norm": 0.4925366938114166,
"learning_rate": 0.0009041095890410959,
"loss": 1.9282,
"step": 67
},
{
"epoch": 0.04703441120525679,
"grad_norm": 0.6262894868850708,
"learning_rate": 0.0009178082191780823,
"loss": 1.2392,
"step": 68
},
{
"epoch": 0.04772609372298115,
"grad_norm": 0.55129474401474,
"learning_rate": 0.0009315068493150685,
"loss": 2.4297,
"step": 69
},
{
"epoch": 0.048417776240705514,
"grad_norm": 0.5773240327835083,
"learning_rate": 0.0009452054794520548,
"loss": 1.4238,
"step": 70
},
{
"epoch": 0.04910945875842988,
"grad_norm": 0.3298526704311371,
"learning_rate": 0.0009589041095890411,
"loss": 1.5375,
"step": 71
},
{
"epoch": 0.049801141276154245,
"grad_norm": 0.9887644648551941,
"learning_rate": 0.0009726027397260274,
"loss": 2.037,
"step": 72
},
{
"epoch": 0.05049282379387861,
"grad_norm": 0.7895487546920776,
"learning_rate": 0.0009863013698630137,
"loss": 1.8359,
"step": 73
},
{
"epoch": 0.05118450631160298,
"grad_norm": 0.5635783076286316,
"learning_rate": 0.001,
"loss": 1.2403,
"step": 74
},
{
"epoch": 0.05187618882932734,
"grad_norm": 0.5721316933631897,
"learning_rate": 0.0009992716678805535,
"loss": 1.9278,
"step": 75
},
{
"epoch": 0.0525678713470517,
"grad_norm": 0.4850369095802307,
"learning_rate": 0.000998543335761107,
"loss": 1.9441,
"step": 76
},
{
"epoch": 0.053259553864776064,
"grad_norm": 0.5316908955574036,
"learning_rate": 0.0009978150036416607,
"loss": 1.593,
"step": 77
},
{
"epoch": 0.053951236382500434,
"grad_norm": 0.4999512732028961,
"learning_rate": 0.000997086671522214,
"loss": 1.8264,
"step": 78
},
{
"epoch": 0.054642918900224796,
"grad_norm": 0.4769350588321686,
"learning_rate": 0.0009963583394027677,
"loss": 1.6991,
"step": 79
},
{
"epoch": 0.05533460141794916,
"grad_norm": 0.4839954078197479,
"learning_rate": 0.0009956300072833213,
"loss": 1.0442,
"step": 80
},
{
"epoch": 0.05602628393567353,
"grad_norm": 0.7724981307983398,
"learning_rate": 0.0009949016751638748,
"loss": 1.5422,
"step": 81
},
{
"epoch": 0.05671796645339789,
"grad_norm": 0.7546667456626892,
"learning_rate": 0.0009941733430444283,
"loss": 1.7832,
"step": 82
},
{
"epoch": 0.05740964897112225,
"grad_norm": 0.5036157369613647,
"learning_rate": 0.0009934450109249818,
"loss": 1.9083,
"step": 83
},
{
"epoch": 0.05810133148884662,
"grad_norm": 0.5091835260391235,
"learning_rate": 0.0009927166788055353,
"loss": 1.5611,
"step": 84
},
{
"epoch": 0.058793014006570984,
"grad_norm": 0.5591360926628113,
"learning_rate": 0.0009919883466860888,
"loss": 1.517,
"step": 85
},
{
"epoch": 0.05948469652429535,
"grad_norm": 0.5279435515403748,
"learning_rate": 0.0009912600145666425,
"loss": 1.541,
"step": 86
},
{
"epoch": 0.060176379042019716,
"grad_norm": 0.8345214128494263,
"learning_rate": 0.000990531682447196,
"loss": 0.8178,
"step": 87
},
{
"epoch": 0.06086806155974408,
"grad_norm": 0.7917139530181885,
"learning_rate": 0.0009898033503277495,
"loss": 2.0548,
"step": 88
},
{
"epoch": 0.06155974407746844,
"grad_norm": 0.46465063095092773,
"learning_rate": 0.000989075018208303,
"loss": 2.045,
"step": 89
},
{
"epoch": 0.0622514265951928,
"grad_norm": 0.6480844020843506,
"learning_rate": 0.0009883466860888565,
"loss": 2.1368,
"step": 90
},
{
"epoch": 0.06294310911291717,
"grad_norm": 0.5167698264122009,
"learning_rate": 0.00098761835396941,
"loss": 2.2606,
"step": 91
},
{
"epoch": 0.06363479163064154,
"grad_norm": 0.6514157652854919,
"learning_rate": 0.0009868900218499635,
"loss": 2.1443,
"step": 92
},
{
"epoch": 0.0643264741483659,
"grad_norm": 0.9793212413787842,
"learning_rate": 0.0009861616897305172,
"loss": 1.7448,
"step": 93
},
{
"epoch": 0.06501815666609026,
"grad_norm": 0.5196186900138855,
"learning_rate": 0.0009854333576110707,
"loss": 1.6717,
"step": 94
},
{
"epoch": 0.06570983918381462,
"grad_norm": 0.4875952899456024,
"learning_rate": 0.0009847050254916242,
"loss": 1.8078,
"step": 95
},
{
"epoch": 0.066401521701539,
"grad_norm": 0.5111953020095825,
"learning_rate": 0.0009839766933721777,
"loss": 2.263,
"step": 96
},
{
"epoch": 0.06709320421926336,
"grad_norm": 0.6604788303375244,
"learning_rate": 0.0009832483612527312,
"loss": 1.6966,
"step": 97
},
{
"epoch": 0.06778488673698772,
"grad_norm": 0.5474271774291992,
"learning_rate": 0.0009825200291332847,
"loss": 2.3985,
"step": 98
},
{
"epoch": 0.06847656925471209,
"grad_norm": 0.47275879979133606,
"learning_rate": 0.0009817916970138382,
"loss": 1.7751,
"step": 99
},
{
"epoch": 0.06916825177243645,
"grad_norm": 0.5738961696624756,
"learning_rate": 0.000981063364894392,
"loss": 1.5479,
"step": 100
},
{
"epoch": 0.06985993429016081,
"grad_norm": 0.5046308636665344,
"learning_rate": 0.0009803350327749454,
"loss": 1.4889,
"step": 101
},
{
"epoch": 0.07055161680788519,
"grad_norm": 0.45390692353248596,
"learning_rate": 0.000979606700655499,
"loss": 1.3446,
"step": 102
},
{
"epoch": 0.07124329932560955,
"grad_norm": 0.4701155126094818,
"learning_rate": 0.0009788783685360525,
"loss": 1.4525,
"step": 103
},
{
"epoch": 0.07193498184333391,
"grad_norm": 0.6199256181716919,
"learning_rate": 0.000978150036416606,
"loss": 1.5779,
"step": 104
},
{
"epoch": 0.07262666436105827,
"grad_norm": 0.6306092143058777,
"learning_rate": 0.0009774217042971595,
"loss": 2.0143,
"step": 105
},
{
"epoch": 0.07331834687878264,
"grad_norm": 0.5837789177894592,
"learning_rate": 0.000976693372177713,
"loss": 1.2003,
"step": 106
},
{
"epoch": 0.074010029396507,
"grad_norm": 0.9713156223297119,
"learning_rate": 0.0009759650400582666,
"loss": 1.5721,
"step": 107
},
{
"epoch": 0.07470171191423136,
"grad_norm": 0.694187343120575,
"learning_rate": 0.0009752367079388202,
"loss": 2.2187,
"step": 108
},
{
"epoch": 0.07539339443195574,
"grad_norm": 0.465781033039093,
"learning_rate": 0.0009745083758193737,
"loss": 1.768,
"step": 109
},
{
"epoch": 0.0760850769496801,
"grad_norm": 0.5198079347610474,
"learning_rate": 0.0009737800436999272,
"loss": 2.1921,
"step": 110
},
{
"epoch": 0.07677675946740446,
"grad_norm": 0.7641897201538086,
"learning_rate": 0.0009730517115804807,
"loss": 1.7574,
"step": 111
},
{
"epoch": 0.07746844198512882,
"grad_norm": 0.4864037334918976,
"learning_rate": 0.0009723233794610342,
"loss": 1.9409,
"step": 112
},
{
"epoch": 0.07816012450285319,
"grad_norm": 1.0721259117126465,
"learning_rate": 0.0009715950473415878,
"loss": 1.2796,
"step": 113
},
{
"epoch": 0.07885180702057755,
"grad_norm": 0.6161507964134216,
"learning_rate": 0.0009708667152221413,
"loss": 1.646,
"step": 114
},
{
"epoch": 0.07954348953830193,
"grad_norm": 0.6296889185905457,
"learning_rate": 0.0009701383831026949,
"loss": 1.2842,
"step": 115
},
{
"epoch": 0.08023517205602629,
"grad_norm": 0.6511496901512146,
"learning_rate": 0.0009694100509832484,
"loss": 2.0559,
"step": 116
},
{
"epoch": 0.08092685457375065,
"grad_norm": 0.5697126984596252,
"learning_rate": 0.0009686817188638019,
"loss": 1.5121,
"step": 117
},
{
"epoch": 0.08161853709147501,
"grad_norm": 0.506841242313385,
"learning_rate": 0.0009679533867443554,
"loss": 1.6908,
"step": 118
},
{
"epoch": 0.08231021960919938,
"grad_norm": 1.1525691747665405,
"learning_rate": 0.0009672250546249089,
"loss": 2.0817,
"step": 119
},
{
"epoch": 0.08300190212692374,
"grad_norm": 0.6273766756057739,
"learning_rate": 0.0009664967225054625,
"loss": 1.783,
"step": 120
},
{
"epoch": 0.0836935846446481,
"grad_norm": 0.8089930415153503,
"learning_rate": 0.000965768390386016,
"loss": 1.9208,
"step": 121
},
{
"epoch": 0.08438526716237248,
"grad_norm": 0.6257967948913574,
"learning_rate": 0.0009650400582665696,
"loss": 1.8797,
"step": 122
},
{
"epoch": 0.08507694968009684,
"grad_norm": 0.6704832911491394,
"learning_rate": 0.0009643117261471231,
"loss": 1.6969,
"step": 123
},
{
"epoch": 0.0857686321978212,
"grad_norm": 0.8226727843284607,
"learning_rate": 0.0009635833940276765,
"loss": 1.291,
"step": 124
},
{
"epoch": 0.08646031471554556,
"grad_norm": 0.45218008756637573,
"learning_rate": 0.0009628550619082302,
"loss": 1.6168,
"step": 125
},
{
"epoch": 0.08715199723326993,
"grad_norm": 0.9265746474266052,
"learning_rate": 0.0009621267297887837,
"loss": 1.5465,
"step": 126
},
{
"epoch": 0.08784367975099429,
"grad_norm": 0.7594870924949646,
"learning_rate": 0.0009613983976693373,
"loss": 1.2877,
"step": 127
},
{
"epoch": 0.08853536226871866,
"grad_norm": 0.5055251121520996,
"learning_rate": 0.0009606700655498908,
"loss": 1.8451,
"step": 128
},
{
"epoch": 0.08922704478644303,
"grad_norm": 0.5842559337615967,
"learning_rate": 0.0009599417334304444,
"loss": 2.4324,
"step": 129
},
{
"epoch": 0.08991872730416739,
"grad_norm": 0.42893463373184204,
"learning_rate": 0.0009592134013109979,
"loss": 1.8858,
"step": 130
},
{
"epoch": 0.09061040982189175,
"grad_norm": 0.5879374146461487,
"learning_rate": 0.0009584850691915513,
"loss": 2.1098,
"step": 131
},
{
"epoch": 0.09130209233961611,
"grad_norm": 1.0884597301483154,
"learning_rate": 0.0009577567370721049,
"loss": 1.1548,
"step": 132
},
{
"epoch": 0.09199377485734048,
"grad_norm": 0.4452207684516907,
"learning_rate": 0.0009570284049526584,
"loss": 1.5784,
"step": 133
},
{
"epoch": 0.09268545737506484,
"grad_norm": 0.5032292604446411,
"learning_rate": 0.000956300072833212,
"loss": 1.8767,
"step": 134
},
{
"epoch": 0.09337713989278922,
"grad_norm": 0.6190866827964783,
"learning_rate": 0.0009555717407137655,
"loss": 1.7698,
"step": 135
},
{
"epoch": 0.09406882241051358,
"grad_norm": 0.559252142906189,
"learning_rate": 0.0009548434085943191,
"loss": 2.063,
"step": 136
},
{
"epoch": 0.09476050492823794,
"grad_norm": 0.7464174032211304,
"learning_rate": 0.0009541150764748726,
"loss": 1.334,
"step": 137
},
{
"epoch": 0.0954521874459623,
"grad_norm": 0.5302634835243225,
"learning_rate": 0.000953386744355426,
"loss": 1.838,
"step": 138
},
{
"epoch": 0.09614386996368667,
"grad_norm": 0.5212066173553467,
"learning_rate": 0.0009526584122359796,
"loss": 1.9793,
"step": 139
},
{
"epoch": 0.09683555248141103,
"grad_norm": 0.7148857116699219,
"learning_rate": 0.0009519300801165331,
"loss": 1.6045,
"step": 140
},
{
"epoch": 0.0975272349991354,
"grad_norm": 1.0729445219039917,
"learning_rate": 0.0009512017479970867,
"loss": 1.5728,
"step": 141
},
{
"epoch": 0.09821891751685977,
"grad_norm": 0.438503235578537,
"learning_rate": 0.0009504734158776402,
"loss": 1.5064,
"step": 142
},
{
"epoch": 0.09891060003458413,
"grad_norm": 0.6026888489723206,
"learning_rate": 0.0009497450837581938,
"loss": 1.5297,
"step": 143
},
{
"epoch": 0.09960228255230849,
"grad_norm": 0.4339958727359772,
"learning_rate": 0.0009490167516387472,
"loss": 1.2461,
"step": 144
},
{
"epoch": 0.10029396507003285,
"grad_norm": 0.8123407363891602,
"learning_rate": 0.0009482884195193007,
"loss": 1.7749,
"step": 145
},
{
"epoch": 0.10098564758775722,
"grad_norm": 0.938025951385498,
"learning_rate": 0.0009475600873998543,
"loss": 1.2157,
"step": 146
},
{
"epoch": 0.10167733010548158,
"grad_norm": 0.8118213415145874,
"learning_rate": 0.0009468317552804079,
"loss": 1.3722,
"step": 147
},
{
"epoch": 0.10236901262320595,
"grad_norm": 0.6156368851661682,
"learning_rate": 0.0009461034231609615,
"loss": 1.9435,
"step": 148
},
{
"epoch": 0.10306069514093032,
"grad_norm": 0.43706831336021423,
"learning_rate": 0.000945375091041515,
"loss": 1.7467,
"step": 149
},
{
"epoch": 0.10375237765865468,
"grad_norm": 0.5463519096374512,
"learning_rate": 0.0009446467589220686,
"loss": 1.8991,
"step": 150
},
{
"epoch": 0.10444406017637904,
"grad_norm": 0.4798230826854706,
"learning_rate": 0.000943918426802622,
"loss": 2.1618,
"step": 151
},
{
"epoch": 0.1051357426941034,
"grad_norm": 0.4733302891254425,
"learning_rate": 0.0009431900946831755,
"loss": 1.8547,
"step": 152
},
{
"epoch": 0.10582742521182777,
"grad_norm": 0.558428168296814,
"learning_rate": 0.0009424617625637291,
"loss": 2.314,
"step": 153
},
{
"epoch": 0.10651910772955213,
"grad_norm": 0.5310361385345459,
"learning_rate": 0.0009417334304442826,
"loss": 1.9073,
"step": 154
},
{
"epoch": 0.1072107902472765,
"grad_norm": 0.4204038679599762,
"learning_rate": 0.0009410050983248362,
"loss": 1.9635,
"step": 155
},
{
"epoch": 0.10790247276500087,
"grad_norm": 0.5052216649055481,
"learning_rate": 0.0009402767662053897,
"loss": 1.0511,
"step": 156
},
{
"epoch": 0.10859415528272523,
"grad_norm": 0.5589479804039001,
"learning_rate": 0.0009395484340859433,
"loss": 1.4608,
"step": 157
},
{
"epoch": 0.10928583780044959,
"grad_norm": 0.7388360500335693,
"learning_rate": 0.0009388201019664967,
"loss": 1.9785,
"step": 158
},
{
"epoch": 0.10997752031817395,
"grad_norm": 0.5995668172836304,
"learning_rate": 0.0009380917698470502,
"loss": 1.4633,
"step": 159
},
{
"epoch": 0.11066920283589832,
"grad_norm": 0.8107509613037109,
"learning_rate": 0.0009373634377276038,
"loss": 1.6685,
"step": 160
},
{
"epoch": 0.1113608853536227,
"grad_norm": 0.6110396981239319,
"learning_rate": 0.0009366351056081573,
"loss": 1.8113,
"step": 161
},
{
"epoch": 0.11205256787134706,
"grad_norm": 0.5032293796539307,
"learning_rate": 0.0009359067734887109,
"loss": 1.7344,
"step": 162
},
{
"epoch": 0.11274425038907142,
"grad_norm": 1.456254243850708,
"learning_rate": 0.0009351784413692644,
"loss": 1.2546,
"step": 163
},
{
"epoch": 0.11343593290679578,
"grad_norm": 0.8283969163894653,
"learning_rate": 0.0009344501092498179,
"loss": 1.8466,
"step": 164
},
{
"epoch": 0.11412761542452014,
"grad_norm": 0.8178532123565674,
"learning_rate": 0.0009337217771303714,
"loss": 1.7566,
"step": 165
},
{
"epoch": 0.1148192979422445,
"grad_norm": 0.5897772908210754,
"learning_rate": 0.0009329934450109249,
"loss": 1.9611,
"step": 166
},
{
"epoch": 0.11551098045996887,
"grad_norm": 0.4763628840446472,
"learning_rate": 0.0009322651128914785,
"loss": 1.6854,
"step": 167
},
{
"epoch": 0.11620266297769324,
"grad_norm": 0.5219669938087463,
"learning_rate": 0.000931536780772032,
"loss": 1.5156,
"step": 168
},
{
"epoch": 0.1168943454954176,
"grad_norm": 0.7750780582427979,
"learning_rate": 0.0009308084486525857,
"loss": 1.2845,
"step": 169
},
{
"epoch": 0.11758602801314197,
"grad_norm": 0.5357050895690918,
"learning_rate": 0.0009300801165331392,
"loss": 1.1729,
"step": 170
},
{
"epoch": 0.11827771053086633,
"grad_norm": 0.5962219834327698,
"learning_rate": 0.0009293517844136927,
"loss": 1.4915,
"step": 171
},
{
"epoch": 0.1189693930485907,
"grad_norm": 0.4935504198074341,
"learning_rate": 0.0009286234522942462,
"loss": 1.7863,
"step": 172
},
{
"epoch": 0.11966107556631506,
"grad_norm": 0.5719547867774963,
"learning_rate": 0.0009278951201747997,
"loss": 1.7086,
"step": 173
},
{
"epoch": 0.12035275808403943,
"grad_norm": 0.614291787147522,
"learning_rate": 0.0009271667880553533,
"loss": 2.0141,
"step": 174
},
{
"epoch": 0.1210444406017638,
"grad_norm": 0.4415907859802246,
"learning_rate": 0.0009264384559359068,
"loss": 1.4772,
"step": 175
},
{
"epoch": 0.12173612311948816,
"grad_norm": 0.518036961555481,
"learning_rate": 0.0009257101238164604,
"loss": 1.1856,
"step": 176
},
{
"epoch": 0.12242780563721252,
"grad_norm": 0.39714357256889343,
"learning_rate": 0.0009249817916970139,
"loss": 1.1254,
"step": 177
},
{
"epoch": 0.12311948815493688,
"grad_norm": 0.5234679579734802,
"learning_rate": 0.0009242534595775674,
"loss": 2.0139,
"step": 178
},
{
"epoch": 0.12381117067266124,
"grad_norm": 0.548357367515564,
"learning_rate": 0.0009235251274581209,
"loss": 1.5552,
"step": 179
},
{
"epoch": 0.1245028531903856,
"grad_norm": 0.6111085414886475,
"learning_rate": 0.0009227967953386744,
"loss": 2.1376,
"step": 180
},
{
"epoch": 0.12519453570810998,
"grad_norm": 11.656793594360352,
"learning_rate": 0.000922068463219228,
"loss": 2.0174,
"step": 181
},
{
"epoch": 0.12588621822583435,
"grad_norm": 0.7396730184555054,
"learning_rate": 0.0009213401310997815,
"loss": 2.2195,
"step": 182
},
{
"epoch": 0.1265779007435587,
"grad_norm": 0.7623037099838257,
"learning_rate": 0.0009206117989803351,
"loss": 1.8883,
"step": 183
},
{
"epoch": 0.12726958326128307,
"grad_norm": 4.827798366546631,
"learning_rate": 0.0009198834668608885,
"loss": 1.4435,
"step": 184
},
{
"epoch": 0.12796126577900743,
"grad_norm": 6.8200836181640625,
"learning_rate": 0.0009191551347414421,
"loss": 2.4153,
"step": 185
},
{
"epoch": 0.1286529482967318,
"grad_norm": 10.740931510925293,
"learning_rate": 0.0009184268026219956,
"loss": 2.1309,
"step": 186
},
{
"epoch": 0.12934463081445616,
"grad_norm": 31.872066497802734,
"learning_rate": 0.0009176984705025491,
"loss": 2.2398,
"step": 187
},
{
"epoch": 0.13003631333218052,
"grad_norm": 31.492610931396484,
"learning_rate": 0.0009169701383831027,
"loss": 1.7102,
"step": 188
},
{
"epoch": 0.13072799584990488,
"grad_norm": 14.984453201293945,
"learning_rate": 0.0009162418062636562,
"loss": 1.4899,
"step": 189
},
{
"epoch": 0.13141967836762924,
"grad_norm": 60.037567138671875,
"learning_rate": 0.0009155134741442099,
"loss": 1.4512,
"step": 190
},
{
"epoch": 0.13211136088535363,
"grad_norm": 13.009904861450195,
"learning_rate": 0.0009147851420247633,
"loss": 1.7607,
"step": 191
},
{
"epoch": 0.132803043403078,
"grad_norm": 37.90861511230469,
"learning_rate": 0.0009140568099053169,
"loss": 1.9484,
"step": 192
},
{
"epoch": 0.13349472592080236,
"grad_norm": 25.40981674194336,
"learning_rate": 0.0009133284777858704,
"loss": 2.4415,
"step": 193
},
{
"epoch": 0.13418640843852672,
"grad_norm": 6.186267375946045,
"learning_rate": 0.0009126001456664239,
"loss": 1.1091,
"step": 194
},
{
"epoch": 0.13487809095625108,
"grad_norm": 0.7662860155105591,
"learning_rate": 0.0009118718135469775,
"loss": 1.8422,
"step": 195
},
{
"epoch": 0.13556977347397545,
"grad_norm": 0.6533941626548767,
"learning_rate": 0.000911143481427531,
"loss": 1.9951,
"step": 196
},
{
"epoch": 0.1362614559916998,
"grad_norm": 0.6851759552955627,
"learning_rate": 0.0009104151493080846,
"loss": 2.2665,
"step": 197
},
{
"epoch": 0.13695313850942417,
"grad_norm": 0.49062949419021606,
"learning_rate": 0.000909686817188638,
"loss": 1.1709,
"step": 198
},
{
"epoch": 0.13764482102714853,
"grad_norm": 0.5005449056625366,
"learning_rate": 0.0009089584850691916,
"loss": 1.6217,
"step": 199
},
{
"epoch": 0.1383365035448729,
"grad_norm": 0.5429890751838684,
"learning_rate": 0.0009082301529497451,
"loss": 2.0511,
"step": 200
},
{
"epoch": 0.13902818606259726,
"grad_norm": 0.652536153793335,
"learning_rate": 0.0009075018208302986,
"loss": 1.2122,
"step": 201
},
{
"epoch": 0.13971986858032162,
"grad_norm": 0.4541880488395691,
"learning_rate": 0.0009067734887108522,
"loss": 1.1111,
"step": 202
},
{
"epoch": 0.14041155109804598,
"grad_norm": 0.5066574811935425,
"learning_rate": 0.0009060451565914057,
"loss": 1.0966,
"step": 203
},
{
"epoch": 0.14110323361577037,
"grad_norm": 0.5900403261184692,
"learning_rate": 0.0009053168244719592,
"loss": 1.9502,
"step": 204
},
{
"epoch": 0.14179491613349474,
"grad_norm": 0.5873029828071594,
"learning_rate": 0.0009045884923525127,
"loss": 2.1726,
"step": 205
},
{
"epoch": 0.1424865986512191,
"grad_norm": 0.46297940611839294,
"learning_rate": 0.0009038601602330663,
"loss": 1.5529,
"step": 206
},
{
"epoch": 0.14317828116894346,
"grad_norm": 0.6434882283210754,
"learning_rate": 0.0009031318281136198,
"loss": 1.1717,
"step": 207
},
{
"epoch": 0.14386996368666782,
"grad_norm": 0.5225998163223267,
"learning_rate": 0.0009024034959941733,
"loss": 1.5854,
"step": 208
},
{
"epoch": 0.14456164620439219,
"grad_norm": 0.5846410989761353,
"learning_rate": 0.0009016751638747269,
"loss": 1.5399,
"step": 209
},
{
"epoch": 0.14525332872211655,
"grad_norm": 0.6395654082298279,
"learning_rate": 0.0009009468317552804,
"loss": 1.7706,
"step": 210
},
{
"epoch": 0.1459450112398409,
"grad_norm": 4.408266067504883,
"learning_rate": 0.000900218499635834,
"loss": 2.841,
"step": 211
},
{
"epoch": 0.14663669375756527,
"grad_norm": 0.5043503642082214,
"learning_rate": 0.0008994901675163874,
"loss": 1.7936,
"step": 212
},
{
"epoch": 0.14732837627528964,
"grad_norm": 0.4562769830226898,
"learning_rate": 0.0008987618353969411,
"loss": 1.8362,
"step": 213
},
{
"epoch": 0.148020058793014,
"grad_norm": 0.7404221296310425,
"learning_rate": 0.0008980335032774946,
"loss": 1.6168,
"step": 214
},
{
"epoch": 0.14871174131073836,
"grad_norm": 0.7720257043838501,
"learning_rate": 0.0008973051711580481,
"loss": 1.2365,
"step": 215
},
{
"epoch": 0.14940342382846272,
"grad_norm": 0.9425879716873169,
"learning_rate": 0.0008965768390386017,
"loss": 1.5786,
"step": 216
},
{
"epoch": 0.1500951063461871,
"grad_norm": 0.5764768719673157,
"learning_rate": 0.0008958485069191552,
"loss": 2.0833,
"step": 217
},
{
"epoch": 0.15078678886391148,
"grad_norm": 0.49806153774261475,
"learning_rate": 0.0008951201747997087,
"loss": 1.1311,
"step": 218
},
{
"epoch": 0.15147847138163584,
"grad_norm": 0.5747334361076355,
"learning_rate": 0.0008943918426802622,
"loss": 2.2558,
"step": 219
},
{
"epoch": 0.1521701538993602,
"grad_norm": 1.0881627798080444,
"learning_rate": 0.0008936635105608158,
"loss": 1.7196,
"step": 220
},
{
"epoch": 0.15286183641708456,
"grad_norm": 0.6077120900154114,
"learning_rate": 0.0008929351784413693,
"loss": 1.8436,
"step": 221
},
{
"epoch": 0.15355351893480892,
"grad_norm": 1.4011138677597046,
"learning_rate": 0.0008922068463219228,
"loss": 1.4092,
"step": 222
},
{
"epoch": 0.1542452014525333,
"grad_norm": 0.6316831707954407,
"learning_rate": 0.0008914785142024764,
"loss": 1.6801,
"step": 223
},
{
"epoch": 0.15493688397025765,
"grad_norm": 0.6225351691246033,
"learning_rate": 0.0008907501820830298,
"loss": 1.575,
"step": 224
},
{
"epoch": 0.155628566487982,
"grad_norm": 0.45079120993614197,
"learning_rate": 0.0008900218499635834,
"loss": 1.7441,
"step": 225
},
{
"epoch": 0.15632024900570637,
"grad_norm": 0.5602415204048157,
"learning_rate": 0.0008892935178441369,
"loss": 1.8509,
"step": 226
},
{
"epoch": 0.15701193152343074,
"grad_norm": 0.43019142746925354,
"learning_rate": 0.0008885651857246905,
"loss": 2.0136,
"step": 227
},
{
"epoch": 0.1577036140411551,
"grad_norm": 0.48303139209747314,
"learning_rate": 0.000887836853605244,
"loss": 1.7679,
"step": 228
},
{
"epoch": 0.15839529655887946,
"grad_norm": 0.5987271666526794,
"learning_rate": 0.0008871085214857975,
"loss": 1.3894,
"step": 229
},
{
"epoch": 0.15908697907660385,
"grad_norm": 0.6672357320785522,
"learning_rate": 0.0008863801893663511,
"loss": 2.0173,
"step": 230
},
{
"epoch": 0.15977866159432821,
"grad_norm": 0.5140132904052734,
"learning_rate": 0.0008856518572469045,
"loss": 1.9006,
"step": 231
},
{
"epoch": 0.16047034411205258,
"grad_norm": 0.7984848022460938,
"learning_rate": 0.0008849235251274581,
"loss": 1.7669,
"step": 232
},
{
"epoch": 0.16116202662977694,
"grad_norm": 1.279133677482605,
"learning_rate": 0.0008841951930080116,
"loss": 1.3613,
"step": 233
},
{
"epoch": 0.1618537091475013,
"grad_norm": 0.37104475498199463,
"learning_rate": 0.0008834668608885653,
"loss": 1.118,
"step": 234
},
{
"epoch": 0.16254539166522566,
"grad_norm": 0.5247305631637573,
"learning_rate": 0.0008827385287691188,
"loss": 2.0458,
"step": 235
},
{
"epoch": 0.16323707418295003,
"grad_norm": 0.837685227394104,
"learning_rate": 0.0008820101966496723,
"loss": 2.1954,
"step": 236
},
{
"epoch": 0.1639287567006744,
"grad_norm": 0.5766549706459045,
"learning_rate": 0.0008812818645302259,
"loss": 1.4839,
"step": 237
},
{
"epoch": 0.16462043921839875,
"grad_norm": 0.9044421315193176,
"learning_rate": 0.0008805535324107793,
"loss": 1.6802,
"step": 238
},
{
"epoch": 0.1653121217361231,
"grad_norm": 0.6272666454315186,
"learning_rate": 0.0008798252002913329,
"loss": 1.9889,
"step": 239
},
{
"epoch": 0.16600380425384748,
"grad_norm": 0.5650503039360046,
"learning_rate": 0.0008790968681718864,
"loss": 1.9305,
"step": 240
},
{
"epoch": 0.16669548677157184,
"grad_norm": 0.605739176273346,
"learning_rate": 0.00087836853605244,
"loss": 1.4619,
"step": 241
},
{
"epoch": 0.1673871692892962,
"grad_norm": 0.654289186000824,
"learning_rate": 0.0008776402039329935,
"loss": 2.1283,
"step": 242
},
{
"epoch": 0.1680788518070206,
"grad_norm": 0.5998426079750061,
"learning_rate": 0.000876911871813547,
"loss": 2.0628,
"step": 243
},
{
"epoch": 0.16877053432474495,
"grad_norm": 0.5341598391532898,
"learning_rate": 0.0008761835396941005,
"loss": 1.8415,
"step": 244
},
{
"epoch": 0.16946221684246932,
"grad_norm": 0.9030768275260925,
"learning_rate": 0.000875455207574654,
"loss": 1.3423,
"step": 245
},
{
"epoch": 0.17015389936019368,
"grad_norm": 0.7384636998176575,
"learning_rate": 0.0008747268754552076,
"loss": 1.6916,
"step": 246
},
{
"epoch": 0.17084558187791804,
"grad_norm": 0.9748024940490723,
"learning_rate": 0.0008739985433357611,
"loss": 1.1592,
"step": 247
},
{
"epoch": 0.1715372643956424,
"grad_norm": 0.49209123849868774,
"learning_rate": 0.0008732702112163147,
"loss": 1.5281,
"step": 248
},
{
"epoch": 0.17222894691336676,
"grad_norm": 0.6235657930374146,
"learning_rate": 0.0008725418790968682,
"loss": 1.6423,
"step": 249
},
{
"epoch": 0.17292062943109113,
"grad_norm": 0.8116986751556396,
"learning_rate": 0.0008718135469774217,
"loss": 1.6386,
"step": 250
},
{
"epoch": 0.1736123119488155,
"grad_norm": 0.643518328666687,
"learning_rate": 0.0008710852148579752,
"loss": 1.1341,
"step": 251
},
{
"epoch": 0.17430399446653985,
"grad_norm": 0.826726496219635,
"learning_rate": 0.0008703568827385287,
"loss": 1.4637,
"step": 252
},
{
"epoch": 0.17499567698426421,
"grad_norm": 0.6371028423309326,
"learning_rate": 0.0008696285506190823,
"loss": 1.8159,
"step": 253
},
{
"epoch": 0.17568735950198858,
"grad_norm": 0.7354971766471863,
"learning_rate": 0.0008689002184996358,
"loss": 1.5482,
"step": 254
},
{
"epoch": 0.17637904201971294,
"grad_norm": 0.5614224672317505,
"learning_rate": 0.0008681718863801895,
"loss": 1.6824,
"step": 255
},
{
"epoch": 0.17707072453743733,
"grad_norm": 0.7730950117111206,
"learning_rate": 0.000867443554260743,
"loss": 1.6996,
"step": 256
},
{
"epoch": 0.1777624070551617,
"grad_norm": 0.5419211983680725,
"learning_rate": 0.0008667152221412965,
"loss": 1.8659,
"step": 257
},
{
"epoch": 0.17845408957288605,
"grad_norm": 0.5566856861114502,
"learning_rate": 0.00086598689002185,
"loss": 1.1103,
"step": 258
},
{
"epoch": 0.17914577209061042,
"grad_norm": 0.773952841758728,
"learning_rate": 0.0008652585579024035,
"loss": 1.2759,
"step": 259
},
{
"epoch": 0.17983745460833478,
"grad_norm": 0.49450692534446716,
"learning_rate": 0.0008645302257829571,
"loss": 1.913,
"step": 260
},
{
"epoch": 0.18052913712605914,
"grad_norm": 0.565629243850708,
"learning_rate": 0.0008638018936635106,
"loss": 1.0691,
"step": 261
},
{
"epoch": 0.1812208196437835,
"grad_norm": 0.5907365679740906,
"learning_rate": 0.0008630735615440642,
"loss": 2.1622,
"step": 262
},
{
"epoch": 0.18191250216150787,
"grad_norm": 0.6517736911773682,
"learning_rate": 0.0008623452294246177,
"loss": 1.9742,
"step": 263
},
{
"epoch": 0.18260418467923223,
"grad_norm": 0.7100114822387695,
"learning_rate": 0.0008616168973051711,
"loss": 1.6081,
"step": 264
},
{
"epoch": 0.1832958671969566,
"grad_norm": 0.5431230068206787,
"learning_rate": 0.0008608885651857247,
"loss": 2.0853,
"step": 265
},
{
"epoch": 0.18398754971468095,
"grad_norm": 0.4722400903701782,
"learning_rate": 0.0008601602330662782,
"loss": 1.1004,
"step": 266
},
{
"epoch": 0.18467923223240532,
"grad_norm": 0.6258965730667114,
"learning_rate": 0.0008594319009468318,
"loss": 1.9442,
"step": 267
},
{
"epoch": 0.18537091475012968,
"grad_norm": 0.6985493898391724,
"learning_rate": 0.0008587035688273853,
"loss": 1.5243,
"step": 268
},
{
"epoch": 0.18606259726785407,
"grad_norm": 0.6129814386367798,
"learning_rate": 0.0008579752367079389,
"loss": 1.5075,
"step": 269
},
{
"epoch": 0.18675427978557843,
"grad_norm": 0.49683645367622375,
"learning_rate": 0.0008572469045884924,
"loss": 2.0752,
"step": 270
},
{
"epoch": 0.1874459623033028,
"grad_norm": 0.48728471994400024,
"learning_rate": 0.0008565185724690458,
"loss": 2.2337,
"step": 271
},
{
"epoch": 0.18813764482102716,
"grad_norm": 0.8094476461410522,
"learning_rate": 0.0008557902403495994,
"loss": 2.007,
"step": 272
},
{
"epoch": 0.18882932733875152,
"grad_norm": 0.558074951171875,
"learning_rate": 0.0008550619082301529,
"loss": 1.4369,
"step": 273
},
{
"epoch": 0.18952100985647588,
"grad_norm": 0.6702684760093689,
"learning_rate": 0.0008543335761107065,
"loss": 1.8943,
"step": 274
},
{
"epoch": 0.19021269237420024,
"grad_norm": 0.7045763731002808,
"learning_rate": 0.00085360524399126,
"loss": 2.0621,
"step": 275
},
{
"epoch": 0.1909043748919246,
"grad_norm": 0.5553760528564453,
"learning_rate": 0.0008528769118718137,
"loss": 1.9012,
"step": 276
},
{
"epoch": 0.19159605740964897,
"grad_norm": 0.651685893535614,
"learning_rate": 0.0008521485797523672,
"loss": 1.826,
"step": 277
},
{
"epoch": 0.19228773992737333,
"grad_norm": 0.46926578879356384,
"learning_rate": 0.0008514202476329205,
"loss": 1.8451,
"step": 278
},
{
"epoch": 0.1929794224450977,
"grad_norm": 0.5306689739227295,
"learning_rate": 0.0008506919155134742,
"loss": 1.9559,
"step": 279
},
{
"epoch": 0.19367110496282205,
"grad_norm": 0.437308669090271,
"learning_rate": 0.0008499635833940277,
"loss": 1.1798,
"step": 280
},
{
"epoch": 0.19436278748054642,
"grad_norm": 0.5720314383506775,
"learning_rate": 0.0008492352512745813,
"loss": 1.8443,
"step": 281
},
{
"epoch": 0.1950544699982708,
"grad_norm": 0.6609981060028076,
"learning_rate": 0.0008485069191551348,
"loss": 2.1044,
"step": 282
},
{
"epoch": 0.19574615251599517,
"grad_norm": 0.7185072302818298,
"learning_rate": 0.0008477785870356884,
"loss": 1.309,
"step": 283
},
{
"epoch": 0.19643783503371953,
"grad_norm": 0.9821947813034058,
"learning_rate": 0.0008470502549162418,
"loss": 1.5823,
"step": 284
},
{
"epoch": 0.1971295175514439,
"grad_norm": 0.6811301112174988,
"learning_rate": 0.0008463219227967953,
"loss": 1.4655,
"step": 285
},
{
"epoch": 0.19782120006916826,
"grad_norm": 0.5955311059951782,
"learning_rate": 0.0008455935906773489,
"loss": 1.5187,
"step": 286
},
{
"epoch": 0.19851288258689262,
"grad_norm": 0.568804919719696,
"learning_rate": 0.0008448652585579024,
"loss": 1.3988,
"step": 287
},
{
"epoch": 0.19920456510461698,
"grad_norm": 0.7858214974403381,
"learning_rate": 0.000844136926438456,
"loss": 1.5478,
"step": 288
},
{
"epoch": 0.19989624762234134,
"grad_norm": 0.5844207406044006,
"learning_rate": 0.0008434085943190095,
"loss": 1.904,
"step": 289
},
{
"epoch": 0.2005879301400657,
"grad_norm": 0.7172948122024536,
"learning_rate": 0.0008426802621995631,
"loss": 1.4772,
"step": 290
},
{
"epoch": 0.20127961265779007,
"grad_norm": 0.6408190727233887,
"learning_rate": 0.0008419519300801165,
"loss": 1.4348,
"step": 291
},
{
"epoch": 0.20197129517551443,
"grad_norm": 0.9460310339927673,
"learning_rate": 0.00084122359796067,
"loss": 1.7674,
"step": 292
},
{
"epoch": 0.2026629776932388,
"grad_norm": 0.6002872586250305,
"learning_rate": 0.0008404952658412236,
"loss": 1.4278,
"step": 293
},
{
"epoch": 0.20335466021096316,
"grad_norm": 1.0076587200164795,
"learning_rate": 0.0008397669337217771,
"loss": 1.5417,
"step": 294
},
{
"epoch": 0.20404634272868752,
"grad_norm": 1.3005017042160034,
"learning_rate": 0.0008390386016023307,
"loss": 1.6232,
"step": 295
},
{
"epoch": 0.2047380252464119,
"grad_norm": 0.751641035079956,
"learning_rate": 0.0008383102694828842,
"loss": 1.9296,
"step": 296
},
{
"epoch": 0.20542970776413627,
"grad_norm": 0.6361163258552551,
"learning_rate": 0.0008375819373634378,
"loss": 2.0047,
"step": 297
},
{
"epoch": 0.20612139028186063,
"grad_norm": 0.9554282426834106,
"learning_rate": 0.0008368536052439912,
"loss": 1.7218,
"step": 298
},
{
"epoch": 0.206813072799585,
"grad_norm": 0.7240822911262512,
"learning_rate": 0.0008361252731245447,
"loss": 1.6413,
"step": 299
},
{
"epoch": 0.20750475531730936,
"grad_norm": 0.46834996342658997,
"learning_rate": 0.0008353969410050984,
"loss": 1.2228,
"step": 300
},
{
"epoch": 0.20819643783503372,
"grad_norm": 0.7188776731491089,
"learning_rate": 0.0008346686088856519,
"loss": 1.4258,
"step": 301
},
{
"epoch": 0.20888812035275808,
"grad_norm": 0.588649332523346,
"learning_rate": 0.0008339402767662055,
"loss": 1.7551,
"step": 302
},
{
"epoch": 0.20957980287048245,
"grad_norm": 0.6962491273880005,
"learning_rate": 0.000833211944646759,
"loss": 2.1571,
"step": 303
},
{
"epoch": 0.2102714853882068,
"grad_norm": 0.6146702170372009,
"learning_rate": 0.0008324836125273124,
"loss": 2.0106,
"step": 304
},
{
"epoch": 0.21096316790593117,
"grad_norm": 0.6004481315612793,
"learning_rate": 0.000831755280407866,
"loss": 1.3429,
"step": 305
},
{
"epoch": 0.21165485042365553,
"grad_norm": 0.6162500381469727,
"learning_rate": 0.0008310269482884195,
"loss": 1.8476,
"step": 306
},
{
"epoch": 0.2123465329413799,
"grad_norm": 0.5027235746383667,
"learning_rate": 0.0008302986161689731,
"loss": 1.7516,
"step": 307
},
{
"epoch": 0.21303821545910426,
"grad_norm": 0.5416428446769714,
"learning_rate": 0.0008295702840495266,
"loss": 1.8363,
"step": 308
},
{
"epoch": 0.21372989797682865,
"grad_norm": 0.6236619353294373,
"learning_rate": 0.0008288419519300802,
"loss": 1.6738,
"step": 309
},
{
"epoch": 0.214421580494553,
"grad_norm": 0.5952901244163513,
"learning_rate": 0.0008281136198106337,
"loss": 1.4978,
"step": 310
},
{
"epoch": 0.21511326301227737,
"grad_norm": 0.7139809131622314,
"learning_rate": 0.0008273852876911871,
"loss": 1.593,
"step": 311
},
{
"epoch": 0.21580494553000173,
"grad_norm": 0.6548435091972351,
"learning_rate": 0.0008266569555717407,
"loss": 1.7464,
"step": 312
},
{
"epoch": 0.2164966280477261,
"grad_norm": 0.6812461018562317,
"learning_rate": 0.0008259286234522942,
"loss": 1.9677,
"step": 313
},
{
"epoch": 0.21718831056545046,
"grad_norm": 0.7574117183685303,
"learning_rate": 0.0008252002913328478,
"loss": 1.6102,
"step": 314
},
{
"epoch": 0.21787999308317482,
"grad_norm": 0.5767763257026672,
"learning_rate": 0.0008244719592134013,
"loss": 2.0047,
"step": 315
},
{
"epoch": 0.21857167560089918,
"grad_norm": 0.864742636680603,
"learning_rate": 0.0008237436270939549,
"loss": 1.6079,
"step": 316
},
{
"epoch": 0.21926335811862355,
"grad_norm": 1.1354854106903076,
"learning_rate": 0.0008230152949745084,
"loss": 1.3049,
"step": 317
},
{
"epoch": 0.2199550406363479,
"grad_norm": 0.8098461031913757,
"learning_rate": 0.0008222869628550618,
"loss": 1.1503,
"step": 318
},
{
"epoch": 0.22064672315407227,
"grad_norm": 0.7209709286689758,
"learning_rate": 0.0008215586307356154,
"loss": 1.9659,
"step": 319
},
{
"epoch": 0.22133840567179663,
"grad_norm": 0.6464136838912964,
"learning_rate": 0.0008208302986161689,
"loss": 1.8093,
"step": 320
},
{
"epoch": 0.222030088189521,
"grad_norm": 2.2832796573638916,
"learning_rate": 0.0008201019664967226,
"loss": 1.2252,
"step": 321
},
{
"epoch": 0.2227217707072454,
"grad_norm": 0.6651481986045837,
"learning_rate": 0.0008193736343772761,
"loss": 1.8967,
"step": 322
},
{
"epoch": 0.22341345322496975,
"grad_norm": 0.5639248490333557,
"learning_rate": 0.0008186453022578297,
"loss": 1.1423,
"step": 323
},
{
"epoch": 0.2241051357426941,
"grad_norm": 0.6734063029289246,
"learning_rate": 0.0008179169701383831,
"loss": 1.7355,
"step": 324
},
{
"epoch": 0.22479681826041847,
"grad_norm": 0.8061289191246033,
"learning_rate": 0.0008171886380189366,
"loss": 0.8081,
"step": 325
},
{
"epoch": 0.22548850077814284,
"grad_norm": 0.584674060344696,
"learning_rate": 0.0008164603058994902,
"loss": 1.1589,
"step": 326
},
{
"epoch": 0.2261801832958672,
"grad_norm": 0.6683285236358643,
"learning_rate": 0.0008157319737800437,
"loss": 1.7779,
"step": 327
},
{
"epoch": 0.22687186581359156,
"grad_norm": 0.7037453055381775,
"learning_rate": 0.0008150036416605973,
"loss": 2.0579,
"step": 328
},
{
"epoch": 0.22756354833131592,
"grad_norm": 0.6727277636528015,
"learning_rate": 0.0008142753095411508,
"loss": 1.7619,
"step": 329
},
{
"epoch": 0.22825523084904029,
"grad_norm": 0.7048072218894958,
"learning_rate": 0.0008135469774217044,
"loss": 1.7445,
"step": 330
},
{
"epoch": 0.22894691336676465,
"grad_norm": 0.5675456523895264,
"learning_rate": 0.0008128186453022578,
"loss": 1.8732,
"step": 331
},
{
"epoch": 0.229638595884489,
"grad_norm": 0.5742422342300415,
"learning_rate": 0.0008120903131828113,
"loss": 1.9362,
"step": 332
},
{
"epoch": 0.23033027840221337,
"grad_norm": 0.612397313117981,
"learning_rate": 0.0008113619810633649,
"loss": 1.3922,
"step": 333
},
{
"epoch": 0.23102196091993774,
"grad_norm": 0.5459281802177429,
"learning_rate": 0.0008106336489439184,
"loss": 1.823,
"step": 334
},
{
"epoch": 0.23171364343766213,
"grad_norm": 0.6739487051963806,
"learning_rate": 0.000809905316824472,
"loss": 1.9681,
"step": 335
},
{
"epoch": 0.2324053259553865,
"grad_norm": 0.550207257270813,
"learning_rate": 0.0008091769847050255,
"loss": 1.8516,
"step": 336
},
{
"epoch": 0.23309700847311085,
"grad_norm": 0.45742911100387573,
"learning_rate": 0.0008084486525855791,
"loss": 1.6626,
"step": 337
},
{
"epoch": 0.2337886909908352,
"grad_norm": 0.764325737953186,
"learning_rate": 0.0008077203204661325,
"loss": 1.7487,
"step": 338
},
{
"epoch": 0.23448037350855958,
"grad_norm": 0.5911192297935486,
"learning_rate": 0.000806991988346686,
"loss": 1.2527,
"step": 339
},
{
"epoch": 0.23517205602628394,
"grad_norm": 0.5554788708686829,
"learning_rate": 0.0008062636562272396,
"loss": 1.2681,
"step": 340
},
{
"epoch": 0.2358637385440083,
"grad_norm": 0.5522503852844238,
"learning_rate": 0.0008055353241077931,
"loss": 1.4677,
"step": 341
},
{
"epoch": 0.23655542106173266,
"grad_norm": 0.5872853994369507,
"learning_rate": 0.0008048069919883468,
"loss": 1.7502,
"step": 342
},
{
"epoch": 0.23724710357945702,
"grad_norm": 0.6910445690155029,
"learning_rate": 0.0008040786598689003,
"loss": 2.1711,
"step": 343
},
{
"epoch": 0.2379387860971814,
"grad_norm": 0.7674363255500793,
"learning_rate": 0.0008033503277494538,
"loss": 1.0802,
"step": 344
},
{
"epoch": 0.23863046861490575,
"grad_norm": 0.8880471587181091,
"learning_rate": 0.0008026219956300073,
"loss": 1.3592,
"step": 345
},
{
"epoch": 0.2393221511326301,
"grad_norm": 0.6581029295921326,
"learning_rate": 0.0008018936635105608,
"loss": 1.6392,
"step": 346
},
{
"epoch": 0.24001383365035447,
"grad_norm": 1.1078494787216187,
"learning_rate": 0.0008011653313911144,
"loss": 1.5557,
"step": 347
},
{
"epoch": 0.24070551616807886,
"grad_norm": 0.5563998818397522,
"learning_rate": 0.0008004369992716679,
"loss": 1.3359,
"step": 348
},
{
"epoch": 0.24139719868580323,
"grad_norm": 0.6263977885246277,
"learning_rate": 0.0007997086671522215,
"loss": 2.1702,
"step": 349
},
{
"epoch": 0.2420888812035276,
"grad_norm": 0.5776513814926147,
"learning_rate": 0.000798980335032775,
"loss": 1.9447,
"step": 350
},
{
"epoch": 0.24278056372125195,
"grad_norm": 0.748920202255249,
"learning_rate": 0.0007982520029133285,
"loss": 1.4751,
"step": 351
},
{
"epoch": 0.24347224623897631,
"grad_norm": 1.3247708082199097,
"learning_rate": 0.000797523670793882,
"loss": 1.7557,
"step": 352
},
{
"epoch": 0.24416392875670068,
"grad_norm": 0.7095309495925903,
"learning_rate": 0.0007967953386744355,
"loss": 2.2349,
"step": 353
},
{
"epoch": 0.24485561127442504,
"grad_norm": 0.532289981842041,
"learning_rate": 0.0007960670065549891,
"loss": 1.1916,
"step": 354
},
{
"epoch": 0.2455472937921494,
"grad_norm": 0.6105953454971313,
"learning_rate": 0.0007953386744355426,
"loss": 1.6084,
"step": 355
},
{
"epoch": 0.24623897630987376,
"grad_norm": 0.6233397126197815,
"learning_rate": 0.0007946103423160962,
"loss": 1.7177,
"step": 356
},
{
"epoch": 0.24693065882759813,
"grad_norm": 10.080041885375977,
"learning_rate": 0.0007938820101966497,
"loss": 2.1116,
"step": 357
},
{
"epoch": 0.2476223413453225,
"grad_norm": 0.5390161275863647,
"learning_rate": 0.0007931536780772032,
"loss": 1.7296,
"step": 358
},
{
"epoch": 0.24831402386304685,
"grad_norm": 1.2583034038543701,
"learning_rate": 0.0007924253459577567,
"loss": 1.4504,
"step": 359
},
{
"epoch": 0.2490057063807712,
"grad_norm": 0.6620193719863892,
"learning_rate": 0.0007916970138383102,
"loss": 1.9403,
"step": 360
},
{
"epoch": 0.2496973888984956,
"grad_norm": 0.8169893622398376,
"learning_rate": 0.0007909686817188638,
"loss": 1.2534,
"step": 361
},
{
"epoch": 0.25038907141621997,
"grad_norm": 0.693074643611908,
"learning_rate": 0.0007902403495994173,
"loss": 2.112,
"step": 362
},
{
"epoch": 0.2510807539339443,
"grad_norm": 0.628724217414856,
"learning_rate": 0.000789512017479971,
"loss": 2.0005,
"step": 363
},
{
"epoch": 0.2517724364516687,
"grad_norm": 0.6025403141975403,
"learning_rate": 0.0007887836853605243,
"loss": 1.0489,
"step": 364
},
{
"epoch": 0.252464118969393,
"grad_norm": 0.6881316900253296,
"learning_rate": 0.000788055353241078,
"loss": 1.1542,
"step": 365
},
{
"epoch": 0.2531558014871174,
"grad_norm": 1.035561442375183,
"learning_rate": 0.0007873270211216315,
"loss": 1.5017,
"step": 366
},
{
"epoch": 0.25384748400484175,
"grad_norm": 0.5408887267112732,
"learning_rate": 0.000786598689002185,
"loss": 2.0277,
"step": 367
},
{
"epoch": 0.25453916652256614,
"grad_norm": 0.5508919358253479,
"learning_rate": 0.0007858703568827386,
"loss": 1.5144,
"step": 368
},
{
"epoch": 0.25523084904029053,
"grad_norm": 0.9890360236167908,
"learning_rate": 0.0007851420247632921,
"loss": 1.0548,
"step": 369
},
{
"epoch": 0.25592253155801487,
"grad_norm": 0.6218384504318237,
"learning_rate": 0.0007844136926438457,
"loss": 1.4775,
"step": 370
},
{
"epoch": 0.25661421407573926,
"grad_norm": 0.5427407622337341,
"learning_rate": 0.0007836853605243991,
"loss": 1.6648,
"step": 371
},
{
"epoch": 0.2573058965934636,
"grad_norm": 0.6376339793205261,
"learning_rate": 0.0007829570284049527,
"loss": 1.7319,
"step": 372
},
{
"epoch": 0.257997579111188,
"grad_norm": 0.5155366063117981,
"learning_rate": 0.0007822286962855062,
"loss": 1.7143,
"step": 373
},
{
"epoch": 0.2586892616289123,
"grad_norm": 1.0346859693527222,
"learning_rate": 0.0007815003641660597,
"loss": 1.96,
"step": 374
},
{
"epoch": 0.2593809441466367,
"grad_norm": 0.5473276376724243,
"learning_rate": 0.0007807720320466133,
"loss": 1.8269,
"step": 375
},
{
"epoch": 0.26007262666436104,
"grad_norm": 0.9501216411590576,
"learning_rate": 0.0007800436999271668,
"loss": 1.5286,
"step": 376
},
{
"epoch": 0.26076430918208543,
"grad_norm": 0.4338766634464264,
"learning_rate": 0.0007793153678077204,
"loss": 0.8967,
"step": 377
},
{
"epoch": 0.26145599169980976,
"grad_norm": 12.023887634277344,
"learning_rate": 0.0007785870356882738,
"loss": 1.8385,
"step": 378
},
{
"epoch": 0.26214767421753415,
"grad_norm": 0.5424131155014038,
"learning_rate": 0.0007778587035688274,
"loss": 1.7216,
"step": 379
},
{
"epoch": 0.2628393567352585,
"grad_norm": 0.6199079751968384,
"learning_rate": 0.0007771303714493809,
"loss": 1.9606,
"step": 380
},
{
"epoch": 0.2635310392529829,
"grad_norm": 0.6037024259567261,
"learning_rate": 0.0007764020393299344,
"loss": 1.4306,
"step": 381
},
{
"epoch": 0.26422272177070727,
"grad_norm": 0.6312823295593262,
"learning_rate": 0.000775673707210488,
"loss": 1.0085,
"step": 382
},
{
"epoch": 0.2649144042884316,
"grad_norm": 0.5497464537620544,
"learning_rate": 0.0007749453750910415,
"loss": 1.4443,
"step": 383
},
{
"epoch": 0.265606086806156,
"grad_norm": 0.9736106395721436,
"learning_rate": 0.000774217042971595,
"loss": 1.7324,
"step": 384
},
{
"epoch": 0.26629776932388033,
"grad_norm": 0.6415931582450867,
"learning_rate": 0.0007734887108521485,
"loss": 0.9575,
"step": 385
},
{
"epoch": 0.2669894518416047,
"grad_norm": 0.570580244064331,
"learning_rate": 0.0007727603787327021,
"loss": 1.7195,
"step": 386
},
{
"epoch": 0.26768113435932905,
"grad_norm": 0.7033479809761047,
"learning_rate": 0.0007720320466132557,
"loss": 2.0542,
"step": 387
},
{
"epoch": 0.26837281687705344,
"grad_norm": 0.7575972676277161,
"learning_rate": 0.0007713037144938092,
"loss": 1.7199,
"step": 388
},
{
"epoch": 0.2690644993947778,
"grad_norm": 0.5389835238456726,
"learning_rate": 0.0007705753823743628,
"loss": 1.5259,
"step": 389
},
{
"epoch": 0.26975618191250217,
"grad_norm": 0.574540913105011,
"learning_rate": 0.0007698470502549163,
"loss": 1.5391,
"step": 390
},
{
"epoch": 0.2704478644302265,
"grad_norm": 0.5298869013786316,
"learning_rate": 0.0007691187181354698,
"loss": 1.9361,
"step": 391
},
{
"epoch": 0.2711395469479509,
"grad_norm": 0.5654643177986145,
"learning_rate": 0.0007683903860160233,
"loss": 1.8952,
"step": 392
},
{
"epoch": 0.27183122946567523,
"grad_norm": 0.7499473094940186,
"learning_rate": 0.0007676620538965769,
"loss": 1.0416,
"step": 393
},
{
"epoch": 0.2725229119833996,
"grad_norm": 0.6296089887619019,
"learning_rate": 0.0007669337217771304,
"loss": 1.4852,
"step": 394
},
{
"epoch": 0.273214594501124,
"grad_norm": 0.5401056408882141,
"learning_rate": 0.0007662053896576839,
"loss": 1.8153,
"step": 395
},
{
"epoch": 0.27390627701884834,
"grad_norm": 0.5954565405845642,
"learning_rate": 0.0007654770575382375,
"loss": 1.7855,
"step": 396
},
{
"epoch": 0.27459795953657273,
"grad_norm": 0.9156423211097717,
"learning_rate": 0.000764748725418791,
"loss": 1.9173,
"step": 397
},
{
"epoch": 0.27528964205429707,
"grad_norm": 0.6210983395576477,
"learning_rate": 0.0007640203932993445,
"loss": 1.9079,
"step": 398
},
{
"epoch": 0.27598132457202146,
"grad_norm": 0.529227077960968,
"learning_rate": 0.000763292061179898,
"loss": 1.8594,
"step": 399
},
{
"epoch": 0.2766730070897458,
"grad_norm": 0.80283522605896,
"learning_rate": 0.0007625637290604516,
"loss": 1.5913,
"step": 400
},
{
"epoch": 0.2773646896074702,
"grad_norm": 0.5629950761795044,
"learning_rate": 0.0007618353969410051,
"loss": 1.5373,
"step": 401
},
{
"epoch": 0.2780563721251945,
"grad_norm": 0.6493797898292542,
"learning_rate": 0.0007611070648215586,
"loss": 2.2277,
"step": 402
},
{
"epoch": 0.2787480546429189,
"grad_norm": 0.5912362933158875,
"learning_rate": 0.0007603787327021122,
"loss": 1.4388,
"step": 403
},
{
"epoch": 0.27943973716064324,
"grad_norm": 0.7361041307449341,
"learning_rate": 0.0007596504005826656,
"loss": 0.9273,
"step": 404
},
{
"epoch": 0.28013141967836763,
"grad_norm": 0.8257749676704407,
"learning_rate": 0.0007589220684632192,
"loss": 0.7073,
"step": 405
},
{
"epoch": 0.28082310219609197,
"grad_norm": 0.8185616731643677,
"learning_rate": 0.0007581937363437727,
"loss": 1.9928,
"step": 406
},
{
"epoch": 0.28151478471381636,
"grad_norm": 0.5865523219108582,
"learning_rate": 0.0007574654042243263,
"loss": 1.1638,
"step": 407
},
{
"epoch": 0.28220646723154075,
"grad_norm": 0.5210615396499634,
"learning_rate": 0.0007567370721048798,
"loss": 1.1934,
"step": 408
},
{
"epoch": 0.2828981497492651,
"grad_norm": 0.6531309485435486,
"learning_rate": 0.0007560087399854334,
"loss": 1.8601,
"step": 409
},
{
"epoch": 0.28358983226698947,
"grad_norm": 0.7874279022216797,
"learning_rate": 0.000755280407865987,
"loss": 1.4043,
"step": 410
},
{
"epoch": 0.2842815147847138,
"grad_norm": 1.121983289718628,
"learning_rate": 0.0007545520757465404,
"loss": 1.7112,
"step": 411
},
{
"epoch": 0.2849731973024382,
"grad_norm": 0.5046870708465576,
"learning_rate": 0.000753823743627094,
"loss": 1.6255,
"step": 412
},
{
"epoch": 0.28566487982016253,
"grad_norm": 0.4254264831542969,
"learning_rate": 0.0007530954115076475,
"loss": 1.3181,
"step": 413
},
{
"epoch": 0.2863565623378869,
"grad_norm": 0.8146479725837708,
"learning_rate": 0.0007523670793882011,
"loss": 1.7557,
"step": 414
},
{
"epoch": 0.28704824485561126,
"grad_norm": 0.47856444120407104,
"learning_rate": 0.0007516387472687546,
"loss": 1.6217,
"step": 415
},
{
"epoch": 0.28773992737333565,
"grad_norm": 0.5287722945213318,
"learning_rate": 0.0007509104151493081,
"loss": 1.8146,
"step": 416
},
{
"epoch": 0.28843160989106,
"grad_norm": 0.8364676833152771,
"learning_rate": 0.0007501820830298617,
"loss": 1.1081,
"step": 417
},
{
"epoch": 0.28912329240878437,
"grad_norm": 0.6923417448997498,
"learning_rate": 0.0007494537509104151,
"loss": 1.0822,
"step": 418
},
{
"epoch": 0.2898149749265087,
"grad_norm": 0.7535339593887329,
"learning_rate": 0.0007487254187909687,
"loss": 1.8013,
"step": 419
},
{
"epoch": 0.2905066574442331,
"grad_norm": 1.226645588874817,
"learning_rate": 0.0007479970866715222,
"loss": 1.1891,
"step": 420
},
{
"epoch": 0.2911983399619575,
"grad_norm": 0.7388406991958618,
"learning_rate": 0.0007472687545520758,
"loss": 1.0448,
"step": 421
},
{
"epoch": 0.2918900224796818,
"grad_norm": 0.6585919260978699,
"learning_rate": 0.0007465404224326293,
"loss": 1.0951,
"step": 422
},
{
"epoch": 0.2925817049974062,
"grad_norm": 0.7637200355529785,
"learning_rate": 0.0007458120903131828,
"loss": 1.3508,
"step": 423
},
{
"epoch": 0.29327338751513055,
"grad_norm": 0.5754939913749695,
"learning_rate": 0.0007450837581937363,
"loss": 1.4937,
"step": 424
},
{
"epoch": 0.29396507003285494,
"grad_norm": 0.6434321999549866,
"learning_rate": 0.0007443554260742898,
"loss": 2.0318,
"step": 425
},
{
"epoch": 0.29465675255057927,
"grad_norm": 0.7063912749290466,
"learning_rate": 0.0007436270939548434,
"loss": 2.0036,
"step": 426
},
{
"epoch": 0.29534843506830366,
"grad_norm": 0.5120965242385864,
"learning_rate": 0.0007428987618353969,
"loss": 1.7687,
"step": 427
},
{
"epoch": 0.296040117586028,
"grad_norm": 0.7403333187103271,
"learning_rate": 0.0007421704297159505,
"loss": 1.5747,
"step": 428
},
{
"epoch": 0.2967318001037524,
"grad_norm": 0.5760396122932434,
"learning_rate": 0.000741442097596504,
"loss": 1.7351,
"step": 429
},
{
"epoch": 0.2974234826214767,
"grad_norm": 0.6725696325302124,
"learning_rate": 0.0007407137654770575,
"loss": 1.6643,
"step": 430
},
{
"epoch": 0.2981151651392011,
"grad_norm": 0.5612234473228455,
"learning_rate": 0.000739985433357611,
"loss": 1.47,
"step": 431
},
{
"epoch": 0.29880684765692545,
"grad_norm": 0.48072177171707153,
"learning_rate": 0.0007392571012381646,
"loss": 1.2806,
"step": 432
},
{
"epoch": 0.29949853017464984,
"grad_norm": 0.6465651988983154,
"learning_rate": 0.0007385287691187182,
"loss": 1.7875,
"step": 433
},
{
"epoch": 0.3001902126923742,
"grad_norm": 0.7127341628074646,
"learning_rate": 0.0007378004369992717,
"loss": 1.9259,
"step": 434
},
{
"epoch": 0.30088189521009856,
"grad_norm": 0.5775954127311707,
"learning_rate": 0.0007370721048798253,
"loss": 1.6652,
"step": 435
},
{
"epoch": 0.30157357772782295,
"grad_norm": 0.6212239861488342,
"learning_rate": 0.0007363437727603788,
"loss": 1.7045,
"step": 436
},
{
"epoch": 0.3022652602455473,
"grad_norm": 0.6443663835525513,
"learning_rate": 0.0007356154406409323,
"loss": 1.9416,
"step": 437
},
{
"epoch": 0.3029569427632717,
"grad_norm": 0.6374452710151672,
"learning_rate": 0.0007348871085214858,
"loss": 1.7767,
"step": 438
},
{
"epoch": 0.303648625280996,
"grad_norm": 0.7170910835266113,
"learning_rate": 0.0007341587764020393,
"loss": 1.1563,
"step": 439
},
{
"epoch": 0.3043403077987204,
"grad_norm": 1.249282717704773,
"learning_rate": 0.0007334304442825929,
"loss": 1.7667,
"step": 440
},
{
"epoch": 0.30503199031644473,
"grad_norm": 0.6763089895248413,
"learning_rate": 0.0007327021121631464,
"loss": 1.9292,
"step": 441
},
{
"epoch": 0.3057236728341691,
"grad_norm": 0.7364800572395325,
"learning_rate": 0.0007319737800437,
"loss": 1.6102,
"step": 442
},
{
"epoch": 0.30641535535189346,
"grad_norm": 0.8224323987960815,
"learning_rate": 0.0007312454479242535,
"loss": 1.0648,
"step": 443
},
{
"epoch": 0.30710703786961785,
"grad_norm": 0.752155065536499,
"learning_rate": 0.0007305171158048069,
"loss": 1.788,
"step": 444
},
{
"epoch": 0.3077987203873422,
"grad_norm": 0.5755220651626587,
"learning_rate": 0.0007297887836853605,
"loss": 1.2096,
"step": 445
},
{
"epoch": 0.3084904029050666,
"grad_norm": 0.8400484323501587,
"learning_rate": 0.000729060451565914,
"loss": 1.3676,
"step": 446
},
{
"epoch": 0.30918208542279096,
"grad_norm": 0.5796182155609131,
"learning_rate": 0.0007283321194464676,
"loss": 1.5668,
"step": 447
},
{
"epoch": 0.3098737679405153,
"grad_norm": 0.985273003578186,
"learning_rate": 0.0007276037873270211,
"loss": 1.4033,
"step": 448
},
{
"epoch": 0.3105654504582397,
"grad_norm": 0.6488866209983826,
"learning_rate": 0.0007268754552075747,
"loss": 1.5978,
"step": 449
},
{
"epoch": 0.311257132975964,
"grad_norm": 0.5811178088188171,
"learning_rate": 0.0007261471230881282,
"loss": 2.1722,
"step": 450
},
{
"epoch": 0.3119488154936884,
"grad_norm": 0.5769645571708679,
"learning_rate": 0.0007254187909686816,
"loss": 1.9041,
"step": 451
},
{
"epoch": 0.31264049801141275,
"grad_norm": 0.769631028175354,
"learning_rate": 0.0007246904588492352,
"loss": 1.3963,
"step": 452
},
{
"epoch": 0.31333218052913714,
"grad_norm": 0.8301665186882019,
"learning_rate": 0.0007239621267297888,
"loss": 1.623,
"step": 453
},
{
"epoch": 0.3140238630468615,
"grad_norm": 0.6046326756477356,
"learning_rate": 0.0007232337946103424,
"loss": 1.8703,
"step": 454
},
{
"epoch": 0.31471554556458586,
"grad_norm": 0.5623071193695068,
"learning_rate": 0.0007225054624908959,
"loss": 1.6112,
"step": 455
},
{
"epoch": 0.3154072280823102,
"grad_norm": 0.7813363671302795,
"learning_rate": 0.0007217771303714495,
"loss": 1.681,
"step": 456
},
{
"epoch": 0.3160989106000346,
"grad_norm": 0.6935021877288818,
"learning_rate": 0.000721048798252003,
"loss": 1.5276,
"step": 457
},
{
"epoch": 0.3167905931177589,
"grad_norm": 1.0678547620773315,
"learning_rate": 0.0007203204661325564,
"loss": 1.2066,
"step": 458
},
{
"epoch": 0.3174822756354833,
"grad_norm": 0.985817551612854,
"learning_rate": 0.00071959213401311,
"loss": 1.5923,
"step": 459
},
{
"epoch": 0.3181739581532077,
"grad_norm": 0.6185691356658936,
"learning_rate": 0.0007188638018936635,
"loss": 1.1597,
"step": 460
},
{
"epoch": 0.31886564067093204,
"grad_norm": 0.6517722010612488,
"learning_rate": 0.0007181354697742171,
"loss": 1.0909,
"step": 461
},
{
"epoch": 0.31955732318865643,
"grad_norm": 0.6660693883895874,
"learning_rate": 0.0007174071376547706,
"loss": 1.8785,
"step": 462
},
{
"epoch": 0.32024900570638076,
"grad_norm": 0.8916088938713074,
"learning_rate": 0.0007166788055353242,
"loss": 1.0246,
"step": 463
},
{
"epoch": 0.32094068822410515,
"grad_norm": 0.6262109875679016,
"learning_rate": 0.0007159504734158776,
"loss": 1.8117,
"step": 464
},
{
"epoch": 0.3216323707418295,
"grad_norm": 0.5265359878540039,
"learning_rate": 0.0007152221412964311,
"loss": 1.8935,
"step": 465
},
{
"epoch": 0.3223240532595539,
"grad_norm": 0.584057629108429,
"learning_rate": 0.0007144938091769847,
"loss": 2.1743,
"step": 466
},
{
"epoch": 0.3230157357772782,
"grad_norm": 0.6198194026947021,
"learning_rate": 0.0007137654770575382,
"loss": 1.0213,
"step": 467
},
{
"epoch": 0.3237074182950026,
"grad_norm": 0.8011792898178101,
"learning_rate": 0.0007130371449380918,
"loss": 1.3635,
"step": 468
},
{
"epoch": 0.32439910081272694,
"grad_norm": 0.6226928234100342,
"learning_rate": 0.0007123088128186453,
"loss": 1.5389,
"step": 469
},
{
"epoch": 0.3250907833304513,
"grad_norm": 0.6563382744789124,
"learning_rate": 0.0007115804806991989,
"loss": 1.6626,
"step": 470
},
{
"epoch": 0.32578246584817566,
"grad_norm": 0.6689289808273315,
"learning_rate": 0.0007108521485797523,
"loss": 0.2715,
"step": 471
},
{
"epoch": 0.32647414836590005,
"grad_norm": 0.7439026832580566,
"learning_rate": 0.0007101238164603058,
"loss": 1.9645,
"step": 472
},
{
"epoch": 0.32716583088362444,
"grad_norm": 0.6306619048118591,
"learning_rate": 0.0007093954843408594,
"loss": 2.026,
"step": 473
},
{
"epoch": 0.3278575134013488,
"grad_norm": 0.5557575225830078,
"learning_rate": 0.000708667152221413,
"loss": 1.7433,
"step": 474
},
{
"epoch": 0.32854919591907317,
"grad_norm": 0.6485971212387085,
"learning_rate": 0.0007079388201019666,
"loss": 1.8697,
"step": 475
},
{
"epoch": 0.3292408784367975,
"grad_norm": 0.6541025042533875,
"learning_rate": 0.0007072104879825201,
"loss": 1.8979,
"step": 476
},
{
"epoch": 0.3299325609545219,
"grad_norm": 0.617359459400177,
"learning_rate": 0.0007064821558630737,
"loss": 2.1617,
"step": 477
},
{
"epoch": 0.3306242434722462,
"grad_norm": 0.5855705142021179,
"learning_rate": 0.0007057538237436271,
"loss": 1.8079,
"step": 478
},
{
"epoch": 0.3313159259899706,
"grad_norm": 0.5983846187591553,
"learning_rate": 0.0007050254916241806,
"loss": 1.8796,
"step": 479
},
{
"epoch": 0.33200760850769495,
"grad_norm": 1.1877782344818115,
"learning_rate": 0.0007042971595047342,
"loss": 1.5439,
"step": 480
},
{
"epoch": 0.33269929102541934,
"grad_norm": 0.48594361543655396,
"learning_rate": 0.0007035688273852877,
"loss": 1.3418,
"step": 481
},
{
"epoch": 0.3333909735431437,
"grad_norm": 1.506446123123169,
"learning_rate": 0.0007028404952658413,
"loss": 1.2715,
"step": 482
},
{
"epoch": 0.33408265606086807,
"grad_norm": 1.1047416925430298,
"learning_rate": 0.0007021121631463948,
"loss": 1.6349,
"step": 483
},
{
"epoch": 0.3347743385785924,
"grad_norm": 0.6083149909973145,
"learning_rate": 0.0007013838310269483,
"loss": 1.9314,
"step": 484
},
{
"epoch": 0.3354660210963168,
"grad_norm": 1.35393226146698,
"learning_rate": 0.0007006554989075018,
"loss": 1.8499,
"step": 485
},
{
"epoch": 0.3361577036140412,
"grad_norm": 0.9483690857887268,
"learning_rate": 0.0006999271667880553,
"loss": 1.51,
"step": 486
},
{
"epoch": 0.3368493861317655,
"grad_norm": 0.5228135585784912,
"learning_rate": 0.0006991988346686089,
"loss": 1.9898,
"step": 487
},
{
"epoch": 0.3375410686494899,
"grad_norm": 0.5586177706718445,
"learning_rate": 0.0006984705025491624,
"loss": 1.3041,
"step": 488
},
{
"epoch": 0.33823275116721424,
"grad_norm": 0.5928153991699219,
"learning_rate": 0.000697742170429716,
"loss": 2.0132,
"step": 489
},
{
"epoch": 0.33892443368493863,
"grad_norm": 1.3419654369354248,
"learning_rate": 0.0006970138383102695,
"loss": 1.0039,
"step": 490
},
{
"epoch": 0.33961611620266297,
"grad_norm": 0.6874161958694458,
"learning_rate": 0.000696285506190823,
"loss": 1.2347,
"step": 491
},
{
"epoch": 0.34030779872038736,
"grad_norm": 0.6180602312088013,
"learning_rate": 0.0006955571740713765,
"loss": 1.1032,
"step": 492
},
{
"epoch": 0.3409994812381117,
"grad_norm": 0.6274152994155884,
"learning_rate": 0.00069482884195193,
"loss": 1.8178,
"step": 493
},
{
"epoch": 0.3416911637558361,
"grad_norm": 0.6605129837989807,
"learning_rate": 0.0006941005098324836,
"loss": 1.4469,
"step": 494
},
{
"epoch": 0.3423828462735604,
"grad_norm": 0.5513582229614258,
"learning_rate": 0.0006933721777130371,
"loss": 1.6028,
"step": 495
},
{
"epoch": 0.3430745287912848,
"grad_norm": 0.4937622845172882,
"learning_rate": 0.0006926438455935908,
"loss": 1.676,
"step": 496
},
{
"epoch": 0.34376621130900914,
"grad_norm": 0.6980156302452087,
"learning_rate": 0.0006919155134741443,
"loss": 1.8214,
"step": 497
},
{
"epoch": 0.34445789382673353,
"grad_norm": 0.589282214641571,
"learning_rate": 0.0006911871813546978,
"loss": 1.5903,
"step": 498
},
{
"epoch": 0.3451495763444579,
"grad_norm": 0.5733899474143982,
"learning_rate": 0.0006904588492352513,
"loss": 2.0085,
"step": 499
},
{
"epoch": 0.34584125886218225,
"grad_norm": 13.85383129119873,
"learning_rate": 0.0006897305171158048,
"loss": 1.3935,
"step": 500
},
{
"epoch": 0.34584125886218225,
"eval_loss": 1.4428730010986328,
"eval_runtime": 586.3776,
"eval_samples_per_second": 2.191,
"eval_steps_per_second": 1.097,
"step": 500
}
],
"logging_steps": 1,
"max_steps": 1446,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.596867200925696e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}