{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 2111,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.014214641080312722,
"grad_norm": 2.834075681079323,
"learning_rate": 4.2452830188679244e-07,
"loss": 0.8888,
"step": 10
},
{
"epoch": 0.028429282160625444,
"grad_norm": 2.382206917176712,
"learning_rate": 8.962264150943397e-07,
"loss": 0.8863,
"step": 20
},
{
"epoch": 0.042643923240938165,
"grad_norm": 1.5553139981482849,
"learning_rate": 1.3679245283018869e-06,
"loss": 0.8307,
"step": 30
},
{
"epoch": 0.05685856432125089,
"grad_norm": 1.2537358335844357,
"learning_rate": 1.839622641509434e-06,
"loss": 0.7826,
"step": 40
},
{
"epoch": 0.07107320540156362,
"grad_norm": 0.766804883609826,
"learning_rate": 2.3113207547169815e-06,
"loss": 0.7347,
"step": 50
},
{
"epoch": 0.08528784648187633,
"grad_norm": 0.7667121953750998,
"learning_rate": 2.7830188679245286e-06,
"loss": 0.6931,
"step": 60
},
{
"epoch": 0.09950248756218906,
"grad_norm": 0.6131128302034111,
"learning_rate": 3.2547169811320758e-06,
"loss": 0.6639,
"step": 70
},
{
"epoch": 0.11371712864250177,
"grad_norm": 0.6988128095327644,
"learning_rate": 3.726415094339623e-06,
"loss": 0.6375,
"step": 80
},
{
"epoch": 0.1279317697228145,
"grad_norm": 0.6317415794697651,
"learning_rate": 4.19811320754717e-06,
"loss": 0.6236,
"step": 90
},
{
"epoch": 0.14214641080312723,
"grad_norm": 0.6086478743343868,
"learning_rate": 4.6698113207547175e-06,
"loss": 0.6138,
"step": 100
},
{
"epoch": 0.15636105188343993,
"grad_norm": 0.5886423623895793,
"learning_rate": 5.1415094339622655e-06,
"loss": 0.6086,
"step": 110
},
{
"epoch": 0.17057569296375266,
"grad_norm": 0.6688530721380745,
"learning_rate": 5.613207547169813e-06,
"loss": 0.603,
"step": 120
},
{
"epoch": 0.1847903340440654,
"grad_norm": 0.6884356457949358,
"learning_rate": 6.08490566037736e-06,
"loss": 0.5903,
"step": 130
},
{
"epoch": 0.19900497512437812,
"grad_norm": 0.609548931554606,
"learning_rate": 6.556603773584907e-06,
"loss": 0.5966,
"step": 140
},
{
"epoch": 0.21321961620469082,
"grad_norm": 0.5949327566115654,
"learning_rate": 7.028301886792454e-06,
"loss": 0.5904,
"step": 150
},
{
"epoch": 0.22743425728500355,
"grad_norm": 0.6242334967031955,
"learning_rate": 7.500000000000001e-06,
"loss": 0.579,
"step": 160
},
{
"epoch": 0.24164889836531628,
"grad_norm": 0.6241786288297929,
"learning_rate": 7.971698113207547e-06,
"loss": 0.5791,
"step": 170
},
{
"epoch": 0.255863539445629,
"grad_norm": 0.6521923298448227,
"learning_rate": 8.443396226415095e-06,
"loss": 0.5772,
"step": 180
},
{
"epoch": 0.27007818052594174,
"grad_norm": 0.72406379743765,
"learning_rate": 8.915094339622642e-06,
"loss": 0.5783,
"step": 190
},
{
"epoch": 0.28429282160625446,
"grad_norm": 0.6865581822637807,
"learning_rate": 9.38679245283019e-06,
"loss": 0.5667,
"step": 200
},
{
"epoch": 0.29850746268656714,
"grad_norm": 0.7370553596384785,
"learning_rate": 9.858490566037736e-06,
"loss": 0.5787,
"step": 210
},
{
"epoch": 0.31272210376687987,
"grad_norm": 0.680822415396935,
"learning_rate": 9.999665093340166e-06,
"loss": 0.5693,
"step": 220
},
{
"epoch": 0.3269367448471926,
"grad_norm": 0.7111908345205171,
"learning_rate": 9.998024842193876e-06,
"loss": 0.5513,
"step": 230
},
{
"epoch": 0.3411513859275053,
"grad_norm": 0.7236426819082362,
"learning_rate": 9.995018180960701e-06,
"loss": 0.5713,
"step": 240
},
{
"epoch": 0.35536602700781805,
"grad_norm": 0.5852089481414202,
"learning_rate": 9.990645931631796e-06,
"loss": 0.5538,
"step": 250
},
{
"epoch": 0.3695806680881308,
"grad_norm": 0.6824815552672151,
"learning_rate": 9.984909289536473e-06,
"loss": 0.5532,
"step": 260
},
{
"epoch": 0.3837953091684435,
"grad_norm": 0.6415502778484192,
"learning_rate": 9.9778098230154e-06,
"loss": 0.5651,
"step": 270
},
{
"epoch": 0.39800995024875624,
"grad_norm": 0.6951097230627817,
"learning_rate": 9.969349472991838e-06,
"loss": 0.5545,
"step": 280
},
{
"epoch": 0.41222459132906897,
"grad_norm": 0.6940350949391038,
"learning_rate": 9.959530552441006e-06,
"loss": 0.5618,
"step": 290
},
{
"epoch": 0.42643923240938164,
"grad_norm": 0.6795280607455072,
"learning_rate": 9.94835574575774e-06,
"loss": 0.5678,
"step": 300
},
{
"epoch": 0.44065387348969437,
"grad_norm": 0.740615761131801,
"learning_rate": 9.93582810802261e-06,
"loss": 0.5589,
"step": 310
},
{
"epoch": 0.4548685145700071,
"grad_norm": 0.6190142422707086,
"learning_rate": 9.921951064166685e-06,
"loss": 0.5545,
"step": 320
},
{
"epoch": 0.4690831556503198,
"grad_norm": 0.6133098155280035,
"learning_rate": 9.90672840803519e-06,
"loss": 0.5424,
"step": 330
},
{
"epoch": 0.48329779673063256,
"grad_norm": 0.6671428298287642,
"learning_rate": 9.890164301350318e-06,
"loss": 0.5543,
"step": 340
},
{
"epoch": 0.4975124378109453,
"grad_norm": 0.6195386955465261,
"learning_rate": 9.872263272573443e-06,
"loss": 0.5436,
"step": 350
},
{
"epoch": 0.511727078891258,
"grad_norm": 0.5930299278047146,
"learning_rate": 9.853030215667095e-06,
"loss": 0.5419,
"step": 360
},
{
"epoch": 0.5259417199715707,
"grad_norm": 0.5808813232166387,
"learning_rate": 9.832470388756987e-06,
"loss": 0.5355,
"step": 370
},
{
"epoch": 0.5401563610518835,
"grad_norm": 0.6451836622188146,
"learning_rate": 9.81058941269451e-06,
"loss": 0.5411,
"step": 380
},
{
"epoch": 0.5543710021321961,
"grad_norm": 0.6430253547664657,
"learning_rate": 9.787393269520039e-06,
"loss": 0.5424,
"step": 390
},
{
"epoch": 0.5685856432125089,
"grad_norm": 0.6434468036531783,
"learning_rate": 9.762888300827507e-06,
"loss": 0.5375,
"step": 400
},
{
"epoch": 0.5828002842928216,
"grad_norm": 0.6545071562387185,
"learning_rate": 9.737081206030671e-06,
"loss": 0.5392,
"step": 410
},
{
"epoch": 0.5970149253731343,
"grad_norm": 0.6520857107905386,
"learning_rate": 9.709979040531568e-06,
"loss": 0.5394,
"step": 420
},
{
"epoch": 0.6112295664534471,
"grad_norm": 0.6850212115229948,
"learning_rate": 9.681589213791633e-06,
"loss": 0.5466,
"step": 430
},
{
"epoch": 0.6254442075337597,
"grad_norm": 0.6269054102432758,
"learning_rate": 9.651919487306025e-06,
"loss": 0.5322,
"step": 440
},
{
"epoch": 0.6396588486140725,
"grad_norm": 0.736787330144809,
"learning_rate": 9.620977972481715e-06,
"loss": 0.5429,
"step": 450
},
{
"epoch": 0.6538734896943852,
"grad_norm": 0.662925500196175,
"learning_rate": 9.588773128419907e-06,
"loss": 0.5312,
"step": 460
},
{
"epoch": 0.668088130774698,
"grad_norm": 0.6148143038929397,
"learning_rate": 9.555313759603403e-06,
"loss": 0.5316,
"step": 470
},
{
"epoch": 0.6823027718550106,
"grad_norm": 0.6077969318281708,
"learning_rate": 9.520609013489548e-06,
"loss": 0.5223,
"step": 480
},
{
"epoch": 0.6965174129353234,
"grad_norm": 0.6168040167150479,
"learning_rate": 9.484668378009407e-06,
"loss": 0.5405,
"step": 490
},
{
"epoch": 0.7107320540156361,
"grad_norm": 0.6128096552430267,
"learning_rate": 9.447501678973853e-06,
"loss": 0.5338,
"step": 500
},
{
"epoch": 0.7107320540156361,
"eval_loss": 0.5341117978096008,
"eval_runtime": 234.2375,
"eval_samples_per_second": 21.346,
"eval_steps_per_second": 2.668,
"step": 500
},
{
"epoch": 0.7249466950959488,
"grad_norm": 0.6234640184402841,
"learning_rate": 9.409119077387295e-06,
"loss": 0.5377,
"step": 510
},
{
"epoch": 0.7391613361762616,
"grad_norm": 0.6131203844453222,
"learning_rate": 9.369531066669759e-06,
"loss": 0.5259,
"step": 520
},
{
"epoch": 0.7533759772565742,
"grad_norm": 0.6262740746023064,
"learning_rate": 9.328748469788094e-06,
"loss": 0.5328,
"step": 530
},
{
"epoch": 0.767590618336887,
"grad_norm": 0.6262806652099724,
"learning_rate": 9.286782436297072e-06,
"loss": 0.5344,
"step": 540
},
{
"epoch": 0.7818052594171997,
"grad_norm": 0.6615647825526851,
"learning_rate": 9.243644439291223e-06,
"loss": 0.5257,
"step": 550
},
{
"epoch": 0.7960199004975125,
"grad_norm": 0.6072216321225935,
"learning_rate": 9.1993462722682e-06,
"loss": 0.5339,
"step": 560
},
{
"epoch": 0.8102345415778252,
"grad_norm": 0.5963884112129639,
"learning_rate": 9.15390004590455e-06,
"loss": 0.5235,
"step": 570
},
{
"epoch": 0.8244491826581379,
"grad_norm": 0.6479032652506073,
"learning_rate": 9.107318184744782e-06,
"loss": 0.5244,
"step": 580
},
{
"epoch": 0.8386638237384506,
"grad_norm": 0.6241228532450154,
"learning_rate": 9.059613423804623e-06,
"loss": 0.5279,
"step": 590
},
{
"epoch": 0.8528784648187633,
"grad_norm": 0.6346658666864172,
"learning_rate": 9.010798805089385e-06,
"loss": 0.5315,
"step": 600
},
{
"epoch": 0.8670931058990761,
"grad_norm": 0.6189100879810271,
"learning_rate": 8.960887674028411e-06,
"loss": 0.516,
"step": 610
},
{
"epoch": 0.8813077469793887,
"grad_norm": 0.6310100666801625,
"learning_rate": 8.909893675826575e-06,
"loss": 0.5343,
"step": 620
},
{
"epoch": 0.8955223880597015,
"grad_norm": 0.5705654281594257,
"learning_rate": 8.857830751733815e-06,
"loss": 0.5196,
"step": 630
},
{
"epoch": 0.9097370291400142,
"grad_norm": 0.581798609611747,
"learning_rate": 8.80471313523373e-06,
"loss": 0.5252,
"step": 640
},
{
"epoch": 0.923951670220327,
"grad_norm": 0.6021206642948125,
"learning_rate": 8.750555348152299e-06,
"loss": 0.5301,
"step": 650
},
{
"epoch": 0.9381663113006397,
"grad_norm": 0.6258455776720466,
"learning_rate": 8.695372196687743e-06,
"loss": 0.5247,
"step": 660
},
{
"epoch": 0.9523809523809523,
"grad_norm": 0.5984715139941876,
"learning_rate": 8.639178767362677e-06,
"loss": 0.5166,
"step": 670
},
{
"epoch": 0.9665955934612651,
"grad_norm": 0.6174130149894701,
"learning_rate": 8.581990422899586e-06,
"loss": 0.5186,
"step": 680
},
{
"epoch": 0.9808102345415778,
"grad_norm": 0.669919825928536,
"learning_rate": 8.523822798020827e-06,
"loss": 0.5232,
"step": 690
},
{
"epoch": 0.9950248756218906,
"grad_norm": 0.5789470998307686,
"learning_rate": 8.46469179517424e-06,
"loss": 0.5128,
"step": 700
},
{
"epoch": 1.0085287846481876,
"grad_norm": 0.6391934862223132,
"learning_rate": 8.404613580185586e-06,
"loss": 0.4571,
"step": 710
},
{
"epoch": 1.0227434257285004,
"grad_norm": 0.646088830119043,
"learning_rate": 8.343604577838965e-06,
"loss": 0.4844,
"step": 720
},
{
"epoch": 1.036958066808813,
"grad_norm": 0.5785525652959986,
"learning_rate": 8.281681467386447e-06,
"loss": 0.4731,
"step": 730
},
{
"epoch": 1.0511727078891258,
"grad_norm": 0.6609376724143528,
"learning_rate": 8.21886117798813e-06,
"loss": 0.483,
"step": 740
},
{
"epoch": 1.0653873489694385,
"grad_norm": 0.6092591984384597,
"learning_rate": 8.155160884083881e-06,
"loss": 0.4868,
"step": 750
},
{
"epoch": 1.0796019900497513,
"grad_norm": 0.6374135491483588,
"learning_rate": 8.090598000698009e-06,
"loss": 0.492,
"step": 760
},
{
"epoch": 1.0938166311300639,
"grad_norm": 0.6008076415379893,
"learning_rate": 8.025190178678175e-06,
"loss": 0.4814,
"step": 770
},
{
"epoch": 1.1080312722103767,
"grad_norm": 0.6018194277584734,
"learning_rate": 7.958955299869826e-06,
"loss": 0.4768,
"step": 780
},
{
"epoch": 1.1222459132906895,
"grad_norm": 0.5917444038650317,
"learning_rate": 7.891911472227478e-06,
"loss": 0.4841,
"step": 790
},
{
"epoch": 1.136460554371002,
"grad_norm": 0.6728799540384178,
"learning_rate": 7.82407702486418e-06,
"loss": 0.4797,
"step": 800
},
{
"epoch": 1.1506751954513148,
"grad_norm": 0.5791378894590185,
"learning_rate": 7.755470503040516e-06,
"loss": 0.4842,
"step": 810
},
{
"epoch": 1.1648898365316276,
"grad_norm": 0.6318987068220235,
"learning_rate": 7.686110663094527e-06,
"loss": 0.4752,
"step": 820
},
{
"epoch": 1.1791044776119404,
"grad_norm": 0.5836896353182887,
"learning_rate": 7.616016467313891e-06,
"loss": 0.4783,
"step": 830
},
{
"epoch": 1.193319118692253,
"grad_norm": 0.6133166239448045,
"learning_rate": 7.545207078751858e-06,
"loss": 0.4804,
"step": 840
},
{
"epoch": 1.2075337597725657,
"grad_norm": 0.6647464026501058,
"learning_rate": 7.473701855988227e-06,
"loss": 0.4837,
"step": 850
},
{
"epoch": 1.2217484008528785,
"grad_norm": 0.6037242762628189,
"learning_rate": 7.4015203478369266e-06,
"loss": 0.48,
"step": 860
},
{
"epoch": 1.235963041933191,
"grad_norm": 0.6060908786113385,
"learning_rate": 7.328682288001561e-06,
"loss": 0.4828,
"step": 870
},
{
"epoch": 1.2501776830135038,
"grad_norm": 0.5748244127007777,
"learning_rate": 7.255207589680403e-06,
"loss": 0.4737,
"step": 880
},
{
"epoch": 1.2643923240938166,
"grad_norm": 0.5739667849410425,
"learning_rate": 7.181116340122336e-06,
"loss": 0.4692,
"step": 890
},
{
"epoch": 1.2786069651741294,
"grad_norm": 0.5356752158374642,
"learning_rate": 7.10642879513519e-06,
"loss": 0.467,
"step": 900
},
{
"epoch": 1.2928216062544422,
"grad_norm": 0.6231787580072905,
"learning_rate": 7.0311653735480136e-06,
"loss": 0.4787,
"step": 910
},
{
"epoch": 1.3070362473347548,
"grad_norm": 0.5539031871696426,
"learning_rate": 6.95534665162877e-06,
"loss": 0.4803,
"step": 920
},
{
"epoch": 1.3212508884150675,
"grad_norm": 0.5545024040236839,
"learning_rate": 6.878993357458986e-06,
"loss": 0.4727,
"step": 930
},
{
"epoch": 1.33546552949538,
"grad_norm": 0.5526343563080722,
"learning_rate": 6.8021263652669055e-06,
"loss": 0.4749,
"step": 940
},
{
"epoch": 1.349680170575693,
"grad_norm": 0.5949065425774154,
"learning_rate": 6.7247666897206795e-06,
"loss": 0.466,
"step": 950
},
{
"epoch": 1.3638948116560057,
"grad_norm": 0.6109499791395212,
"learning_rate": 6.646935480183173e-06,
"loss": 0.4721,
"step": 960
},
{
"epoch": 1.3781094527363185,
"grad_norm": 0.5761776478178481,
"learning_rate": 6.568654014929933e-06,
"loss": 0.4773,
"step": 970
},
{
"epoch": 1.3923240938166312,
"grad_norm": 0.6309785557491375,
"learning_rate": 6.4899436953319235e-06,
"loss": 0.4783,
"step": 980
},
{
"epoch": 1.4065387348969438,
"grad_norm": 0.6079839523261087,
"learning_rate": 6.410826040004607e-06,
"loss": 0.4874,
"step": 990
},
{
"epoch": 1.4207533759772566,
"grad_norm": 0.5694417979836612,
"learning_rate": 6.331322678924963e-06,
"loss": 0.4883,
"step": 1000
},
{
"epoch": 1.4207533759772566,
"eval_loss": 0.5142297148704529,
"eval_runtime": 233.9812,
"eval_samples_per_second": 21.369,
"eval_steps_per_second": 2.671,
"step": 1000
},
{
"epoch": 1.4349680170575694,
"grad_norm": 0.590623858748834,
"learning_rate": 6.251455347518074e-06,
"loss": 0.4674,
"step": 1010
},
{
"epoch": 1.449182658137882,
"grad_norm": 0.6492265919143085,
"learning_rate": 6.1712458807148804e-06,
"loss": 0.4566,
"step": 1020
},
{
"epoch": 1.4633972992181947,
"grad_norm": 0.6690825078821129,
"learning_rate": 6.090716206982714e-06,
"loss": 0.4415,
"step": 1030
},
{
"epoch": 1.4776119402985075,
"grad_norm": 0.6207866027112856,
"learning_rate": 6.009888342330292e-06,
"loss": 0.4419,
"step": 1040
},
{
"epoch": 1.4918265813788203,
"grad_norm": 0.5983146438640481,
"learning_rate": 5.92878438428875e-06,
"loss": 0.4529,
"step": 1050
},
{
"epoch": 1.5060412224591329,
"grad_norm": 0.6251595437559933,
"learning_rate": 5.847426505870399e-06,
"loss": 0.4433,
"step": 1060
},
{
"epoch": 1.5202558635394456,
"grad_norm": 0.5973997328290039,
"learning_rate": 5.765836949506843e-06,
"loss": 0.4465,
"step": 1070
},
{
"epoch": 1.5344705046197582,
"grad_norm": 0.61605264507367,
"learning_rate": 5.684038020968126e-06,
"loss": 0.4354,
"step": 1080
},
{
"epoch": 1.548685145700071,
"grad_norm": 0.6356804350566382,
"learning_rate": 5.6020520832645555e-06,
"loss": 0.4474,
"step": 1090
},
{
"epoch": 1.5628997867803838,
"grad_norm": 0.5409307423191971,
"learning_rate": 5.519901550532871e-06,
"loss": 0.4442,
"step": 1100
},
{
"epoch": 1.5771144278606966,
"grad_norm": 0.5628516264470729,
"learning_rate": 5.437608881908456e-06,
"loss": 0.4469,
"step": 1110
},
{
"epoch": 1.5913290689410093,
"grad_norm": 0.5646776831646609,
"learning_rate": 5.3551965753852255e-06,
"loss": 0.4444,
"step": 1120
},
{
"epoch": 1.6055437100213221,
"grad_norm": 0.6070095692660494,
"learning_rate": 5.2726871616649e-06,
"loss": 0.4546,
"step": 1130
},
{
"epoch": 1.6197583511016347,
"grad_norm": 0.5955403854226203,
"learning_rate": 5.190103197997339e-06,
"loss": 0.4453,
"step": 1140
},
{
"epoch": 1.6339729921819472,
"grad_norm": 0.5847049727426836,
"learning_rate": 5.107467262013614e-06,
"loss": 0.4456,
"step": 1150
},
{
"epoch": 1.64818763326226,
"grad_norm": 0.6464335190947482,
"learning_rate": 5.02480194555351e-06,
"loss": 0.4431,
"step": 1160
},
{
"epoch": 1.6624022743425728,
"grad_norm": 0.5976768465351757,
"learning_rate": 4.942129848489137e-06,
"loss": 0.4459,
"step": 1170
},
{
"epoch": 1.6766169154228856,
"grad_norm": 0.5759461226301025,
"learning_rate": 4.8594735725463575e-06,
"loss": 0.4331,
"step": 1180
},
{
"epoch": 1.6908315565031984,
"grad_norm": 0.5618161675831925,
"learning_rate": 4.776855715125694e-06,
"loss": 0.4459,
"step": 1190
},
{
"epoch": 1.7050461975835112,
"grad_norm": 0.5665638890009612,
"learning_rate": 4.694298863124435e-06,
"loss": 0.4434,
"step": 1200
},
{
"epoch": 1.7192608386638237,
"grad_norm": 0.5897127590620418,
"learning_rate": 4.611825586761591e-06,
"loss": 0.4526,
"step": 1210
},
{
"epoch": 1.7334754797441365,
"grad_norm": 0.5964155647113037,
"learning_rate": 4.529458433407429e-06,
"loss": 0.4404,
"step": 1220
},
{
"epoch": 1.747690120824449,
"grad_norm": 0.557832612527273,
"learning_rate": 4.447219921419244e-06,
"loss": 0.4466,
"step": 1230
},
{
"epoch": 1.7619047619047619,
"grad_norm": 0.6542060047375909,
"learning_rate": 4.365132533985071e-06,
"loss": 0.4456,
"step": 1240
},
{
"epoch": 1.7761194029850746,
"grad_norm": 0.5917352620945386,
"learning_rate": 4.283218712976992e-06,
"loss": 0.4452,
"step": 1250
},
{
"epoch": 1.7903340440653874,
"grad_norm": 0.561379370088737,
"learning_rate": 4.201500852815769e-06,
"loss": 0.442,
"step": 1260
},
{
"epoch": 1.8045486851457002,
"grad_norm": 0.5491467540898044,
"learning_rate": 4.12000129434842e-06,
"loss": 0.4444,
"step": 1270
},
{
"epoch": 1.8187633262260128,
"grad_norm": 0.5872206166430523,
"learning_rate": 4.0387423187404656e-06,
"loss": 0.4375,
"step": 1280
},
{
"epoch": 1.8329779673063256,
"grad_norm": 0.5927050301786448,
"learning_rate": 3.957746141384469e-06,
"loss": 0.4471,
"step": 1290
},
{
"epoch": 1.8471926083866381,
"grad_norm": 0.5414882779351676,
"learning_rate": 3.877034905826577e-06,
"loss": 0.4466,
"step": 1300
},
{
"epoch": 1.861407249466951,
"grad_norm": 0.5612180051513147,
"learning_rate": 3.796630677712697e-06,
"loss": 0.4388,
"step": 1310
},
{
"epoch": 1.8756218905472637,
"grad_norm": 0.5529411457142649,
"learning_rate": 3.716555438755961e-06,
"loss": 0.4472,
"step": 1320
},
{
"epoch": 1.8898365316275765,
"grad_norm": 0.5616746991079874,
"learning_rate": 3.6368310807271546e-06,
"loss": 0.4446,
"step": 1330
},
{
"epoch": 1.9040511727078893,
"grad_norm": 0.5799752713225496,
"learning_rate": 3.557479399469721e-06,
"loss": 0.4493,
"step": 1340
},
{
"epoch": 1.9182658137882018,
"grad_norm": 0.6619814912951459,
"learning_rate": 3.4785220889409934e-06,
"loss": 0.4405,
"step": 1350
},
{
"epoch": 1.9324804548685146,
"grad_norm": 0.5613562889297112,
"learning_rate": 3.3999807352812862e-06,
"loss": 0.4541,
"step": 1360
},
{
"epoch": 1.9466950959488272,
"grad_norm": 0.5713920903616314,
"learning_rate": 3.321876810912461e-06,
"loss": 0.4428,
"step": 1370
},
{
"epoch": 1.96090973702914,
"grad_norm": 0.5665905645368096,
"learning_rate": 3.2442316686675783e-06,
"loss": 0.4437,
"step": 1380
},
{
"epoch": 1.9751243781094527,
"grad_norm": 0.595196616315948,
"learning_rate": 3.1670665359532415e-06,
"loss": 0.4438,
"step": 1390
},
{
"epoch": 1.9893390191897655,
"grad_norm": 0.5920175988784884,
"learning_rate": 3.090402508946249e-06,
"loss": 0.4373,
"step": 1400
},
{
"epoch": 2.0042643923240937,
"grad_norm": 0.5358117816177491,
"learning_rate": 3.0142605468260976e-06,
"loss": 0.4572,
"step": 1410
},
{
"epoch": 2.0184790334044065,
"grad_norm": 0.5576529014623839,
"learning_rate": 2.9386614660449598e-06,
"loss": 0.4263,
"step": 1420
},
{
"epoch": 2.0326936744847193,
"grad_norm": 0.5822207756082265,
"learning_rate": 2.8636259346366666e-06,
"loss": 0.4385,
"step": 1430
},
{
"epoch": 2.046908315565032,
"grad_norm": 0.5829696226017161,
"learning_rate": 2.7891744665662824e-06,
"loss": 0.4218,
"step": 1440
},
{
"epoch": 2.061122956645345,
"grad_norm": 0.581038802974167,
"learning_rate": 2.7153274161217847e-06,
"loss": 0.4404,
"step": 1450
},
{
"epoch": 2.0753375977256576,
"grad_norm": 0.5347733423294494,
"learning_rate": 2.642104972349403e-06,
"loss": 0.4198,
"step": 1460
},
{
"epoch": 2.08955223880597,
"grad_norm": 0.543588717662816,
"learning_rate": 2.5695271535341443e-06,
"loss": 0.4411,
"step": 1470
},
{
"epoch": 2.1037668798862827,
"grad_norm": 0.5720716950974386,
"learning_rate": 2.4976138017269906e-06,
"loss": 0.4412,
"step": 1480
},
{
"epoch": 2.1179815209665955,
"grad_norm": 0.5603303814513405,
"learning_rate": 2.4263845773202738e-06,
"loss": 0.4298,
"step": 1490
},
{
"epoch": 2.1321961620469083,
"grad_norm": 0.5598020164471934,
"learning_rate": 2.355858953672728e-06,
"loss": 0.4397,
"step": 1500
},
{
"epoch": 2.1321961620469083,
"eval_loss": 0.5144844651222229,
"eval_runtime": 231.3372,
"eval_samples_per_second": 21.613,
"eval_steps_per_second": 2.702,
"step": 1500
},
{
"epoch": 2.146410803127221,
"grad_norm": 0.552503902368828,
"learning_rate": 2.286056211785665e-06,
"loss": 0.4339,
"step": 1510
},
{
"epoch": 2.160625444207534,
"grad_norm": 0.5606006250953287,
"learning_rate": 2.2169954350317372e-06,
"loss": 0.4408,
"step": 1520
},
{
"epoch": 2.1748400852878467,
"grad_norm": 0.5478812953721691,
"learning_rate": 2.148695503937745e-06,
"loss": 0.4387,
"step": 1530
},
{
"epoch": 2.189054726368159,
"grad_norm": 0.5327946624830445,
"learning_rate": 2.081175091022877e-06,
"loss": 0.4395,
"step": 1540
},
{
"epoch": 2.203269367448472,
"grad_norm": 0.5793441763139854,
"learning_rate": 2.014452655693839e-06,
"loss": 0.4394,
"step": 1550
},
{
"epoch": 2.2174840085287846,
"grad_norm": 0.5656062787295659,
"learning_rate": 1.9485464391982282e-06,
"loss": 0.4357,
"step": 1560
},
{
"epoch": 2.2316986496090974,
"grad_norm": 0.575605417624959,
"learning_rate": 1.8834744596375664e-06,
"loss": 0.4364,
"step": 1570
},
{
"epoch": 2.24591329068941,
"grad_norm": 0.5511255544375402,
"learning_rate": 1.8192545070413281e-06,
"loss": 0.4278,
"step": 1580
},
{
"epoch": 2.260127931769723,
"grad_norm": 0.5527437049458982,
"learning_rate": 1.755904138503316e-06,
"loss": 0.4281,
"step": 1590
},
{
"epoch": 2.2743425728500357,
"grad_norm": 0.5567466633261947,
"learning_rate": 1.6934406733817417e-06,
"loss": 0.4326,
"step": 1600
},
{
"epoch": 2.288557213930348,
"grad_norm": 0.5542280645431442,
"learning_rate": 1.6318811885642749e-06,
"loss": 0.4426,
"step": 1610
},
{
"epoch": 2.302771855010661,
"grad_norm": 0.5385541713578466,
"learning_rate": 1.5712425137993976e-06,
"loss": 0.4363,
"step": 1620
},
{
"epoch": 2.3169864960909736,
"grad_norm": 0.5568902806764913,
"learning_rate": 1.5115412270953166e-06,
"loss": 0.4346,
"step": 1630
},
{
"epoch": 2.3312011371712864,
"grad_norm": 0.6058817336949707,
"learning_rate": 1.4527936501877033e-06,
"loss": 0.4321,
"step": 1640
},
{
"epoch": 2.345415778251599,
"grad_norm": 0.5318061336854996,
"learning_rate": 1.3950158440774958e-06,
"loss": 0.418,
"step": 1650
},
{
"epoch": 2.359630419331912,
"grad_norm": 0.5553934307354601,
"learning_rate": 1.3382236046399722e-06,
"loss": 0.4143,
"step": 1660
},
{
"epoch": 2.3738450604122248,
"grad_norm": 0.5429114693602697,
"learning_rate": 1.2824324583063303e-06,
"loss": 0.4413,
"step": 1670
},
{
"epoch": 2.388059701492537,
"grad_norm": 0.5286255307861315,
"learning_rate": 1.2276576578189065e-06,
"loss": 0.4218,
"step": 1680
},
{
"epoch": 2.40227434257285,
"grad_norm": 0.5513151927730859,
"learning_rate": 1.1739141780612306e-06,
"loss": 0.4278,
"step": 1690
},
{
"epoch": 2.4164889836531627,
"grad_norm": 0.5523983447772243,
"learning_rate": 1.1212167119640439e-06,
"loss": 0.4364,
"step": 1700
},
{
"epoch": 2.4307036247334755,
"grad_norm": 0.5707718364766191,
"learning_rate": 1.069579666488395e-06,
"loss": 0.429,
"step": 1710
},
{
"epoch": 2.4449182658137882,
"grad_norm": 0.5620343463380818,
"learning_rate": 1.0190171586869258e-06,
"loss": 0.4239,
"step": 1720
},
{
"epoch": 2.459132906894101,
"grad_norm": 0.5335024558606225,
"learning_rate": 9.695430118444049e-07,
"loss": 0.4199,
"step": 1730
},
{
"epoch": 2.473347547974414,
"grad_norm": 0.5088660962728315,
"learning_rate": 9.21170751698583e-07,
"loss": 0.4294,
"step": 1740
},
{
"epoch": 2.487562189054726,
"grad_norm": 0.5854213827338935,
"learning_rate": 8.739136027423894e-07,
"loss": 0.4407,
"step": 1750
},
{
"epoch": 2.501776830135039,
"grad_norm": 0.570296065993969,
"learning_rate": 8.277844846084898e-07,
"loss": 0.4273,
"step": 1760
},
{
"epoch": 2.5159914712153517,
"grad_norm": 0.5432303322927625,
"learning_rate": 7.827960085371855e-07,
"loss": 0.4372,
"step": 1770
},
{
"epoch": 2.5302061122956645,
"grad_norm": 0.5462735332010124,
"learning_rate": 7.389604739286271e-07,
"loss": 0.4384,
"step": 1780
},
{
"epoch": 2.5444207533759773,
"grad_norm": 0.5514867453544212,
"learning_rate": 6.962898649802824e-07,
"loss": 0.4279,
"step": 1790
},
{
"epoch": 2.55863539445629,
"grad_norm": 0.5374953523760396,
"learning_rate": 6.547958474105726e-07,
"loss": 0.4207,
"step": 1800
},
{
"epoch": 2.572850035536603,
"grad_norm": 0.5368534822557964,
"learning_rate": 6.144897652695864e-07,
"loss": 0.4425,
"step": 1810
},
{
"epoch": 2.587064676616915,
"grad_norm": 0.5331851936220864,
"learning_rate": 5.753826378377287e-07,
"loss": 0.4227,
"step": 1820
},
{
"epoch": 2.6012793176972284,
"grad_norm": 0.532292119043563,
"learning_rate": 5.374851566131561e-07,
"loss": 0.4332,
"step": 1830
},
{
"epoch": 2.6154939587775408,
"grad_norm": 0.522004918322321,
"learning_rate": 5.008076823888319e-07,
"loss": 0.4363,
"step": 1840
},
{
"epoch": 2.6297085998578535,
"grad_norm": 0.5747875880967467,
"learning_rate": 4.653602424199877e-07,
"loss": 0.441,
"step": 1850
},
{
"epoch": 2.6439232409381663,
"grad_norm": 0.5701129333105889,
"learning_rate": 4.3115252768276827e-07,
"loss": 0.4281,
"step": 1860
},
{
"epoch": 2.658137882018479,
"grad_norm": 0.5171567622519524,
"learning_rate": 3.9819389022482226e-07,
"loss": 0.439,
"step": 1870
},
{
"epoch": 2.672352523098792,
"grad_norm": 0.5506453302407677,
"learning_rate": 3.6649334060854027e-07,
"loss": 0.4261,
"step": 1880
},
{
"epoch": 2.6865671641791042,
"grad_norm": 0.5611217928624682,
"learning_rate": 3.360595454476595e-07,
"loss": 0.4296,
"step": 1890
},
{
"epoch": 2.7007818052594175,
"grad_norm": 0.5693324256729151,
"learning_rate": 3.069008250378974e-07,
"loss": 0.4349,
"step": 1900
},
{
"epoch": 2.71499644633973,
"grad_norm": 0.5448625830798527,
"learning_rate": 2.7902515108226613e-07,
"loss": 0.4167,
"step": 1910
},
{
"epoch": 2.7292110874200426,
"grad_norm": 0.5178772865930678,
"learning_rate": 2.5244014451168863e-07,
"loss": 0.4335,
"step": 1920
},
{
"epoch": 2.7434257285003554,
"grad_norm": 0.555692357887426,
"learning_rate": 2.271530734015104e-07,
"loss": 0.435,
"step": 1930
},
{
"epoch": 2.757640369580668,
"grad_norm": 0.5575021836492726,
"learning_rate": 2.0317085098448373e-07,
"loss": 0.4222,
"step": 1940
},
{
"epoch": 2.771855010660981,
"grad_norm": 0.5354458107562179,
"learning_rate": 1.8050003376075708e-07,
"loss": 0.4426,
"step": 1950
},
{
"epoch": 2.7860696517412933,
"grad_norm": 0.5182277398666688,
"learning_rate": 1.591468197053919e-07,
"loss": 0.4359,
"step": 1960
},
{
"epoch": 2.8002842928216065,
"grad_norm": 0.5422707453993397,
"learning_rate": 1.3911704657390113e-07,
"loss": 0.4243,
"step": 1970
},
{
"epoch": 2.814498933901919,
"grad_norm": 0.5242061992724598,
"learning_rate": 1.2041619030626283e-07,
"loss": 0.4323,
"step": 1980
},
{
"epoch": 2.8287135749822316,
"grad_norm": 0.5113276517453622,
"learning_rate": 1.0304936352985351e-07,
"loss": 0.4273,
"step": 1990
},
{
"epoch": 2.8429282160625444,
"grad_norm": 0.5355864223633939,
"learning_rate": 8.702131416170657e-08,
"loss": 0.436,
"step": 2000
},
{
"epoch": 2.8429282160625444,
"eval_loss": 0.5102300047874451,
"eval_runtime": 231.9212,
"eval_samples_per_second": 21.559,
"eval_steps_per_second": 2.695,
"step": 2000
},
{
"epoch": 2.857142857142857,
"grad_norm": 0.5388940284634288,
"learning_rate": 7.233642411048014e-08,
"loss": 0.437,
"step": 2010
},
{
"epoch": 2.87135749822317,
"grad_norm": 0.5557321412758267,
"learning_rate": 5.899870807848762e-08,
"loss": 0.4304,
"step": 2020
},
{
"epoch": 2.8855721393034823,
"grad_norm": 0.5448610951969772,
"learning_rate": 4.701181246411501e-08,
"loss": 0.4275,
"step": 2030
},
{
"epoch": 2.8997867803837956,
"grad_norm": 0.5737836534540497,
"learning_rate": 3.6379014364935075e-08,
"loss": 0.4349,
"step": 2040
},
{
"epoch": 2.914001421464108,
"grad_norm": 0.55622828041129,
"learning_rate": 2.7103220681780616e-08,
"loss": 0.4291,
"step": 2050
},
{
"epoch": 2.9282160625444207,
"grad_norm": 0.5234759202957545,
"learning_rate": 1.9186967324026364e-08,
"loss": 0.4283,
"step": 2060
},
{
"epoch": 2.9424307036247335,
"grad_norm": 0.533033568944861,
"learning_rate": 1.2632418516296263e-08,
"loss": 0.4269,
"step": 2070
},
{
"epoch": 2.9566453447050463,
"grad_norm": 0.5413416159064122,
"learning_rate": 7.4413662067884806e-09,
"loss": 0.4291,
"step": 2080
},
{
"epoch": 2.970859985785359,
"grad_norm": 0.5523224114708861,
"learning_rate": 3.615229577371149e-09,
"loss": 0.432,
"step": 2090
},
{
"epoch": 2.9850746268656714,
"grad_norm": 0.5328203745561761,
"learning_rate": 1.1550546555960662e-09,
"loss": 0.4398,
"step": 2100
},
{
"epoch": 2.9992892679459846,
"grad_norm": 0.5266787263911724,
"learning_rate": 6.151402872134337e-11,
"loss": 0.422,
"step": 2110
},
{
"epoch": 3.0,
"step": 2111,
"total_flos": 1256820975730688.0,
"train_loss": 0.22985298537010831,
"train_runtime": 4731.3341,
"train_samples_per_second": 28.533,
"train_steps_per_second": 0.446
}
],
"logging_steps": 10,
"max_steps": 2112,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 1000,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1256820975730688.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}