{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9954430379746837,
"eval_steps": 500,
"global_step": 1479,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.020253164556962026,
"grad_norm": 2.88220611268731,
"learning_rate": 5e-06,
"loss": 0.7569,
"step": 10
},
{
"epoch": 0.04050632911392405,
"grad_norm": 2.480067861484698,
"learning_rate": 5e-06,
"loss": 0.6504,
"step": 20
},
{
"epoch": 0.060759493670886074,
"grad_norm": 2.774980923077334,
"learning_rate": 5e-06,
"loss": 0.629,
"step": 30
},
{
"epoch": 0.0810126582278481,
"grad_norm": 1.7240706644388326,
"learning_rate": 5e-06,
"loss": 0.6134,
"step": 40
},
{
"epoch": 0.10126582278481013,
"grad_norm": 1.4536613830159735,
"learning_rate": 5e-06,
"loss": 0.6034,
"step": 50
},
{
"epoch": 0.12151898734177215,
"grad_norm": 1.450284463333836,
"learning_rate": 5e-06,
"loss": 0.5974,
"step": 60
},
{
"epoch": 0.14177215189873418,
"grad_norm": 1.6558140046758392,
"learning_rate": 5e-06,
"loss": 0.5925,
"step": 70
},
{
"epoch": 0.1620253164556962,
"grad_norm": 1.7520091331371848,
"learning_rate": 5e-06,
"loss": 0.5878,
"step": 80
},
{
"epoch": 0.18227848101265823,
"grad_norm": 1.6029120553311094,
"learning_rate": 5e-06,
"loss": 0.5865,
"step": 90
},
{
"epoch": 0.20253164556962025,
"grad_norm": 1.567363166768414,
"learning_rate": 5e-06,
"loss": 0.5893,
"step": 100
},
{
"epoch": 0.22278481012658227,
"grad_norm": 1.4694354120450246,
"learning_rate": 5e-06,
"loss": 0.5819,
"step": 110
},
{
"epoch": 0.2430379746835443,
"grad_norm": 1.4681818695060211,
"learning_rate": 5e-06,
"loss": 0.5791,
"step": 120
},
{
"epoch": 0.26329113924050634,
"grad_norm": 1.5191462196175605,
"learning_rate": 5e-06,
"loss": 0.5776,
"step": 130
},
{
"epoch": 0.28354430379746837,
"grad_norm": 1.3078880356772522,
"learning_rate": 5e-06,
"loss": 0.567,
"step": 140
},
{
"epoch": 0.3037974683544304,
"grad_norm": 1.486087097355444,
"learning_rate": 5e-06,
"loss": 0.5784,
"step": 150
},
{
"epoch": 0.3240506329113924,
"grad_norm": 1.1938431479309142,
"learning_rate": 5e-06,
"loss": 0.5766,
"step": 160
},
{
"epoch": 0.34430379746835443,
"grad_norm": 1.702252307631795,
"learning_rate": 5e-06,
"loss": 0.5777,
"step": 170
},
{
"epoch": 0.36455696202531646,
"grad_norm": 2.724877827601554,
"learning_rate": 5e-06,
"loss": 0.5728,
"step": 180
},
{
"epoch": 0.3848101265822785,
"grad_norm": 1.9757209917803782,
"learning_rate": 5e-06,
"loss": 0.5718,
"step": 190
},
{
"epoch": 0.4050632911392405,
"grad_norm": 2.0877685409506817,
"learning_rate": 5e-06,
"loss": 0.5651,
"step": 200
},
{
"epoch": 0.4253164556962025,
"grad_norm": 1.562380367481087,
"learning_rate": 5e-06,
"loss": 0.5608,
"step": 210
},
{
"epoch": 0.44556962025316454,
"grad_norm": 1.6767077949693185,
"learning_rate": 5e-06,
"loss": 0.5668,
"step": 220
},
{
"epoch": 0.46582278481012657,
"grad_norm": 1.2635841301185957,
"learning_rate": 5e-06,
"loss": 0.5661,
"step": 230
},
{
"epoch": 0.4860759493670886,
"grad_norm": 1.6125913827261849,
"learning_rate": 5e-06,
"loss": 0.5624,
"step": 240
},
{
"epoch": 0.5063291139240507,
"grad_norm": 2.097566919302689,
"learning_rate": 5e-06,
"loss": 0.5671,
"step": 250
},
{
"epoch": 0.5265822784810127,
"grad_norm": 1.225603648815228,
"learning_rate": 5e-06,
"loss": 0.5536,
"step": 260
},
{
"epoch": 0.5468354430379747,
"grad_norm": 2.6536486037387843,
"learning_rate": 5e-06,
"loss": 0.5626,
"step": 270
},
{
"epoch": 0.5670886075949367,
"grad_norm": 2.061044706332861,
"learning_rate": 5e-06,
"loss": 0.5578,
"step": 280
},
{
"epoch": 0.5873417721518988,
"grad_norm": 1.3385785865849447,
"learning_rate": 5e-06,
"loss": 0.556,
"step": 290
},
{
"epoch": 0.6075949367088608,
"grad_norm": 1.4454247513778045,
"learning_rate": 5e-06,
"loss": 0.556,
"step": 300
},
{
"epoch": 0.6278481012658228,
"grad_norm": 1.7204398183201466,
"learning_rate": 5e-06,
"loss": 0.562,
"step": 310
},
{
"epoch": 0.6481012658227848,
"grad_norm": 1.9793942130611824,
"learning_rate": 5e-06,
"loss": 0.5578,
"step": 320
},
{
"epoch": 0.6683544303797468,
"grad_norm": 1.7805954913632007,
"learning_rate": 5e-06,
"loss": 0.5573,
"step": 330
},
{
"epoch": 0.6886075949367089,
"grad_norm": 1.6398542655595056,
"learning_rate": 5e-06,
"loss": 0.5568,
"step": 340
},
{
"epoch": 0.7088607594936709,
"grad_norm": 1.4219457540606182,
"learning_rate": 5e-06,
"loss": 0.5587,
"step": 350
},
{
"epoch": 0.7291139240506329,
"grad_norm": 1.205446257048794,
"learning_rate": 5e-06,
"loss": 0.5621,
"step": 360
},
{
"epoch": 0.7493670886075949,
"grad_norm": 1.5519028320645483,
"learning_rate": 5e-06,
"loss": 0.5558,
"step": 370
},
{
"epoch": 0.769620253164557,
"grad_norm": 1.3405556096272386,
"learning_rate": 5e-06,
"loss": 0.5529,
"step": 380
},
{
"epoch": 0.789873417721519,
"grad_norm": 1.1970424265714363,
"learning_rate": 5e-06,
"loss": 0.556,
"step": 390
},
{
"epoch": 0.810126582278481,
"grad_norm": 1.4143181030859675,
"learning_rate": 5e-06,
"loss": 0.5568,
"step": 400
},
{
"epoch": 0.830379746835443,
"grad_norm": 1.2868527954949525,
"learning_rate": 5e-06,
"loss": 0.5519,
"step": 410
},
{
"epoch": 0.850632911392405,
"grad_norm": 1.307968999368857,
"learning_rate": 5e-06,
"loss": 0.5593,
"step": 420
},
{
"epoch": 0.8708860759493671,
"grad_norm": 1.1315498091126373,
"learning_rate": 5e-06,
"loss": 0.5527,
"step": 430
},
{
"epoch": 0.8911392405063291,
"grad_norm": 1.333424078805594,
"learning_rate": 5e-06,
"loss": 0.5585,
"step": 440
},
{
"epoch": 0.9113924050632911,
"grad_norm": 1.2209294815766687,
"learning_rate": 5e-06,
"loss": 0.5496,
"step": 450
},
{
"epoch": 0.9316455696202531,
"grad_norm": 1.1522504991077107,
"learning_rate": 5e-06,
"loss": 0.554,
"step": 460
},
{
"epoch": 0.9518987341772152,
"grad_norm": 1.111876381365586,
"learning_rate": 5e-06,
"loss": 0.5549,
"step": 470
},
{
"epoch": 0.9721518987341772,
"grad_norm": 1.0648075841701097,
"learning_rate": 5e-06,
"loss": 0.5508,
"step": 480
},
{
"epoch": 0.9924050632911392,
"grad_norm": 1.2354318647376354,
"learning_rate": 5e-06,
"loss": 0.5479,
"step": 490
},
{
"epoch": 0.9984810126582279,
"eval_loss": 0.06881221383810043,
"eval_runtime": 505.2683,
"eval_samples_per_second": 26.331,
"eval_steps_per_second": 0.412,
"step": 493
},
{
"epoch": 1.0126582278481013,
"grad_norm": 2.1556327804060715,
"learning_rate": 5e-06,
"loss": 0.5079,
"step": 500
},
{
"epoch": 1.0329113924050632,
"grad_norm": 1.4832590670609553,
"learning_rate": 5e-06,
"loss": 0.4736,
"step": 510
},
{
"epoch": 1.0531645569620254,
"grad_norm": 1.6271105981188392,
"learning_rate": 5e-06,
"loss": 0.4689,
"step": 520
},
{
"epoch": 1.0734177215189873,
"grad_norm": 1.6518604720423575,
"learning_rate": 5e-06,
"loss": 0.4721,
"step": 530
},
{
"epoch": 1.0936708860759494,
"grad_norm": 1.6306680005909122,
"learning_rate": 5e-06,
"loss": 0.4685,
"step": 540
},
{
"epoch": 1.1139240506329113,
"grad_norm": 1.5490864800468693,
"learning_rate": 5e-06,
"loss": 0.4715,
"step": 550
},
{
"epoch": 1.1341772151898735,
"grad_norm": 1.4729491530388399,
"learning_rate": 5e-06,
"loss": 0.4691,
"step": 560
},
{
"epoch": 1.1544303797468354,
"grad_norm": 1.2846484466684625,
"learning_rate": 5e-06,
"loss": 0.4759,
"step": 570
},
{
"epoch": 1.1746835443037975,
"grad_norm": 1.210269025129285,
"learning_rate": 5e-06,
"loss": 0.4748,
"step": 580
},
{
"epoch": 1.1949367088607594,
"grad_norm": 1.1994849011298538,
"learning_rate": 5e-06,
"loss": 0.4686,
"step": 590
},
{
"epoch": 1.2151898734177216,
"grad_norm": 1.3109012503883226,
"learning_rate": 5e-06,
"loss": 0.4763,
"step": 600
},
{
"epoch": 1.2354430379746835,
"grad_norm": 1.361668676774227,
"learning_rate": 5e-06,
"loss": 0.4728,
"step": 610
},
{
"epoch": 1.2556962025316456,
"grad_norm": 1.464666048188549,
"learning_rate": 5e-06,
"loss": 0.4772,
"step": 620
},
{
"epoch": 1.2759493670886077,
"grad_norm": 1.4141750327340763,
"learning_rate": 5e-06,
"loss": 0.477,
"step": 630
},
{
"epoch": 1.2962025316455696,
"grad_norm": 1.143860086864536,
"learning_rate": 5e-06,
"loss": 0.4758,
"step": 640
},
{
"epoch": 1.3164556962025316,
"grad_norm": 1.268594443588833,
"learning_rate": 5e-06,
"loss": 0.4796,
"step": 650
},
{
"epoch": 1.3367088607594937,
"grad_norm": 1.2924687305619194,
"learning_rate": 5e-06,
"loss": 0.469,
"step": 660
},
{
"epoch": 1.3569620253164558,
"grad_norm": 1.2741990803305674,
"learning_rate": 5e-06,
"loss": 0.4829,
"step": 670
},
{
"epoch": 1.3772151898734177,
"grad_norm": 1.4171124530128463,
"learning_rate": 5e-06,
"loss": 0.4792,
"step": 680
},
{
"epoch": 1.3974683544303796,
"grad_norm": 1.1461018430295604,
"learning_rate": 5e-06,
"loss": 0.4784,
"step": 690
},
{
"epoch": 1.4177215189873418,
"grad_norm": 1.2574652457742375,
"learning_rate": 5e-06,
"loss": 0.4765,
"step": 700
},
{
"epoch": 1.437974683544304,
"grad_norm": 1.2661309102833118,
"learning_rate": 5e-06,
"loss": 0.4814,
"step": 710
},
{
"epoch": 1.4582278481012658,
"grad_norm": 1.1981819216436724,
"learning_rate": 5e-06,
"loss": 0.4802,
"step": 720
},
{
"epoch": 1.4784810126582277,
"grad_norm": 1.55940798073831,
"learning_rate": 5e-06,
"loss": 0.4824,
"step": 730
},
{
"epoch": 1.4987341772151899,
"grad_norm": 1.2417973039908028,
"learning_rate": 5e-06,
"loss": 0.4756,
"step": 740
},
{
"epoch": 1.518987341772152,
"grad_norm": 1.1387382430999369,
"learning_rate": 5e-06,
"loss": 0.4781,
"step": 750
},
{
"epoch": 1.539240506329114,
"grad_norm": 1.104717023280076,
"learning_rate": 5e-06,
"loss": 0.4795,
"step": 760
},
{
"epoch": 1.5594936708860758,
"grad_norm": 1.3153249018888995,
"learning_rate": 5e-06,
"loss": 0.4795,
"step": 770
},
{
"epoch": 1.579746835443038,
"grad_norm": 1.1992568131579366,
"learning_rate": 5e-06,
"loss": 0.4797,
"step": 780
},
{
"epoch": 1.6,
"grad_norm": 1.136040651878267,
"learning_rate": 5e-06,
"loss": 0.482,
"step": 790
},
{
"epoch": 1.620253164556962,
"grad_norm": 1.3304817212078788,
"learning_rate": 5e-06,
"loss": 0.4785,
"step": 800
},
{
"epoch": 1.640506329113924,
"grad_norm": 1.142527655189943,
"learning_rate": 5e-06,
"loss": 0.4715,
"step": 810
},
{
"epoch": 1.660759493670886,
"grad_norm": 1.4128828061541685,
"learning_rate": 5e-06,
"loss": 0.4801,
"step": 820
},
{
"epoch": 1.6810126582278482,
"grad_norm": 1.1980978216324385,
"learning_rate": 5e-06,
"loss": 0.4756,
"step": 830
},
{
"epoch": 1.70126582278481,
"grad_norm": 1.262563541243328,
"learning_rate": 5e-06,
"loss": 0.4776,
"step": 840
},
{
"epoch": 1.721518987341772,
"grad_norm": 1.2436924983133957,
"learning_rate": 5e-06,
"loss": 0.4801,
"step": 850
},
{
"epoch": 1.7417721518987341,
"grad_norm": 1.39666723634599,
"learning_rate": 5e-06,
"loss": 0.4797,
"step": 860
},
{
"epoch": 1.7620253164556963,
"grad_norm": 1.2842653152149013,
"learning_rate": 5e-06,
"loss": 0.4825,
"step": 870
},
{
"epoch": 1.7822784810126582,
"grad_norm": 1.165392419546629,
"learning_rate": 5e-06,
"loss": 0.4836,
"step": 880
},
{
"epoch": 1.80253164556962,
"grad_norm": 1.1855160731969316,
"learning_rate": 5e-06,
"loss": 0.4797,
"step": 890
},
{
"epoch": 1.8227848101265822,
"grad_norm": 1.2464436535452346,
"learning_rate": 5e-06,
"loss": 0.4794,
"step": 900
},
{
"epoch": 1.8430379746835444,
"grad_norm": 1.1709360612380009,
"learning_rate": 5e-06,
"loss": 0.4815,
"step": 910
},
{
"epoch": 1.8632911392405065,
"grad_norm": 1.2208910287918922,
"learning_rate": 5e-06,
"loss": 0.4805,
"step": 920
},
{
"epoch": 1.8835443037974684,
"grad_norm": 1.0888496188915995,
"learning_rate": 5e-06,
"loss": 0.4775,
"step": 930
},
{
"epoch": 1.9037974683544303,
"grad_norm": 1.1994340934717456,
"learning_rate": 5e-06,
"loss": 0.4814,
"step": 940
},
{
"epoch": 1.9240506329113924,
"grad_norm": 1.2945338868463476,
"learning_rate": 5e-06,
"loss": 0.4818,
"step": 950
},
{
"epoch": 1.9443037974683546,
"grad_norm": 1.6313486116488276,
"learning_rate": 5e-06,
"loss": 0.4858,
"step": 960
},
{
"epoch": 1.9645569620253165,
"grad_norm": 1.2011513392392508,
"learning_rate": 5e-06,
"loss": 0.4844,
"step": 970
},
{
"epoch": 1.9848101265822784,
"grad_norm": 1.5089522220254115,
"learning_rate": 5e-06,
"loss": 0.4838,
"step": 980
},
{
"epoch": 1.998987341772152,
"eval_loss": 0.06875146180391312,
"eval_runtime": 505.4208,
"eval_samples_per_second": 26.323,
"eval_steps_per_second": 0.412,
"step": 987
},
{
"epoch": 2.0050632911392405,
"grad_norm": 1.7875364396427433,
"learning_rate": 5e-06,
"loss": 0.4553,
"step": 990
},
{
"epoch": 2.0253164556962027,
"grad_norm": 1.6493678627855817,
"learning_rate": 5e-06,
"loss": 0.386,
"step": 1000
},
{
"epoch": 2.0455696202531644,
"grad_norm": 1.42447013081262,
"learning_rate": 5e-06,
"loss": 0.3825,
"step": 1010
},
{
"epoch": 2.0658227848101265,
"grad_norm": 1.4117353579299758,
"learning_rate": 5e-06,
"loss": 0.3786,
"step": 1020
},
{
"epoch": 2.0860759493670886,
"grad_norm": 1.3771094109220838,
"learning_rate": 5e-06,
"loss": 0.3791,
"step": 1030
},
{
"epoch": 2.1063291139240508,
"grad_norm": 1.343133315036709,
"learning_rate": 5e-06,
"loss": 0.3771,
"step": 1040
},
{
"epoch": 2.1265822784810124,
"grad_norm": 1.4448432034872896,
"learning_rate": 5e-06,
"loss": 0.387,
"step": 1050
},
{
"epoch": 2.1468354430379746,
"grad_norm": 1.6456444944430404,
"learning_rate": 5e-06,
"loss": 0.3827,
"step": 1060
},
{
"epoch": 2.1670886075949367,
"grad_norm": 1.3452334022152583,
"learning_rate": 5e-06,
"loss": 0.3865,
"step": 1070
},
{
"epoch": 2.187341772151899,
"grad_norm": 1.3446640669823824,
"learning_rate": 5e-06,
"loss": 0.3842,
"step": 1080
},
{
"epoch": 2.207594936708861,
"grad_norm": 1.3748598440301938,
"learning_rate": 5e-06,
"loss": 0.3826,
"step": 1090
},
{
"epoch": 2.2278481012658227,
"grad_norm": 1.4087618626218648,
"learning_rate": 5e-06,
"loss": 0.3918,
"step": 1100
},
{
"epoch": 2.248101265822785,
"grad_norm": 1.2293657724154228,
"learning_rate": 5e-06,
"loss": 0.3876,
"step": 1110
},
{
"epoch": 2.268354430379747,
"grad_norm": 1.2846672502227803,
"learning_rate": 5e-06,
"loss": 0.3903,
"step": 1120
},
{
"epoch": 2.2886075949367086,
"grad_norm": 1.4377466610216114,
"learning_rate": 5e-06,
"loss": 0.3914,
"step": 1130
},
{
"epoch": 2.3088607594936708,
"grad_norm": 1.4175742620767358,
"learning_rate": 5e-06,
"loss": 0.388,
"step": 1140
},
{
"epoch": 2.329113924050633,
"grad_norm": 1.5759675575520993,
"learning_rate": 5e-06,
"loss": 0.3933,
"step": 1150
},
{
"epoch": 2.349367088607595,
"grad_norm": 1.508836056446397,
"learning_rate": 5e-06,
"loss": 0.3932,
"step": 1160
},
{
"epoch": 2.369620253164557,
"grad_norm": 1.825295421372421,
"learning_rate": 5e-06,
"loss": 0.3944,
"step": 1170
},
{
"epoch": 2.389873417721519,
"grad_norm": 1.3216739690036472,
"learning_rate": 5e-06,
"loss": 0.3943,
"step": 1180
},
{
"epoch": 2.410126582278481,
"grad_norm": 1.3805684615677538,
"learning_rate": 5e-06,
"loss": 0.394,
"step": 1190
},
{
"epoch": 2.430379746835443,
"grad_norm": 1.3512854374041294,
"learning_rate": 5e-06,
"loss": 0.3923,
"step": 1200
},
{
"epoch": 2.4506329113924052,
"grad_norm": 1.295908581682868,
"learning_rate": 5e-06,
"loss": 0.3923,
"step": 1210
},
{
"epoch": 2.470886075949367,
"grad_norm": 1.2255128770109138,
"learning_rate": 5e-06,
"loss": 0.3955,
"step": 1220
},
{
"epoch": 2.491139240506329,
"grad_norm": 1.3685809804833307,
"learning_rate": 5e-06,
"loss": 0.3957,
"step": 1230
},
{
"epoch": 2.511392405063291,
"grad_norm": 1.4762862327728246,
"learning_rate": 5e-06,
"loss": 0.3999,
"step": 1240
},
{
"epoch": 2.5316455696202533,
"grad_norm": 1.3415537378198834,
"learning_rate": 5e-06,
"loss": 0.3939,
"step": 1250
},
{
"epoch": 2.5518987341772155,
"grad_norm": 1.4898621089248945,
"learning_rate": 5e-06,
"loss": 0.3895,
"step": 1260
},
{
"epoch": 2.572151898734177,
"grad_norm": 1.8618471579741513,
"learning_rate": 5e-06,
"loss": 0.3962,
"step": 1270
},
{
"epoch": 2.5924050632911393,
"grad_norm": 1.639428963357603,
"learning_rate": 5e-06,
"loss": 0.3976,
"step": 1280
},
{
"epoch": 2.6126582278481014,
"grad_norm": 1.3573195616422664,
"learning_rate": 5e-06,
"loss": 0.4009,
"step": 1290
},
{
"epoch": 2.632911392405063,
"grad_norm": 1.4054413316327985,
"learning_rate": 5e-06,
"loss": 0.3958,
"step": 1300
},
{
"epoch": 2.6531645569620252,
"grad_norm": 1.360657435359846,
"learning_rate": 5e-06,
"loss": 0.3965,
"step": 1310
},
{
"epoch": 2.6734177215189874,
"grad_norm": 1.4646728739670725,
"learning_rate": 5e-06,
"loss": 0.3997,
"step": 1320
},
{
"epoch": 2.6936708860759495,
"grad_norm": 1.2841526066493858,
"learning_rate": 5e-06,
"loss": 0.4017,
"step": 1330
},
{
"epoch": 2.7139240506329116,
"grad_norm": 1.4416140658783074,
"learning_rate": 5e-06,
"loss": 0.4,
"step": 1340
},
{
"epoch": 2.7341772151898733,
"grad_norm": 1.289169639101135,
"learning_rate": 5e-06,
"loss": 0.3997,
"step": 1350
},
{
"epoch": 2.7544303797468355,
"grad_norm": 1.2983193210908834,
"learning_rate": 5e-06,
"loss": 0.3956,
"step": 1360
},
{
"epoch": 2.7746835443037976,
"grad_norm": 1.4790452157324334,
"learning_rate": 5e-06,
"loss": 0.4022,
"step": 1370
},
{
"epoch": 2.7949367088607593,
"grad_norm": 1.3662958435730228,
"learning_rate": 5e-06,
"loss": 0.3993,
"step": 1380
},
{
"epoch": 2.8151898734177214,
"grad_norm": 1.5324942996261537,
"learning_rate": 5e-06,
"loss": 0.4018,
"step": 1390
},
{
"epoch": 2.8354430379746836,
"grad_norm": 1.363475696210741,
"learning_rate": 5e-06,
"loss": 0.3953,
"step": 1400
},
{
"epoch": 2.8556962025316457,
"grad_norm": 1.2915820963572053,
"learning_rate": 5e-06,
"loss": 0.4038,
"step": 1410
},
{
"epoch": 2.875949367088608,
"grad_norm": 1.2674230912928623,
"learning_rate": 5e-06,
"loss": 0.4001,
"step": 1420
},
{
"epoch": 2.8962025316455695,
"grad_norm": 1.2889699399012808,
"learning_rate": 5e-06,
"loss": 0.4006,
"step": 1430
},
{
"epoch": 2.9164556962025316,
"grad_norm": 1.2789428851698343,
"learning_rate": 5e-06,
"loss": 0.4026,
"step": 1440
},
{
"epoch": 2.9367088607594938,
"grad_norm": 1.386940313351875,
"learning_rate": 5e-06,
"loss": 0.4118,
"step": 1450
},
{
"epoch": 2.9569620253164555,
"grad_norm": 1.4388170101564672,
"learning_rate": 5e-06,
"loss": 0.4,
"step": 1460
},
{
"epoch": 2.9772151898734176,
"grad_norm": 1.256484387745372,
"learning_rate": 5e-06,
"loss": 0.4038,
"step": 1470
},
{
"epoch": 2.9954430379746837,
"eval_loss": 0.07278138399124146,
"eval_runtime": 507.1081,
"eval_samples_per_second": 26.235,
"eval_steps_per_second": 0.41,
"step": 1479
},
{
"epoch": 2.9954430379746837,
"step": 1479,
"total_flos": 2477170706350080.0,
"train_loss": 0.48194751488025966,
"train_runtime": 84452.6037,
"train_samples_per_second": 8.979,
"train_steps_per_second": 0.018
}
],
"logging_steps": 10,
"max_steps": 1479,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2477170706350080.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}