{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9954430379746837,
"eval_steps": 500,
"global_step": 1479,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.020253164556962026,
"grad_norm": 12.120276772132307,
"learning_rate": 1e-05,
"loss": 1.3197,
"step": 10
},
{
"epoch": 0.04050632911392405,
"grad_norm": 3.3282798950621606,
"learning_rate": 1e-05,
"loss": 0.7042,
"step": 20
},
{
"epoch": 0.060759493670886074,
"grad_norm": 1.852628562201542,
"learning_rate": 1e-05,
"loss": 0.6472,
"step": 30
},
{
"epoch": 0.0810126582278481,
"grad_norm": 1.8595883040832657,
"learning_rate": 1e-05,
"loss": 0.6266,
"step": 40
},
{
"epoch": 0.10126582278481013,
"grad_norm": 2.3347809060064737,
"learning_rate": 1e-05,
"loss": 0.6144,
"step": 50
},
{
"epoch": 0.12151898734177215,
"grad_norm": 1.5723585971477918,
"learning_rate": 1e-05,
"loss": 0.608,
"step": 60
},
{
"epoch": 0.14177215189873418,
"grad_norm": 1.3994357935247357,
"learning_rate": 1e-05,
"loss": 0.6034,
"step": 70
},
{
"epoch": 0.1620253164556962,
"grad_norm": 1.447049621938385,
"learning_rate": 1e-05,
"loss": 0.599,
"step": 80
},
{
"epoch": 0.18227848101265823,
"grad_norm": 1.2428690822989794,
"learning_rate": 1e-05,
"loss": 0.5981,
"step": 90
},
{
"epoch": 0.20253164556962025,
"grad_norm": 1.3914703840449831,
"learning_rate": 1e-05,
"loss": 0.6011,
"step": 100
},
{
"epoch": 0.22278481012658227,
"grad_norm": 1.5592092104237008,
"learning_rate": 1e-05,
"loss": 0.5939,
"step": 110
},
{
"epoch": 0.2430379746835443,
"grad_norm": 1.471433544307161,
"learning_rate": 1e-05,
"loss": 0.5912,
"step": 120
},
{
"epoch": 0.26329113924050634,
"grad_norm": 1.2679735338498985,
"learning_rate": 1e-05,
"loss": 0.5897,
"step": 130
},
{
"epoch": 0.28354430379746837,
"grad_norm": 1.6702226860109868,
"learning_rate": 1e-05,
"loss": 0.5788,
"step": 140
},
{
"epoch": 0.3037974683544304,
"grad_norm": 1.3067789351691594,
"learning_rate": 1e-05,
"loss": 0.588,
"step": 150
},
{
"epoch": 0.3240506329113924,
"grad_norm": 1.1703696399532784,
"learning_rate": 1e-05,
"loss": 0.5863,
"step": 160
},
{
"epoch": 0.34430379746835443,
"grad_norm": 1.121107144958954,
"learning_rate": 1e-05,
"loss": 0.588,
"step": 170
},
{
"epoch": 0.36455696202531646,
"grad_norm": 1.5075601932855025,
"learning_rate": 1e-05,
"loss": 0.5843,
"step": 180
},
{
"epoch": 0.3848101265822785,
"grad_norm": 1.338012688042743,
"learning_rate": 1e-05,
"loss": 0.5829,
"step": 190
},
{
"epoch": 0.4050632911392405,
"grad_norm": 1.1821708199327476,
"learning_rate": 1e-05,
"loss": 0.5779,
"step": 200
},
{
"epoch": 0.4253164556962025,
"grad_norm": 1.4310888733583513,
"learning_rate": 1e-05,
"loss": 0.5738,
"step": 210
},
{
"epoch": 0.44556962025316454,
"grad_norm": 1.2425719507952644,
"learning_rate": 1e-05,
"loss": 0.5793,
"step": 220
},
{
"epoch": 0.46582278481012657,
"grad_norm": 1.284207417856141,
"learning_rate": 1e-05,
"loss": 0.5789,
"step": 230
},
{
"epoch": 0.4860759493670886,
"grad_norm": 1.287170935711038,
"learning_rate": 1e-05,
"loss": 0.5747,
"step": 240
},
{
"epoch": 0.5063291139240507,
"grad_norm": 2.4058688944413764,
"learning_rate": 1e-05,
"loss": 0.5796,
"step": 250
},
{
"epoch": 0.5265822784810127,
"grad_norm": 1.6394596935536183,
"learning_rate": 1e-05,
"loss": 0.5666,
"step": 260
},
{
"epoch": 0.5468354430379747,
"grad_norm": 1.6683438715363992,
"learning_rate": 1e-05,
"loss": 0.5721,
"step": 270
},
{
"epoch": 0.5670886075949367,
"grad_norm": 1.1622746081977664,
"learning_rate": 1e-05,
"loss": 0.5649,
"step": 280
},
{
"epoch": 0.5873417721518988,
"grad_norm": 1.1921031772245787,
"learning_rate": 1e-05,
"loss": 0.566,
"step": 290
},
{
"epoch": 0.6075949367088608,
"grad_norm": 1.2152166551469124,
"learning_rate": 1e-05,
"loss": 0.5671,
"step": 300
},
{
"epoch": 0.6278481012658228,
"grad_norm": 1.502458614540582,
"learning_rate": 1e-05,
"loss": 0.5745,
"step": 310
},
{
"epoch": 0.6481012658227848,
"grad_norm": 1.3818767210076068,
"learning_rate": 1e-05,
"loss": 0.5707,
"step": 320
},
{
"epoch": 0.6683544303797468,
"grad_norm": 1.2097710361150822,
"learning_rate": 1e-05,
"loss": 0.5694,
"step": 330
},
{
"epoch": 0.6886075949367089,
"grad_norm": 1.079086895654196,
"learning_rate": 1e-05,
"loss": 0.5696,
"step": 340
},
{
"epoch": 0.7088607594936709,
"grad_norm": 1.157190965982707,
"learning_rate": 1e-05,
"loss": 0.5715,
"step": 350
},
{
"epoch": 0.7291139240506329,
"grad_norm": 1.1550319402689113,
"learning_rate": 1e-05,
"loss": 0.5743,
"step": 360
},
{
"epoch": 0.7493670886075949,
"grad_norm": 1.2470855203394164,
"learning_rate": 1e-05,
"loss": 0.5675,
"step": 370
},
{
"epoch": 0.769620253164557,
"grad_norm": 1.3106394417346152,
"learning_rate": 1e-05,
"loss": 0.5642,
"step": 380
},
{
"epoch": 0.789873417721519,
"grad_norm": 1.4375961620431188,
"learning_rate": 1e-05,
"loss": 0.5669,
"step": 390
},
{
"epoch": 0.810126582278481,
"grad_norm": 1.1354365824942318,
"learning_rate": 1e-05,
"loss": 0.5664,
"step": 400
},
{
"epoch": 0.830379746835443,
"grad_norm": 1.1099955442432055,
"learning_rate": 1e-05,
"loss": 0.5604,
"step": 410
},
{
"epoch": 0.850632911392405,
"grad_norm": 1.0248354813245797,
"learning_rate": 1e-05,
"loss": 0.5693,
"step": 420
},
{
"epoch": 0.8708860759493671,
"grad_norm": 1.1695037280013965,
"learning_rate": 1e-05,
"loss": 0.5633,
"step": 430
},
{
"epoch": 0.8911392405063291,
"grad_norm": 1.0066881046376932,
"learning_rate": 1e-05,
"loss": 0.5691,
"step": 440
},
{
"epoch": 0.9113924050632911,
"grad_norm": 1.1463091892812078,
"learning_rate": 1e-05,
"loss": 0.5603,
"step": 450
},
{
"epoch": 0.9316455696202531,
"grad_norm": 1.0788249736854203,
"learning_rate": 1e-05,
"loss": 0.5655,
"step": 460
},
{
"epoch": 0.9518987341772152,
"grad_norm": 1.239072255047288,
"learning_rate": 1e-05,
"loss": 0.5664,
"step": 470
},
{
"epoch": 0.9721518987341772,
"grad_norm": 1.0915164665788082,
"learning_rate": 1e-05,
"loss": 0.5637,
"step": 480
},
{
"epoch": 0.9924050632911392,
"grad_norm": 1.0969372897726506,
"learning_rate": 1e-05,
"loss": 0.56,
"step": 490
},
{
"epoch": 0.9984810126582279,
"eval_loss": 0.07033738493919373,
"eval_runtime": 504.4731,
"eval_samples_per_second": 26.372,
"eval_steps_per_second": 0.412,
"step": 493
},
{
"epoch": 1.0126582278481013,
"grad_norm": 1.7003665830327457,
"learning_rate": 1e-05,
"loss": 0.4975,
"step": 500
},
{
"epoch": 1.0329113924050632,
"grad_norm": 1.2635282362069415,
"learning_rate": 1e-05,
"loss": 0.4476,
"step": 510
},
{
"epoch": 1.0531645569620254,
"grad_norm": 1.4874167170265886,
"learning_rate": 1e-05,
"loss": 0.4419,
"step": 520
},
{
"epoch": 1.0734177215189873,
"grad_norm": 1.2207382792396062,
"learning_rate": 1e-05,
"loss": 0.4418,
"step": 530
},
{
"epoch": 1.0936708860759494,
"grad_norm": 1.2290216181181564,
"learning_rate": 1e-05,
"loss": 0.4368,
"step": 540
},
{
"epoch": 1.1139240506329113,
"grad_norm": 1.6678262115708307,
"learning_rate": 1e-05,
"loss": 0.4393,
"step": 550
},
{
"epoch": 1.1341772151898735,
"grad_norm": 1.0698168475317462,
"learning_rate": 1e-05,
"loss": 0.4357,
"step": 560
},
{
"epoch": 1.1544303797468354,
"grad_norm": 1.4082917170488416,
"learning_rate": 1e-05,
"loss": 0.4424,
"step": 570
},
{
"epoch": 1.1746835443037975,
"grad_norm": 1.2669419785340237,
"learning_rate": 1e-05,
"loss": 0.4415,
"step": 580
},
{
"epoch": 1.1949367088607594,
"grad_norm": 1.230550253708558,
"learning_rate": 1e-05,
"loss": 0.4357,
"step": 590
},
{
"epoch": 1.2151898734177216,
"grad_norm": 1.2542289011497672,
"learning_rate": 1e-05,
"loss": 0.4442,
"step": 600
},
{
"epoch": 1.2354430379746835,
"grad_norm": 1.110140026650854,
"learning_rate": 1e-05,
"loss": 0.4419,
"step": 610
},
{
"epoch": 1.2556962025316456,
"grad_norm": 1.1673077840894608,
"learning_rate": 1e-05,
"loss": 0.4471,
"step": 620
},
{
"epoch": 1.2759493670886077,
"grad_norm": 1.1952693191466754,
"learning_rate": 1e-05,
"loss": 0.4503,
"step": 630
},
{
"epoch": 1.2962025316455696,
"grad_norm": 1.2811688264876502,
"learning_rate": 1e-05,
"loss": 0.4504,
"step": 640
},
{
"epoch": 1.3164556962025316,
"grad_norm": 1.201361948961674,
"learning_rate": 1e-05,
"loss": 0.4546,
"step": 650
},
{
"epoch": 1.3367088607594937,
"grad_norm": 1.1153919438488649,
"learning_rate": 1e-05,
"loss": 0.444,
"step": 660
},
{
"epoch": 1.3569620253164558,
"grad_norm": 1.1049424729490929,
"learning_rate": 1e-05,
"loss": 0.4589,
"step": 670
},
{
"epoch": 1.3772151898734177,
"grad_norm": 1.1545393310939402,
"learning_rate": 1e-05,
"loss": 0.4565,
"step": 680
},
{
"epoch": 1.3974683544303796,
"grad_norm": 1.11029297429295,
"learning_rate": 1e-05,
"loss": 0.4556,
"step": 690
},
{
"epoch": 1.4177215189873418,
"grad_norm": 1.047113325677032,
"learning_rate": 1e-05,
"loss": 0.4524,
"step": 700
},
{
"epoch": 1.437974683544304,
"grad_norm": 1.0379588408596967,
"learning_rate": 1e-05,
"loss": 0.4591,
"step": 710
},
{
"epoch": 1.4582278481012658,
"grad_norm": 1.2432868288193164,
"learning_rate": 1e-05,
"loss": 0.4602,
"step": 720
},
{
"epoch": 1.4784810126582277,
"grad_norm": 1.125133727987033,
"learning_rate": 1e-05,
"loss": 0.4638,
"step": 730
},
{
"epoch": 1.4987341772151899,
"grad_norm": 1.1243180240639854,
"learning_rate": 1e-05,
"loss": 0.4553,
"step": 740
},
{
"epoch": 1.518987341772152,
"grad_norm": 1.2646245667315335,
"learning_rate": 1e-05,
"loss": 0.458,
"step": 750
},
{
"epoch": 1.539240506329114,
"grad_norm": 1.0026618924941482,
"learning_rate": 1e-05,
"loss": 0.4591,
"step": 760
},
{
"epoch": 1.5594936708860758,
"grad_norm": 1.119323006377659,
"learning_rate": 1e-05,
"loss": 0.4589,
"step": 770
},
{
"epoch": 1.579746835443038,
"grad_norm": 1.2367498859475263,
"learning_rate": 1e-05,
"loss": 0.4596,
"step": 780
},
{
"epoch": 1.6,
"grad_norm": 1.0563847142257616,
"learning_rate": 1e-05,
"loss": 0.4613,
"step": 790
},
{
"epoch": 1.620253164556962,
"grad_norm": 1.2639836828415751,
"learning_rate": 1e-05,
"loss": 0.458,
"step": 800
},
{
"epoch": 1.640506329113924,
"grad_norm": 1.015054309602122,
"learning_rate": 1e-05,
"loss": 0.4518,
"step": 810
},
{
"epoch": 1.660759493670886,
"grad_norm": 1.0938189623403691,
"learning_rate": 1e-05,
"loss": 0.4609,
"step": 820
},
{
"epoch": 1.6810126582278482,
"grad_norm": 1.1944856866424804,
"learning_rate": 1e-05,
"loss": 0.4572,
"step": 830
},
{
"epoch": 1.70126582278481,
"grad_norm": 0.9913091951101142,
"learning_rate": 1e-05,
"loss": 0.4592,
"step": 840
},
{
"epoch": 1.721518987341772,
"grad_norm": 1.0583438868425272,
"learning_rate": 1e-05,
"loss": 0.4614,
"step": 850
},
{
"epoch": 1.7417721518987341,
"grad_norm": 1.0753652786030234,
"learning_rate": 1e-05,
"loss": 0.462,
"step": 860
},
{
"epoch": 1.7620253164556963,
"grad_norm": 1.0641685034731208,
"learning_rate": 1e-05,
"loss": 0.4656,
"step": 870
},
{
"epoch": 1.7822784810126582,
"grad_norm": 1.1115091053423183,
"learning_rate": 1e-05,
"loss": 0.4669,
"step": 880
},
{
"epoch": 1.80253164556962,
"grad_norm": 1.1762455233061868,
"learning_rate": 1e-05,
"loss": 0.4643,
"step": 890
},
{
"epoch": 1.8227848101265822,
"grad_norm": 1.0488806003684397,
"learning_rate": 1e-05,
"loss": 0.4634,
"step": 900
},
{
"epoch": 1.8430379746835444,
"grad_norm": 1.137548743512461,
"learning_rate": 1e-05,
"loss": 0.4653,
"step": 910
},
{
"epoch": 1.8632911392405065,
"grad_norm": 1.2277896309279472,
"learning_rate": 1e-05,
"loss": 0.4642,
"step": 920
},
{
"epoch": 1.8835443037974684,
"grad_norm": 1.1031182132626522,
"learning_rate": 1e-05,
"loss": 0.4617,
"step": 930
},
{
"epoch": 1.9037974683544303,
"grad_norm": 1.17361252748904,
"learning_rate": 1e-05,
"loss": 0.4645,
"step": 940
},
{
"epoch": 1.9240506329113924,
"grad_norm": 1.1510248983778957,
"learning_rate": 1e-05,
"loss": 0.4658,
"step": 950
},
{
"epoch": 1.9443037974683546,
"grad_norm": 0.9489143733406166,
"learning_rate": 1e-05,
"loss": 0.4695,
"step": 960
},
{
"epoch": 1.9645569620253165,
"grad_norm": 1.03558420599709,
"learning_rate": 1e-05,
"loss": 0.4675,
"step": 970
},
{
"epoch": 1.9848101265822784,
"grad_norm": 0.9879031581948011,
"learning_rate": 1e-05,
"loss": 0.4685,
"step": 980
},
{
"epoch": 1.998987341772152,
"eval_loss": 0.07146137207746506,
"eval_runtime": 505.4639,
"eval_samples_per_second": 26.32,
"eval_steps_per_second": 0.412,
"step": 987
},
{
"epoch": 2.0050632911392405,
"grad_norm": 1.892066865726991,
"learning_rate": 1e-05,
"loss": 0.4323,
"step": 990
},
{
"epoch": 2.0253164556962027,
"grad_norm": 1.5719039003024942,
"learning_rate": 1e-05,
"loss": 0.3341,
"step": 1000
},
{
"epoch": 2.0455696202531644,
"grad_norm": 1.449884706175893,
"learning_rate": 1e-05,
"loss": 0.3244,
"step": 1010
},
{
"epoch": 2.0658227848101265,
"grad_norm": 1.2852913794473613,
"learning_rate": 1e-05,
"loss": 0.3184,
"step": 1020
},
{
"epoch": 2.0860759493670886,
"grad_norm": 1.1603083163194567,
"learning_rate": 1e-05,
"loss": 0.3194,
"step": 1030
},
{
"epoch": 2.1063291139240508,
"grad_norm": 1.3069767179205787,
"learning_rate": 1e-05,
"loss": 0.3174,
"step": 1040
},
{
"epoch": 2.1265822784810124,
"grad_norm": 1.6903160262293084,
"learning_rate": 1e-05,
"loss": 0.3271,
"step": 1050
},
{
"epoch": 2.1468354430379746,
"grad_norm": 1.1612363707549425,
"learning_rate": 1e-05,
"loss": 0.3226,
"step": 1060
},
{
"epoch": 2.1670886075949367,
"grad_norm": 1.2336974410905899,
"learning_rate": 1e-05,
"loss": 0.3272,
"step": 1070
},
{
"epoch": 2.187341772151899,
"grad_norm": 1.4860870096822676,
"learning_rate": 1e-05,
"loss": 0.3254,
"step": 1080
},
{
"epoch": 2.207594936708861,
"grad_norm": 1.2327704954496135,
"learning_rate": 1e-05,
"loss": 0.3232,
"step": 1090
},
{
"epoch": 2.2278481012658227,
"grad_norm": 1.2728744826855773,
"learning_rate": 1e-05,
"loss": 0.3303,
"step": 1100
},
{
"epoch": 2.248101265822785,
"grad_norm": 1.564435586204242,
"learning_rate": 1e-05,
"loss": 0.3268,
"step": 1110
},
{
"epoch": 2.268354430379747,
"grad_norm": 1.1796512878110514,
"learning_rate": 1e-05,
"loss": 0.3295,
"step": 1120
},
{
"epoch": 2.2886075949367086,
"grad_norm": 1.3761953620771636,
"learning_rate": 1e-05,
"loss": 0.3314,
"step": 1130
},
{
"epoch": 2.3088607594936708,
"grad_norm": 1.2084195608367814,
"learning_rate": 1e-05,
"loss": 0.3281,
"step": 1140
},
{
"epoch": 2.329113924050633,
"grad_norm": 1.3640182632104951,
"learning_rate": 1e-05,
"loss": 0.3331,
"step": 1150
},
{
"epoch": 2.349367088607595,
"grad_norm": 1.2183639117868437,
"learning_rate": 1e-05,
"loss": 0.3338,
"step": 1160
},
{
"epoch": 2.369620253164557,
"grad_norm": 1.5867826943310064,
"learning_rate": 1e-05,
"loss": 0.3345,
"step": 1170
},
{
"epoch": 2.389873417721519,
"grad_norm": 1.671471525105439,
"learning_rate": 1e-05,
"loss": 0.3349,
"step": 1180
},
{
"epoch": 2.410126582278481,
"grad_norm": 1.3298853522150373,
"learning_rate": 1e-05,
"loss": 0.3358,
"step": 1190
},
{
"epoch": 2.430379746835443,
"grad_norm": 1.2730109312885425,
"learning_rate": 1e-05,
"loss": 0.3343,
"step": 1200
},
{
"epoch": 2.4506329113924052,
"grad_norm": 1.2457421709970629,
"learning_rate": 1e-05,
"loss": 0.3346,
"step": 1210
},
{
"epoch": 2.470886075949367,
"grad_norm": 1.3431855699425361,
"learning_rate": 1e-05,
"loss": 0.339,
"step": 1220
},
{
"epoch": 2.491139240506329,
"grad_norm": 1.2603843195755815,
"learning_rate": 1e-05,
"loss": 0.3403,
"step": 1230
},
{
"epoch": 2.511392405063291,
"grad_norm": 1.4062229291125676,
"learning_rate": 1e-05,
"loss": 0.344,
"step": 1240
},
{
"epoch": 2.5316455696202533,
"grad_norm": 1.322597016772209,
"learning_rate": 1e-05,
"loss": 0.3385,
"step": 1250
},
{
"epoch": 2.5518987341772155,
"grad_norm": 1.373766133387267,
"learning_rate": 1e-05,
"loss": 0.3362,
"step": 1260
},
{
"epoch": 2.572151898734177,
"grad_norm": 1.2344056168917583,
"learning_rate": 1e-05,
"loss": 0.3422,
"step": 1270
},
{
"epoch": 2.5924050632911393,
"grad_norm": 1.2900507739859943,
"learning_rate": 1e-05,
"loss": 0.3434,
"step": 1280
},
{
"epoch": 2.6126582278481014,
"grad_norm": 1.1094628286667152,
"learning_rate": 1e-05,
"loss": 0.3491,
"step": 1290
},
{
"epoch": 2.632911392405063,
"grad_norm": 1.2155731176288158,
"learning_rate": 1e-05,
"loss": 0.3441,
"step": 1300
},
{
"epoch": 2.6531645569620252,
"grad_norm": 1.1792690382009903,
"learning_rate": 1e-05,
"loss": 0.3449,
"step": 1310
},
{
"epoch": 2.6734177215189874,
"grad_norm": 1.1807087130030214,
"learning_rate": 1e-05,
"loss": 0.3478,
"step": 1320
},
{
"epoch": 2.6936708860759495,
"grad_norm": 1.304918179872075,
"learning_rate": 1e-05,
"loss": 0.3492,
"step": 1330
},
{
"epoch": 2.7139240506329116,
"grad_norm": 1.2726713376838257,
"learning_rate": 1e-05,
"loss": 0.349,
"step": 1340
},
{
"epoch": 2.7341772151898733,
"grad_norm": 1.3016244218498019,
"learning_rate": 1e-05,
"loss": 0.3494,
"step": 1350
},
{
"epoch": 2.7544303797468355,
"grad_norm": 1.2698985747702052,
"learning_rate": 1e-05,
"loss": 0.3455,
"step": 1360
},
{
"epoch": 2.7746835443037976,
"grad_norm": 1.2610899461992577,
"learning_rate": 1e-05,
"loss": 0.3513,
"step": 1370
},
{
"epoch": 2.7949367088607593,
"grad_norm": 1.3344761420732612,
"learning_rate": 1e-05,
"loss": 0.349,
"step": 1380
},
{
"epoch": 2.8151898734177214,
"grad_norm": 1.2470207135941664,
"learning_rate": 1e-05,
"loss": 0.3518,
"step": 1390
},
{
"epoch": 2.8354430379746836,
"grad_norm": 1.2318168043565298,
"learning_rate": 1e-05,
"loss": 0.3463,
"step": 1400
},
{
"epoch": 2.8556962025316457,
"grad_norm": 1.2597107398114282,
"learning_rate": 1e-05,
"loss": 0.3542,
"step": 1410
},
{
"epoch": 2.875949367088608,
"grad_norm": 1.2248062173263479,
"learning_rate": 1e-05,
"loss": 0.3516,
"step": 1420
},
{
"epoch": 2.8962025316455695,
"grad_norm": 1.3227956688116478,
"learning_rate": 1e-05,
"loss": 0.3518,
"step": 1430
},
{
"epoch": 2.9164556962025316,
"grad_norm": 1.2776070215734827,
"learning_rate": 1e-05,
"loss": 0.3535,
"step": 1440
},
{
"epoch": 2.9367088607594938,
"grad_norm": 1.154652010117705,
"learning_rate": 1e-05,
"loss": 0.3621,
"step": 1450
},
{
"epoch": 2.9569620253164555,
"grad_norm": 1.1923022485239954,
"learning_rate": 1e-05,
"loss": 0.3521,
"step": 1460
},
{
"epoch": 2.9772151898734176,
"grad_norm": 1.2357715823045246,
"learning_rate": 1e-05,
"loss": 0.356,
"step": 1470
},
{
"epoch": 2.9954430379746837,
"eval_loss": 0.07828417420387268,
"eval_runtime": 506.1814,
"eval_samples_per_second": 26.283,
"eval_steps_per_second": 0.411,
"step": 1479
},
{
"epoch": 2.9954430379746837,
"step": 1479,
"total_flos": 2477170706350080.0,
"train_loss": 0.4641323921404794,
"train_runtime": 83870.3724,
"train_samples_per_second": 9.041,
"train_steps_per_second": 0.018
}
],
"logging_steps": 10,
"max_steps": 1479,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2477170706350080.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}