{
"best_global_step": 1500,
"best_metric": 0.022791102528572083,
"best_model_checkpoint": "./trained_model_20251223_144504/checkpoint-1500",
"epoch": 1.0838301716350496,
"eval_steps": 500,
"global_step": 1500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.007226738934056007,
"grad_norm": 1.7734375,
"learning_rate": 1.44e-06,
"loss": 0.0577,
"step": 10
},
{
"epoch": 0.014453477868112014,
"grad_norm": 1.3828125,
"learning_rate": 3.04e-06,
"loss": 0.0498,
"step": 20
},
{
"epoch": 0.02168021680216802,
"grad_norm": 0.890625,
"learning_rate": 4.6400000000000005e-06,
"loss": 0.0334,
"step": 30
},
{
"epoch": 0.028906955736224028,
"grad_norm": 0.458984375,
"learning_rate": 6.24e-06,
"loss": 0.0249,
"step": 40
},
{
"epoch": 0.036133694670280034,
"grad_norm": 0.2578125,
"learning_rate": 7.840000000000001e-06,
"loss": 0.0194,
"step": 50
},
{
"epoch": 0.04336043360433604,
"grad_norm": 0.333984375,
"learning_rate": 9.440000000000001e-06,
"loss": 0.0193,
"step": 60
},
{
"epoch": 0.05058717253839205,
"grad_norm": 0.380859375,
"learning_rate": 1.1040000000000001e-05,
"loss": 0.0274,
"step": 70
},
{
"epoch": 0.057813911472448055,
"grad_norm": 0.69921875,
"learning_rate": 1.2640000000000001e-05,
"loss": 0.0188,
"step": 80
},
{
"epoch": 0.06504065040650407,
"grad_norm": 0.271484375,
"learning_rate": 1.4240000000000001e-05,
"loss": 0.0194,
"step": 90
},
{
"epoch": 0.07226738934056007,
"grad_norm": 0.4375,
"learning_rate": 1.584e-05,
"loss": 0.0188,
"step": 100
},
{
"epoch": 0.07949412827461608,
"grad_norm": 0.265625,
"learning_rate": 1.7440000000000002e-05,
"loss": 0.018,
"step": 110
},
{
"epoch": 0.08672086720867209,
"grad_norm": 0.294921875,
"learning_rate": 1.904e-05,
"loss": 0.019,
"step": 120
},
{
"epoch": 0.0939476061427281,
"grad_norm": 0.34375,
"learning_rate": 1.99801340948597e-05,
"loss": 0.0193,
"step": 130
},
{
"epoch": 0.1011743450767841,
"grad_norm": 0.5546875,
"learning_rate": 1.9930469332008943e-05,
"loss": 0.0184,
"step": 140
},
{
"epoch": 0.10840108401084012,
"grad_norm": 0.3046875,
"learning_rate": 1.9880804569158184e-05,
"loss": 0.0184,
"step": 150
},
{
"epoch": 0.11562782294489611,
"grad_norm": 0.2216796875,
"learning_rate": 1.9831139806307428e-05,
"loss": 0.019,
"step": 160
},
{
"epoch": 0.12285456187895212,
"grad_norm": 0.8671875,
"learning_rate": 1.9781475043456668e-05,
"loss": 0.0217,
"step": 170
},
{
"epoch": 0.13008130081300814,
"grad_norm": 0.369140625,
"learning_rate": 1.9731810280605912e-05,
"loss": 0.0187,
"step": 180
},
{
"epoch": 0.13730803974706413,
"grad_norm": 0.271484375,
"learning_rate": 1.9682145517755153e-05,
"loss": 0.029,
"step": 190
},
{
"epoch": 0.14453477868112014,
"grad_norm": 0.40234375,
"learning_rate": 1.9632480754904396e-05,
"loss": 0.0268,
"step": 200
},
{
"epoch": 0.15176151761517614,
"grad_norm": 0.330078125,
"learning_rate": 1.9582815992053637e-05,
"loss": 0.019,
"step": 210
},
{
"epoch": 0.15898825654923215,
"grad_norm": 0.73828125,
"learning_rate": 1.953315122920288e-05,
"loss": 0.0295,
"step": 220
},
{
"epoch": 0.16621499548328816,
"grad_norm": 0.5390625,
"learning_rate": 1.9483486466352125e-05,
"loss": 0.0224,
"step": 230
},
{
"epoch": 0.17344173441734417,
"grad_norm": 0.419921875,
"learning_rate": 1.943382170350137e-05,
"loss": 0.0193,
"step": 240
},
{
"epoch": 0.18066847335140018,
"grad_norm": 0.396484375,
"learning_rate": 1.938415694065061e-05,
"loss": 0.0188,
"step": 250
},
{
"epoch": 0.1878952122854562,
"grad_norm": 0.494140625,
"learning_rate": 1.9334492177799853e-05,
"loss": 0.0211,
"step": 260
},
{
"epoch": 0.1951219512195122,
"grad_norm": 0.46875,
"learning_rate": 1.9284827414949094e-05,
"loss": 0.0183,
"step": 270
},
{
"epoch": 0.2023486901535682,
"grad_norm": 0.404296875,
"learning_rate": 1.9235162652098338e-05,
"loss": 0.0197,
"step": 280
},
{
"epoch": 0.20957542908762422,
"grad_norm": 0.384765625,
"learning_rate": 1.9185497889247578e-05,
"loss": 0.0174,
"step": 290
},
{
"epoch": 0.21680216802168023,
"grad_norm": 0.333984375,
"learning_rate": 1.9135833126396822e-05,
"loss": 0.019,
"step": 300
},
{
"epoch": 0.2240289069557362,
"grad_norm": 0.28125,
"learning_rate": 1.9086168363546066e-05,
"loss": 0.0239,
"step": 310
},
{
"epoch": 0.23125564588979222,
"grad_norm": 0.328125,
"learning_rate": 1.903650360069531e-05,
"loss": 0.0234,
"step": 320
},
{
"epoch": 0.23848238482384823,
"grad_norm": 0.419921875,
"learning_rate": 1.898683883784455e-05,
"loss": 0.0195,
"step": 330
},
{
"epoch": 0.24570912375790424,
"grad_norm": 0.33203125,
"learning_rate": 1.8937174074993794e-05,
"loss": 0.0191,
"step": 340
},
{
"epoch": 0.2529358626919603,
"grad_norm": 0.2734375,
"learning_rate": 1.8887509312143035e-05,
"loss": 0.0186,
"step": 350
},
{
"epoch": 0.2601626016260163,
"grad_norm": 0.64453125,
"learning_rate": 1.883784454929228e-05,
"loss": 0.018,
"step": 360
},
{
"epoch": 0.26738934056007224,
"grad_norm": 0.25390625,
"learning_rate": 1.878817978644152e-05,
"loss": 0.0217,
"step": 370
},
{
"epoch": 0.27461607949412825,
"grad_norm": 0.384765625,
"learning_rate": 1.8738515023590763e-05,
"loss": 0.0182,
"step": 380
},
{
"epoch": 0.28184281842818426,
"grad_norm": 1.1015625,
"learning_rate": 1.8688850260740007e-05,
"loss": 0.0257,
"step": 390
},
{
"epoch": 0.28906955736224027,
"grad_norm": 0.30078125,
"learning_rate": 1.8639185497889248e-05,
"loss": 0.0437,
"step": 400
},
{
"epoch": 0.2962962962962963,
"grad_norm": 0.42578125,
"learning_rate": 1.8589520735038492e-05,
"loss": 0.0607,
"step": 410
},
{
"epoch": 0.3035230352303523,
"grad_norm": 0.267578125,
"learning_rate": 1.8539855972187736e-05,
"loss": 0.0187,
"step": 420
},
{
"epoch": 0.3107497741644083,
"grad_norm": 0.20703125,
"learning_rate": 1.8490191209336976e-05,
"loss": 0.0199,
"step": 430
},
{
"epoch": 0.3179765130984643,
"grad_norm": 0.5234375,
"learning_rate": 1.844052644648622e-05,
"loss": 0.0181,
"step": 440
},
{
"epoch": 0.3252032520325203,
"grad_norm": 0.296875,
"learning_rate": 1.839086168363546e-05,
"loss": 0.0199,
"step": 450
},
{
"epoch": 0.3324299909665763,
"grad_norm": 0.3203125,
"learning_rate": 1.8341196920784705e-05,
"loss": 0.0223,
"step": 460
},
{
"epoch": 0.33965672990063234,
"grad_norm": 0.3125,
"learning_rate": 1.8291532157933945e-05,
"loss": 0.018,
"step": 470
},
{
"epoch": 0.34688346883468835,
"grad_norm": 1.0703125,
"learning_rate": 1.824186739508319e-05,
"loss": 0.0398,
"step": 480
},
{
"epoch": 0.35411020776874436,
"grad_norm": 0.357421875,
"learning_rate": 1.8192202632232433e-05,
"loss": 0.0286,
"step": 490
},
{
"epoch": 0.36133694670280037,
"grad_norm": 0.3125,
"learning_rate": 1.8142537869381677e-05,
"loss": 0.0193,
"step": 500
},
{
"epoch": 0.36133694670280037,
"eval_loss": 0.023125287145376205,
"eval_runtime": 43.3724,
"eval_samples_per_second": 26.883,
"eval_steps_per_second": 6.732,
"step": 500
},
{
"epoch": 0.3685636856368564,
"grad_norm": 0.470703125,
"learning_rate": 1.8092873106530917e-05,
"loss": 0.0202,
"step": 510
},
{
"epoch": 0.3757904245709124,
"grad_norm": 0.328125,
"learning_rate": 1.804320834368016e-05,
"loss": 0.0179,
"step": 520
},
{
"epoch": 0.3830171635049684,
"grad_norm": 0.337890625,
"learning_rate": 1.7993543580829402e-05,
"loss": 0.0178,
"step": 530
},
{
"epoch": 0.3902439024390244,
"grad_norm": 0.2490234375,
"learning_rate": 1.7943878817978646e-05,
"loss": 0.0187,
"step": 540
},
{
"epoch": 0.3974706413730804,
"grad_norm": 0.39453125,
"learning_rate": 1.7894214055127886e-05,
"loss": 0.0194,
"step": 550
},
{
"epoch": 0.4046973803071364,
"grad_norm": 0.38671875,
"learning_rate": 1.784454929227713e-05,
"loss": 0.0288,
"step": 560
},
{
"epoch": 0.41192411924119243,
"grad_norm": 1.0,
"learning_rate": 1.7794884529426374e-05,
"loss": 0.0215,
"step": 570
},
{
"epoch": 0.41915085817524844,
"grad_norm": 0.6484375,
"learning_rate": 1.7745219766575618e-05,
"loss": 0.0185,
"step": 580
},
{
"epoch": 0.42637759710930445,
"grad_norm": 0.34375,
"learning_rate": 1.769555500372486e-05,
"loss": 0.0197,
"step": 590
},
{
"epoch": 0.43360433604336046,
"grad_norm": 0.458984375,
"learning_rate": 1.7645890240874102e-05,
"loss": 0.0191,
"step": 600
},
{
"epoch": 0.4408310749774164,
"grad_norm": 1.015625,
"learning_rate": 1.7596225478023343e-05,
"loss": 0.0218,
"step": 610
},
{
"epoch": 0.4480578139114724,
"grad_norm": 0.416015625,
"learning_rate": 1.7546560715172587e-05,
"loss": 0.0199,
"step": 620
},
{
"epoch": 0.45528455284552843,
"grad_norm": 0.80078125,
"learning_rate": 1.7496895952321827e-05,
"loss": 0.0256,
"step": 630
},
{
"epoch": 0.46251129177958444,
"grad_norm": 0.46484375,
"learning_rate": 1.744723118947107e-05,
"loss": 0.0195,
"step": 640
},
{
"epoch": 0.46973803071364045,
"grad_norm": 0.54296875,
"learning_rate": 1.7397566426620315e-05,
"loss": 0.0191,
"step": 650
},
{
"epoch": 0.47696476964769646,
"grad_norm": 0.294921875,
"learning_rate": 1.7347901663769556e-05,
"loss": 0.0194,
"step": 660
},
{
"epoch": 0.48419150858175247,
"grad_norm": 0.41796875,
"learning_rate": 1.72982369009188e-05,
"loss": 0.0182,
"step": 670
},
{
"epoch": 0.4914182475158085,
"grad_norm": 0.484375,
"learning_rate": 1.7248572138068044e-05,
"loss": 0.0188,
"step": 680
},
{
"epoch": 0.4986449864498645,
"grad_norm": 0.4765625,
"learning_rate": 1.7198907375217284e-05,
"loss": 0.02,
"step": 690
},
{
"epoch": 0.5058717253839206,
"grad_norm": 0.2578125,
"learning_rate": 1.7149242612366528e-05,
"loss": 0.0191,
"step": 700
},
{
"epoch": 0.5130984643179766,
"grad_norm": 0.416015625,
"learning_rate": 1.709957784951577e-05,
"loss": 0.0197,
"step": 710
},
{
"epoch": 0.5203252032520326,
"grad_norm": 0.388671875,
"learning_rate": 1.7049913086665013e-05,
"loss": 0.02,
"step": 720
},
{
"epoch": 0.5275519421860885,
"grad_norm": 1.2265625,
"learning_rate": 1.7000248323814253e-05,
"loss": 0.0233,
"step": 730
},
{
"epoch": 0.5347786811201445,
"grad_norm": 0.400390625,
"learning_rate": 1.6950583560963497e-05,
"loss": 0.0175,
"step": 740
},
{
"epoch": 0.5420054200542005,
"grad_norm": 0.416015625,
"learning_rate": 1.690091879811274e-05,
"loss": 0.019,
"step": 750
},
{
"epoch": 0.5492321589882565,
"grad_norm": 0.306640625,
"learning_rate": 1.6851254035261985e-05,
"loss": 0.0306,
"step": 760
},
{
"epoch": 0.5564588979223125,
"grad_norm": 0.2333984375,
"learning_rate": 1.6801589272411225e-05,
"loss": 0.0187,
"step": 770
},
{
"epoch": 0.5636856368563685,
"grad_norm": 0.2060546875,
"learning_rate": 1.675192450956047e-05,
"loss": 0.0184,
"step": 780
},
{
"epoch": 0.5709123757904245,
"grad_norm": 1.0390625,
"learning_rate": 1.670225974670971e-05,
"loss": 0.0242,
"step": 790
},
{
"epoch": 0.5781391147244805,
"grad_norm": 0.49609375,
"learning_rate": 1.6652594983858954e-05,
"loss": 0.0228,
"step": 800
},
{
"epoch": 0.5853658536585366,
"grad_norm": 0.3828125,
"learning_rate": 1.6602930221008194e-05,
"loss": 0.0197,
"step": 810
},
{
"epoch": 0.5925925925925926,
"grad_norm": 0.4453125,
"learning_rate": 1.6553265458157438e-05,
"loss": 0.0192,
"step": 820
},
{
"epoch": 0.5998193315266486,
"grad_norm": 0.310546875,
"learning_rate": 1.6503600695306682e-05,
"loss": 0.0176,
"step": 830
},
{
"epoch": 0.6070460704607046,
"grad_norm": 0.337890625,
"learning_rate": 1.6453935932455926e-05,
"loss": 0.0228,
"step": 840
},
{
"epoch": 0.6142728093947606,
"grad_norm": 0.3984375,
"learning_rate": 1.6404271169605167e-05,
"loss": 0.0187,
"step": 850
},
{
"epoch": 0.6214995483288166,
"grad_norm": 0.34765625,
"learning_rate": 1.635460640675441e-05,
"loss": 0.0191,
"step": 860
},
{
"epoch": 0.6287262872628726,
"grad_norm": 0.421875,
"learning_rate": 1.630494164390365e-05,
"loss": 0.0182,
"step": 870
},
{
"epoch": 0.6359530261969286,
"grad_norm": 0.3359375,
"learning_rate": 1.6255276881052895e-05,
"loss": 0.019,
"step": 880
},
{
"epoch": 0.6431797651309846,
"grad_norm": 0.353515625,
"learning_rate": 1.6205612118202136e-05,
"loss": 0.0192,
"step": 890
},
{
"epoch": 0.6504065040650406,
"grad_norm": 0.357421875,
"learning_rate": 1.615594735535138e-05,
"loss": 0.0199,
"step": 900
},
{
"epoch": 0.6576332429990966,
"grad_norm": 0.357421875,
"learning_rate": 1.610628259250062e-05,
"loss": 0.0182,
"step": 910
},
{
"epoch": 0.6648599819331527,
"grad_norm": 0.36328125,
"learning_rate": 1.6056617829649864e-05,
"loss": 0.0176,
"step": 920
},
{
"epoch": 0.6720867208672087,
"grad_norm": 0.328125,
"learning_rate": 1.6006953066799108e-05,
"loss": 0.0197,
"step": 930
},
{
"epoch": 0.6793134598012647,
"grad_norm": 0.4296875,
"learning_rate": 1.5957288303948352e-05,
"loss": 0.0188,
"step": 940
},
{
"epoch": 0.6865401987353207,
"grad_norm": 0.345703125,
"learning_rate": 1.5907623541097592e-05,
"loss": 0.0196,
"step": 950
},
{
"epoch": 0.6937669376693767,
"grad_norm": 0.44921875,
"learning_rate": 1.5857958778246836e-05,
"loss": 0.019,
"step": 960
},
{
"epoch": 0.7009936766034327,
"grad_norm": 0.37109375,
"learning_rate": 1.5808294015396077e-05,
"loss": 0.0287,
"step": 970
},
{
"epoch": 0.7082204155374887,
"grad_norm": 0.44921875,
"learning_rate": 1.575862925254532e-05,
"loss": 0.0193,
"step": 980
},
{
"epoch": 0.7154471544715447,
"grad_norm": 0.28515625,
"learning_rate": 1.570896448969456e-05,
"loss": 0.0185,
"step": 990
},
{
"epoch": 0.7226738934056007,
"grad_norm": 0.5625,
"learning_rate": 1.5659299726843805e-05,
"loss": 0.0181,
"step": 1000
},
{
"epoch": 0.7226738934056007,
"eval_loss": 0.02289458177983761,
"eval_runtime": 43.4716,
"eval_samples_per_second": 26.822,
"eval_steps_per_second": 6.717,
"step": 1000
},
{
"epoch": 0.7299006323396567,
"grad_norm": 0.314453125,
"learning_rate": 1.560963496399305e-05,
"loss": 0.0181,
"step": 1010
},
{
"epoch": 0.7371273712737128,
"grad_norm": 0.333984375,
"learning_rate": 1.5559970201142293e-05,
"loss": 0.0517,
"step": 1020
},
{
"epoch": 0.7443541102077688,
"grad_norm": 0.43359375,
"learning_rate": 1.5510305438291533e-05,
"loss": 0.0314,
"step": 1030
},
{
"epoch": 0.7515808491418248,
"grad_norm": 0.60546875,
"learning_rate": 1.5460640675440777e-05,
"loss": 0.0185,
"step": 1040
},
{
"epoch": 0.7588075880758808,
"grad_norm": 0.54296875,
"learning_rate": 1.5410975912590018e-05,
"loss": 0.0183,
"step": 1050
},
{
"epoch": 0.7660343270099368,
"grad_norm": 0.30078125,
"learning_rate": 1.5361311149739262e-05,
"loss": 0.0193,
"step": 1060
},
{
"epoch": 0.7732610659439928,
"grad_norm": 0.322265625,
"learning_rate": 1.5311646386888502e-05,
"loss": 0.0185,
"step": 1070
},
{
"epoch": 0.7804878048780488,
"grad_norm": 0.375,
"learning_rate": 1.5261981624037746e-05,
"loss": 0.0187,
"step": 1080
},
{
"epoch": 0.7877145438121048,
"grad_norm": 0.373046875,
"learning_rate": 1.5212316861186989e-05,
"loss": 0.0176,
"step": 1090
},
{
"epoch": 0.7949412827461608,
"grad_norm": 0.328125,
"learning_rate": 1.5162652098336232e-05,
"loss": 0.0189,
"step": 1100
},
{
"epoch": 0.8021680216802168,
"grad_norm": 0.333984375,
"learning_rate": 1.5112987335485475e-05,
"loss": 0.0189,
"step": 1110
},
{
"epoch": 0.8093947606142728,
"grad_norm": 0.248046875,
"learning_rate": 1.5063322572634717e-05,
"loss": 0.0174,
"step": 1120
},
{
"epoch": 0.8166214995483289,
"grad_norm": 0.2216796875,
"learning_rate": 1.5013657809783959e-05,
"loss": 0.018,
"step": 1130
},
{
"epoch": 0.8238482384823849,
"grad_norm": 0.37109375,
"learning_rate": 1.4963993046933203e-05,
"loss": 0.0178,
"step": 1140
},
{
"epoch": 0.8310749774164409,
"grad_norm": 0.28125,
"learning_rate": 1.4914328284082444e-05,
"loss": 0.0165,
"step": 1150
},
{
"epoch": 0.8383017163504969,
"grad_norm": 0.35546875,
"learning_rate": 1.4864663521231688e-05,
"loss": 0.0238,
"step": 1160
},
{
"epoch": 0.8455284552845529,
"grad_norm": 0.265625,
"learning_rate": 1.481499875838093e-05,
"loss": 0.0195,
"step": 1170
},
{
"epoch": 0.8527551942186089,
"grad_norm": 2.46875,
"learning_rate": 1.4765333995530174e-05,
"loss": 0.0424,
"step": 1180
},
{
"epoch": 0.8599819331526649,
"grad_norm": 0.51953125,
"learning_rate": 1.4715669232679414e-05,
"loss": 0.0203,
"step": 1190
},
{
"epoch": 0.8672086720867209,
"grad_norm": 0.47265625,
"learning_rate": 1.4666004469828658e-05,
"loss": 0.0181,
"step": 1200
},
{
"epoch": 0.8744354110207768,
"grad_norm": 0.451171875,
"learning_rate": 1.46163397069779e-05,
"loss": 0.0189,
"step": 1210
},
{
"epoch": 0.8816621499548328,
"grad_norm": 0.380859375,
"learning_rate": 1.4566674944127144e-05,
"loss": 0.0322,
"step": 1220
},
{
"epoch": 0.8888888888888888,
"grad_norm": 0.298828125,
"learning_rate": 1.4517010181276385e-05,
"loss": 0.019,
"step": 1230
},
{
"epoch": 0.8961156278229448,
"grad_norm": 0.248046875,
"learning_rate": 1.4467345418425629e-05,
"loss": 0.0253,
"step": 1240
},
{
"epoch": 0.9033423667570009,
"grad_norm": 0.39453125,
"learning_rate": 1.4417680655574871e-05,
"loss": 0.018,
"step": 1250
},
{
"epoch": 0.9105691056910569,
"grad_norm": 0.345703125,
"learning_rate": 1.4368015892724115e-05,
"loss": 0.0217,
"step": 1260
},
{
"epoch": 0.9177958446251129,
"grad_norm": 0.330078125,
"learning_rate": 1.4318351129873355e-05,
"loss": 0.0336,
"step": 1270
},
{
"epoch": 0.9250225835591689,
"grad_norm": 0.267578125,
"learning_rate": 1.42686863670226e-05,
"loss": 0.0219,
"step": 1280
},
{
"epoch": 0.9322493224932249,
"grad_norm": 0.65234375,
"learning_rate": 1.4219021604171842e-05,
"loss": 0.0212,
"step": 1290
},
{
"epoch": 0.9394760614272809,
"grad_norm": 0.259765625,
"learning_rate": 1.4169356841321085e-05,
"loss": 0.0172,
"step": 1300
},
{
"epoch": 0.9467028003613369,
"grad_norm": 0.61328125,
"learning_rate": 1.4119692078470326e-05,
"loss": 0.0187,
"step": 1310
},
{
"epoch": 0.9539295392953929,
"grad_norm": 0.3671875,
"learning_rate": 1.407002731561957e-05,
"loss": 0.0179,
"step": 1320
},
{
"epoch": 0.9611562782294489,
"grad_norm": 0.3046875,
"learning_rate": 1.4020362552768812e-05,
"loss": 0.0179,
"step": 1330
},
{
"epoch": 0.9683830171635049,
"grad_norm": 0.56640625,
"learning_rate": 1.3970697789918054e-05,
"loss": 0.0187,
"step": 1340
},
{
"epoch": 0.975609756097561,
"grad_norm": 0.380859375,
"learning_rate": 1.3921033027067297e-05,
"loss": 0.0193,
"step": 1350
},
{
"epoch": 0.982836495031617,
"grad_norm": 0.38671875,
"learning_rate": 1.387136826421654e-05,
"loss": 0.02,
"step": 1360
},
{
"epoch": 0.990063233965673,
"grad_norm": 0.34375,
"learning_rate": 1.3821703501365781e-05,
"loss": 0.0171,
"step": 1370
},
{
"epoch": 0.997289972899729,
"grad_norm": 0.3203125,
"learning_rate": 1.3772038738515025e-05,
"loss": 0.0197,
"step": 1380
},
{
"epoch": 1.0043360433604336,
"grad_norm": 0.2333984375,
"learning_rate": 1.3722373975664267e-05,
"loss": 0.0167,
"step": 1390
},
{
"epoch": 1.0115627822944897,
"grad_norm": 0.1865234375,
"learning_rate": 1.3672709212813511e-05,
"loss": 0.0184,
"step": 1400
},
{
"epoch": 1.0187895212285456,
"grad_norm": 0.2099609375,
"learning_rate": 1.3623044449962752e-05,
"loss": 0.015,
"step": 1410
},
{
"epoch": 1.0260162601626017,
"grad_norm": 0.169921875,
"learning_rate": 1.3573379687111996e-05,
"loss": 0.0145,
"step": 1420
},
{
"epoch": 1.0332429990966576,
"grad_norm": 0.19921875,
"learning_rate": 1.3523714924261238e-05,
"loss": 0.0151,
"step": 1430
},
{
"epoch": 1.0404697380307137,
"grad_norm": 0.1337890625,
"learning_rate": 1.3474050161410482e-05,
"loss": 0.016,
"step": 1440
},
{
"epoch": 1.0476964769647696,
"grad_norm": 0.173828125,
"learning_rate": 1.3424385398559722e-05,
"loss": 0.0153,
"step": 1450
},
{
"epoch": 1.0549232158988258,
"grad_norm": 0.2275390625,
"learning_rate": 1.3374720635708966e-05,
"loss": 0.02,
"step": 1460
},
{
"epoch": 1.0621499548328817,
"grad_norm": 0.2451171875,
"learning_rate": 1.3325055872858208e-05,
"loss": 0.0176,
"step": 1470
},
{
"epoch": 1.0693766937669378,
"grad_norm": 0.287109375,
"learning_rate": 1.3275391110007452e-05,
"loss": 0.0175,
"step": 1480
},
{
"epoch": 1.0766034327009937,
"grad_norm": 0.15234375,
"learning_rate": 1.3225726347156693e-05,
"loss": 0.015,
"step": 1490
},
{
"epoch": 1.0838301716350496,
"grad_norm": 0.15234375,
"learning_rate": 1.3176061584305937e-05,
"loss": 0.0226,
"step": 1500
},
{
"epoch": 1.0838301716350496,
"eval_loss": 0.022791102528572083,
"eval_runtime": 43.397,
"eval_samples_per_second": 26.868,
"eval_steps_per_second": 6.729,
"step": 1500
}
],
"logging_steps": 10,
"max_steps": 4152,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 6.261646670739886e+17,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}