{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.0,
"eval_steps": 500,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.008,
"grad_norm": 145.86453247070312,
"learning_rate": 1e-05,
"loss": 18.4693,
"mean_token_accuracy": 0.4418533444404602,
"step": 1
},
{
"epoch": 0.016,
"grad_norm": 131.47439575195312,
"learning_rate": 2e-05,
"loss": 17.1904,
"mean_token_accuracy": 0.4580386132001877,
"step": 2
},
{
"epoch": 0.024,
"grad_norm": 93.59867095947266,
"learning_rate": 3e-05,
"loss": 13.3976,
"mean_token_accuracy": 0.520381361246109,
"step": 3
},
{
"epoch": 0.032,
"grad_norm": 89.1477279663086,
"learning_rate": 4e-05,
"loss": 11.5329,
"mean_token_accuracy": 0.5683971494436264,
"step": 4
},
{
"epoch": 0.04,
"grad_norm": 109.09867858886719,
"learning_rate": 5e-05,
"loss": 9.6112,
"mean_token_accuracy": 0.6148007661104202,
"step": 5
},
{
"epoch": 0.048,
"grad_norm": 70.35514068603516,
"learning_rate": 4.98989898989899e-05,
"loss": 8.5729,
"mean_token_accuracy": 0.6641161292791367,
"step": 6
},
{
"epoch": 0.056,
"grad_norm": 69.57074737548828,
"learning_rate": 4.97979797979798e-05,
"loss": 7.9422,
"mean_token_accuracy": 0.6828223764896393,
"step": 7
},
{
"epoch": 0.064,
"grad_norm": 55.16558837890625,
"learning_rate": 4.9696969696969694e-05,
"loss": 7.5634,
"mean_token_accuracy": 0.6935229450464249,
"step": 8
},
{
"epoch": 0.072,
"grad_norm": 55.306785583496094,
"learning_rate": 4.9595959595959594e-05,
"loss": 6.9885,
"mean_token_accuracy": 0.6911955326795578,
"step": 9
},
{
"epoch": 0.08,
"grad_norm": 60.01897048950195,
"learning_rate": 4.94949494949495e-05,
"loss": 7.8794,
"mean_token_accuracy": 0.7122491598129272,
"step": 10
},
{
"epoch": 0.088,
"grad_norm": 44.305118560791016,
"learning_rate": 4.93939393939394e-05,
"loss": 6.9955,
"mean_token_accuracy": 0.7298235297203064,
"step": 11
},
{
"epoch": 0.096,
"grad_norm": 40.814090728759766,
"learning_rate": 4.92929292929293e-05,
"loss": 6.7332,
"mean_token_accuracy": 0.7177170217037201,
"step": 12
},
{
"epoch": 0.104,
"grad_norm": 31.777416229248047,
"learning_rate": 4.919191919191919e-05,
"loss": 6.5969,
"mean_token_accuracy": 0.7185130566358566,
"step": 13
},
{
"epoch": 0.112,
"grad_norm": 32.8369140625,
"learning_rate": 4.909090909090909e-05,
"loss": 6.518,
"mean_token_accuracy": 0.7157497704029083,
"step": 14
},
{
"epoch": 0.12,
"grad_norm": 27.942502975463867,
"learning_rate": 4.898989898989899e-05,
"loss": 6.0411,
"mean_token_accuracy": 0.7299728095531464,
"step": 15
},
{
"epoch": 0.128,
"grad_norm": 26.94302749633789,
"learning_rate": 4.888888888888889e-05,
"loss": 6.0288,
"mean_token_accuracy": 0.7378736436367035,
"step": 16
},
{
"epoch": 0.136,
"grad_norm": 29.398008346557617,
"learning_rate": 4.878787878787879e-05,
"loss": 5.7906,
"mean_token_accuracy": 0.7615540027618408,
"step": 17
},
{
"epoch": 0.144,
"grad_norm": 33.691036224365234,
"learning_rate": 4.868686868686869e-05,
"loss": 6.148,
"mean_token_accuracy": 0.7420072853565216,
"step": 18
},
{
"epoch": 0.152,
"grad_norm": 28.954744338989258,
"learning_rate": 4.858585858585859e-05,
"loss": 6.074,
"mean_token_accuracy": 0.7376030832529068,
"step": 19
},
{
"epoch": 0.16,
"grad_norm": 30.093732833862305,
"learning_rate": 4.848484848484849e-05,
"loss": 6.0524,
"mean_token_accuracy": 0.7355515509843826,
"step": 20
},
{
"epoch": 0.168,
"grad_norm": 32.198020935058594,
"learning_rate": 4.838383838383839e-05,
"loss": 6.3595,
"mean_token_accuracy": 0.7233454436063766,
"step": 21
},
{
"epoch": 0.176,
"grad_norm": 27.62493896484375,
"learning_rate": 4.828282828282829e-05,
"loss": 4.9527,
"mean_token_accuracy": 0.7749838680028915,
"step": 22
},
{
"epoch": 0.184,
"grad_norm": 29.359989166259766,
"learning_rate": 4.8181818181818186e-05,
"loss": 6.0378,
"mean_token_accuracy": 0.7387848794460297,
"step": 23
},
{
"epoch": 0.192,
"grad_norm": 28.882671356201172,
"learning_rate": 4.808080808080808e-05,
"loss": 5.889,
"mean_token_accuracy": 0.7291557788848877,
"step": 24
},
{
"epoch": 0.2,
"grad_norm": 26.889739990234375,
"learning_rate": 4.797979797979798e-05,
"loss": 5.5897,
"mean_token_accuracy": 0.7566297799348831,
"step": 25
},
{
"epoch": 0.208,
"grad_norm": 27.18845558166504,
"learning_rate": 4.787878787878788e-05,
"loss": 5.4735,
"mean_token_accuracy": 0.7656728178262711,
"step": 26
},
{
"epoch": 0.216,
"grad_norm": 23.547197341918945,
"learning_rate": 4.7777777777777784e-05,
"loss": 5.6021,
"mean_token_accuracy": 0.740881085395813,
"step": 27
},
{
"epoch": 0.224,
"grad_norm": 26.538345336914062,
"learning_rate": 4.7676767676767684e-05,
"loss": 5.22,
"mean_token_accuracy": 0.7639279812574387,
"step": 28
},
{
"epoch": 0.232,
"grad_norm": 24.266155242919922,
"learning_rate": 4.7575757575757576e-05,
"loss": 5.449,
"mean_token_accuracy": 0.7561690509319305,
"step": 29
},
{
"epoch": 0.24,
"grad_norm": 26.028945922851562,
"learning_rate": 4.7474747474747476e-05,
"loss": 5.6177,
"mean_token_accuracy": 0.7446402460336685,
"step": 30
},
{
"epoch": 0.248,
"grad_norm": 29.959096908569336,
"learning_rate": 4.7373737373737375e-05,
"loss": 5.9764,
"mean_token_accuracy": 0.7454901784658432,
"step": 31
},
{
"epoch": 0.256,
"grad_norm": 33.07018280029297,
"learning_rate": 4.7272727272727275e-05,
"loss": 4.9627,
"mean_token_accuracy": 0.787445604801178,
"step": 32
},
{
"epoch": 0.264,
"grad_norm": 26.5466365814209,
"learning_rate": 4.7171717171717174e-05,
"loss": 5.0905,
"mean_token_accuracy": 0.772098571062088,
"step": 33
},
{
"epoch": 0.272,
"grad_norm": 29.91367530822754,
"learning_rate": 4.7070707070707074e-05,
"loss": 5.3797,
"mean_token_accuracy": 0.7676996439695358,
"step": 34
},
{
"epoch": 0.28,
"grad_norm": 27.248432159423828,
"learning_rate": 4.696969696969697e-05,
"loss": 5.671,
"mean_token_accuracy": 0.755952462553978,
"step": 35
},
{
"epoch": 0.288,
"grad_norm": 27.235933303833008,
"learning_rate": 4.686868686868687e-05,
"loss": 5.2058,
"mean_token_accuracy": 0.787963479757309,
"step": 36
},
{
"epoch": 0.296,
"grad_norm": 28.442148208618164,
"learning_rate": 4.676767676767677e-05,
"loss": 4.8147,
"mean_token_accuracy": 0.7970237880945206,
"step": 37
},
{
"epoch": 0.304,
"grad_norm": 28.56350326538086,
"learning_rate": 4.666666666666667e-05,
"loss": 5.7801,
"mean_token_accuracy": 0.7452163100242615,
"step": 38
},
{
"epoch": 0.312,
"grad_norm": 24.276662826538086,
"learning_rate": 4.656565656565657e-05,
"loss": 5.3188,
"mean_token_accuracy": 0.7638489454984665,
"step": 39
},
{
"epoch": 0.32,
"grad_norm": 25.21291732788086,
"learning_rate": 4.6464646464646464e-05,
"loss": 5.4533,
"mean_token_accuracy": 0.7537444680929184,
"step": 40
},
{
"epoch": 0.328,
"grad_norm": 26.179948806762695,
"learning_rate": 4.636363636363636e-05,
"loss": 5.2261,
"mean_token_accuracy": 0.7736174613237381,
"step": 41
},
{
"epoch": 0.336,
"grad_norm": 23.527063369750977,
"learning_rate": 4.626262626262626e-05,
"loss": 4.9225,
"mean_token_accuracy": 0.7722691893577576,
"step": 42
},
{
"epoch": 0.344,
"grad_norm": 24.048324584960938,
"learning_rate": 4.616161616161616e-05,
"loss": 5.0608,
"mean_token_accuracy": 0.7645718157291412,
"step": 43
},
{
"epoch": 0.352,
"grad_norm": 24.779226303100586,
"learning_rate": 4.606060606060607e-05,
"loss": 4.8156,
"mean_token_accuracy": 0.7857754081487656,
"step": 44
},
{
"epoch": 0.36,
"grad_norm": 23.97334861755371,
"learning_rate": 4.595959595959596e-05,
"loss": 5.0281,
"mean_token_accuracy": 0.7758517414331436,
"step": 45
},
{
"epoch": 0.368,
"grad_norm": 24.752384185791016,
"learning_rate": 4.585858585858586e-05,
"loss": 4.7445,
"mean_token_accuracy": 0.7875040620565414,
"step": 46
},
{
"epoch": 0.376,
"grad_norm": 23.33396339416504,
"learning_rate": 4.575757575757576e-05,
"loss": 4.9179,
"mean_token_accuracy": 0.7820334136486053,
"step": 47
},
{
"epoch": 0.384,
"grad_norm": 25.201007843017578,
"learning_rate": 4.565656565656566e-05,
"loss": 5.5614,
"mean_token_accuracy": 0.7553493082523346,
"step": 48
},
{
"epoch": 0.392,
"grad_norm": 26.965972900390625,
"learning_rate": 4.555555555555556e-05,
"loss": 5.2295,
"mean_token_accuracy": 0.7683407664299011,
"step": 49
},
{
"epoch": 0.4,
"grad_norm": 27.544897079467773,
"learning_rate": 4.545454545454546e-05,
"loss": 5.6261,
"mean_token_accuracy": 0.7545335441827774,
"step": 50
},
{
"epoch": 0.408,
"grad_norm": 26.644731521606445,
"learning_rate": 4.535353535353535e-05,
"loss": 4.6757,
"mean_token_accuracy": 0.7908278107643127,
"step": 51
},
{
"epoch": 0.416,
"grad_norm": 21.93665885925293,
"learning_rate": 4.525252525252526e-05,
"loss": 4.631,
"mean_token_accuracy": 0.7804701924324036,
"step": 52
},
{
"epoch": 0.424,
"grad_norm": 22.430749893188477,
"learning_rate": 4.515151515151516e-05,
"loss": 5.1858,
"mean_token_accuracy": 0.7598922550678253,
"step": 53
},
{
"epoch": 0.432,
"grad_norm": 22.974781036376953,
"learning_rate": 4.5050505050505056e-05,
"loss": 4.8737,
"mean_token_accuracy": 0.7714487016201019,
"step": 54
},
{
"epoch": 0.44,
"grad_norm": 25.078826904296875,
"learning_rate": 4.494949494949495e-05,
"loss": 5.1416,
"mean_token_accuracy": 0.7775991857051849,
"step": 55
},
{
"epoch": 0.448,
"grad_norm": 26.377742767333984,
"learning_rate": 4.484848484848485e-05,
"loss": 4.3445,
"mean_token_accuracy": 0.8000840097665787,
"step": 56
},
{
"epoch": 0.456,
"grad_norm": 23.724504470825195,
"learning_rate": 4.474747474747475e-05,
"loss": 5.0277,
"mean_token_accuracy": 0.7753082364797592,
"step": 57
},
{
"epoch": 0.464,
"grad_norm": 24.300643920898438,
"learning_rate": 4.464646464646465e-05,
"loss": 4.0889,
"mean_token_accuracy": 0.805213525891304,
"step": 58
},
{
"epoch": 0.472,
"grad_norm": 24.581886291503906,
"learning_rate": 4.454545454545455e-05,
"loss": 5.0518,
"mean_token_accuracy": 0.7843358516693115,
"step": 59
},
{
"epoch": 0.48,
"grad_norm": 26.722314834594727,
"learning_rate": 4.4444444444444447e-05,
"loss": 5.087,
"mean_token_accuracy": 0.7961481958627701,
"step": 60
},
{
"epoch": 0.488,
"grad_norm": 25.016273498535156,
"learning_rate": 4.4343434343434346e-05,
"loss": 5.5151,
"mean_token_accuracy": 0.7682371735572815,
"step": 61
},
{
"epoch": 0.496,
"grad_norm": 24.71924591064453,
"learning_rate": 4.4242424242424246e-05,
"loss": 4.6307,
"mean_token_accuracy": 0.7860427796840668,
"step": 62
},
{
"epoch": 0.504,
"grad_norm": 21.29850959777832,
"learning_rate": 4.4141414141414145e-05,
"loss": 4.7079,
"mean_token_accuracy": 0.7890657931566238,
"step": 63
},
{
"epoch": 0.512,
"grad_norm": 23.304716110229492,
"learning_rate": 4.4040404040404044e-05,
"loss": 5.0797,
"mean_token_accuracy": 0.7593720108270645,
"step": 64
},
{
"epoch": 0.52,
"grad_norm": 21.623689651489258,
"learning_rate": 4.3939393939393944e-05,
"loss": 4.749,
"mean_token_accuracy": 0.778199702501297,
"step": 65
},
{
"epoch": 0.528,
"grad_norm": 19.37482452392578,
"learning_rate": 4.383838383838384e-05,
"loss": 4.6279,
"mean_token_accuracy": 0.7791639715433121,
"step": 66
},
{
"epoch": 0.536,
"grad_norm": 22.020885467529297,
"learning_rate": 4.3737373737373736e-05,
"loss": 4.9881,
"mean_token_accuracy": 0.7844051718711853,
"step": 67
},
{
"epoch": 0.544,
"grad_norm": 23.672704696655273,
"learning_rate": 4.3636363636363636e-05,
"loss": 5.7155,
"mean_token_accuracy": 0.7384811043739319,
"step": 68
},
{
"epoch": 0.552,
"grad_norm": 22.16874122619629,
"learning_rate": 4.3535353535353535e-05,
"loss": 5.272,
"mean_token_accuracy": 0.7523021399974823,
"step": 69
},
{
"epoch": 0.56,
"grad_norm": 21.230941772460938,
"learning_rate": 4.343434343434344e-05,
"loss": 4.7733,
"mean_token_accuracy": 0.7712208926677704,
"step": 70
},
{
"epoch": 0.568,
"grad_norm": 23.96208381652832,
"learning_rate": 4.3333333333333334e-05,
"loss": 5.2342,
"mean_token_accuracy": 0.7734561115503311,
"step": 71
},
{
"epoch": 0.576,
"grad_norm": 19.650798797607422,
"learning_rate": 4.3232323232323234e-05,
"loss": 4.0045,
"mean_token_accuracy": 0.8018752634525299,
"step": 72
},
{
"epoch": 0.584,
"grad_norm": 21.702190399169922,
"learning_rate": 4.313131313131313e-05,
"loss": 5.4078,
"mean_token_accuracy": 0.7569945156574249,
"step": 73
},
{
"epoch": 0.592,
"grad_norm": 20.384090423583984,
"learning_rate": 4.303030303030303e-05,
"loss": 4.2726,
"mean_token_accuracy": 0.8059134036302567,
"step": 74
},
{
"epoch": 0.6,
"grad_norm": 20.51775360107422,
"learning_rate": 4.292929292929293e-05,
"loss": 4.3555,
"mean_token_accuracy": 0.7980977892875671,
"step": 75
},
{
"epoch": 0.608,
"grad_norm": 19.564922332763672,
"learning_rate": 4.282828282828283e-05,
"loss": 4.5646,
"mean_token_accuracy": 0.7788865268230438,
"step": 76
},
{
"epoch": 0.616,
"grad_norm": 24.02853012084961,
"learning_rate": 4.2727272727272724e-05,
"loss": 4.8484,
"mean_token_accuracy": 0.794918566942215,
"step": 77
},
{
"epoch": 0.624,
"grad_norm": 19.45448875427246,
"learning_rate": 4.262626262626263e-05,
"loss": 4.201,
"mean_token_accuracy": 0.79364313185215,
"step": 78
},
{
"epoch": 0.632,
"grad_norm": 20.79984474182129,
"learning_rate": 4.252525252525253e-05,
"loss": 4.7681,
"mean_token_accuracy": 0.7742249518632889,
"step": 79
},
{
"epoch": 0.64,
"grad_norm": 19.014602661132812,
"learning_rate": 4.242424242424243e-05,
"loss": 4.3379,
"mean_token_accuracy": 0.7996639758348465,
"step": 80
},
{
"epoch": 0.648,
"grad_norm": 20.98752212524414,
"learning_rate": 4.232323232323233e-05,
"loss": 4.4264,
"mean_token_accuracy": 0.7978147119283676,
"step": 81
},
{
"epoch": 0.656,
"grad_norm": 19.011503219604492,
"learning_rate": 4.222222222222222e-05,
"loss": 4.4577,
"mean_token_accuracy": 0.7895375341176987,
"step": 82
},
{
"epoch": 0.664,
"grad_norm": 18.420442581176758,
"learning_rate": 4.212121212121212e-05,
"loss": 4.2517,
"mean_token_accuracy": 0.8092215359210968,
"step": 83
},
{
"epoch": 0.672,
"grad_norm": 20.489845275878906,
"learning_rate": 4.202020202020202e-05,
"loss": 4.8926,
"mean_token_accuracy": 0.7734092026948929,
"step": 84
},
{
"epoch": 0.68,
"grad_norm": 19.172943115234375,
"learning_rate": 4.191919191919192e-05,
"loss": 4.0021,
"mean_token_accuracy": 0.8282384127378464,
"step": 85
},
{
"epoch": 0.688,
"grad_norm": 18.97472381591797,
"learning_rate": 4.181818181818182e-05,
"loss": 4.2862,
"mean_token_accuracy": 0.8147316575050354,
"step": 86
},
{
"epoch": 0.696,
"grad_norm": 22.11132049560547,
"learning_rate": 4.171717171717172e-05,
"loss": 4.7801,
"mean_token_accuracy": 0.7815755158662796,
"step": 87
},
{
"epoch": 0.704,
"grad_norm": 24.296100616455078,
"learning_rate": 4.161616161616162e-05,
"loss": 4.7305,
"mean_token_accuracy": 0.8020643591880798,
"step": 88
},
{
"epoch": 0.712,
"grad_norm": 20.803821563720703,
"learning_rate": 4.151515151515152e-05,
"loss": 5.2232,
"mean_token_accuracy": 0.7705590724945068,
"step": 89
},
{
"epoch": 0.72,
"grad_norm": 18.738903045654297,
"learning_rate": 4.141414141414142e-05,
"loss": 4.2926,
"mean_token_accuracy": 0.8065244108438492,
"step": 90
},
{
"epoch": 0.728,
"grad_norm": 21.6732120513916,
"learning_rate": 4.131313131313132e-05,
"loss": 4.3887,
"mean_token_accuracy": 0.7921336442232132,
"step": 91
},
{
"epoch": 0.736,
"grad_norm": 21.643827438354492,
"learning_rate": 4.1212121212121216e-05,
"loss": 4.5504,
"mean_token_accuracy": 0.7796259820461273,
"step": 92
},
{
"epoch": 0.744,
"grad_norm": 19.445819854736328,
"learning_rate": 4.111111111111111e-05,
"loss": 4.2139,
"mean_token_accuracy": 0.7942475080490112,
"step": 93
},
{
"epoch": 0.752,
"grad_norm": 20.41779899597168,
"learning_rate": 4.101010101010101e-05,
"loss": 4.4009,
"mean_token_accuracy": 0.7928617000579834,
"step": 94
},
{
"epoch": 0.76,
"grad_norm": 20.34018325805664,
"learning_rate": 4.0909090909090915e-05,
"loss": 4.2759,
"mean_token_accuracy": 0.800753116607666,
"step": 95
},
{
"epoch": 0.768,
"grad_norm": 19.045612335205078,
"learning_rate": 4.0808080808080814e-05,
"loss": 4.0605,
"mean_token_accuracy": 0.8086353838443756,
"step": 96
},
{
"epoch": 0.776,
"grad_norm": 20.315345764160156,
"learning_rate": 4.070707070707071e-05,
"loss": 4.3944,
"mean_token_accuracy": 0.7914804071187973,
"step": 97
},
{
"epoch": 0.784,
"grad_norm": 19.407426834106445,
"learning_rate": 4.0606060606060606e-05,
"loss": 3.9872,
"mean_token_accuracy": 0.8120452463626862,
"step": 98
},
{
"epoch": 0.792,
"grad_norm": 22.875099182128906,
"learning_rate": 4.0505050505050506e-05,
"loss": 4.3298,
"mean_token_accuracy": 0.7921182066202164,
"step": 99
},
{
"epoch": 0.8,
"grad_norm": 23.487300872802734,
"learning_rate": 4.0404040404040405e-05,
"loss": 4.966,
"mean_token_accuracy": 0.7685753107070923,
"step": 100
},
{
"epoch": 0.808,
"grad_norm": 19.982942581176758,
"learning_rate": 4.0303030303030305e-05,
"loss": 4.0714,
"mean_token_accuracy": 0.8111653327941895,
"step": 101
},
{
"epoch": 0.816,
"grad_norm": 19.33985137939453,
"learning_rate": 4.0202020202020204e-05,
"loss": 3.9349,
"mean_token_accuracy": 0.8289202004671097,
"step": 102
},
{
"epoch": 0.824,
"grad_norm": 19.506378173828125,
"learning_rate": 4.01010101010101e-05,
"loss": 4.1454,
"mean_token_accuracy": 0.8035620599985123,
"step": 103
},
{
"epoch": 0.832,
"grad_norm": 20.6740665435791,
"learning_rate": 4e-05,
"loss": 5.1995,
"mean_token_accuracy": 0.7696829289197922,
"step": 104
},
{
"epoch": 0.84,
"grad_norm": 19.577415466308594,
"learning_rate": 3.98989898989899e-05,
"loss": 4.1304,
"mean_token_accuracy": 0.7980705797672272,
"step": 105
},
{
"epoch": 0.848,
"grad_norm": 21.14516258239746,
"learning_rate": 3.97979797979798e-05,
"loss": 4.1035,
"mean_token_accuracy": 0.8171603679656982,
"step": 106
},
{
"epoch": 0.856,
"grad_norm": 19.88467788696289,
"learning_rate": 3.96969696969697e-05,
"loss": 4.7231,
"mean_token_accuracy": 0.7842641174793243,
"step": 107
},
{
"epoch": 0.864,
"grad_norm": 19.563447952270508,
"learning_rate": 3.9595959595959594e-05,
"loss": 4.9512,
"mean_token_accuracy": 0.7920339405536652,
"step": 108
},
{
"epoch": 0.872,
"grad_norm": 19.343595504760742,
"learning_rate": 3.9494949494949494e-05,
"loss": 4.4894,
"mean_token_accuracy": 0.7923117727041245,
"step": 109
},
{
"epoch": 0.88,
"grad_norm": 19.845014572143555,
"learning_rate": 3.939393939393939e-05,
"loss": 3.9204,
"mean_token_accuracy": 0.8179315477609634,
"step": 110
},
{
"epoch": 0.888,
"grad_norm": 20.17218780517578,
"learning_rate": 3.929292929292929e-05,
"loss": 4.6602,
"mean_token_accuracy": 0.7860896587371826,
"step": 111
},
{
"epoch": 0.896,
"grad_norm": 17.958740234375,
"learning_rate": 3.91919191919192e-05,
"loss": 3.9505,
"mean_token_accuracy": 0.802581325173378,
"step": 112
},
{
"epoch": 0.904,
"grad_norm": 17.446216583251953,
"learning_rate": 3.909090909090909e-05,
"loss": 3.6399,
"mean_token_accuracy": 0.8280811607837677,
"step": 113
},
{
"epoch": 0.912,
"grad_norm": 19.220077514648438,
"learning_rate": 3.898989898989899e-05,
"loss": 3.9278,
"mean_token_accuracy": 0.8266900181770325,
"step": 114
},
{
"epoch": 0.92,
"grad_norm": 21.730310440063477,
"learning_rate": 3.888888888888889e-05,
"loss": 4.5073,
"mean_token_accuracy": 0.7951602935791016,
"step": 115
},
{
"epoch": 0.928,
"grad_norm": 21.945823669433594,
"learning_rate": 3.878787878787879e-05,
"loss": 4.2317,
"mean_token_accuracy": 0.8075039386749268,
"step": 116
},
{
"epoch": 0.936,
"grad_norm": 21.828777313232422,
"learning_rate": 3.868686868686869e-05,
"loss": 4.9559,
"mean_token_accuracy": 0.7718204110860825,
"step": 117
},
{
"epoch": 0.944,
"grad_norm": 18.954065322875977,
"learning_rate": 3.858585858585859e-05,
"loss": 4.1636,
"mean_token_accuracy": 0.8161665499210358,
"step": 118
},
{
"epoch": 0.952,
"grad_norm": 18.235891342163086,
"learning_rate": 3.848484848484848e-05,
"loss": 3.5472,
"mean_token_accuracy": 0.8299020826816559,
"step": 119
},
{
"epoch": 0.96,
"grad_norm": 19.251985549926758,
"learning_rate": 3.838383838383838e-05,
"loss": 4.362,
"mean_token_accuracy": 0.7960271835327148,
"step": 120
},
{
"epoch": 0.968,
"grad_norm": 19.986879348754883,
"learning_rate": 3.828282828282829e-05,
"loss": 4.3958,
"mean_token_accuracy": 0.7863996028900146,
"step": 121
},
{
"epoch": 0.976,
"grad_norm": 19.505414962768555,
"learning_rate": 3.818181818181819e-05,
"loss": 3.7777,
"mean_token_accuracy": 0.8199738562107086,
"step": 122
},
{
"epoch": 0.984,
"grad_norm": 20.4809627532959,
"learning_rate": 3.8080808080808087e-05,
"loss": 4.0683,
"mean_token_accuracy": 0.7989889979362488,
"step": 123
},
{
"epoch": 0.992,
"grad_norm": 17.48236083984375,
"learning_rate": 3.797979797979798e-05,
"loss": 3.8542,
"mean_token_accuracy": 0.8198198229074478,
"step": 124
},
{
"epoch": 1.0,
"grad_norm": 20.318952560424805,
"learning_rate": 3.787878787878788e-05,
"loss": 4.1094,
"mean_token_accuracy": 0.7986647337675095,
"step": 125
},
{
"epoch": 1.008,
"grad_norm": 17.65445327758789,
"learning_rate": 3.777777777777778e-05,
"loss": 3.1601,
"mean_token_accuracy": 0.8280518800020218,
"step": 126
},
{
"epoch": 1.016,
"grad_norm": 17.846628189086914,
"learning_rate": 3.767676767676768e-05,
"loss": 3.3474,
"mean_token_accuracy": 0.8235991448163986,
"step": 127
},
{
"epoch": 1.024,
"grad_norm": 16.856306076049805,
"learning_rate": 3.757575757575758e-05,
"loss": 3.3474,
"mean_token_accuracy": 0.8168051540851593,
"step": 128
},
{
"epoch": 1.032,
"grad_norm": 17.891374588012695,
"learning_rate": 3.747474747474748e-05,
"loss": 3.3601,
"mean_token_accuracy": 0.8238555788993835,
"step": 129
},
{
"epoch": 1.04,
"grad_norm": 16.272624969482422,
"learning_rate": 3.7373737373737376e-05,
"loss": 2.6233,
"mean_token_accuracy": 0.8651646226644516,
"step": 130
},
{
"epoch": 1.048,
"grad_norm": 18.739025115966797,
"learning_rate": 3.7272727272727276e-05,
"loss": 3.2236,
"mean_token_accuracy": 0.8394419103860855,
"step": 131
},
{
"epoch": 1.056,
"grad_norm": 19.770313262939453,
"learning_rate": 3.7171717171717175e-05,
"loss": 3.114,
"mean_token_accuracy": 0.8411016166210175,
"step": 132
},
{
"epoch": 1.064,
"grad_norm": 17.718353271484375,
"learning_rate": 3.7070707070707075e-05,
"loss": 2.1838,
"mean_token_accuracy": 0.8674204647541046,
"step": 133
},
{
"epoch": 1.072,
"grad_norm": 19.21759605407715,
"learning_rate": 3.6969696969696974e-05,
"loss": 3.137,
"mean_token_accuracy": 0.8263226449489594,
"step": 134
},
{
"epoch": 1.08,
"grad_norm": 21.520931243896484,
"learning_rate": 3.686868686868687e-05,
"loss": 3.1435,
"mean_token_accuracy": 0.8464161157608032,
"step": 135
},
{
"epoch": 1.088,
"grad_norm": 22.635766983032227,
"learning_rate": 3.6767676767676766e-05,
"loss": 3.1886,
"mean_token_accuracy": 0.8231067955493927,
"step": 136
},
{
"epoch": 1.096,
"grad_norm": 25.0559024810791,
"learning_rate": 3.6666666666666666e-05,
"loss": 3.6031,
"mean_token_accuracy": 0.8235105574131012,
"step": 137
},
{
"epoch": 1.104,
"grad_norm": 21.050643920898438,
"learning_rate": 3.656565656565657e-05,
"loss": 3.1022,
"mean_token_accuracy": 0.8318233489990234,
"step": 138
},
{
"epoch": 1.112,
"grad_norm": 21.77235221862793,
"learning_rate": 3.6464646464646465e-05,
"loss": 3.4294,
"mean_token_accuracy": 0.8234484493732452,
"step": 139
},
{
"epoch": 1.12,
"grad_norm": 19.92261505126953,
"learning_rate": 3.6363636363636364e-05,
"loss": 2.895,
"mean_token_accuracy": 0.8333221822977066,
"step": 140
},
{
"epoch": 1.1280000000000001,
"grad_norm": 19.0081844329834,
"learning_rate": 3.6262626262626264e-05,
"loss": 3.1505,
"mean_token_accuracy": 0.8352071642875671,
"step": 141
},
{
"epoch": 1.1360000000000001,
"grad_norm": 17.831018447875977,
"learning_rate": 3.616161616161616e-05,
"loss": 3.1531,
"mean_token_accuracy": 0.8377863019704819,
"step": 142
},
{
"epoch": 1.144,
"grad_norm": 18.68701171875,
"learning_rate": 3.606060606060606e-05,
"loss": 3.1063,
"mean_token_accuracy": 0.8171389847993851,
"step": 143
},
{
"epoch": 1.152,
"grad_norm": 16.475784301757812,
"learning_rate": 3.595959595959596e-05,
"loss": 2.9425,
"mean_token_accuracy": 0.841956228017807,
"step": 144
},
{
"epoch": 1.16,
"grad_norm": 18.2690372467041,
"learning_rate": 3.5858585858585855e-05,
"loss": 2.7428,
"mean_token_accuracy": 0.832164078950882,
"step": 145
},
{
"epoch": 1.168,
"grad_norm": 17.770587921142578,
"learning_rate": 3.575757575757576e-05,
"loss": 2.9521,
"mean_token_accuracy": 0.8395673930644989,
"step": 146
},
{
"epoch": 1.176,
"grad_norm": 17.223388671875,
"learning_rate": 3.565656565656566e-05,
"loss": 2.77,
"mean_token_accuracy": 0.8558414876461029,
"step": 147
},
{
"epoch": 1.184,
"grad_norm": 18.294189453125,
"learning_rate": 3.555555555555556e-05,
"loss": 2.9188,
"mean_token_accuracy": 0.8403861075639725,
"step": 148
},
{
"epoch": 1.192,
"grad_norm": 16.28548812866211,
"learning_rate": 3.545454545454546e-05,
"loss": 2.6033,
"mean_token_accuracy": 0.856376975774765,
"step": 149
},
{
"epoch": 1.2,
"grad_norm": 17.804237365722656,
"learning_rate": 3.535353535353535e-05,
"loss": 2.699,
"mean_token_accuracy": 0.849004715681076,
"step": 150
},
{
"epoch": 1.208,
"grad_norm": 22.294708251953125,
"learning_rate": 3.525252525252525e-05,
"loss": 3.3516,
"mean_token_accuracy": 0.8136928379535675,
"step": 151
},
{
"epoch": 1.216,
"grad_norm": 19.004802703857422,
"learning_rate": 3.515151515151515e-05,
"loss": 3.014,
"mean_token_accuracy": 0.8440276980400085,
"step": 152
},
{
"epoch": 1.224,
"grad_norm": 22.38014030456543,
"learning_rate": 3.505050505050505e-05,
"loss": 3.2117,
"mean_token_accuracy": 0.8412062674760818,
"step": 153
},
{
"epoch": 1.232,
"grad_norm": 20.0627384185791,
"learning_rate": 3.494949494949495e-05,
"loss": 2.9902,
"mean_token_accuracy": 0.829803541302681,
"step": 154
},
{
"epoch": 1.24,
"grad_norm": 19.082454681396484,
"learning_rate": 3.484848484848485e-05,
"loss": 3.0242,
"mean_token_accuracy": 0.8390958160161972,
"step": 155
},
{
"epoch": 1.248,
"grad_norm": 18.173250198364258,
"learning_rate": 3.474747474747475e-05,
"loss": 2.7003,
"mean_token_accuracy": 0.8489221632480621,
"step": 156
},
{
"epoch": 1.256,
"grad_norm": 20.8232479095459,
"learning_rate": 3.464646464646465e-05,
"loss": 2.9356,
"mean_token_accuracy": 0.8414415568113327,
"step": 157
},
{
"epoch": 1.264,
"grad_norm": 22.360126495361328,
"learning_rate": 3.454545454545455e-05,
"loss": 3.4103,
"mean_token_accuracy": 0.8287290334701538,
"step": 158
},
{
"epoch": 1.272,
"grad_norm": 17.922536849975586,
"learning_rate": 3.444444444444445e-05,
"loss": 2.8419,
"mean_token_accuracy": 0.8393957614898682,
"step": 159
},
{
"epoch": 1.28,
"grad_norm": 19.15587043762207,
"learning_rate": 3.434343434343435e-05,
"loss": 3.2652,
"mean_token_accuracy": 0.8440860509872437,
"step": 160
},
{
"epoch": 1.288,
"grad_norm": 17.887313842773438,
"learning_rate": 3.424242424242424e-05,
"loss": 3.2203,
"mean_token_accuracy": 0.8331134468317032,
"step": 161
},
{
"epoch": 1.296,
"grad_norm": 17.222763061523438,
"learning_rate": 3.414141414141414e-05,
"loss": 2.6044,
"mean_token_accuracy": 0.8538916707038879,
"step": 162
},
{
"epoch": 1.304,
"grad_norm": 17.67905044555664,
"learning_rate": 3.4040404040404045e-05,
"loss": 3.0019,
"mean_token_accuracy": 0.8431571871042252,
"step": 163
},
{
"epoch": 1.312,
"grad_norm": 16.879220962524414,
"learning_rate": 3.3939393939393945e-05,
"loss": 2.6565,
"mean_token_accuracy": 0.8555881530046463,
"step": 164
},
{
"epoch": 1.32,
"grad_norm": 19.58002281188965,
"learning_rate": 3.3838383838383844e-05,
"loss": 3.1684,
"mean_token_accuracy": 0.8247578740119934,
"step": 165
},
{
"epoch": 1.328,
"grad_norm": 18.66584587097168,
"learning_rate": 3.373737373737374e-05,
"loss": 3.0177,
"mean_token_accuracy": 0.8371108621358871,
"step": 166
},
{
"epoch": 1.336,
"grad_norm": 18.553882598876953,
"learning_rate": 3.3636363636363636e-05,
"loss": 2.9127,
"mean_token_accuracy": 0.8480137139558792,
"step": 167
},
{
"epoch": 1.3439999999999999,
"grad_norm": 16.836511611938477,
"learning_rate": 3.3535353535353536e-05,
"loss": 2.8304,
"mean_token_accuracy": 0.8449047356843948,
"step": 168
},
{
"epoch": 1.3519999999999999,
"grad_norm": 18.28154754638672,
"learning_rate": 3.3434343434343435e-05,
"loss": 2.8143,
"mean_token_accuracy": 0.8557403534650803,
"step": 169
},
{
"epoch": 1.3599999999999999,
"grad_norm": 18.682939529418945,
"learning_rate": 3.3333333333333335e-05,
"loss": 2.9991,
"mean_token_accuracy": 0.8342336267232895,
"step": 170
},
{
"epoch": 1.3679999999999999,
"grad_norm": 19.349193572998047,
"learning_rate": 3.3232323232323234e-05,
"loss": 2.6821,
"mean_token_accuracy": 0.853237047791481,
"step": 171
},
{
"epoch": 1.376,
"grad_norm": 18.103790283203125,
"learning_rate": 3.3131313131313134e-05,
"loss": 2.8468,
"mean_token_accuracy": 0.8595820367336273,
"step": 172
},
{
"epoch": 1.384,
"grad_norm": 19.089265823364258,
"learning_rate": 3.303030303030303e-05,
"loss": 2.9513,
"mean_token_accuracy": 0.8398459106683731,
"step": 173
},
{
"epoch": 1.392,
"grad_norm": 18.12497901916504,
"learning_rate": 3.292929292929293e-05,
"loss": 2.8923,
"mean_token_accuracy": 0.8398792594671249,
"step": 174
},
{
"epoch": 1.4,
"grad_norm": 20.43326187133789,
"learning_rate": 3.282828282828283e-05,
"loss": 3.34,
"mean_token_accuracy": 0.8242058306932449,
"step": 175
},
{
"epoch": 1.408,
"grad_norm": 22.64555549621582,
"learning_rate": 3.272727272727273e-05,
"loss": 3.4045,
"mean_token_accuracy": 0.8219707757234573,
"step": 176
},
{
"epoch": 1.416,
"grad_norm": 19.150569915771484,
"learning_rate": 3.2626262626262624e-05,
"loss": 3.0239,
"mean_token_accuracy": 0.8366655707359314,
"step": 177
},
{
"epoch": 1.424,
"grad_norm": 19.570180892944336,
"learning_rate": 3.2525252525252524e-05,
"loss": 3.1822,
"mean_token_accuracy": 0.8368578106164932,
"step": 178
},
{
"epoch": 1.432,
"grad_norm": 19.738082885742188,
"learning_rate": 3.2424242424242423e-05,
"loss": 3.1123,
"mean_token_accuracy": 0.83487968146801,
"step": 179
},
{
"epoch": 1.44,
"grad_norm": 19.594087600708008,
"learning_rate": 3.232323232323233e-05,
"loss": 2.9401,
"mean_token_accuracy": 0.8384669721126556,
"step": 180
},
{
"epoch": 1.448,
"grad_norm": 21.615190505981445,
"learning_rate": 3.222222222222223e-05,
"loss": 3.1546,
"mean_token_accuracy": 0.8384737074375153,
"step": 181
},
{
"epoch": 1.456,
"grad_norm": 20.175748825073242,
"learning_rate": 3.212121212121212e-05,
"loss": 3.2557,
"mean_token_accuracy": 0.8329617381095886,
"step": 182
},
{
"epoch": 1.464,
"grad_norm": 20.50943374633789,
"learning_rate": 3.202020202020202e-05,
"loss": 3.1785,
"mean_token_accuracy": 0.8321622610092163,
"step": 183
},
{
"epoch": 1.472,
"grad_norm": 20.06108856201172,
"learning_rate": 3.191919191919192e-05,
"loss": 3.5482,
"mean_token_accuracy": 0.8127909004688263,
"step": 184
},
{
"epoch": 1.48,
"grad_norm": 18.441608428955078,
"learning_rate": 3.181818181818182e-05,
"loss": 3.1898,
"mean_token_accuracy": 0.8352639973163605,
"step": 185
},
{
"epoch": 1.488,
"grad_norm": 18.315155029296875,
"learning_rate": 3.171717171717172e-05,
"loss": 3.0578,
"mean_token_accuracy": 0.8414150923490524,
"step": 186
},
{
"epoch": 1.496,
"grad_norm": 19.802762985229492,
"learning_rate": 3.161616161616161e-05,
"loss": 3.4275,
"mean_token_accuracy": 0.8302872627973557,
"step": 187
},
{
"epoch": 1.504,
"grad_norm": 17.772506713867188,
"learning_rate": 3.151515151515151e-05,
"loss": 2.5637,
"mean_token_accuracy": 0.8702727556228638,
"step": 188
},
{
"epoch": 1.512,
"grad_norm": 16.841842651367188,
"learning_rate": 3.141414141414142e-05,
"loss": 2.7877,
"mean_token_accuracy": 0.8445184230804443,
"step": 189
},
{
"epoch": 1.52,
"grad_norm": 18.028287887573242,
"learning_rate": 3.131313131313132e-05,
"loss": 2.8235,
"mean_token_accuracy": 0.8519050180912018,
"step": 190
},
{
"epoch": 1.528,
"grad_norm": 17.80512809753418,
"learning_rate": 3.121212121212122e-05,
"loss": 2.8722,
"mean_token_accuracy": 0.8488593399524689,
"step": 191
},
{
"epoch": 1.536,
"grad_norm": 18.017911911010742,
"learning_rate": 3.111111111111111e-05,
"loss": 2.815,
"mean_token_accuracy": 0.8511413782835007,
"step": 192
},
{
"epoch": 1.544,
"grad_norm": 16.424795150756836,
"learning_rate": 3.101010101010101e-05,
"loss": 2.6713,
"mean_token_accuracy": 0.8467608839273453,
"step": 193
},
{
"epoch": 1.552,
"grad_norm": 18.472646713256836,
"learning_rate": 3.090909090909091e-05,
"loss": 3.0529,
"mean_token_accuracy": 0.848779022693634,
"step": 194
},
{
"epoch": 1.56,
"grad_norm": 19.97772216796875,
"learning_rate": 3.080808080808081e-05,
"loss": 3.2967,
"mean_token_accuracy": 0.8236434757709503,
"step": 195
},
{
"epoch": 1.568,
"grad_norm": 18.231199264526367,
"learning_rate": 3.070707070707071e-05,
"loss": 2.7142,
"mean_token_accuracy": 0.8442593514919281,
"step": 196
},
{
"epoch": 1.576,
"grad_norm": 18.801584243774414,
"learning_rate": 3.060606060606061e-05,
"loss": 2.8128,
"mean_token_accuracy": 0.853375568985939,
"step": 197
},
{
"epoch": 1.584,
"grad_norm": 17.9156551361084,
"learning_rate": 3.050505050505051e-05,
"loss": 2.76,
"mean_token_accuracy": 0.8531969636678696,
"step": 198
},
{
"epoch": 1.592,
"grad_norm": 20.197153091430664,
"learning_rate": 3.0404040404040406e-05,
"loss": 3.3442,
"mean_token_accuracy": 0.820599153637886,
"step": 199
},
{
"epoch": 1.6,
"grad_norm": 19.50455665588379,
"learning_rate": 3.0303030303030306e-05,
"loss": 3.1541,
"mean_token_accuracy": 0.8246084749698639,
"step": 200
},
{
"epoch": 1.608,
"grad_norm": 18.644977569580078,
"learning_rate": 3.0202020202020205e-05,
"loss": 2.9864,
"mean_token_accuracy": 0.8428880572319031,
"step": 201
},
{
"epoch": 1.616,
"grad_norm": 18.98227882385254,
"learning_rate": 3.01010101010101e-05,
"loss": 2.8429,
"mean_token_accuracy": 0.8443266749382019,
"step": 202
},
{
"epoch": 1.624,
"grad_norm": 21.41233253479004,
"learning_rate": 3e-05,
"loss": 3.2002,
"mean_token_accuracy": 0.8272002190351486,
"step": 203
},
{
"epoch": 1.6320000000000001,
"grad_norm": 18.17258071899414,
"learning_rate": 2.98989898989899e-05,
"loss": 2.9346,
"mean_token_accuracy": 0.836423709988594,
"step": 204
},
{
"epoch": 1.6400000000000001,
"grad_norm": 18.524250030517578,
"learning_rate": 2.9797979797979796e-05,
"loss": 2.9417,
"mean_token_accuracy": 0.8409391045570374,
"step": 205
},
{
"epoch": 1.6480000000000001,
"grad_norm": 19.283735275268555,
"learning_rate": 2.96969696969697e-05,
"loss": 2.6658,
"mean_token_accuracy": 0.8474785685539246,
"step": 206
},
{
"epoch": 1.6560000000000001,
"grad_norm": 17.894819259643555,
"learning_rate": 2.95959595959596e-05,
"loss": 2.7392,
"mean_token_accuracy": 0.845216765999794,
"step": 207
},
{
"epoch": 1.6640000000000001,
"grad_norm": 19.564115524291992,
"learning_rate": 2.9494949494949498e-05,
"loss": 3.4316,
"mean_token_accuracy": 0.8185593038797379,
"step": 208
},
{
"epoch": 1.6720000000000002,
"grad_norm": 17.54917335510254,
"learning_rate": 2.9393939393939394e-05,
"loss": 3.0063,
"mean_token_accuracy": 0.8485386520624161,
"step": 209
},
{
"epoch": 1.6800000000000002,
"grad_norm": 18.805709838867188,
"learning_rate": 2.9292929292929294e-05,
"loss": 3.2245,
"mean_token_accuracy": 0.8334101438522339,
"step": 210
},
{
"epoch": 1.688,
"grad_norm": 16.054325103759766,
"learning_rate": 2.9191919191919193e-05,
"loss": 2.4667,
"mean_token_accuracy": 0.8625558912754059,
"step": 211
},
{
"epoch": 1.696,
"grad_norm": 17.8482723236084,
"learning_rate": 2.909090909090909e-05,
"loss": 2.9457,
"mean_token_accuracy": 0.8457487970590591,
"step": 212
},
{
"epoch": 1.704,
"grad_norm": 18.403047561645508,
"learning_rate": 2.898989898989899e-05,
"loss": 2.7452,
"mean_token_accuracy": 0.8584460020065308,
"step": 213
},
{
"epoch": 1.712,
"grad_norm": 17.21164321899414,
"learning_rate": 2.8888888888888888e-05,
"loss": 2.4914,
"mean_token_accuracy": 0.8537319600582123,
"step": 214
},
{
"epoch": 1.72,
"grad_norm": 18.591094970703125,
"learning_rate": 2.878787878787879e-05,
"loss": 2.9312,
"mean_token_accuracy": 0.8463590443134308,
"step": 215
},
{
"epoch": 1.728,
"grad_norm": 17.793514251708984,
"learning_rate": 2.868686868686869e-05,
"loss": 2.653,
"mean_token_accuracy": 0.8544672429561615,
"step": 216
},
{
"epoch": 1.736,
"grad_norm": 16.645397186279297,
"learning_rate": 2.8585858585858587e-05,
"loss": 2.4487,
"mean_token_accuracy": 0.8595747947692871,
"step": 217
},
{
"epoch": 1.744,
"grad_norm": 18.782949447631836,
"learning_rate": 2.8484848484848486e-05,
"loss": 3.1283,
"mean_token_accuracy": 0.8335093259811401,
"step": 218
},
{
"epoch": 1.752,
"grad_norm": 19.578367233276367,
"learning_rate": 2.8383838383838386e-05,
"loss": 2.8753,
"mean_token_accuracy": 0.8500068634748459,
"step": 219
},
{
"epoch": 1.76,
"grad_norm": 20.041643142700195,
"learning_rate": 2.8282828282828282e-05,
"loss": 3.409,
"mean_token_accuracy": 0.8245423436164856,
"step": 220
},
{
"epoch": 1.768,
"grad_norm": 19.69051170349121,
"learning_rate": 2.818181818181818e-05,
"loss": 3.0326,
"mean_token_accuracy": 0.8391107320785522,
"step": 221
},
{
"epoch": 1.776,
"grad_norm": 18.727209091186523,
"learning_rate": 2.808080808080808e-05,
"loss": 2.8157,
"mean_token_accuracy": 0.8591902405023575,
"step": 222
},
{
"epoch": 1.784,
"grad_norm": 16.66358184814453,
"learning_rate": 2.7979797979797984e-05,
"loss": 2.7767,
"mean_token_accuracy": 0.8462460786104202,
"step": 223
},
{
"epoch": 1.792,
"grad_norm": 15.349605560302734,
"learning_rate": 2.7878787878787883e-05,
"loss": 2.3544,
"mean_token_accuracy": 0.8616818338632584,
"step": 224
},
{
"epoch": 1.8,
"grad_norm": 19.82066535949707,
"learning_rate": 2.777777777777778e-05,
"loss": 3.1691,
"mean_token_accuracy": 0.8447847366333008,
"step": 225
},
{
"epoch": 1.808,
"grad_norm": 20.39141845703125,
"learning_rate": 2.767676767676768e-05,
"loss": 2.9149,
"mean_token_accuracy": 0.8456564694643021,
"step": 226
},
{
"epoch": 1.8159999999999998,
"grad_norm": 18.638967514038086,
"learning_rate": 2.7575757575757578e-05,
"loss": 2.889,
"mean_token_accuracy": 0.8354881256818771,
"step": 227
},
{
"epoch": 1.8239999999999998,
"grad_norm": 17.088668823242188,
"learning_rate": 2.7474747474747474e-05,
"loss": 2.6078,
"mean_token_accuracy": 0.8517204225063324,
"step": 228
},
{
"epoch": 1.8319999999999999,
"grad_norm": 18.29672622680664,
"learning_rate": 2.7373737373737374e-05,
"loss": 3.0306,
"mean_token_accuracy": 0.8434745818376541,
"step": 229
},
{
"epoch": 1.8399999999999999,
"grad_norm": 19.094703674316406,
"learning_rate": 2.7272727272727273e-05,
"loss": 2.8052,
"mean_token_accuracy": 0.8540522456169128,
"step": 230
},
{
"epoch": 1.8479999999999999,
"grad_norm": 18.637916564941406,
"learning_rate": 2.717171717171717e-05,
"loss": 2.7584,
"mean_token_accuracy": 0.8538561910390854,
"step": 231
},
{
"epoch": 1.8559999999999999,
"grad_norm": 18.355613708496094,
"learning_rate": 2.7070707070707075e-05,
"loss": 3.1927,
"mean_token_accuracy": 0.8381341099739075,
"step": 232
},
{
"epoch": 1.8639999999999999,
"grad_norm": 24.347230911254883,
"learning_rate": 2.696969696969697e-05,
"loss": 3.353,
"mean_token_accuracy": 0.8150720745325089,
"step": 233
},
{
"epoch": 1.8719999999999999,
"grad_norm": 19.14170265197754,
"learning_rate": 2.686868686868687e-05,
"loss": 3.4434,
"mean_token_accuracy": 0.8221272975206375,
"step": 234
},
{
"epoch": 1.88,
"grad_norm": 16.074077606201172,
"learning_rate": 2.676767676767677e-05,
"loss": 2.6817,
"mean_token_accuracy": 0.856314018368721,
"step": 235
},
{
"epoch": 1.888,
"grad_norm": 20.647747039794922,
"learning_rate": 2.6666666666666667e-05,
"loss": 3.2522,
"mean_token_accuracy": 0.822910264134407,
"step": 236
},
{
"epoch": 1.896,
"grad_norm": 17.33372688293457,
"learning_rate": 2.6565656565656566e-05,
"loss": 2.878,
"mean_token_accuracy": 0.8544587790966034,
"step": 237
},
{
"epoch": 1.904,
"grad_norm": 21.383373260498047,
"learning_rate": 2.6464646464646466e-05,
"loss": 3.0065,
"mean_token_accuracy": 0.8334675282239914,
"step": 238
},
{
"epoch": 1.912,
"grad_norm": 17.983051300048828,
"learning_rate": 2.636363636363636e-05,
"loss": 2.6445,
"mean_token_accuracy": 0.8565590083599091,
"step": 239
},
{
"epoch": 1.92,
"grad_norm": 19.193273544311523,
"learning_rate": 2.6262626262626268e-05,
"loss": 2.8959,
"mean_token_accuracy": 0.8444470465183258,
"step": 240
},
{
"epoch": 1.928,
"grad_norm": 18.387603759765625,
"learning_rate": 2.6161616161616164e-05,
"loss": 3.031,
"mean_token_accuracy": 0.8487697094678879,
"step": 241
},
{
"epoch": 1.936,
"grad_norm": 17.07048988342285,
"learning_rate": 2.6060606060606063e-05,
"loss": 2.3949,
"mean_token_accuracy": 0.8728950917720795,
"step": 242
},
{
"epoch": 1.944,
"grad_norm": 16.93642234802246,
"learning_rate": 2.5959595959595963e-05,
"loss": 2.8926,
"mean_token_accuracy": 0.8463722467422485,
"step": 243
},
{
"epoch": 1.952,
"grad_norm": 18.29349136352539,
"learning_rate": 2.585858585858586e-05,
"loss": 3.0535,
"mean_token_accuracy": 0.8376583307981491,
"step": 244
},
{
"epoch": 1.96,
"grad_norm": 18.56781768798828,
"learning_rate": 2.575757575757576e-05,
"loss": 2.8888,
"mean_token_accuracy": 0.8465193212032318,
"step": 245
},
{
"epoch": 1.968,
"grad_norm": 18.541624069213867,
"learning_rate": 2.5656565656565658e-05,
"loss": 2.3475,
"mean_token_accuracy": 0.8656208217144012,
"step": 246
},
{
"epoch": 1.976,
"grad_norm": 17.897377014160156,
"learning_rate": 2.5555555555555554e-05,
"loss": 2.837,
"mean_token_accuracy": 0.8604338765144348,
"step": 247
},
{
"epoch": 1.984,
"grad_norm": 18.500314712524414,
"learning_rate": 2.5454545454545454e-05,
"loss": 2.6232,
"mean_token_accuracy": 0.859655350446701,
"step": 248
},
{
"epoch": 1.992,
"grad_norm": 18.447683334350586,
"learning_rate": 2.5353535353535356e-05,
"loss": 3.01,
"mean_token_accuracy": 0.8308267742395401,
"step": 249
},
{
"epoch": 2.0,
"grad_norm": 16.8708438873291,
"learning_rate": 2.5252525252525256e-05,
"loss": 2.81,
"mean_token_accuracy": 0.8507586419582367,
"step": 250
},
{
"epoch": 2.008,
"grad_norm": 14.083173751831055,
"learning_rate": 2.5151515151515155e-05,
"loss": 1.5615,
"mean_token_accuracy": 0.9069856852293015,
"step": 251
},
{
"epoch": 2.016,
"grad_norm": 15.285985946655273,
"learning_rate": 2.505050505050505e-05,
"loss": 1.7983,
"mean_token_accuracy": 0.8917003720998764,
"step": 252
},
{
"epoch": 2.024,
"grad_norm": 14.96445083618164,
"learning_rate": 2.494949494949495e-05,
"loss": 1.6866,
"mean_token_accuracy": 0.8935637921094894,
"step": 253
},
{
"epoch": 2.032,
"grad_norm": 15.746031761169434,
"learning_rate": 2.4848484848484847e-05,
"loss": 1.7632,
"mean_token_accuracy": 0.8932289183139801,
"step": 254
},
{
"epoch": 2.04,
"grad_norm": 16.720212936401367,
"learning_rate": 2.474747474747475e-05,
"loss": 1.7424,
"mean_token_accuracy": 0.8972940593957901,
"step": 255
},
{
"epoch": 2.048,
"grad_norm": 15.393232345581055,
"learning_rate": 2.464646464646465e-05,
"loss": 1.5677,
"mean_token_accuracy": 0.9046053141355515,
"step": 256
},
{
"epoch": 2.056,
"grad_norm": 17.94659996032715,
"learning_rate": 2.4545454545454545e-05,
"loss": 1.9221,
"mean_token_accuracy": 0.8920014947652817,
"step": 257
},
{
"epoch": 2.064,
"grad_norm": 18.840778350830078,
"learning_rate": 2.4444444444444445e-05,
"loss": 1.7952,
"mean_token_accuracy": 0.8805793821811676,
"step": 258
},
{
"epoch": 2.072,
"grad_norm": 17.553537368774414,
"learning_rate": 2.4343434343434344e-05,
"loss": 1.6415,
"mean_token_accuracy": 0.9035749733448029,
"step": 259
},
{
"epoch": 2.08,
"grad_norm": 16.594493865966797,
"learning_rate": 2.4242424242424244e-05,
"loss": 1.4085,
"mean_token_accuracy": 0.9104350358247757,
"step": 260
},
{
"epoch": 2.088,
"grad_norm": 18.47384262084961,
"learning_rate": 2.4141414141414143e-05,
"loss": 1.7969,
"mean_token_accuracy": 0.893615260720253,
"step": 261
},
{
"epoch": 2.096,
"grad_norm": 20.94403839111328,
"learning_rate": 2.404040404040404e-05,
"loss": 2.0246,
"mean_token_accuracy": 0.8807271867990494,
"step": 262
},
{
"epoch": 2.104,
"grad_norm": 18.5487117767334,
"learning_rate": 2.393939393939394e-05,
"loss": 1.5092,
"mean_token_accuracy": 0.9083298295736313,
"step": 263
},
{
"epoch": 2.112,
"grad_norm": 20.118314743041992,
"learning_rate": 2.3838383838383842e-05,
"loss": 1.7474,
"mean_token_accuracy": 0.8932338207960129,
"step": 264
},
{
"epoch": 2.12,
"grad_norm": 19.099689483642578,
"learning_rate": 2.3737373737373738e-05,
"loss": 1.8237,
"mean_token_accuracy": 0.8876322358846664,
"step": 265
},
{
"epoch": 2.128,
"grad_norm": 20.295059204101562,
"learning_rate": 2.3636363636363637e-05,
"loss": 1.9511,
"mean_token_accuracy": 0.8856483995914459,
"step": 266
},
{
"epoch": 2.136,
"grad_norm": 21.094022750854492,
"learning_rate": 2.3535353535353537e-05,
"loss": 1.7983,
"mean_token_accuracy": 0.8887585699558258,
"step": 267
},
{
"epoch": 2.144,
"grad_norm": 19.475961685180664,
"learning_rate": 2.3434343434343436e-05,
"loss": 1.8121,
"mean_token_accuracy": 0.8935129791498184,
"step": 268
},
{
"epoch": 2.152,
"grad_norm": 17.48745346069336,
"learning_rate": 2.3333333333333336e-05,
"loss": 1.6347,
"mean_token_accuracy": 0.901911199092865,
"step": 269
},
{
"epoch": 2.16,
"grad_norm": 19.021291732788086,
"learning_rate": 2.3232323232323232e-05,
"loss": 1.6945,
"mean_token_accuracy": 0.8956849575042725,
"step": 270
},
{
"epoch": 2.168,
"grad_norm": 18.498403549194336,
"learning_rate": 2.313131313131313e-05,
"loss": 1.8713,
"mean_token_accuracy": 0.8862475454807281,
"step": 271
},
{
"epoch": 2.176,
"grad_norm": 20.616243362426758,
"learning_rate": 2.3030303030303034e-05,
"loss": 1.9077,
"mean_token_accuracy": 0.8816216289997101,
"step": 272
},
{
"epoch": 2.184,
"grad_norm": 18.55655288696289,
"learning_rate": 2.292929292929293e-05,
"loss": 1.893,
"mean_token_accuracy": 0.8819779455661774,
"step": 273
},
{
"epoch": 2.192,
"grad_norm": 19.719331741333008,
"learning_rate": 2.282828282828283e-05,
"loss": 1.7443,
"mean_token_accuracy": 0.8999358415603638,
"step": 274
},
{
"epoch": 2.2,
"grad_norm": 15.951658248901367,
"learning_rate": 2.272727272727273e-05,
"loss": 1.5942,
"mean_token_accuracy": 0.9009282290935516,
"step": 275
},
{
"epoch": 2.208,
"grad_norm": 17.43150520324707,
"learning_rate": 2.262626262626263e-05,
"loss": 1.597,
"mean_token_accuracy": 0.8990740329027176,
"step": 276
},
{
"epoch": 2.216,
"grad_norm": 18.600112915039062,
"learning_rate": 2.2525252525252528e-05,
"loss": 1.8142,
"mean_token_accuracy": 0.891479030251503,
"step": 277
},
{
"epoch": 2.224,
"grad_norm": 18.331689834594727,
"learning_rate": 2.2424242424242424e-05,
"loss": 1.6484,
"mean_token_accuracy": 0.8858047872781754,
"step": 278
},
{
"epoch": 2.232,
"grad_norm": 17.937702178955078,
"learning_rate": 2.2323232323232324e-05,
"loss": 1.6012,
"mean_token_accuracy": 0.8977932035923004,
"step": 279
},
{
"epoch": 2.24,
"grad_norm": 18.553834915161133,
"learning_rate": 2.2222222222222223e-05,
"loss": 1.507,
"mean_token_accuracy": 0.9132246375083923,
"step": 280
},
{
"epoch": 2.248,
"grad_norm": 17.619672775268555,
"learning_rate": 2.2121212121212123e-05,
"loss": 1.5574,
"mean_token_accuracy": 0.903651237487793,
"step": 281
},
{
"epoch": 2.2560000000000002,
"grad_norm": 18.53679084777832,
"learning_rate": 2.2020202020202022e-05,
"loss": 1.8894,
"mean_token_accuracy": 0.8831851631402969,
"step": 282
},
{
"epoch": 2.2640000000000002,
"grad_norm": 18.827486038208008,
"learning_rate": 2.191919191919192e-05,
"loss": 1.8957,
"mean_token_accuracy": 0.8890838772058487,
"step": 283
},
{
"epoch": 2.2720000000000002,
"grad_norm": 18.129066467285156,
"learning_rate": 2.1818181818181818e-05,
"loss": 1.7498,
"mean_token_accuracy": 0.8904573619365692,
"step": 284
},
{
"epoch": 2.2800000000000002,
"grad_norm": 20.051956176757812,
"learning_rate": 2.171717171717172e-05,
"loss": 1.8336,
"mean_token_accuracy": 0.8877034038305283,
"step": 285
},
{
"epoch": 2.288,
"grad_norm": 18.455982208251953,
"learning_rate": 2.1616161616161617e-05,
"loss": 1.7406,
"mean_token_accuracy": 0.8882922381162643,
"step": 286
},
{
"epoch": 2.296,
"grad_norm": 16.913650512695312,
"learning_rate": 2.1515151515151516e-05,
"loss": 1.5838,
"mean_token_accuracy": 0.8983030468225479,
"step": 287
},
{
"epoch": 2.304,
"grad_norm": 16.81460952758789,
"learning_rate": 2.1414141414141416e-05,
"loss": 1.5973,
"mean_token_accuracy": 0.9012346267700195,
"step": 288
},
{
"epoch": 2.312,
"grad_norm": 18.44367790222168,
"learning_rate": 2.1313131313131315e-05,
"loss": 1.8431,
"mean_token_accuracy": 0.8918169885873795,
"step": 289
},
{
"epoch": 2.32,
"grad_norm": 17.587459564208984,
"learning_rate": 2.1212121212121215e-05,
"loss": 1.7482,
"mean_token_accuracy": 0.883071169257164,
"step": 290
},
{
"epoch": 2.328,
"grad_norm": 20.508304595947266,
"learning_rate": 2.111111111111111e-05,
"loss": 2.1301,
"mean_token_accuracy": 0.8722483068704605,
"step": 291
},
{
"epoch": 2.336,
"grad_norm": 21.067611694335938,
"learning_rate": 2.101010101010101e-05,
"loss": 1.8906,
"mean_token_accuracy": 0.8874034583568573,
"step": 292
},
{
"epoch": 2.344,
"grad_norm": 17.23782730102539,
"learning_rate": 2.090909090909091e-05,
"loss": 1.5809,
"mean_token_accuracy": 0.9070325046777725,
"step": 293
},
{
"epoch": 2.352,
"grad_norm": 17.424673080444336,
"learning_rate": 2.080808080808081e-05,
"loss": 1.7886,
"mean_token_accuracy": 0.8876490592956543,
"step": 294
},
{
"epoch": 2.36,
"grad_norm": 18.26118278503418,
"learning_rate": 2.070707070707071e-05,
"loss": 1.6298,
"mean_token_accuracy": 0.8885916471481323,
"step": 295
},
{
"epoch": 2.368,
"grad_norm": 19.218050003051758,
"learning_rate": 2.0606060606060608e-05,
"loss": 1.9432,
"mean_token_accuracy": 0.8848688006401062,
"step": 296
},
{
"epoch": 2.376,
"grad_norm": 18.870819091796875,
"learning_rate": 2.0505050505050504e-05,
"loss": 1.7683,
"mean_token_accuracy": 0.889656737446785,
"step": 297
},
{
"epoch": 2.384,
"grad_norm": 17.928733825683594,
"learning_rate": 2.0404040404040407e-05,
"loss": 1.8347,
"mean_token_accuracy": 0.8826514780521393,
"step": 298
},
{
"epoch": 2.392,
"grad_norm": 17.18740463256836,
"learning_rate": 2.0303030303030303e-05,
"loss": 1.7417,
"mean_token_accuracy": 0.8983327001333237,
"step": 299
},
{
"epoch": 2.4,
"grad_norm": 17.51845932006836,
"learning_rate": 2.0202020202020203e-05,
"loss": 1.5803,
"mean_token_accuracy": 0.9036453068256378,
"step": 300
},
{
"epoch": 2.408,
"grad_norm": 18.641651153564453,
"learning_rate": 2.0101010101010102e-05,
"loss": 1.9701,
"mean_token_accuracy": 0.8859322518110275,
"step": 301
},
{
"epoch": 2.416,
"grad_norm": 19.06338119506836,
"learning_rate": 2e-05,
"loss": 1.593,
"mean_token_accuracy": 0.8942733258008957,
"step": 302
},
{
"epoch": 2.424,
"grad_norm": 17.082141876220703,
"learning_rate": 1.98989898989899e-05,
"loss": 1.5682,
"mean_token_accuracy": 0.8980588763952255,
"step": 303
},
{
"epoch": 2.432,
"grad_norm": 18.56012725830078,
"learning_rate": 1.9797979797979797e-05,
"loss": 1.9395,
"mean_token_accuracy": 0.8804908245801926,
"step": 304
},
{
"epoch": 2.44,
"grad_norm": 20.00580406188965,
"learning_rate": 1.9696969696969697e-05,
"loss": 1.8976,
"mean_token_accuracy": 0.8813226372003555,
"step": 305
},
{
"epoch": 2.448,
"grad_norm": 17.66527557373047,
"learning_rate": 1.95959595959596e-05,
"loss": 1.7429,
"mean_token_accuracy": 0.8991499990224838,
"step": 306
},
{
"epoch": 2.456,
"grad_norm": 22.222915649414062,
"learning_rate": 1.9494949494949496e-05,
"loss": 2.0225,
"mean_token_accuracy": 0.8786364048719406,
"step": 307
},
{
"epoch": 2.464,
"grad_norm": 15.038418769836426,
"learning_rate": 1.9393939393939395e-05,
"loss": 1.5893,
"mean_token_accuracy": 0.9014905393123627,
"step": 308
},
{
"epoch": 2.472,
"grad_norm": 16.89600944519043,
"learning_rate": 1.9292929292929295e-05,
"loss": 1.7931,
"mean_token_accuracy": 0.8878951072692871,
"step": 309
},
{
"epoch": 2.48,
"grad_norm": 18.997501373291016,
"learning_rate": 1.919191919191919e-05,
"loss": 1.7395,
"mean_token_accuracy": 0.8750288486480713,
"step": 310
},
{
"epoch": 2.488,
"grad_norm": 17.06654930114746,
"learning_rate": 1.9090909090909094e-05,
"loss": 1.7309,
"mean_token_accuracy": 0.8866889774799347,
"step": 311
},
{
"epoch": 2.496,
"grad_norm": 16.174907684326172,
"learning_rate": 1.898989898989899e-05,
"loss": 1.6361,
"mean_token_accuracy": 0.8998618721961975,
"step": 312
},
{
"epoch": 2.504,
"grad_norm": 20.149904251098633,
"learning_rate": 1.888888888888889e-05,
"loss": 1.651,
"mean_token_accuracy": 0.894085094332695,
"step": 313
},
{
"epoch": 2.512,
"grad_norm": 17.874101638793945,
"learning_rate": 1.878787878787879e-05,
"loss": 1.651,
"mean_token_accuracy": 0.8896775543689728,
"step": 314
},
{
"epoch": 2.52,
"grad_norm": 18.547687530517578,
"learning_rate": 1.8686868686868688e-05,
"loss": 1.9905,
"mean_token_accuracy": 0.8854596465826035,
"step": 315
},
{
"epoch": 2.528,
"grad_norm": 19.14983367919922,
"learning_rate": 1.8585858585858588e-05,
"loss": 1.8654,
"mean_token_accuracy": 0.884912833571434,
"step": 316
},
{
"epoch": 2.536,
"grad_norm": 18.558279037475586,
"learning_rate": 1.8484848484848487e-05,
"loss": 1.7852,
"mean_token_accuracy": 0.8943625837564468,
"step": 317
},
{
"epoch": 2.544,
"grad_norm": 17.757083892822266,
"learning_rate": 1.8383838383838383e-05,
"loss": 1.5901,
"mean_token_accuracy": 0.9012947678565979,
"step": 318
},
{
"epoch": 2.552,
"grad_norm": 17.890913009643555,
"learning_rate": 1.8282828282828286e-05,
"loss": 1.8079,
"mean_token_accuracy": 0.8911891281604767,
"step": 319
},
{
"epoch": 2.56,
"grad_norm": 16.844127655029297,
"learning_rate": 1.8181818181818182e-05,
"loss": 1.6101,
"mean_token_accuracy": 0.9100003689527512,
"step": 320
},
{
"epoch": 2.568,
"grad_norm": 20.43346405029297,
"learning_rate": 1.808080808080808e-05,
"loss": 1.9681,
"mean_token_accuracy": 0.8885725736618042,
"step": 321
},
{
"epoch": 2.576,
"grad_norm": 19.34762191772461,
"learning_rate": 1.797979797979798e-05,
"loss": 1.9601,
"mean_token_accuracy": 0.8918892741203308,
"step": 322
},
{
"epoch": 2.584,
"grad_norm": 16.914316177368164,
"learning_rate": 1.787878787878788e-05,
"loss": 1.587,
"mean_token_accuracy": 0.8948457092046738,
"step": 323
},
{
"epoch": 2.592,
"grad_norm": 18.791858673095703,
"learning_rate": 1.777777777777778e-05,
"loss": 1.8718,
"mean_token_accuracy": 0.8815629184246063,
"step": 324
},
{
"epoch": 2.6,
"grad_norm": 17.461881637573242,
"learning_rate": 1.7676767676767676e-05,
"loss": 1.6116,
"mean_token_accuracy": 0.8986152410507202,
"step": 325
},
{
"epoch": 2.608,
"grad_norm": 19.637073516845703,
"learning_rate": 1.7575757575757576e-05,
"loss": 1.8468,
"mean_token_accuracy": 0.8987504243850708,
"step": 326
},
{
"epoch": 2.616,
"grad_norm": 19.572315216064453,
"learning_rate": 1.7474747474747475e-05,
"loss": 1.8924,
"mean_token_accuracy": 0.8777202218770981,
"step": 327
},
{
"epoch": 2.624,
"grad_norm": 19.34830665588379,
"learning_rate": 1.7373737373737375e-05,
"loss": 1.8247,
"mean_token_accuracy": 0.8945279717445374,
"step": 328
},
{
"epoch": 2.632,
"grad_norm": 16.744380950927734,
"learning_rate": 1.7272727272727274e-05,
"loss": 1.615,
"mean_token_accuracy": 0.8965769708156586,
"step": 329
},
{
"epoch": 2.64,
"grad_norm": 16.65348243713379,
"learning_rate": 1.7171717171717173e-05,
"loss": 1.7394,
"mean_token_accuracy": 0.8977630883455276,
"step": 330
},
{
"epoch": 2.648,
"grad_norm": 19.861833572387695,
"learning_rate": 1.707070707070707e-05,
"loss": 1.9065,
"mean_token_accuracy": 0.8806384950876236,
"step": 331
},
{
"epoch": 2.656,
"grad_norm": 18.7454891204834,
"learning_rate": 1.6969696969696972e-05,
"loss": 1.7408,
"mean_token_accuracy": 0.8817498087882996,
"step": 332
},
{
"epoch": 2.664,
"grad_norm": 18.72283935546875,
"learning_rate": 1.686868686868687e-05,
"loss": 1.6119,
"mean_token_accuracy": 0.8949918895959854,
"step": 333
},
{
"epoch": 2.672,
"grad_norm": 17.392744064331055,
"learning_rate": 1.6767676767676768e-05,
"loss": 1.7595,
"mean_token_accuracy": 0.8967541307210922,
"step": 334
},
{
"epoch": 2.68,
"grad_norm": 17.753887176513672,
"learning_rate": 1.6666666666666667e-05,
"loss": 1.867,
"mean_token_accuracy": 0.8946125656366348,
"step": 335
},
{
"epoch": 2.6879999999999997,
"grad_norm": 15.396509170532227,
"learning_rate": 1.6565656565656567e-05,
"loss": 1.4533,
"mean_token_accuracy": 0.902470737695694,
"step": 336
},
{
"epoch": 2.6959999999999997,
"grad_norm": 17.420621871948242,
"learning_rate": 1.6464646464646466e-05,
"loss": 1.8612,
"mean_token_accuracy": 0.8907473981380463,
"step": 337
},
{
"epoch": 2.7039999999999997,
"grad_norm": 16.46539306640625,
"learning_rate": 1.6363636363636366e-05,
"loss": 1.8107,
"mean_token_accuracy": 0.8965497016906738,
"step": 338
},
{
"epoch": 2.7119999999999997,
"grad_norm": 18.55117416381836,
"learning_rate": 1.6262626262626262e-05,
"loss": 1.6489,
"mean_token_accuracy": 0.8972436189651489,
"step": 339
},
{
"epoch": 2.7199999999999998,
"grad_norm": 20.577390670776367,
"learning_rate": 1.6161616161616165e-05,
"loss": 1.7792,
"mean_token_accuracy": 0.8870959579944611,
"step": 340
},
{
"epoch": 2.7279999999999998,
"grad_norm": 16.724843978881836,
"learning_rate": 1.606060606060606e-05,
"loss": 1.7987,
"mean_token_accuracy": 0.896402895450592,
"step": 341
},
{
"epoch": 2.7359999999999998,
"grad_norm": 19.218793869018555,
"learning_rate": 1.595959595959596e-05,
"loss": 1.9317,
"mean_token_accuracy": 0.8903376162052155,
"step": 342
},
{
"epoch": 2.7439999999999998,
"grad_norm": 17.59774398803711,
"learning_rate": 1.585858585858586e-05,
"loss": 1.7176,
"mean_token_accuracy": 0.9004463106393814,
"step": 343
},
{
"epoch": 2.752,
"grad_norm": 18.768749237060547,
"learning_rate": 1.5757575757575756e-05,
"loss": 1.772,
"mean_token_accuracy": 0.8775418102741241,
"step": 344
},
{
"epoch": 2.76,
"grad_norm": 19.772340774536133,
"learning_rate": 1.565656565656566e-05,
"loss": 1.8746,
"mean_token_accuracy": 0.8900909423828125,
"step": 345
},
{
"epoch": 2.768,
"grad_norm": 17.75616455078125,
"learning_rate": 1.5555555555555555e-05,
"loss": 1.6445,
"mean_token_accuracy": 0.8957866132259369,
"step": 346
},
{
"epoch": 2.776,
"grad_norm": 17.134241104125977,
"learning_rate": 1.5454545454545454e-05,
"loss": 1.6659,
"mean_token_accuracy": 0.9030885994434357,
"step": 347
},
{
"epoch": 2.784,
"grad_norm": 16.95988655090332,
"learning_rate": 1.5353535353535354e-05,
"loss": 1.5641,
"mean_token_accuracy": 0.8961215913295746,
"step": 348
},
{
"epoch": 2.792,
"grad_norm": 18.894676208496094,
"learning_rate": 1.5252525252525255e-05,
"loss": 1.6115,
"mean_token_accuracy": 0.9038358479738235,
"step": 349
},
{
"epoch": 2.8,
"grad_norm": 18.057437896728516,
"learning_rate": 1.5151515151515153e-05,
"loss": 1.8048,
"mean_token_accuracy": 0.8895912170410156,
"step": 350
},
{
"epoch": 2.808,
"grad_norm": 16.97344398498535,
"learning_rate": 1.505050505050505e-05,
"loss": 1.7005,
"mean_token_accuracy": 0.9056167453527451,
"step": 351
},
{
"epoch": 2.816,
"grad_norm": 18.987558364868164,
"learning_rate": 1.494949494949495e-05,
"loss": 1.9392,
"mean_token_accuracy": 0.8795923292636871,
"step": 352
},
{
"epoch": 2.824,
"grad_norm": 19.48893165588379,
"learning_rate": 1.484848484848485e-05,
"loss": 1.884,
"mean_token_accuracy": 0.8816027939319611,
"step": 353
},
{
"epoch": 2.832,
"grad_norm": 16.16484260559082,
"learning_rate": 1.4747474747474749e-05,
"loss": 1.3967,
"mean_token_accuracy": 0.9068291187286377,
"step": 354
},
{
"epoch": 2.84,
"grad_norm": 16.221166610717773,
"learning_rate": 1.4646464646464647e-05,
"loss": 1.6643,
"mean_token_accuracy": 0.8908949345350266,
"step": 355
},
{
"epoch": 2.848,
"grad_norm": 20.419599533081055,
"learning_rate": 1.4545454545454545e-05,
"loss": 1.9597,
"mean_token_accuracy": 0.8768892884254456,
"step": 356
},
{
"epoch": 2.856,
"grad_norm": 21.398174285888672,
"learning_rate": 1.4444444444444444e-05,
"loss": 1.8647,
"mean_token_accuracy": 0.882171094417572,
"step": 357
},
{
"epoch": 2.864,
"grad_norm": 20.366880416870117,
"learning_rate": 1.4343434343434345e-05,
"loss": 1.7834,
"mean_token_accuracy": 0.8946562111377716,
"step": 358
},
{
"epoch": 2.872,
"grad_norm": 18.22726821899414,
"learning_rate": 1.4242424242424243e-05,
"loss": 1.7605,
"mean_token_accuracy": 0.901305228471756,
"step": 359
},
{
"epoch": 2.88,
"grad_norm": 19.751203536987305,
"learning_rate": 1.4141414141414141e-05,
"loss": 1.6872,
"mean_token_accuracy": 0.8890763968229294,
"step": 360
},
{
"epoch": 2.888,
"grad_norm": 15.955531120300293,
"learning_rate": 1.404040404040404e-05,
"loss": 1.6292,
"mean_token_accuracy": 0.9044130593538284,
"step": 361
},
{
"epoch": 2.896,
"grad_norm": 17.15973472595215,
"learning_rate": 1.3939393939393942e-05,
"loss": 1.5112,
"mean_token_accuracy": 0.9043047428131104,
"step": 362
},
{
"epoch": 2.904,
"grad_norm": 20.14824676513672,
"learning_rate": 1.383838383838384e-05,
"loss": 2.1377,
"mean_token_accuracy": 0.874171257019043,
"step": 363
},
{
"epoch": 2.912,
"grad_norm": 17.76668930053711,
"learning_rate": 1.3737373737373737e-05,
"loss": 1.815,
"mean_token_accuracy": 0.8899873048067093,
"step": 364
},
{
"epoch": 2.92,
"grad_norm": 17.47455596923828,
"learning_rate": 1.3636363636363637e-05,
"loss": 1.8602,
"mean_token_accuracy": 0.8958509713411331,
"step": 365
},
{
"epoch": 2.928,
"grad_norm": 18.125408172607422,
"learning_rate": 1.3535353535353538e-05,
"loss": 1.7344,
"mean_token_accuracy": 0.8926298767328262,
"step": 366
},
{
"epoch": 2.936,
"grad_norm": 18.787460327148438,
"learning_rate": 1.3434343434343436e-05,
"loss": 1.8846,
"mean_token_accuracy": 0.8847126960754395,
"step": 367
},
{
"epoch": 2.944,
"grad_norm": 17.53483009338379,
"learning_rate": 1.3333333333333333e-05,
"loss": 1.7037,
"mean_token_accuracy": 0.8964135348796844,
"step": 368
},
{
"epoch": 2.952,
"grad_norm": 17.428064346313477,
"learning_rate": 1.3232323232323233e-05,
"loss": 1.7683,
"mean_token_accuracy": 0.8908781260251999,
"step": 369
},
{
"epoch": 2.96,
"grad_norm": 17.09117889404297,
"learning_rate": 1.3131313131313134e-05,
"loss": 1.6467,
"mean_token_accuracy": 0.8997662663459778,
"step": 370
},
{
"epoch": 2.968,
"grad_norm": 18.157711029052734,
"learning_rate": 1.3030303030303032e-05,
"loss": 2.0013,
"mean_token_accuracy": 0.878508061170578,
"step": 371
},
{
"epoch": 2.976,
"grad_norm": 17.718358993530273,
"learning_rate": 1.292929292929293e-05,
"loss": 1.7484,
"mean_token_accuracy": 0.8951921463012695,
"step": 372
},
{
"epoch": 2.984,
"grad_norm": 19.314804077148438,
"learning_rate": 1.2828282828282829e-05,
"loss": 1.8593,
"mean_token_accuracy": 0.8909815549850464,
"step": 373
},
{
"epoch": 2.992,
"grad_norm": 15.990935325622559,
"learning_rate": 1.2727272727272727e-05,
"loss": 1.7345,
"mean_token_accuracy": 0.8998604416847229,
"step": 374
},
{
"epoch": 3.0,
"grad_norm": 17.464195251464844,
"learning_rate": 1.2626262626262628e-05,
"loss": 1.9088,
"mean_token_accuracy": 0.8819395452737808,
"step": 375
},
{
"epoch": 3.008,
"grad_norm": 11.721142768859863,
"learning_rate": 1.2525252525252526e-05,
"loss": 1.0405,
"mean_token_accuracy": 0.9373282045125961,
"step": 376
},
{
"epoch": 3.016,
"grad_norm": 14.365158081054688,
"learning_rate": 1.2424242424242424e-05,
"loss": 1.3254,
"mean_token_accuracy": 0.9172510653734207,
"step": 377
},
{
"epoch": 3.024,
"grad_norm": 12.863395690917969,
"learning_rate": 1.2323232323232325e-05,
"loss": 1.0233,
"mean_token_accuracy": 0.9359502047300339,
"step": 378
},
{
"epoch": 3.032,
"grad_norm": 13.299514770507812,
"learning_rate": 1.2222222222222222e-05,
"loss": 1.0952,
"mean_token_accuracy": 0.9341908991336823,
"step": 379
},
{
"epoch": 3.04,
"grad_norm": 15.089731216430664,
"learning_rate": 1.2121212121212122e-05,
"loss": 1.1399,
"mean_token_accuracy": 0.9282894879579544,
"step": 380
},
{
"epoch": 3.048,
"grad_norm": 13.675347328186035,
"learning_rate": 1.202020202020202e-05,
"loss": 1.1441,
"mean_token_accuracy": 0.9303115904331207,
"step": 381
},
{
"epoch": 3.056,
"grad_norm": 14.102622985839844,
"learning_rate": 1.1919191919191921e-05,
"loss": 1.0362,
"mean_token_accuracy": 0.9242476671934128,
"step": 382
},
{
"epoch": 3.064,
"grad_norm": 13.803597450256348,
"learning_rate": 1.1818181818181819e-05,
"loss": 1.0437,
"mean_token_accuracy": 0.9370378255844116,
"step": 383
},
{
"epoch": 3.072,
"grad_norm": 13.217544555664062,
"learning_rate": 1.1717171717171718e-05,
"loss": 0.9713,
"mean_token_accuracy": 0.9372242540121078,
"step": 384
},
{
"epoch": 3.08,
"grad_norm": 14.189465522766113,
"learning_rate": 1.1616161616161616e-05,
"loss": 0.947,
"mean_token_accuracy": 0.9372307360172272,
"step": 385
},
{
"epoch": 3.088,
"grad_norm": 18.668476104736328,
"learning_rate": 1.1515151515151517e-05,
"loss": 1.0271,
"mean_token_accuracy": 0.9349544644355774,
"step": 386
},
{
"epoch": 3.096,
"grad_norm": 15.80893325805664,
"learning_rate": 1.1414141414141415e-05,
"loss": 0.9894,
"mean_token_accuracy": 0.9350813329219818,
"step": 387
},
{
"epoch": 3.104,
"grad_norm": 13.34670639038086,
"learning_rate": 1.1313131313131314e-05,
"loss": 0.8648,
"mean_token_accuracy": 0.9458875060081482,
"step": 388
},
{
"epoch": 3.112,
"grad_norm": 13.572864532470703,
"learning_rate": 1.1212121212121212e-05,
"loss": 0.8647,
"mean_token_accuracy": 0.9380109906196594,
"step": 389
},
{
"epoch": 3.12,
"grad_norm": 14.114604949951172,
"learning_rate": 1.1111111111111112e-05,
"loss": 0.9754,
"mean_token_accuracy": 0.9376205205917358,
"step": 390
},
{
"epoch": 3.128,
"grad_norm": 17.51618003845215,
"learning_rate": 1.1010101010101011e-05,
"loss": 1.1896,
"mean_token_accuracy": 0.9313846081495285,
"step": 391
},
{
"epoch": 3.136,
"grad_norm": 16.113466262817383,
"learning_rate": 1.0909090909090909e-05,
"loss": 0.9537,
"mean_token_accuracy": 0.9409548491239548,
"step": 392
},
{
"epoch": 3.144,
"grad_norm": 16.324718475341797,
"learning_rate": 1.0808080808080808e-05,
"loss": 1.0058,
"mean_token_accuracy": 0.9435614198446274,
"step": 393
},
{
"epoch": 3.152,
"grad_norm": 14.306757926940918,
"learning_rate": 1.0707070707070708e-05,
"loss": 0.8795,
"mean_token_accuracy": 0.9390368461608887,
"step": 394
},
{
"epoch": 3.16,
"grad_norm": 17.119672775268555,
"learning_rate": 1.0606060606060607e-05,
"loss": 1.0024,
"mean_token_accuracy": 0.9306483864784241,
"step": 395
},
{
"epoch": 3.168,
"grad_norm": 16.017135620117188,
"learning_rate": 1.0505050505050505e-05,
"loss": 0.9437,
"mean_token_accuracy": 0.9379599690437317,
"step": 396
},
{
"epoch": 3.176,
"grad_norm": 14.418791770935059,
"learning_rate": 1.0404040404040405e-05,
"loss": 0.8662,
"mean_token_accuracy": 0.9417054504156113,
"step": 397
},
{
"epoch": 3.184,
"grad_norm": 18.20338249206543,
"learning_rate": 1.0303030303030304e-05,
"loss": 1.048,
"mean_token_accuracy": 0.9236321002244949,
"step": 398
},
{
"epoch": 3.192,
"grad_norm": 16.065502166748047,
"learning_rate": 1.0202020202020204e-05,
"loss": 0.9848,
"mean_token_accuracy": 0.9392602741718292,
"step": 399
},
{
"epoch": 3.2,
"grad_norm": 15.719825744628906,
"learning_rate": 1.0101010101010101e-05,
"loss": 0.9635,
"mean_token_accuracy": 0.9424310624599457,
"step": 400
},
{
"epoch": 3.208,
"grad_norm": 17.026273727416992,
"learning_rate": 1e-05,
"loss": 1.0384,
"mean_token_accuracy": 0.9312147498130798,
"step": 401
},
{
"epoch": 3.216,
"grad_norm": 15.657279014587402,
"learning_rate": 9.898989898989899e-06,
"loss": 0.9847,
"mean_token_accuracy": 0.9377696067094803,
"step": 402
},
{
"epoch": 3.224,
"grad_norm": 15.536697387695312,
"learning_rate": 9.7979797979798e-06,
"loss": 0.8828,
"mean_token_accuracy": 0.9396137297153473,
"step": 403
},
{
"epoch": 3.232,
"grad_norm": 15.973769187927246,
"learning_rate": 9.696969696969698e-06,
"loss": 1.0313,
"mean_token_accuracy": 0.9414243400096893,
"step": 404
},
{
"epoch": 3.24,
"grad_norm": 17.689178466796875,
"learning_rate": 9.595959595959595e-06,
"loss": 1.1151,
"mean_token_accuracy": 0.9186365902423859,
"step": 405
},
{
"epoch": 3.248,
"grad_norm": 16.18196678161621,
"learning_rate": 9.494949494949495e-06,
"loss": 1.0122,
"mean_token_accuracy": 0.9342964887619019,
"step": 406
},
{
"epoch": 3.2560000000000002,
"grad_norm": 14.099705696105957,
"learning_rate": 9.393939393939394e-06,
"loss": 0.9217,
"mean_token_accuracy": 0.9330043792724609,
"step": 407
},
{
"epoch": 3.2640000000000002,
"grad_norm": 16.102678298950195,
"learning_rate": 9.292929292929294e-06,
"loss": 1.0442,
"mean_token_accuracy": 0.9256764650344849,
"step": 408
},
{
"epoch": 3.2720000000000002,
"grad_norm": 16.18111228942871,
"learning_rate": 9.191919191919192e-06,
"loss": 0.9579,
"mean_token_accuracy": 0.92878857254982,
"step": 409
},
{
"epoch": 3.2800000000000002,
"grad_norm": 16.256258010864258,
"learning_rate": 9.090909090909091e-06,
"loss": 1.0232,
"mean_token_accuracy": 0.9233647882938385,
"step": 410
},
{
"epoch": 3.288,
"grad_norm": 15.954553604125977,
"learning_rate": 8.98989898989899e-06,
"loss": 0.9598,
"mean_token_accuracy": 0.9369918555021286,
"step": 411
},
{
"epoch": 3.296,
"grad_norm": 16.494140625,
"learning_rate": 8.88888888888889e-06,
"loss": 1.0289,
"mean_token_accuracy": 0.9376811236143112,
"step": 412
},
{
"epoch": 3.304,
"grad_norm": 14.779414176940918,
"learning_rate": 8.787878787878788e-06,
"loss": 0.9465,
"mean_token_accuracy": 0.9370988309383392,
"step": 413
},
{
"epoch": 3.312,
"grad_norm": 15.431049346923828,
"learning_rate": 8.686868686868687e-06,
"loss": 0.9653,
"mean_token_accuracy": 0.9339174628257751,
"step": 414
},
{
"epoch": 3.32,
"grad_norm": 15.794412612915039,
"learning_rate": 8.585858585858587e-06,
"loss": 1.0466,
"mean_token_accuracy": 0.9339481443166733,
"step": 415
},
{
"epoch": 3.328,
"grad_norm": 17.15564727783203,
"learning_rate": 8.484848484848486e-06,
"loss": 1.1203,
"mean_token_accuracy": 0.9287491142749786,
"step": 416
},
{
"epoch": 3.336,
"grad_norm": 16.112878799438477,
"learning_rate": 8.383838383838384e-06,
"loss": 1.0063,
"mean_token_accuracy": 0.9275001287460327,
"step": 417
},
{
"epoch": 3.344,
"grad_norm": 15.973852157592773,
"learning_rate": 8.282828282828283e-06,
"loss": 1.0703,
"mean_token_accuracy": 0.92690809071064,
"step": 418
},
{
"epoch": 3.352,
"grad_norm": 14.684673309326172,
"learning_rate": 8.181818181818183e-06,
"loss": 0.9457,
"mean_token_accuracy": 0.9447111636400223,
"step": 419
},
{
"epoch": 3.36,
"grad_norm": 15.560981750488281,
"learning_rate": 8.080808080808082e-06,
"loss": 1.0273,
"mean_token_accuracy": 0.934164434671402,
"step": 420
},
{
"epoch": 3.368,
"grad_norm": 15.820408821105957,
"learning_rate": 7.97979797979798e-06,
"loss": 0.976,
"mean_token_accuracy": 0.9367633759975433,
"step": 421
},
{
"epoch": 3.376,
"grad_norm": 14.710906982421875,
"learning_rate": 7.878787878787878e-06,
"loss": 0.9833,
"mean_token_accuracy": 0.9340205192565918,
"step": 422
},
{
"epoch": 3.384,
"grad_norm": 14.58287239074707,
"learning_rate": 7.777777777777777e-06,
"loss": 0.9453,
"mean_token_accuracy": 0.9295031130313873,
"step": 423
},
{
"epoch": 3.392,
"grad_norm": 15.10856819152832,
"learning_rate": 7.676767676767677e-06,
"loss": 0.9536,
"mean_token_accuracy": 0.9329349398612976,
"step": 424
},
{
"epoch": 3.4,
"grad_norm": 12.695680618286133,
"learning_rate": 7.5757575757575764e-06,
"loss": 0.8262,
"mean_token_accuracy": 0.9397930353879929,
"step": 425
},
{
"epoch": 3.408,
"grad_norm": 16.12411880493164,
"learning_rate": 7.474747474747475e-06,
"loss": 1.1105,
"mean_token_accuracy": 0.9315387904644012,
"step": 426
},
{
"epoch": 3.416,
"grad_norm": 15.864951133728027,
"learning_rate": 7.3737373737373745e-06,
"loss": 0.9715,
"mean_token_accuracy": 0.9408050626516342,
"step": 427
},
{
"epoch": 3.424,
"grad_norm": 14.210537910461426,
"learning_rate": 7.272727272727272e-06,
"loss": 1.0179,
"mean_token_accuracy": 0.933457687497139,
"step": 428
},
{
"epoch": 3.432,
"grad_norm": 13.187994003295898,
"learning_rate": 7.171717171717173e-06,
"loss": 0.912,
"mean_token_accuracy": 0.9418987780809402,
"step": 429
},
{
"epoch": 3.44,
"grad_norm": 17.883209228515625,
"learning_rate": 7.0707070707070704e-06,
"loss": 1.1905,
"mean_token_accuracy": 0.9247805923223495,
"step": 430
},
{
"epoch": 3.448,
"grad_norm": 14.041234016418457,
"learning_rate": 6.969696969696971e-06,
"loss": 0.9638,
"mean_token_accuracy": 0.9382515847682953,
"step": 431
},
{
"epoch": 3.456,
"grad_norm": 14.920849800109863,
"learning_rate": 6.8686868686868685e-06,
"loss": 1.0225,
"mean_token_accuracy": 0.9311228841543198,
"step": 432
},
{
"epoch": 3.464,
"grad_norm": 14.72715950012207,
"learning_rate": 6.767676767676769e-06,
"loss": 1.0436,
"mean_token_accuracy": 0.9386313855648041,
"step": 433
},
{
"epoch": 3.472,
"grad_norm": 14.456624984741211,
"learning_rate": 6.666666666666667e-06,
"loss": 0.976,
"mean_token_accuracy": 0.9332603514194489,
"step": 434
},
{
"epoch": 3.48,
"grad_norm": 14.296424865722656,
"learning_rate": 6.565656565656567e-06,
"loss": 0.9394,
"mean_token_accuracy": 0.9352370202541351,
"step": 435
},
{
"epoch": 3.488,
"grad_norm": 14.372357368469238,
"learning_rate": 6.464646464646465e-06,
"loss": 1.0074,
"mean_token_accuracy": 0.9338279366493225,
"step": 436
},
{
"epoch": 3.496,
"grad_norm": 14.993144989013672,
"learning_rate": 6.363636363636363e-06,
"loss": 1.028,
"mean_token_accuracy": 0.932237908244133,
"step": 437
},
{
"epoch": 3.504,
"grad_norm": 15.180071830749512,
"learning_rate": 6.262626262626263e-06,
"loss": 0.949,
"mean_token_accuracy": 0.937222346663475,
"step": 438
},
{
"epoch": 3.512,
"grad_norm": 16.305255889892578,
"learning_rate": 6.161616161616162e-06,
"loss": 1.0678,
"mean_token_accuracy": 0.9322217702865601,
"step": 439
},
{
"epoch": 3.52,
"grad_norm": 14.723209381103516,
"learning_rate": 6.060606060606061e-06,
"loss": 0.9263,
"mean_token_accuracy": 0.943725198507309,
"step": 440
},
{
"epoch": 3.528,
"grad_norm": 13.980120658874512,
"learning_rate": 5.9595959595959605e-06,
"loss": 0.8992,
"mean_token_accuracy": 0.93913933634758,
"step": 441
},
{
"epoch": 3.536,
"grad_norm": 15.935212135314941,
"learning_rate": 5.858585858585859e-06,
"loss": 1.0253,
"mean_token_accuracy": 0.9386613219976425,
"step": 442
},
{
"epoch": 3.544,
"grad_norm": 13.340989112854004,
"learning_rate": 5.7575757575757586e-06,
"loss": 0.903,
"mean_token_accuracy": 0.9501822739839554,
"step": 443
},
{
"epoch": 3.552,
"grad_norm": 15.33117389678955,
"learning_rate": 5.656565656565657e-06,
"loss": 0.9156,
"mean_token_accuracy": 0.9392779171466827,
"step": 444
},
{
"epoch": 3.56,
"grad_norm": 15.94556999206543,
"learning_rate": 5.555555555555556e-06,
"loss": 1.0368,
"mean_token_accuracy": 0.9274450391530991,
"step": 445
},
{
"epoch": 3.568,
"grad_norm": 16.81086540222168,
"learning_rate": 5.4545454545454545e-06,
"loss": 0.9722,
"mean_token_accuracy": 0.929038405418396,
"step": 446
},
{
"epoch": 3.576,
"grad_norm": 14.2332124710083,
"learning_rate": 5.353535353535354e-06,
"loss": 0.8955,
"mean_token_accuracy": 0.9410098195075989,
"step": 447
},
{
"epoch": 3.584,
"grad_norm": 16.48716926574707,
"learning_rate": 5.2525252525252526e-06,
"loss": 1.0025,
"mean_token_accuracy": 0.9347028732299805,
"step": 448
},
{
"epoch": 3.592,
"grad_norm": 14.993062019348145,
"learning_rate": 5.151515151515152e-06,
"loss": 0.943,
"mean_token_accuracy": 0.940876841545105,
"step": 449
},
{
"epoch": 3.6,
"grad_norm": 15.412555694580078,
"learning_rate": 5.050505050505051e-06,
"loss": 1.019,
"mean_token_accuracy": 0.9378762990236282,
"step": 450
},
{
"epoch": 3.608,
"grad_norm": 14.794282913208008,
"learning_rate": 4.949494949494949e-06,
"loss": 0.9683,
"mean_token_accuracy": 0.9323435574769974,
"step": 451
},
{
"epoch": 3.616,
"grad_norm": 14.959280967712402,
"learning_rate": 4.848484848484849e-06,
"loss": 1.0428,
"mean_token_accuracy": 0.9346833378076553,
"step": 452
},
{
"epoch": 3.624,
"grad_norm": 17.50050926208496,
"learning_rate": 4.747474747474747e-06,
"loss": 1.0809,
"mean_token_accuracy": 0.9283154457807541,
"step": 453
},
{
"epoch": 3.632,
"grad_norm": 14.101909637451172,
"learning_rate": 4.646464646464647e-06,
"loss": 0.9455,
"mean_token_accuracy": 0.9361487329006195,
"step": 454
},
{
"epoch": 3.64,
"grad_norm": 14.593957901000977,
"learning_rate": 4.5454545454545455e-06,
"loss": 0.9624,
"mean_token_accuracy": 0.9361502379179001,
"step": 455
},
{
"epoch": 3.648,
"grad_norm": 15.813642501831055,
"learning_rate": 4.444444444444445e-06,
"loss": 0.9207,
"mean_token_accuracy": 0.939594179391861,
"step": 456
},
{
"epoch": 3.656,
"grad_norm": 13.442476272583008,
"learning_rate": 4.343434343434344e-06,
"loss": 0.8886,
"mean_token_accuracy": 0.938508003950119,
"step": 457
},
{
"epoch": 3.664,
"grad_norm": 18.873268127441406,
"learning_rate": 4.242424242424243e-06,
"loss": 1.1159,
"mean_token_accuracy": 0.9257403612136841,
"step": 458
},
{
"epoch": 3.672,
"grad_norm": 14.351343154907227,
"learning_rate": 4.141414141414142e-06,
"loss": 0.9803,
"mean_token_accuracy": 0.9281805902719498,
"step": 459
},
{
"epoch": 3.68,
"grad_norm": 17.297321319580078,
"learning_rate": 4.040404040404041e-06,
"loss": 1.0052,
"mean_token_accuracy": 0.9295466840267181,
"step": 460
},
{
"epoch": 3.6879999999999997,
"grad_norm": 21.608966827392578,
"learning_rate": 3.939393939393939e-06,
"loss": 1.013,
"mean_token_accuracy": 0.9357683062553406,
"step": 461
},
{
"epoch": 3.6959999999999997,
"grad_norm": 16.225805282592773,
"learning_rate": 3.8383838383838385e-06,
"loss": 1.0466,
"mean_token_accuracy": 0.9302550107240677,
"step": 462
},
{
"epoch": 3.7039999999999997,
"grad_norm": 14.666239738464355,
"learning_rate": 3.7373737373737375e-06,
"loss": 0.9911,
"mean_token_accuracy": 0.9366718828678131,
"step": 463
},
{
"epoch": 3.7119999999999997,
"grad_norm": 15.249946594238281,
"learning_rate": 3.636363636363636e-06,
"loss": 1.0193,
"mean_token_accuracy": 0.9248189479112625,
"step": 464
},
{
"epoch": 3.7199999999999998,
"grad_norm": 15.235973358154297,
"learning_rate": 3.5353535353535352e-06,
"loss": 1.0371,
"mean_token_accuracy": 0.9370195269584656,
"step": 465
},
{
"epoch": 3.7279999999999998,
"grad_norm": 15.745083808898926,
"learning_rate": 3.4343434343434343e-06,
"loss": 0.951,
"mean_token_accuracy": 0.9338164031505585,
"step": 466
},
{
"epoch": 3.7359999999999998,
"grad_norm": 14.804433822631836,
"learning_rate": 3.3333333333333333e-06,
"loss": 1.0366,
"mean_token_accuracy": 0.9359022080898285,
"step": 467
},
{
"epoch": 3.7439999999999998,
"grad_norm": 13.7555570602417,
"learning_rate": 3.2323232323232324e-06,
"loss": 0.977,
"mean_token_accuracy": 0.9416959583759308,
"step": 468
},
{
"epoch": 3.752,
"grad_norm": 14.858553886413574,
"learning_rate": 3.1313131313131314e-06,
"loss": 0.9739,
"mean_token_accuracy": 0.9415158182382584,
"step": 469
},
{
"epoch": 3.76,
"grad_norm": 16.706321716308594,
"learning_rate": 3.0303030303030305e-06,
"loss": 1.0242,
"mean_token_accuracy": 0.9358144998550415,
"step": 470
},
{
"epoch": 3.768,
"grad_norm": 15.634292602539062,
"learning_rate": 2.9292929292929295e-06,
"loss": 1.0251,
"mean_token_accuracy": 0.9278706014156342,
"step": 471
},
{
"epoch": 3.776,
"grad_norm": 13.777891159057617,
"learning_rate": 2.8282828282828286e-06,
"loss": 0.8874,
"mean_token_accuracy": 0.9410149455070496,
"step": 472
},
{
"epoch": 3.784,
"grad_norm": 16.45195198059082,
"learning_rate": 2.7272727272727272e-06,
"loss": 0.9863,
"mean_token_accuracy": 0.9311119616031647,
"step": 473
},
{
"epoch": 3.792,
"grad_norm": 16.67084312438965,
"learning_rate": 2.6262626262626263e-06,
"loss": 0.9869,
"mean_token_accuracy": 0.9290085285902023,
"step": 474
},
{
"epoch": 3.8,
"grad_norm": 16.300039291381836,
"learning_rate": 2.5252525252525253e-06,
"loss": 1.0872,
"mean_token_accuracy": 0.9302412569522858,
"step": 475
},
{
"epoch": 3.808,
"grad_norm": 13.501500129699707,
"learning_rate": 2.4242424242424244e-06,
"loss": 0.917,
"mean_token_accuracy": 0.9408380538225174,
"step": 476
},
{
"epoch": 3.816,
"grad_norm": 16.53856658935547,
"learning_rate": 2.3232323232323234e-06,
"loss": 1.1446,
"mean_token_accuracy": 0.9272817373275757,
"step": 477
},
{
"epoch": 3.824,
"grad_norm": 14.656960487365723,
"learning_rate": 2.2222222222222225e-06,
"loss": 0.8935,
"mean_token_accuracy": 0.9378921240568161,
"step": 478
},
{
"epoch": 3.832,
"grad_norm": 14.75877857208252,
"learning_rate": 2.1212121212121216e-06,
"loss": 0.9782,
"mean_token_accuracy": 0.9403275102376938,
"step": 479
},
{
"epoch": 3.84,
"grad_norm": 15.582100868225098,
"learning_rate": 2.0202020202020206e-06,
"loss": 1.0506,
"mean_token_accuracy": 0.9281135201454163,
"step": 480
},
{
"epoch": 3.848,
"grad_norm": 16.705493927001953,
"learning_rate": 1.9191919191919192e-06,
"loss": 1.0155,
"mean_token_accuracy": 0.9309724718332291,
"step": 481
},
{
"epoch": 3.856,
"grad_norm": 14.00627326965332,
"learning_rate": 1.818181818181818e-06,
"loss": 0.9561,
"mean_token_accuracy": 0.9410349428653717,
"step": 482
},
{
"epoch": 3.864,
"grad_norm": 16.71605682373047,
"learning_rate": 1.7171717171717171e-06,
"loss": 1.0531,
"mean_token_accuracy": 0.9354703277349472,
"step": 483
},
{
"epoch": 3.872,
"grad_norm": 12.789509773254395,
"learning_rate": 1.6161616161616162e-06,
"loss": 0.8623,
"mean_token_accuracy": 0.9429314136505127,
"step": 484
},
{
"epoch": 3.88,
"grad_norm": 13.605375289916992,
"learning_rate": 1.5151515151515152e-06,
"loss": 1.017,
"mean_token_accuracy": 0.9328679293394089,
"step": 485
},
{
"epoch": 3.888,
"grad_norm": 15.087848663330078,
"learning_rate": 1.4141414141414143e-06,
"loss": 1.0554,
"mean_token_accuracy": 0.9394855350255966,
"step": 486
},
{
"epoch": 3.896,
"grad_norm": 15.166902542114258,
"learning_rate": 1.3131313131313131e-06,
"loss": 0.9055,
"mean_token_accuracy": 0.9326141625642776,
"step": 487
},
{
"epoch": 3.904,
"grad_norm": 15.783126831054688,
"learning_rate": 1.2121212121212122e-06,
"loss": 0.9401,
"mean_token_accuracy": 0.9369719177484512,
"step": 488
},
{
"epoch": 3.912,
"grad_norm": 15.481629371643066,
"learning_rate": 1.1111111111111112e-06,
"loss": 0.9853,
"mean_token_accuracy": 0.9318475723266602,
"step": 489
},
{
"epoch": 3.92,
"grad_norm": 16.257150650024414,
"learning_rate": 1.0101010101010103e-06,
"loss": 1.0291,
"mean_token_accuracy": 0.9357573091983795,
"step": 490
},
{
"epoch": 3.928,
"grad_norm": 14.175272941589355,
"learning_rate": 9.09090909090909e-07,
"loss": 0.8596,
"mean_token_accuracy": 0.9388793110847473,
"step": 491
},
{
"epoch": 3.936,
"grad_norm": 14.242705345153809,
"learning_rate": 8.080808080808081e-07,
"loss": 0.9267,
"mean_token_accuracy": 0.9343722760677338,
"step": 492
},
{
"epoch": 3.944,
"grad_norm": 17.908700942993164,
"learning_rate": 7.070707070707071e-07,
"loss": 1.0493,
"mean_token_accuracy": 0.923067107796669,
"step": 493
},
{
"epoch": 3.952,
"grad_norm": 14.200380325317383,
"learning_rate": 6.060606060606061e-07,
"loss": 0.9237,
"mean_token_accuracy": 0.9427360892295837,
"step": 494
},
{
"epoch": 3.96,
"grad_norm": 14.010753631591797,
"learning_rate": 5.050505050505052e-07,
"loss": 0.8624,
"mean_token_accuracy": 0.9410801976919174,
"step": 495
},
{
"epoch": 3.968,
"grad_norm": 17.56831169128418,
"learning_rate": 4.0404040404040405e-07,
"loss": 1.002,
"mean_token_accuracy": 0.9286356568336487,
"step": 496
},
{
"epoch": 3.976,
"grad_norm": 12.637655258178711,
"learning_rate": 3.0303030303030305e-07,
"loss": 0.8658,
"mean_token_accuracy": 0.9444387257099152,
"step": 497
},
{
"epoch": 3.984,
"grad_norm": 17.883581161499023,
"learning_rate": 2.0202020202020202e-07,
"loss": 1.068,
"mean_token_accuracy": 0.9314180314540863,
"step": 498
},
{
"epoch": 3.992,
"grad_norm": 15.118261337280273,
"learning_rate": 1.0101010101010101e-07,
"loss": 1.0254,
"mean_token_accuracy": 0.9345874488353729,
"step": 499
},
{
"epoch": 4.0,
"grad_norm": 14.052021026611328,
"learning_rate": 0.0,
"loss": 1.0111,
"mean_token_accuracy": 0.9361301213502884,
"step": 500
}
],
"logging_steps": 1,
"max_steps": 500,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8795704000512000.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}