{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 41.328125,
"eval_steps": 500,
"global_step": 2645,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0,
"eval_runtime": 2.9591,
"eval_samples_per_second": 0.338,
"eval_steps_per_second": 0.338,
"step": 0
},
{
"epoch": 0.15625,
"grad_norm": 8.098007202148438,
"learning_rate": 4.000000000000001e-06,
"loss": 8.5142,
"step": 10
},
{
"epoch": 0.3125,
"grad_norm": 6.856651306152344,
"learning_rate": 9e-06,
"loss": 7.9708,
"step": 20
},
{
"epoch": 0.46875,
"grad_norm": 5.80125093460083,
"learning_rate": 1.4000000000000001e-05,
"loss": 8.01,
"step": 30
},
{
"epoch": 0.625,
"grad_norm": 5.687861442565918,
"learning_rate": 1.9e-05,
"loss": 7.6212,
"step": 40
},
{
"epoch": 0.78125,
"grad_norm": 6.117136478424072,
"learning_rate": 2.4e-05,
"loss": 7.3311,
"step": 50
},
{
"epoch": 0.9375,
"grad_norm": 5.861077785491943,
"learning_rate": 2.9e-05,
"loss": 7.5553,
"step": 60
},
{
"epoch": 1.09375,
"grad_norm": 6.021880149841309,
"learning_rate": 3.4000000000000007e-05,
"loss": 7.05,
"step": 70
},
{
"epoch": 1.25,
"grad_norm": 7.620683670043945,
"learning_rate": 3.9000000000000006e-05,
"loss": 6.7186,
"step": 80
},
{
"epoch": 1.40625,
"grad_norm": 7.098018169403076,
"learning_rate": 4.4000000000000006e-05,
"loss": 6.5668,
"step": 90
},
{
"epoch": 1.5625,
"grad_norm": 7.157684326171875,
"learning_rate": 4.9e-05,
"loss": 6.6148,
"step": 100
},
{
"epoch": 1.71875,
"grad_norm": 7.009696006774902,
"learning_rate": 4.9936507936507936e-05,
"loss": 6.5017,
"step": 110
},
{
"epoch": 1.875,
"grad_norm": 7.6622467041015625,
"learning_rate": 4.985714285714286e-05,
"loss": 6.53,
"step": 120
},
{
"epoch": 2.03125,
"grad_norm": 8.221333503723145,
"learning_rate": 4.977777777777778e-05,
"loss": 6.306,
"step": 130
},
{
"epoch": 2.1875,
"grad_norm": 8.832132339477539,
"learning_rate": 4.96984126984127e-05,
"loss": 4.8787,
"step": 140
},
{
"epoch": 2.34375,
"grad_norm": 9.245682716369629,
"learning_rate": 4.961904761904762e-05,
"loss": 4.7796,
"step": 150
},
{
"epoch": 2.5,
"grad_norm": 9.249746322631836,
"learning_rate": 4.953968253968254e-05,
"loss": 4.79,
"step": 160
},
{
"epoch": 2.65625,
"grad_norm": 8.098647117614746,
"learning_rate": 4.9460317460317465e-05,
"loss": 4.8157,
"step": 170
},
{
"epoch": 2.8125,
"grad_norm": 9.594801902770996,
"learning_rate": 4.9380952380952386e-05,
"loss": 4.5135,
"step": 180
},
{
"epoch": 2.96875,
"grad_norm": 9.500739097595215,
"learning_rate": 4.930158730158731e-05,
"loss": 4.9346,
"step": 190
},
{
"epoch": 3.125,
"grad_norm": 10.759628295898438,
"learning_rate": 4.922222222222222e-05,
"loss": 3.4195,
"step": 200
},
{
"epoch": 3.28125,
"grad_norm": 12.300085067749023,
"learning_rate": 4.9142857142857144e-05,
"loss": 3.136,
"step": 210
},
{
"epoch": 3.4375,
"grad_norm": 9.825932502746582,
"learning_rate": 4.9063492063492065e-05,
"loss": 3.0981,
"step": 220
},
{
"epoch": 3.59375,
"grad_norm": 10.764175415039062,
"learning_rate": 4.898412698412699e-05,
"loss": 3.0372,
"step": 230
},
{
"epoch": 3.75,
"grad_norm": 10.330010414123535,
"learning_rate": 4.890476190476191e-05,
"loss": 3.0934,
"step": 240
},
{
"epoch": 3.90625,
"grad_norm": 10.405282020568848,
"learning_rate": 4.882539682539683e-05,
"loss": 3.135,
"step": 250
},
{
"epoch": 4.0625,
"grad_norm": 9.842012405395508,
"learning_rate": 4.874603174603175e-05,
"loss": 2.7457,
"step": 260
},
{
"epoch": 4.21875,
"grad_norm": 10.659170150756836,
"learning_rate": 4.866666666666667e-05,
"loss": 1.981,
"step": 270
},
{
"epoch": 4.375,
"grad_norm": 9.820462226867676,
"learning_rate": 4.858730158730159e-05,
"loss": 2.0881,
"step": 280
},
{
"epoch": 4.53125,
"grad_norm": 9.645284652709961,
"learning_rate": 4.850793650793651e-05,
"loss": 1.9438,
"step": 290
},
{
"epoch": 4.6875,
"grad_norm": 9.881282806396484,
"learning_rate": 4.842857142857143e-05,
"loss": 1.8556,
"step": 300
},
{
"epoch": 4.84375,
"grad_norm": 10.5478515625,
"learning_rate": 4.834920634920635e-05,
"loss": 2.0494,
"step": 310
},
{
"epoch": 5.0,
"grad_norm": 19.77553939819336,
"learning_rate": 4.8269841269841274e-05,
"loss": 2.0695,
"step": 320
},
{
"epoch": 5.15625,
"grad_norm": 8.124504089355469,
"learning_rate": 4.819047619047619e-05,
"loss": 1.1424,
"step": 330
},
{
"epoch": 5.3125,
"grad_norm": 9.63291072845459,
"learning_rate": 4.811111111111111e-05,
"loss": 1.1688,
"step": 340
},
{
"epoch": 5.46875,
"grad_norm": 8.620952606201172,
"learning_rate": 4.803174603174603e-05,
"loss": 1.2346,
"step": 350
},
{
"epoch": 5.625,
"grad_norm": 8.060315132141113,
"learning_rate": 4.795238095238095e-05,
"loss": 1.176,
"step": 360
},
{
"epoch": 5.78125,
"grad_norm": 7.583358287811279,
"learning_rate": 4.7873015873015874e-05,
"loss": 1.2118,
"step": 370
},
{
"epoch": 5.9375,
"grad_norm": 10.535112380981445,
"learning_rate": 4.7793650793650796e-05,
"loss": 1.2446,
"step": 380
},
{
"epoch": 6.09375,
"grad_norm": 7.170854568481445,
"learning_rate": 4.771428571428572e-05,
"loss": 1.0445,
"step": 390
},
{
"epoch": 6.25,
"grad_norm": 6.0907392501831055,
"learning_rate": 4.763492063492064e-05,
"loss": 0.7688,
"step": 400
},
{
"epoch": 6.40625,
"grad_norm": 7.953549385070801,
"learning_rate": 4.755555555555556e-05,
"loss": 0.7333,
"step": 410
},
{
"epoch": 6.5625,
"grad_norm": 10.276406288146973,
"learning_rate": 4.747619047619048e-05,
"loss": 0.8645,
"step": 420
},
{
"epoch": 6.71875,
"grad_norm": 7.596552848815918,
"learning_rate": 4.73968253968254e-05,
"loss": 0.7799,
"step": 430
},
{
"epoch": 6.875,
"grad_norm": 6.478920936584473,
"learning_rate": 4.7317460317460325e-05,
"loss": 0.8262,
"step": 440
},
{
"epoch": 7.03125,
"grad_norm": 4.837390422821045,
"learning_rate": 4.723809523809524e-05,
"loss": 0.7218,
"step": 450
},
{
"epoch": 7.1875,
"grad_norm": 5.8777289390563965,
"learning_rate": 4.715873015873016e-05,
"loss": 0.5238,
"step": 460
},
{
"epoch": 7.34375,
"grad_norm": 4.988452434539795,
"learning_rate": 4.707936507936508e-05,
"loss": 0.5285,
"step": 470
},
{
"epoch": 7.5,
"grad_norm": 5.605051517486572,
"learning_rate": 4.7e-05,
"loss": 0.5368,
"step": 480
},
{
"epoch": 7.65625,
"grad_norm": 7.234042644500732,
"learning_rate": 4.692063492063492e-05,
"loss": 0.5803,
"step": 490
},
{
"epoch": 7.8125,
"grad_norm": 6.273004055023193,
"learning_rate": 4.684126984126984e-05,
"loss": 0.519,
"step": 500
},
{
"epoch": 7.8125,
"eval_runtime": 2.3062,
"eval_samples_per_second": 0.434,
"eval_steps_per_second": 0.434,
"step": 500
},
{
"epoch": 7.96875,
"grad_norm": 4.803182125091553,
"learning_rate": 4.676190476190476e-05,
"loss": 0.5611,
"step": 510
},
{
"epoch": 8.125,
"grad_norm": 4.977607727050781,
"learning_rate": 4.668253968253968e-05,
"loss": 0.3616,
"step": 520
},
{
"epoch": 8.28125,
"grad_norm": 4.4240336418151855,
"learning_rate": 4.6603174603174605e-05,
"loss": 0.4303,
"step": 530
},
{
"epoch": 8.4375,
"grad_norm": 5.406126022338867,
"learning_rate": 4.6523809523809526e-05,
"loss": 0.384,
"step": 540
},
{
"epoch": 8.59375,
"grad_norm": 4.34033203125,
"learning_rate": 4.644444444444445e-05,
"loss": 0.3784,
"step": 550
},
{
"epoch": 8.75,
"grad_norm": 5.256556034088135,
"learning_rate": 4.636507936507937e-05,
"loss": 0.4345,
"step": 560
},
{
"epoch": 8.90625,
"grad_norm": 5.054710388183594,
"learning_rate": 4.628571428571429e-05,
"loss": 0.4257,
"step": 570
},
{
"epoch": 9.0625,
"grad_norm": 5.220774173736572,
"learning_rate": 4.6206349206349205e-05,
"loss": 0.3146,
"step": 580
},
{
"epoch": 9.21875,
"grad_norm": 4.420045375823975,
"learning_rate": 4.612698412698413e-05,
"loss": 0.298,
"step": 590
},
{
"epoch": 9.375,
"grad_norm": 4.474491596221924,
"learning_rate": 4.604761904761905e-05,
"loss": 0.3023,
"step": 600
},
{
"epoch": 9.53125,
"grad_norm": 4.131107807159424,
"learning_rate": 4.596825396825397e-05,
"loss": 0.2846,
"step": 610
},
{
"epoch": 9.6875,
"grad_norm": 4.535623550415039,
"learning_rate": 4.588888888888889e-05,
"loss": 0.3244,
"step": 620
},
{
"epoch": 9.84375,
"grad_norm": 4.365594387054443,
"learning_rate": 4.580952380952381e-05,
"loss": 0.2925,
"step": 630
},
{
"epoch": 10.0,
"grad_norm": 7.354922294616699,
"learning_rate": 4.5730158730158734e-05,
"loss": 0.3234,
"step": 640
},
{
"epoch": 10.15625,
"grad_norm": 3.916835308074951,
"learning_rate": 4.5650793650793656e-05,
"loss": 0.236,
"step": 650
},
{
"epoch": 10.3125,
"grad_norm": 4.322419166564941,
"learning_rate": 4.557142857142858e-05,
"loss": 0.2353,
"step": 660
},
{
"epoch": 10.46875,
"grad_norm": 5.4846038818359375,
"learning_rate": 4.54920634920635e-05,
"loss": 0.2422,
"step": 670
},
{
"epoch": 10.625,
"grad_norm": 4.341657638549805,
"learning_rate": 4.5412698412698414e-05,
"loss": 0.2592,
"step": 680
},
{
"epoch": 10.78125,
"grad_norm": 4.786070346832275,
"learning_rate": 4.5333333333333335e-05,
"loss": 0.2501,
"step": 690
},
{
"epoch": 10.9375,
"grad_norm": 3.9264745712280273,
"learning_rate": 4.525396825396826e-05,
"loss": 0.2683,
"step": 700
},
{
"epoch": 11.09375,
"grad_norm": 3.851856231689453,
"learning_rate": 4.517460317460318e-05,
"loss": 0.1978,
"step": 710
},
{
"epoch": 11.25,
"grad_norm": 4.643581390380859,
"learning_rate": 4.509523809523809e-05,
"loss": 0.1841,
"step": 720
},
{
"epoch": 11.40625,
"grad_norm": 4.105888843536377,
"learning_rate": 4.5015873015873014e-05,
"loss": 0.1976,
"step": 730
},
{
"epoch": 11.5625,
"grad_norm": 3.809528350830078,
"learning_rate": 4.4936507936507936e-05,
"loss": 0.1896,
"step": 740
},
{
"epoch": 11.71875,
"grad_norm": 4.457248687744141,
"learning_rate": 4.485714285714286e-05,
"loss": 0.2125,
"step": 750
},
{
"epoch": 11.875,
"grad_norm": 3.7699806690216064,
"learning_rate": 4.477777777777778e-05,
"loss": 0.2212,
"step": 760
},
{
"epoch": 12.03125,
"grad_norm": 4.643237590789795,
"learning_rate": 4.46984126984127e-05,
"loss": 0.2235,
"step": 770
},
{
"epoch": 12.1875,
"grad_norm": 4.064443588256836,
"learning_rate": 4.461904761904762e-05,
"loss": 0.1503,
"step": 780
},
{
"epoch": 12.34375,
"grad_norm": 4.605109691619873,
"learning_rate": 4.4539682539682543e-05,
"loss": 0.172,
"step": 790
},
{
"epoch": 12.5,
"grad_norm": 3.8005576133728027,
"learning_rate": 4.4460317460317465e-05,
"loss": 0.1713,
"step": 800
},
{
"epoch": 12.65625,
"grad_norm": 4.829875469207764,
"learning_rate": 4.4380952380952386e-05,
"loss": 0.1783,
"step": 810
},
{
"epoch": 12.8125,
"grad_norm": 4.428802490234375,
"learning_rate": 4.430158730158731e-05,
"loss": 0.1742,
"step": 820
},
{
"epoch": 12.96875,
"grad_norm": 5.068906307220459,
"learning_rate": 4.422222222222222e-05,
"loss": 0.1797,
"step": 830
},
{
"epoch": 13.125,
"grad_norm": 3.3268795013427734,
"learning_rate": 4.4142857142857144e-05,
"loss": 0.1389,
"step": 840
},
{
"epoch": 13.28125,
"grad_norm": 3.312582015991211,
"learning_rate": 4.4063492063492066e-05,
"loss": 0.1191,
"step": 850
},
{
"epoch": 13.4375,
"grad_norm": 3.7299697399139404,
"learning_rate": 4.398412698412699e-05,
"loss": 0.1432,
"step": 860
},
{
"epoch": 13.59375,
"grad_norm": 4.627827167510986,
"learning_rate": 4.39047619047619e-05,
"loss": 0.1672,
"step": 870
},
{
"epoch": 13.75,
"grad_norm": 4.645421981811523,
"learning_rate": 4.3825396825396823e-05,
"loss": 0.1503,
"step": 880
},
{
"epoch": 13.90625,
"grad_norm": 5.321810245513916,
"learning_rate": 4.3746031746031745e-05,
"loss": 0.1486,
"step": 890
},
{
"epoch": 14.0625,
"grad_norm": 2.487105369567871,
"learning_rate": 4.3666666666666666e-05,
"loss": 0.1193,
"step": 900
},
{
"epoch": 14.21875,
"grad_norm": 4.098151683807373,
"learning_rate": 4.358730158730159e-05,
"loss": 0.1182,
"step": 910
},
{
"epoch": 14.375,
"grad_norm": 3.2538163661956787,
"learning_rate": 4.350793650793651e-05,
"loss": 0.1145,
"step": 920
},
{
"epoch": 14.53125,
"grad_norm": 3.7881181240081787,
"learning_rate": 4.342857142857143e-05,
"loss": 0.1255,
"step": 930
},
{
"epoch": 14.6875,
"grad_norm": 2.861689567565918,
"learning_rate": 4.334920634920635e-05,
"loss": 0.144,
"step": 940
},
{
"epoch": 14.84375,
"grad_norm": 3.1621310710906982,
"learning_rate": 4.3269841269841274e-05,
"loss": 0.1293,
"step": 950
},
{
"epoch": 15.0,
"grad_norm": 11.439355850219727,
"learning_rate": 4.3190476190476195e-05,
"loss": 0.1553,
"step": 960
},
{
"epoch": 15.15625,
"grad_norm": 3.6791841983795166,
"learning_rate": 4.311111111111111e-05,
"loss": 0.0949,
"step": 970
},
{
"epoch": 15.3125,
"grad_norm": 2.71702241897583,
"learning_rate": 4.303174603174603e-05,
"loss": 0.1125,
"step": 980
},
{
"epoch": 15.46875,
"grad_norm": 2.713571786880493,
"learning_rate": 4.295238095238095e-05,
"loss": 0.137,
"step": 990
},
{
"epoch": 15.625,
"grad_norm": 2.975768804550171,
"learning_rate": 4.2873015873015875e-05,
"loss": 0.108,
"step": 1000
},
{
"epoch": 15.625,
"eval_runtime": 2.2622,
"eval_samples_per_second": 0.442,
"eval_steps_per_second": 0.442,
"step": 1000
},
{
"epoch": 15.78125,
"grad_norm": 3.5744853019714355,
"learning_rate": 4.2793650793650796e-05,
"loss": 0.1207,
"step": 1010
},
{
"epoch": 15.9375,
"grad_norm": 1.8845595121383667,
"learning_rate": 4.271428571428572e-05,
"loss": 0.1013,
"step": 1020
},
{
"epoch": 16.09375,
"grad_norm": 4.227961540222168,
"learning_rate": 4.263492063492064e-05,
"loss": 0.112,
"step": 1030
},
{
"epoch": 16.25,
"grad_norm": 3.289071798324585,
"learning_rate": 4.255555555555556e-05,
"loss": 0.1054,
"step": 1040
},
{
"epoch": 16.40625,
"grad_norm": 1.9135278463363647,
"learning_rate": 4.247619047619048e-05,
"loss": 0.0933,
"step": 1050
},
{
"epoch": 16.5625,
"grad_norm": 2.586151599884033,
"learning_rate": 4.2396825396825404e-05,
"loss": 0.1205,
"step": 1060
},
{
"epoch": 16.71875,
"grad_norm": 3.5681819915771484,
"learning_rate": 4.231746031746032e-05,
"loss": 0.1015,
"step": 1070
},
{
"epoch": 16.875,
"grad_norm": 3.7675039768218994,
"learning_rate": 4.223809523809524e-05,
"loss": 0.128,
"step": 1080
},
{
"epoch": 17.03125,
"grad_norm": 2.631277322769165,
"learning_rate": 4.215873015873016e-05,
"loss": 0.107,
"step": 1090
},
{
"epoch": 17.1875,
"grad_norm": 3.59251070022583,
"learning_rate": 4.2079365079365076e-05,
"loss": 0.0918,
"step": 1100
},
{
"epoch": 17.34375,
"grad_norm": 3.785374641418457,
"learning_rate": 4.2e-05,
"loss": 0.1031,
"step": 1110
},
{
"epoch": 17.5,
"grad_norm": 3.6201488971710205,
"learning_rate": 4.192063492063492e-05,
"loss": 0.0965,
"step": 1120
},
{
"epoch": 17.65625,
"grad_norm": 3.608516216278076,
"learning_rate": 4.184126984126984e-05,
"loss": 0.1046,
"step": 1130
},
{
"epoch": 17.8125,
"grad_norm": 3.3631608486175537,
"learning_rate": 4.176190476190476e-05,
"loss": 0.1047,
"step": 1140
},
{
"epoch": 17.96875,
"grad_norm": 2.2599599361419678,
"learning_rate": 4.1682539682539684e-05,
"loss": 0.0976,
"step": 1150
},
{
"epoch": 18.125,
"grad_norm": 3.3925111293792725,
"learning_rate": 4.1603174603174605e-05,
"loss": 0.081,
"step": 1160
},
{
"epoch": 18.28125,
"grad_norm": 2.5443062782287598,
"learning_rate": 4.152380952380953e-05,
"loss": 0.0917,
"step": 1170
},
{
"epoch": 18.4375,
"grad_norm": 4.180214881896973,
"learning_rate": 4.144444444444445e-05,
"loss": 0.0913,
"step": 1180
},
{
"epoch": 18.59375,
"grad_norm": 2.3229408264160156,
"learning_rate": 4.136507936507937e-05,
"loss": 0.081,
"step": 1190
},
{
"epoch": 18.75,
"grad_norm": 3.885908603668213,
"learning_rate": 4.128571428571429e-05,
"loss": 0.1078,
"step": 1200
},
{
"epoch": 18.90625,
"grad_norm": 3.1222493648529053,
"learning_rate": 4.120634920634921e-05,
"loss": 0.1177,
"step": 1210
},
{
"epoch": 19.0625,
"grad_norm": 3.3118879795074463,
"learning_rate": 4.112698412698413e-05,
"loss": 0.0775,
"step": 1220
},
{
"epoch": 19.21875,
"grad_norm": 1.8756282329559326,
"learning_rate": 4.104761904761905e-05,
"loss": 0.0906,
"step": 1230
},
{
"epoch": 19.375,
"grad_norm": 3.6675071716308594,
"learning_rate": 4.096825396825397e-05,
"loss": 0.0938,
"step": 1240
},
{
"epoch": 19.53125,
"grad_norm": 2.8696744441986084,
"learning_rate": 4.088888888888889e-05,
"loss": 0.0821,
"step": 1250
},
{
"epoch": 19.6875,
"grad_norm": 3.246438980102539,
"learning_rate": 4.0809523809523813e-05,
"loss": 0.1216,
"step": 1260
},
{
"epoch": 19.84375,
"grad_norm": 2.579602003097534,
"learning_rate": 4.073015873015873e-05,
"loss": 0.0705,
"step": 1270
},
{
"epoch": 20.0,
"grad_norm": 14.062989234924316,
"learning_rate": 4.065079365079365e-05,
"loss": 0.1125,
"step": 1280
},
{
"epoch": 20.15625,
"grad_norm": 2.5907585620880127,
"learning_rate": 4.057142857142857e-05,
"loss": 0.0764,
"step": 1290
},
{
"epoch": 20.3125,
"grad_norm": 4.850607872009277,
"learning_rate": 4.049206349206349e-05,
"loss": 0.0882,
"step": 1300
},
{
"epoch": 20.46875,
"grad_norm": 2.3619115352630615,
"learning_rate": 4.0412698412698414e-05,
"loss": 0.0833,
"step": 1310
},
{
"epoch": 20.625,
"grad_norm": 3.1803815364837646,
"learning_rate": 4.0333333333333336e-05,
"loss": 0.0894,
"step": 1320
},
{
"epoch": 20.78125,
"grad_norm": 15.570907592773438,
"learning_rate": 4.025396825396826e-05,
"loss": 0.2225,
"step": 1330
},
{
"epoch": 20.9375,
"grad_norm": 2.721440076828003,
"learning_rate": 4.018253968253968e-05,
"loss": 0.1233,
"step": 1340
},
{
"epoch": 21.09375,
"grad_norm": 1.798134446144104,
"learning_rate": 4.01031746031746e-05,
"loss": 0.067,
"step": 1350
},
{
"epoch": 21.25,
"grad_norm": 3.387782335281372,
"learning_rate": 4.0023809523809524e-05,
"loss": 0.0692,
"step": 1360
},
{
"epoch": 21.40625,
"grad_norm": 2.723196029663086,
"learning_rate": 3.9944444444444446e-05,
"loss": 0.0683,
"step": 1370
},
{
"epoch": 21.5625,
"grad_norm": 3.666444778442383,
"learning_rate": 3.986507936507937e-05,
"loss": 0.0883,
"step": 1380
},
{
"epoch": 21.71875,
"grad_norm": 1.9571526050567627,
"learning_rate": 3.978571428571429e-05,
"loss": 0.0802,
"step": 1390
},
{
"epoch": 21.875,
"grad_norm": 2.5271904468536377,
"learning_rate": 3.970634920634921e-05,
"loss": 0.0737,
"step": 1400
},
{
"epoch": 22.03125,
"grad_norm": 3.5426900386810303,
"learning_rate": 3.962698412698413e-05,
"loss": 0.0838,
"step": 1410
},
{
"epoch": 22.1875,
"grad_norm": 1.7901580333709717,
"learning_rate": 3.954761904761905e-05,
"loss": 0.0671,
"step": 1420
},
{
"epoch": 22.34375,
"grad_norm": 3.4260764122009277,
"learning_rate": 3.946825396825397e-05,
"loss": 0.0775,
"step": 1430
},
{
"epoch": 22.5,
"grad_norm": 2.499107837677002,
"learning_rate": 3.938888888888889e-05,
"loss": 0.0826,
"step": 1440
},
{
"epoch": 22.65625,
"grad_norm": 2.7331862449645996,
"learning_rate": 3.930952380952381e-05,
"loss": 0.0771,
"step": 1450
},
{
"epoch": 22.8125,
"grad_norm": 3.2004685401916504,
"learning_rate": 3.923015873015873e-05,
"loss": 0.088,
"step": 1460
},
{
"epoch": 22.96875,
"grad_norm": 3.175179958343506,
"learning_rate": 3.9150793650793654e-05,
"loss": 0.0861,
"step": 1470
},
{
"epoch": 23.125,
"grad_norm": 2.032646417617798,
"learning_rate": 3.9071428571428575e-05,
"loss": 0.0682,
"step": 1480
},
{
"epoch": 23.28125,
"grad_norm": 1.5635634660720825,
"learning_rate": 3.89920634920635e-05,
"loss": 0.0833,
"step": 1490
},
{
"epoch": 23.4375,
"grad_norm": 1.8121321201324463,
"learning_rate": 3.891269841269842e-05,
"loss": 0.0965,
"step": 1500
},
{
"epoch": 23.4375,
"eval_runtime": 2.0794,
"eval_samples_per_second": 0.481,
"eval_steps_per_second": 0.481,
"step": 1500
},
{
"epoch": 23.59375,
"grad_norm": 1.8793394565582275,
"learning_rate": 3.883333333333333e-05,
"loss": 0.0887,
"step": 1510
},
{
"epoch": 23.75,
"grad_norm": 2.1231632232666016,
"learning_rate": 3.8753968253968255e-05,
"loss": 0.0879,
"step": 1520
},
{
"epoch": 23.90625,
"grad_norm": 3.5764803886413574,
"learning_rate": 3.8674603174603176e-05,
"loss": 0.0861,
"step": 1530
},
{
"epoch": 24.0625,
"grad_norm": 2.105710029602051,
"learning_rate": 3.85952380952381e-05,
"loss": 0.07,
"step": 1540
},
{
"epoch": 24.21875,
"grad_norm": 2.8722870349884033,
"learning_rate": 3.851587301587302e-05,
"loss": 0.076,
"step": 1550
},
{
"epoch": 24.375,
"grad_norm": 2.6126277446746826,
"learning_rate": 3.843650793650794e-05,
"loss": 0.0983,
"step": 1560
},
{
"epoch": 24.53125,
"grad_norm": 2.2635769844055176,
"learning_rate": 3.8357142857142855e-05,
"loss": 0.0864,
"step": 1570
},
{
"epoch": 24.6875,
"grad_norm": 2.11098313331604,
"learning_rate": 3.827777777777778e-05,
"loss": 0.0923,
"step": 1580
},
{
"epoch": 24.84375,
"grad_norm": 1.6163533926010132,
"learning_rate": 3.81984126984127e-05,
"loss": 0.0711,
"step": 1590
},
{
"epoch": 25.0,
"grad_norm": 2.54638671875,
"learning_rate": 3.811904761904762e-05,
"loss": 0.0705,
"step": 1600
},
{
"epoch": 25.15625,
"grad_norm": 2.592470407485962,
"learning_rate": 3.803968253968254e-05,
"loss": 0.0765,
"step": 1610
},
{
"epoch": 25.3125,
"grad_norm": 2.1981208324432373,
"learning_rate": 3.796031746031746e-05,
"loss": 0.0856,
"step": 1620
},
{
"epoch": 25.46875,
"grad_norm": 1.384098768234253,
"learning_rate": 3.7880952380952384e-05,
"loss": 0.0951,
"step": 1630
},
{
"epoch": 25.625,
"grad_norm": 1.502350091934204,
"learning_rate": 3.7801587301587306e-05,
"loss": 0.0858,
"step": 1640
},
{
"epoch": 25.78125,
"grad_norm": 1.4763522148132324,
"learning_rate": 3.772222222222223e-05,
"loss": 0.0647,
"step": 1650
},
{
"epoch": 25.9375,
"grad_norm": 1.7555052042007446,
"learning_rate": 3.764285714285715e-05,
"loss": 0.0874,
"step": 1660
},
{
"epoch": 26.09375,
"grad_norm": 1.0187015533447266,
"learning_rate": 3.756349206349207e-05,
"loss": 0.0635,
"step": 1670
},
{
"epoch": 26.25,
"grad_norm": 1.7138936519622803,
"learning_rate": 3.7484126984126985e-05,
"loss": 0.0772,
"step": 1680
},
{
"epoch": 26.40625,
"grad_norm": 2.3353724479675293,
"learning_rate": 3.7404761904761907e-05,
"loss": 0.0892,
"step": 1690
},
{
"epoch": 26.5625,
"grad_norm": 2.6141700744628906,
"learning_rate": 3.732539682539682e-05,
"loss": 0.0897,
"step": 1700
},
{
"epoch": 26.71875,
"grad_norm": 1.7785848379135132,
"learning_rate": 3.724603174603174e-05,
"loss": 0.0899,
"step": 1710
},
{
"epoch": 26.875,
"grad_norm": 2.6693010330200195,
"learning_rate": 3.7166666666666664e-05,
"loss": 0.0901,
"step": 1720
},
{
"epoch": 27.03125,
"grad_norm": 2.059981346130371,
"learning_rate": 3.7087301587301586e-05,
"loss": 0.0822,
"step": 1730
},
{
"epoch": 27.1875,
"grad_norm": 1.6238901615142822,
"learning_rate": 3.700793650793651e-05,
"loss": 0.0777,
"step": 1740
},
{
"epoch": 27.34375,
"grad_norm": 2.782425880432129,
"learning_rate": 3.692857142857143e-05,
"loss": 0.0782,
"step": 1750
},
{
"epoch": 27.5,
"grad_norm": 1.8468166589736938,
"learning_rate": 3.684920634920635e-05,
"loss": 0.0835,
"step": 1760
},
{
"epoch": 27.65625,
"grad_norm": 1.3156135082244873,
"learning_rate": 3.676984126984127e-05,
"loss": 0.1072,
"step": 1770
},
{
"epoch": 27.8125,
"grad_norm": 3.260084390640259,
"learning_rate": 3.669047619047619e-05,
"loss": 0.0939,
"step": 1780
},
{
"epoch": 27.96875,
"grad_norm": 2.518204689025879,
"learning_rate": 3.6611111111111115e-05,
"loss": 0.0884,
"step": 1790
},
{
"epoch": 28.125,
"grad_norm": 2.598057985305786,
"learning_rate": 3.6531746031746036e-05,
"loss": 0.0683,
"step": 1800
},
{
"epoch": 28.28125,
"grad_norm": 1.8533433675765991,
"learning_rate": 3.645238095238096e-05,
"loss": 0.0833,
"step": 1810
},
{
"epoch": 28.4375,
"grad_norm": 1.2828975915908813,
"learning_rate": 3.637301587301587e-05,
"loss": 0.0832,
"step": 1820
},
{
"epoch": 28.59375,
"grad_norm": 1.7714905738830566,
"learning_rate": 3.6293650793650794e-05,
"loss": 0.0901,
"step": 1830
},
{
"epoch": 28.75,
"grad_norm": 2.098923921585083,
"learning_rate": 3.6214285714285716e-05,
"loss": 0.0954,
"step": 1840
},
{
"epoch": 28.90625,
"grad_norm": 2.298226833343506,
"learning_rate": 3.613492063492064e-05,
"loss": 0.0888,
"step": 1850
},
{
"epoch": 29.0625,
"grad_norm": 1.5519624948501587,
"learning_rate": 3.605555555555556e-05,
"loss": 0.0664,
"step": 1860
},
{
"epoch": 29.21875,
"grad_norm": 2.015573501586914,
"learning_rate": 3.597619047619048e-05,
"loss": 0.0637,
"step": 1870
},
{
"epoch": 29.375,
"grad_norm": 1.925529956817627,
"learning_rate": 3.58968253968254e-05,
"loss": 0.0803,
"step": 1880
},
{
"epoch": 29.53125,
"grad_norm": 2.4342522621154785,
"learning_rate": 3.581746031746032e-05,
"loss": 0.0911,
"step": 1890
},
{
"epoch": 29.6875,
"grad_norm": 1.8124195337295532,
"learning_rate": 3.573809523809524e-05,
"loss": 0.0803,
"step": 1900
},
{
"epoch": 29.84375,
"grad_norm": 2.3409860134124756,
"learning_rate": 3.565873015873016e-05,
"loss": 0.0943,
"step": 1910
},
{
"epoch": 30.0,
"grad_norm": 1.2545162439346313,
"learning_rate": 3.557936507936508e-05,
"loss": 0.0853,
"step": 1920
},
{
"epoch": 30.15625,
"grad_norm": 1.9356091022491455,
"learning_rate": 3.55e-05,
"loss": 0.0585,
"step": 1930
},
{
"epoch": 30.3125,
"grad_norm": 1.6294385194778442,
"learning_rate": 3.5420634920634924e-05,
"loss": 0.0876,
"step": 1940
},
{
"epoch": 30.46875,
"grad_norm": 2.081688165664673,
"learning_rate": 3.534126984126984e-05,
"loss": 0.0827,
"step": 1950
},
{
"epoch": 30.625,
"grad_norm": 1.7468382120132446,
"learning_rate": 3.526190476190476e-05,
"loss": 0.0732,
"step": 1960
},
{
"epoch": 30.78125,
"grad_norm": 2.1742124557495117,
"learning_rate": 3.518253968253968e-05,
"loss": 0.084,
"step": 1970
},
{
"epoch": 30.9375,
"grad_norm": 2.147754430770874,
"learning_rate": 3.51031746031746e-05,
"loss": 0.0856,
"step": 1980
},
{
"epoch": 31.09375,
"grad_norm": 2.1251964569091797,
"learning_rate": 3.5023809523809525e-05,
"loss": 0.0762,
"step": 1990
},
{
"epoch": 31.25,
"grad_norm": 2.6378941535949707,
"learning_rate": 3.4944444444444446e-05,
"loss": 0.071,
"step": 2000
},
{
"epoch": 31.25,
"eval_runtime": 2.0743,
"eval_samples_per_second": 0.482,
"eval_steps_per_second": 0.482,
"step": 2000
},
{
"epoch": 31.40625,
"grad_norm": 2.126807689666748,
"learning_rate": 3.486507936507937e-05,
"loss": 0.0785,
"step": 2010
},
{
"epoch": 31.5625,
"grad_norm": 2.4585835933685303,
"learning_rate": 3.478571428571429e-05,
"loss": 0.1053,
"step": 2020
},
{
"epoch": 31.71875,
"grad_norm": 1.9649542570114136,
"learning_rate": 3.470634920634921e-05,
"loss": 0.0734,
"step": 2030
},
{
"epoch": 31.875,
"grad_norm": 2.3006460666656494,
"learning_rate": 3.462698412698413e-05,
"loss": 0.0858,
"step": 2040
},
{
"epoch": 32.03125,
"grad_norm": 2.163447856903076,
"learning_rate": 3.4547619047619054e-05,
"loss": 0.0826,
"step": 2050
},
{
"epoch": 32.1875,
"grad_norm": 2.0441508293151855,
"learning_rate": 3.4468253968253975e-05,
"loss": 0.0712,
"step": 2060
},
{
"epoch": 32.34375,
"grad_norm": 1.5795445442199707,
"learning_rate": 3.438888888888889e-05,
"loss": 0.056,
"step": 2070
},
{
"epoch": 32.5,
"grad_norm": 3.5661540031433105,
"learning_rate": 3.430952380952381e-05,
"loss": 0.069,
"step": 2080
},
{
"epoch": 32.65625,
"grad_norm": 1.6884055137634277,
"learning_rate": 3.423015873015873e-05,
"loss": 0.0899,
"step": 2090
},
{
"epoch": 32.8125,
"grad_norm": 2.421724557876587,
"learning_rate": 3.415079365079365e-05,
"loss": 0.0768,
"step": 2100
},
{
"epoch": 32.96875,
"grad_norm": 1.1584899425506592,
"learning_rate": 3.407142857142857e-05,
"loss": 0.0763,
"step": 2110
},
{
"epoch": 33.125,
"grad_norm": 2.069801092147827,
"learning_rate": 3.399206349206349e-05,
"loss": 0.0636,
"step": 2120
},
{
"epoch": 33.28125,
"grad_norm": 2.5826573371887207,
"learning_rate": 3.391269841269841e-05,
"loss": 0.0676,
"step": 2130
},
{
"epoch": 33.4375,
"grad_norm": 1.119449496269226,
"learning_rate": 3.3833333333333334e-05,
"loss": 0.0613,
"step": 2140
},
{
"epoch": 33.59375,
"grad_norm": 1.6656103134155273,
"learning_rate": 3.3753968253968255e-05,
"loss": 0.0598,
"step": 2150
},
{
"epoch": 33.75,
"grad_norm": 1.7956265211105347,
"learning_rate": 3.3674603174603177e-05,
"loss": 0.0778,
"step": 2160
},
{
"epoch": 33.90625,
"grad_norm": 2.2106685638427734,
"learning_rate": 3.35952380952381e-05,
"loss": 0.0737,
"step": 2170
},
{
"epoch": 34.0625,
"grad_norm": 2.4478724002838135,
"learning_rate": 3.351587301587302e-05,
"loss": 0.0642,
"step": 2180
},
{
"epoch": 34.21875,
"grad_norm": 2.3334341049194336,
"learning_rate": 3.343650793650794e-05,
"loss": 0.0697,
"step": 2190
},
{
"epoch": 34.375,
"grad_norm": 1.870275855064392,
"learning_rate": 3.3357142857142856e-05,
"loss": 0.0523,
"step": 2200
},
{
"epoch": 34.53125,
"grad_norm": 1.283444881439209,
"learning_rate": 3.327777777777778e-05,
"loss": 0.0619,
"step": 2210
},
{
"epoch": 34.6875,
"grad_norm": 1.7918671369552612,
"learning_rate": 3.31984126984127e-05,
"loss": 0.061,
"step": 2220
},
{
"epoch": 34.84375,
"grad_norm": 1.6546680927276611,
"learning_rate": 3.311904761904762e-05,
"loss": 0.058,
"step": 2230
},
{
"epoch": 35.0,
"grad_norm": 3.8063647747039795,
"learning_rate": 3.303968253968254e-05,
"loss": 0.0605,
"step": 2240
},
{
"epoch": 35.15625,
"grad_norm": 1.8701483011245728,
"learning_rate": 3.296031746031746e-05,
"loss": 0.0652,
"step": 2250
},
{
"epoch": 35.3125,
"grad_norm": 0.48190346360206604,
"learning_rate": 3.2880952380952385e-05,
"loss": 0.0443,
"step": 2260
},
{
"epoch": 35.46875,
"grad_norm": 1.0227997303009033,
"learning_rate": 3.2801587301587306e-05,
"loss": 0.0397,
"step": 2270
},
{
"epoch": 35.625,
"grad_norm": 1.4256937503814697,
"learning_rate": 3.272222222222223e-05,
"loss": 0.0541,
"step": 2280
},
{
"epoch": 35.78125,
"grad_norm": 2.3451199531555176,
"learning_rate": 3.264285714285714e-05,
"loss": 0.0568,
"step": 2290
},
{
"epoch": 35.9375,
"grad_norm": 1.3683526515960693,
"learning_rate": 3.2563492063492064e-05,
"loss": 0.0587,
"step": 2300
},
{
"epoch": 36.09375,
"grad_norm": 1.4543867111206055,
"learning_rate": 3.2484126984126986e-05,
"loss": 0.0394,
"step": 2310
},
{
"epoch": 36.25,
"grad_norm": 1.9606877565383911,
"learning_rate": 3.240476190476191e-05,
"loss": 0.0545,
"step": 2320
},
{
"epoch": 36.40625,
"grad_norm": 1.6910959482192993,
"learning_rate": 3.232539682539683e-05,
"loss": 0.0638,
"step": 2330
},
{
"epoch": 36.5625,
"grad_norm": 1.735841155052185,
"learning_rate": 3.224603174603174e-05,
"loss": 0.0569,
"step": 2340
},
{
"epoch": 36.71875,
"grad_norm": 1.5598944425582886,
"learning_rate": 3.2166666666666665e-05,
"loss": 0.0649,
"step": 2350
},
{
"epoch": 36.875,
"grad_norm": 1.5700335502624512,
"learning_rate": 3.2087301587301586e-05,
"loss": 0.0689,
"step": 2360
},
{
"epoch": 37.03125,
"grad_norm": 1.0767812728881836,
"learning_rate": 3.200793650793651e-05,
"loss": 0.0535,
"step": 2370
},
{
"epoch": 37.1875,
"grad_norm": 2.505707263946533,
"learning_rate": 3.192857142857143e-05,
"loss": 0.0486,
"step": 2380
},
{
"epoch": 37.34375,
"grad_norm": 2.1414477825164795,
"learning_rate": 3.184920634920635e-05,
"loss": 0.0462,
"step": 2390
},
{
"epoch": 37.5,
"grad_norm": 1.0493581295013428,
"learning_rate": 3.176984126984127e-05,
"loss": 0.0515,
"step": 2400
},
{
"epoch": 37.65625,
"grad_norm": 1.5255446434020996,
"learning_rate": 3.1690476190476194e-05,
"loss": 0.0485,
"step": 2410
},
{
"epoch": 37.8125,
"grad_norm": 1.3334754705429077,
"learning_rate": 3.1611111111111115e-05,
"loss": 0.056,
"step": 2420
},
{
"epoch": 37.96875,
"grad_norm": 1.745110273361206,
"learning_rate": 3.153174603174604e-05,
"loss": 0.0473,
"step": 2430
},
{
"epoch": 38.125,
"grad_norm": 1.4413131475448608,
"learning_rate": 3.145238095238096e-05,
"loss": 0.0317,
"step": 2440
},
{
"epoch": 38.28125,
"grad_norm": 1.6811962127685547,
"learning_rate": 3.137301587301587e-05,
"loss": 0.0496,
"step": 2450
},
{
"epoch": 38.4375,
"grad_norm": 1.2820957899093628,
"learning_rate": 3.1293650793650795e-05,
"loss": 0.0369,
"step": 2460
},
{
"epoch": 38.59375,
"grad_norm": 1.7850005626678467,
"learning_rate": 3.1214285714285716e-05,
"loss": 0.0478,
"step": 2470
},
{
"epoch": 38.75,
"grad_norm": 2.456017255783081,
"learning_rate": 3.113492063492064e-05,
"loss": 0.0507,
"step": 2480
},
{
"epoch": 38.90625,
"grad_norm": 2.1933865547180176,
"learning_rate": 3.105555555555555e-05,
"loss": 0.0436,
"step": 2490
},
{
"epoch": 39.0625,
"grad_norm": 0.9716876745223999,
"learning_rate": 3.0976190476190474e-05,
"loss": 0.0439,
"step": 2500
},
{
"epoch": 39.0625,
"eval_runtime": 2.0072,
"eval_samples_per_second": 0.498,
"eval_steps_per_second": 0.498,
"step": 2500
},
{
"epoch": 39.21875,
"grad_norm": 2.0628113746643066,
"learning_rate": 3.0896825396825395e-05,
"loss": 0.0408,
"step": 2510
},
{
"epoch": 39.375,
"grad_norm": 1.5137745141983032,
"learning_rate": 3.081746031746032e-05,
"loss": 0.0445,
"step": 2520
},
{
"epoch": 39.53125,
"grad_norm": 1.6775884628295898,
"learning_rate": 3.073809523809524e-05,
"loss": 0.051,
"step": 2530
},
{
"epoch": 39.6875,
"grad_norm": 1.440619707107544,
"learning_rate": 3.065873015873016e-05,
"loss": 0.0387,
"step": 2540
},
{
"epoch": 39.84375,
"grad_norm": 2.1149260997772217,
"learning_rate": 3.057936507936508e-05,
"loss": 0.0387,
"step": 2550
},
{
"epoch": 40.0,
"grad_norm": 2.163191318511963,
"learning_rate": 3.05e-05,
"loss": 0.0363,
"step": 2560
},
{
"epoch": 40.15625,
"grad_norm": 3.246634006500244,
"learning_rate": 3.042063492063492e-05,
"loss": 0.0429,
"step": 2570
},
{
"epoch": 40.3125,
"grad_norm": 1.188644289970398,
"learning_rate": 3.0341269841269842e-05,
"loss": 0.0316,
"step": 2580
},
{
"epoch": 40.46875,
"grad_norm": 2.2617383003234863,
"learning_rate": 3.0261904761904764e-05,
"loss": 0.0406,
"step": 2590
},
{
"epoch": 40.625,
"grad_norm": 0.6037064790725708,
"learning_rate": 3.0182539682539685e-05,
"loss": 0.031,
"step": 2600
},
{
"epoch": 40.78125,
"grad_norm": 1.1936763525009155,
"learning_rate": 3.0103174603174607e-05,
"loss": 0.0401,
"step": 2610
},
{
"epoch": 40.9375,
"grad_norm": 3.0310215950012207,
"learning_rate": 3.0023809523809525e-05,
"loss": 0.0339,
"step": 2620
},
{
"epoch": 41.09375,
"grad_norm": 0.6716585755348206,
"learning_rate": 2.9944444444444446e-05,
"loss": 0.0338,
"step": 2630
},
{
"epoch": 41.25,
"grad_norm": 0.5010519623756409,
"learning_rate": 2.9865079365079368e-05,
"loss": 0.0315,
"step": 2640
}
],
"logging_steps": 10,
"max_steps": 6400,
"num_input_tokens_seen": 0,
"num_train_epochs": 100,
"save_steps": 115,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}