{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.0,
"eval_steps": 500,
"global_step": 2120,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.009433962264150943,
"grad_norm": 2.1125893592834473,
"learning_rate": 1.9915094339622643e-05,
"loss": 0.6661,
"step": 10
},
{
"epoch": 0.018867924528301886,
"grad_norm": 4.568980693817139,
"learning_rate": 1.9820754716981134e-05,
"loss": 0.6476,
"step": 20
},
{
"epoch": 0.02830188679245283,
"grad_norm": 4.55064582824707,
"learning_rate": 1.9726415094339625e-05,
"loss": 0.609,
"step": 30
},
{
"epoch": 0.03773584905660377,
"grad_norm": 2.458791732788086,
"learning_rate": 1.9632075471698116e-05,
"loss": 0.5465,
"step": 40
},
{
"epoch": 0.04716981132075472,
"grad_norm": 5.235497951507568,
"learning_rate": 1.9537735849056604e-05,
"loss": 0.4277,
"step": 50
},
{
"epoch": 0.05660377358490566,
"grad_norm": 2.81006121635437,
"learning_rate": 1.9443396226415095e-05,
"loss": 0.3367,
"step": 60
},
{
"epoch": 0.0660377358490566,
"grad_norm": 5.580306529998779,
"learning_rate": 1.9349056603773586e-05,
"loss": 0.232,
"step": 70
},
{
"epoch": 0.07547169811320754,
"grad_norm": 6.972874641418457,
"learning_rate": 1.9254716981132077e-05,
"loss": 0.1852,
"step": 80
},
{
"epoch": 0.08490566037735849,
"grad_norm": 2.1650376319885254,
"learning_rate": 1.9160377358490568e-05,
"loss": 0.1581,
"step": 90
},
{
"epoch": 0.09433962264150944,
"grad_norm": 0.42251577973365784,
"learning_rate": 1.906603773584906e-05,
"loss": 0.1821,
"step": 100
},
{
"epoch": 0.10377358490566038,
"grad_norm": 0.361060231924057,
"learning_rate": 1.8971698113207547e-05,
"loss": 0.3396,
"step": 110
},
{
"epoch": 0.11320754716981132,
"grad_norm": 1.1767176389694214,
"learning_rate": 1.887735849056604e-05,
"loss": 0.0724,
"step": 120
},
{
"epoch": 0.12264150943396226,
"grad_norm": 0.310159295797348,
"learning_rate": 1.878301886792453e-05,
"loss": 0.1826,
"step": 130
},
{
"epoch": 0.1320754716981132,
"grad_norm": 0.29479867219924927,
"learning_rate": 1.868867924528302e-05,
"loss": 0.1407,
"step": 140
},
{
"epoch": 0.14150943396226415,
"grad_norm": 0.25081831216812134,
"learning_rate": 1.859433962264151e-05,
"loss": 0.0804,
"step": 150
},
{
"epoch": 0.1509433962264151,
"grad_norm": 7.194790840148926,
"learning_rate": 1.8500000000000002e-05,
"loss": 0.1394,
"step": 160
},
{
"epoch": 0.16037735849056603,
"grad_norm": 0.19942767918109894,
"learning_rate": 1.8405660377358494e-05,
"loss": 0.0876,
"step": 170
},
{
"epoch": 0.16981132075471697,
"grad_norm": 12.822174072265625,
"learning_rate": 1.8311320754716985e-05,
"loss": 0.1606,
"step": 180
},
{
"epoch": 0.1792452830188679,
"grad_norm": 1.3328683376312256,
"learning_rate": 1.8216981132075472e-05,
"loss": 0.1835,
"step": 190
},
{
"epoch": 0.18867924528301888,
"grad_norm": 0.17138229310512543,
"learning_rate": 1.8122641509433963e-05,
"loss": 0.2355,
"step": 200
},
{
"epoch": 0.19811320754716982,
"grad_norm": 0.21127229928970337,
"learning_rate": 1.8028301886792454e-05,
"loss": 0.2031,
"step": 210
},
{
"epoch": 0.20754716981132076,
"grad_norm": 2.211583137512207,
"learning_rate": 1.7933962264150946e-05,
"loss": 0.0223,
"step": 220
},
{
"epoch": 0.2169811320754717,
"grad_norm": 0.17399153113365173,
"learning_rate": 1.7839622641509437e-05,
"loss": 0.2204,
"step": 230
},
{
"epoch": 0.22641509433962265,
"grad_norm": 0.26820316910743713,
"learning_rate": 1.7745283018867928e-05,
"loss": 0.1022,
"step": 240
},
{
"epoch": 0.2358490566037736,
"grad_norm": 0.15803369879722595,
"learning_rate": 1.7650943396226415e-05,
"loss": 0.0638,
"step": 250
},
{
"epoch": 0.24528301886792453,
"grad_norm": 0.15400823950767517,
"learning_rate": 1.7556603773584907e-05,
"loss": 0.1257,
"step": 260
},
{
"epoch": 0.25471698113207547,
"grad_norm": 0.7688778638839722,
"learning_rate": 1.7462264150943398e-05,
"loss": 0.2443,
"step": 270
},
{
"epoch": 0.2641509433962264,
"grad_norm": 0.243111252784729,
"learning_rate": 1.736792452830189e-05,
"loss": 0.2897,
"step": 280
},
{
"epoch": 0.27358490566037735,
"grad_norm": 5.870262145996094,
"learning_rate": 1.727358490566038e-05,
"loss": 0.2198,
"step": 290
},
{
"epoch": 0.2830188679245283,
"grad_norm": 0.2840751111507416,
"learning_rate": 1.7179245283018867e-05,
"loss": 0.3108,
"step": 300
},
{
"epoch": 0.29245283018867924,
"grad_norm": 0.1476099044084549,
"learning_rate": 1.708490566037736e-05,
"loss": 0.1688,
"step": 310
},
{
"epoch": 0.3018867924528302,
"grad_norm": 0.3752176761627197,
"learning_rate": 1.699056603773585e-05,
"loss": 0.2876,
"step": 320
},
{
"epoch": 0.3113207547169811,
"grad_norm": 30.54532241821289,
"learning_rate": 1.689622641509434e-05,
"loss": 0.1215,
"step": 330
},
{
"epoch": 0.32075471698113206,
"grad_norm": 0.15333189070224762,
"learning_rate": 1.6801886792452832e-05,
"loss": 0.2964,
"step": 340
},
{
"epoch": 0.330188679245283,
"grad_norm": 0.23220475018024445,
"learning_rate": 1.6707547169811323e-05,
"loss": 0.1506,
"step": 350
},
{
"epoch": 0.33962264150943394,
"grad_norm": 121.28533172607422,
"learning_rate": 1.661320754716981e-05,
"loss": 0.2107,
"step": 360
},
{
"epoch": 0.3490566037735849,
"grad_norm": 1.4383134841918945,
"learning_rate": 1.6518867924528305e-05,
"loss": 0.2117,
"step": 370
},
{
"epoch": 0.3584905660377358,
"grad_norm": 0.14638499915599823,
"learning_rate": 1.6424528301886793e-05,
"loss": 0.1013,
"step": 380
},
{
"epoch": 0.36792452830188677,
"grad_norm": 0.124303437769413,
"learning_rate": 1.6330188679245284e-05,
"loss": 0.0478,
"step": 390
},
{
"epoch": 0.37735849056603776,
"grad_norm": 0.11256200820207596,
"learning_rate": 1.6235849056603775e-05,
"loss": 0.0689,
"step": 400
},
{
"epoch": 0.3867924528301887,
"grad_norm": 0.10450290888547897,
"learning_rate": 1.6141509433962266e-05,
"loss": 0.1293,
"step": 410
},
{
"epoch": 0.39622641509433965,
"grad_norm": 0.09205685555934906,
"learning_rate": 1.6047169811320754e-05,
"loss": 0.0672,
"step": 420
},
{
"epoch": 0.4056603773584906,
"grad_norm": 1.0851916074752808,
"learning_rate": 1.5952830188679248e-05,
"loss": 0.1918,
"step": 430
},
{
"epoch": 0.41509433962264153,
"grad_norm": 0.1460585743188858,
"learning_rate": 1.5858490566037736e-05,
"loss": 0.0866,
"step": 440
},
{
"epoch": 0.42452830188679247,
"grad_norm": 0.10592811554670334,
"learning_rate": 1.5764150943396227e-05,
"loss": 0.0327,
"step": 450
},
{
"epoch": 0.4339622641509434,
"grad_norm": 0.09917836636304855,
"learning_rate": 1.5669811320754718e-05,
"loss": 0.268,
"step": 460
},
{
"epoch": 0.44339622641509435,
"grad_norm": 0.2683676481246948,
"learning_rate": 1.557547169811321e-05,
"loss": 0.2433,
"step": 470
},
{
"epoch": 0.4528301886792453,
"grad_norm": 0.3096301853656769,
"learning_rate": 1.54811320754717e-05,
"loss": 0.0106,
"step": 480
},
{
"epoch": 0.46226415094339623,
"grad_norm": 0.14542903006076813,
"learning_rate": 1.538679245283019e-05,
"loss": 0.1084,
"step": 490
},
{
"epoch": 0.4716981132075472,
"grad_norm": 0.09798438102006912,
"learning_rate": 1.529245283018868e-05,
"loss": 0.1217,
"step": 500
},
{
"epoch": 0.4811320754716981,
"grad_norm": 0.5679267644882202,
"learning_rate": 1.5198113207547172e-05,
"loss": 0.1084,
"step": 510
},
{
"epoch": 0.49056603773584906,
"grad_norm": 0.07084541022777557,
"learning_rate": 1.5103773584905661e-05,
"loss": 0.1496,
"step": 520
},
{
"epoch": 0.5,
"grad_norm": 0.09527801722288132,
"learning_rate": 1.5009433962264152e-05,
"loss": 0.0397,
"step": 530
},
{
"epoch": 0.5094339622641509,
"grad_norm": 2.7925026416778564,
"learning_rate": 1.4915094339622642e-05,
"loss": 0.0082,
"step": 540
},
{
"epoch": 0.5188679245283019,
"grad_norm": 61.901344299316406,
"learning_rate": 1.4820754716981134e-05,
"loss": 0.2448,
"step": 550
},
{
"epoch": 0.5283018867924528,
"grad_norm": 0.10737080127000809,
"learning_rate": 1.4726415094339624e-05,
"loss": 0.2122,
"step": 560
},
{
"epoch": 0.5377358490566038,
"grad_norm": 0.08159242570400238,
"learning_rate": 1.4632075471698115e-05,
"loss": 0.2056,
"step": 570
},
{
"epoch": 0.5471698113207547,
"grad_norm": 26.218284606933594,
"learning_rate": 1.4537735849056604e-05,
"loss": 0.2137,
"step": 580
},
{
"epoch": 0.5566037735849056,
"grad_norm": 0.09104019403457642,
"learning_rate": 1.4443396226415095e-05,
"loss": 0.1235,
"step": 590
},
{
"epoch": 0.5660377358490566,
"grad_norm": 44.143577575683594,
"learning_rate": 1.4349056603773586e-05,
"loss": 0.1708,
"step": 600
},
{
"epoch": 0.5754716981132075,
"grad_norm": 0.09825887531042099,
"learning_rate": 1.4254716981132078e-05,
"loss": 0.2829,
"step": 610
},
{
"epoch": 0.5849056603773585,
"grad_norm": 0.08215257525444031,
"learning_rate": 1.4160377358490567e-05,
"loss": 0.0978,
"step": 620
},
{
"epoch": 0.5943396226415094,
"grad_norm": 0.0858820378780365,
"learning_rate": 1.4066037735849058e-05,
"loss": 0.1308,
"step": 630
},
{
"epoch": 0.6037735849056604,
"grad_norm": 0.07820013910531998,
"learning_rate": 1.3971698113207547e-05,
"loss": 0.0083,
"step": 640
},
{
"epoch": 0.6132075471698113,
"grad_norm": 0.07088133692741394,
"learning_rate": 1.387735849056604e-05,
"loss": 0.0343,
"step": 650
},
{
"epoch": 0.6226415094339622,
"grad_norm": 0.07562941312789917,
"learning_rate": 1.378301886792453e-05,
"loss": 0.1503,
"step": 660
},
{
"epoch": 0.6320754716981132,
"grad_norm": 0.10470731556415558,
"learning_rate": 1.368867924528302e-05,
"loss": 0.0474,
"step": 670
},
{
"epoch": 0.6415094339622641,
"grad_norm": 0.06047428399324417,
"learning_rate": 1.359433962264151e-05,
"loss": 0.3286,
"step": 680
},
{
"epoch": 0.6509433962264151,
"grad_norm": 0.12241201102733612,
"learning_rate": 1.3500000000000001e-05,
"loss": 0.163,
"step": 690
},
{
"epoch": 0.660377358490566,
"grad_norm": 0.07547236233949661,
"learning_rate": 1.3405660377358492e-05,
"loss": 0.1182,
"step": 700
},
{
"epoch": 0.6698113207547169,
"grad_norm": 0.06518665701150894,
"learning_rate": 1.3311320754716983e-05,
"loss": 0.1177,
"step": 710
},
{
"epoch": 0.6792452830188679,
"grad_norm": 0.05459102615714073,
"learning_rate": 1.3216981132075473e-05,
"loss": 0.0056,
"step": 720
},
{
"epoch": 0.6886792452830188,
"grad_norm": 0.06507308781147003,
"learning_rate": 1.3122641509433964e-05,
"loss": 0.194,
"step": 730
},
{
"epoch": 0.6981132075471698,
"grad_norm": 0.06847833096981049,
"learning_rate": 1.3028301886792453e-05,
"loss": 0.0334,
"step": 740
},
{
"epoch": 0.7075471698113207,
"grad_norm": 0.05916672945022583,
"learning_rate": 1.2933962264150946e-05,
"loss": 0.1326,
"step": 750
},
{
"epoch": 0.7169811320754716,
"grad_norm": 0.05806839466094971,
"learning_rate": 1.2839622641509435e-05,
"loss": 0.0734,
"step": 760
},
{
"epoch": 0.7264150943396226,
"grad_norm": 0.058394916355609894,
"learning_rate": 1.2745283018867926e-05,
"loss": 0.3154,
"step": 770
},
{
"epoch": 0.7358490566037735,
"grad_norm": 0.08128712326288223,
"learning_rate": 1.2650943396226416e-05,
"loss": 0.1004,
"step": 780
},
{
"epoch": 0.7452830188679245,
"grad_norm": 0.1772662252187729,
"learning_rate": 1.2556603773584907e-05,
"loss": 0.1408,
"step": 790
},
{
"epoch": 0.7547169811320755,
"grad_norm": 0.07657451182603836,
"learning_rate": 1.2462264150943398e-05,
"loss": 0.1138,
"step": 800
},
{
"epoch": 0.7641509433962265,
"grad_norm": 0.0920625552535057,
"learning_rate": 1.2367924528301887e-05,
"loss": 0.0064,
"step": 810
},
{
"epoch": 0.7735849056603774,
"grad_norm": 3.893165111541748,
"learning_rate": 1.2273584905660379e-05,
"loss": 0.1015,
"step": 820
},
{
"epoch": 0.7830188679245284,
"grad_norm": 0.06635797768831253,
"learning_rate": 1.2179245283018868e-05,
"loss": 0.1405,
"step": 830
},
{
"epoch": 0.7924528301886793,
"grad_norm": 1.1072912216186523,
"learning_rate": 1.2084905660377359e-05,
"loss": 0.1225,
"step": 840
},
{
"epoch": 0.8018867924528302,
"grad_norm": 0.04783385619521141,
"learning_rate": 1.1990566037735848e-05,
"loss": 0.0895,
"step": 850
},
{
"epoch": 0.8113207547169812,
"grad_norm": 0.05416665971279144,
"learning_rate": 1.1896226415094341e-05,
"loss": 0.1095,
"step": 860
},
{
"epoch": 0.8207547169811321,
"grad_norm": 0.10190918296575546,
"learning_rate": 1.180188679245283e-05,
"loss": 0.1404,
"step": 870
},
{
"epoch": 0.8301886792452831,
"grad_norm": 92.75904083251953,
"learning_rate": 1.1707547169811322e-05,
"loss": 0.0269,
"step": 880
},
{
"epoch": 0.839622641509434,
"grad_norm": 0.06948844343423843,
"learning_rate": 1.1613207547169811e-05,
"loss": 0.0046,
"step": 890
},
{
"epoch": 0.8490566037735849,
"grad_norm": 0.0549422986805439,
"learning_rate": 1.1518867924528304e-05,
"loss": 0.1588,
"step": 900
},
{
"epoch": 0.8584905660377359,
"grad_norm": 0.09215264767408371,
"learning_rate": 1.1424528301886793e-05,
"loss": 0.0639,
"step": 910
},
{
"epoch": 0.8679245283018868,
"grad_norm": 0.0980132520198822,
"learning_rate": 1.1330188679245284e-05,
"loss": 0.21,
"step": 920
},
{
"epoch": 0.8773584905660378,
"grad_norm": 0.06740464270114899,
"learning_rate": 1.1235849056603774e-05,
"loss": 0.0059,
"step": 930
},
{
"epoch": 0.8867924528301887,
"grad_norm": 0.42118075489997864,
"learning_rate": 1.1141509433962265e-05,
"loss": 0.1012,
"step": 940
},
{
"epoch": 0.8962264150943396,
"grad_norm": 0.09071939438581467,
"learning_rate": 1.1047169811320754e-05,
"loss": 0.0636,
"step": 950
},
{
"epoch": 0.9056603773584906,
"grad_norm": 0.06605428457260132,
"learning_rate": 1.0952830188679247e-05,
"loss": 0.1424,
"step": 960
},
{
"epoch": 0.9150943396226415,
"grad_norm": 0.054869458079338074,
"learning_rate": 1.0858490566037736e-05,
"loss": 0.1506,
"step": 970
},
{
"epoch": 0.9245283018867925,
"grad_norm": 8.528816223144531,
"learning_rate": 1.0764150943396227e-05,
"loss": 0.0766,
"step": 980
},
{
"epoch": 0.9339622641509434,
"grad_norm": 0.04189067333936691,
"learning_rate": 1.0669811320754717e-05,
"loss": 0.0383,
"step": 990
},
{
"epoch": 0.9433962264150944,
"grad_norm": 0.19604367017745972,
"learning_rate": 1.057547169811321e-05,
"loss": 0.0474,
"step": 1000
},
{
"epoch": 0.9528301886792453,
"grad_norm": 0.04203261435031891,
"learning_rate": 1.0481132075471699e-05,
"loss": 0.0571,
"step": 1010
},
{
"epoch": 0.9622641509433962,
"grad_norm": 20.811012268066406,
"learning_rate": 1.038679245283019e-05,
"loss": 0.2019,
"step": 1020
},
{
"epoch": 0.9716981132075472,
"grad_norm": 0.10877203941345215,
"learning_rate": 1.029245283018868e-05,
"loss": 0.1336,
"step": 1030
},
{
"epoch": 0.9811320754716981,
"grad_norm": 0.10228094458580017,
"learning_rate": 1.019811320754717e-05,
"loss": 0.0273,
"step": 1040
},
{
"epoch": 0.9905660377358491,
"grad_norm": 1.2278637886047363,
"learning_rate": 1.010377358490566e-05,
"loss": 0.0452,
"step": 1050
},
{
"epoch": 1.0,
"grad_norm": 0.03946210443973541,
"learning_rate": 1.0009433962264153e-05,
"loss": 0.0109,
"step": 1060
},
{
"epoch": 1.0,
"eval_loss": 0.08500511199235916,
"eval_runtime": 1.6971,
"eval_samples_per_second": 555.67,
"eval_steps_per_second": 69.532,
"step": 1060
},
{
"epoch": 1.009433962264151,
"grad_norm": 0.03779453784227371,
"learning_rate": 9.915094339622642e-06,
"loss": 0.1822,
"step": 1070
},
{
"epoch": 1.0188679245283019,
"grad_norm": 0.040553513914346695,
"learning_rate": 9.820754716981133e-06,
"loss": 0.054,
"step": 1080
},
{
"epoch": 1.028301886792453,
"grad_norm": 0.02908136695623398,
"learning_rate": 9.726415094339623e-06,
"loss": 0.0062,
"step": 1090
},
{
"epoch": 1.0377358490566038,
"grad_norm": 0.04287223890423775,
"learning_rate": 9.632075471698114e-06,
"loss": 0.0039,
"step": 1100
},
{
"epoch": 1.0471698113207548,
"grad_norm": 0.03865998610854149,
"learning_rate": 9.537735849056605e-06,
"loss": 0.0446,
"step": 1110
},
{
"epoch": 1.0566037735849056,
"grad_norm": 0.04521052539348602,
"learning_rate": 9.443396226415094e-06,
"loss": 0.0029,
"step": 1120
},
{
"epoch": 1.0660377358490567,
"grad_norm": 0.03594714403152466,
"learning_rate": 9.349056603773585e-06,
"loss": 0.148,
"step": 1130
},
{
"epoch": 1.0754716981132075,
"grad_norm": 0.029539430513978004,
"learning_rate": 9.254716981132076e-06,
"loss": 0.0271,
"step": 1140
},
{
"epoch": 1.0849056603773586,
"grad_norm": 4.038683891296387,
"learning_rate": 9.160377358490566e-06,
"loss": 0.165,
"step": 1150
},
{
"epoch": 1.0943396226415094,
"grad_norm": 0.027737464755773544,
"learning_rate": 9.066037735849057e-06,
"loss": 0.0028,
"step": 1160
},
{
"epoch": 1.1037735849056605,
"grad_norm": 0.03591258078813553,
"learning_rate": 8.971698113207548e-06,
"loss": 0.1371,
"step": 1170
},
{
"epoch": 1.1132075471698113,
"grad_norm": 12.058833122253418,
"learning_rate": 8.877358490566039e-06,
"loss": 0.236,
"step": 1180
},
{
"epoch": 1.1226415094339623,
"grad_norm": 0.04443644732236862,
"learning_rate": 8.783018867924528e-06,
"loss": 0.003,
"step": 1190
},
{
"epoch": 1.1320754716981132,
"grad_norm": 40.05979919433594,
"learning_rate": 8.68867924528302e-06,
"loss": 0.0157,
"step": 1200
},
{
"epoch": 1.1415094339622642,
"grad_norm": 0.04003468528389931,
"learning_rate": 8.59433962264151e-06,
"loss": 0.0029,
"step": 1210
},
{
"epoch": 1.150943396226415,
"grad_norm": 2.571934223175049,
"learning_rate": 8.5e-06,
"loss": 0.1204,
"step": 1220
},
{
"epoch": 1.1603773584905661,
"grad_norm": 0.06405159831047058,
"learning_rate": 8.405660377358491e-06,
"loss": 0.0146,
"step": 1230
},
{
"epoch": 1.169811320754717,
"grad_norm": 8.398702621459961,
"learning_rate": 8.311320754716982e-06,
"loss": 0.0048,
"step": 1240
},
{
"epoch": 1.179245283018868,
"grad_norm": 19.97818946838379,
"learning_rate": 8.216981132075471e-06,
"loss": 0.0688,
"step": 1250
},
{
"epoch": 1.1886792452830188,
"grad_norm": 0.03282918781042099,
"learning_rate": 8.122641509433963e-06,
"loss": 0.0035,
"step": 1260
},
{
"epoch": 1.1981132075471699,
"grad_norm": 0.03513512760400772,
"learning_rate": 8.028301886792454e-06,
"loss": 0.0355,
"step": 1270
},
{
"epoch": 1.2075471698113207,
"grad_norm": 0.03274456784129143,
"learning_rate": 7.933962264150945e-06,
"loss": 0.188,
"step": 1280
},
{
"epoch": 1.2169811320754718,
"grad_norm": 0.03626378998160362,
"learning_rate": 7.839622641509434e-06,
"loss": 0.014,
"step": 1290
},
{
"epoch": 1.2264150943396226,
"grad_norm": 0.8501767516136169,
"learning_rate": 7.745283018867925e-06,
"loss": 0.1076,
"step": 1300
},
{
"epoch": 1.2358490566037736,
"grad_norm": 0.0267633106559515,
"learning_rate": 7.650943396226416e-06,
"loss": 0.0025,
"step": 1310
},
{
"epoch": 1.2452830188679245,
"grad_norm": 0.04418088495731354,
"learning_rate": 7.5566037735849066e-06,
"loss": 0.1331,
"step": 1320
},
{
"epoch": 1.2547169811320755,
"grad_norm": 6.747396469116211,
"learning_rate": 7.462264150943397e-06,
"loss": 0.3259,
"step": 1330
},
{
"epoch": 1.2641509433962264,
"grad_norm": 0.032822128385305405,
"learning_rate": 7.367924528301887e-06,
"loss": 0.1229,
"step": 1340
},
{
"epoch": 1.2735849056603774,
"grad_norm": 0.06315261125564575,
"learning_rate": 7.273584905660378e-06,
"loss": 0.0039,
"step": 1350
},
{
"epoch": 1.2830188679245282,
"grad_norm": 0.03962039574980736,
"learning_rate": 7.179245283018868e-06,
"loss": 0.0072,
"step": 1360
},
{
"epoch": 1.2924528301886793,
"grad_norm": 0.03726816922426224,
"learning_rate": 7.0849056603773594e-06,
"loss": 0.0138,
"step": 1370
},
{
"epoch": 1.3018867924528301,
"grad_norm": 0.04036370664834976,
"learning_rate": 6.99056603773585e-06,
"loss": 0.1472,
"step": 1380
},
{
"epoch": 1.3113207547169812,
"grad_norm": 0.03037635050714016,
"learning_rate": 6.89622641509434e-06,
"loss": 0.0069,
"step": 1390
},
{
"epoch": 1.320754716981132,
"grad_norm": 0.02670077607035637,
"learning_rate": 6.801886792452831e-06,
"loss": 0.0021,
"step": 1400
},
{
"epoch": 1.330188679245283,
"grad_norm": 0.05846339091658592,
"learning_rate": 6.707547169811321e-06,
"loss": 0.0022,
"step": 1410
},
{
"epoch": 1.3396226415094339,
"grad_norm": 9.76332950592041,
"learning_rate": 6.613207547169812e-06,
"loss": 0.1445,
"step": 1420
},
{
"epoch": 1.349056603773585,
"grad_norm": 0.048967599868774414,
"learning_rate": 6.5188679245283026e-06,
"loss": 0.0441,
"step": 1430
},
{
"epoch": 1.3584905660377358,
"grad_norm": 0.025179484859108925,
"learning_rate": 6.424528301886793e-06,
"loss": 0.0188,
"step": 1440
},
{
"epoch": 1.3679245283018868,
"grad_norm": 0.02890361286699772,
"learning_rate": 6.330188679245284e-06,
"loss": 0.0021,
"step": 1450
},
{
"epoch": 1.3773584905660377,
"grad_norm": 0.02526584453880787,
"learning_rate": 6.235849056603774e-06,
"loss": 0.0022,
"step": 1460
},
{
"epoch": 1.3867924528301887,
"grad_norm": 2.650209903717041,
"learning_rate": 6.141509433962265e-06,
"loss": 0.0482,
"step": 1470
},
{
"epoch": 1.3962264150943398,
"grad_norm": 0.0368034765124321,
"learning_rate": 6.0471698113207555e-06,
"loss": 0.0034,
"step": 1480
},
{
"epoch": 1.4056603773584906,
"grad_norm": 0.021781697869300842,
"learning_rate": 5.952830188679246e-06,
"loss": 0.0022,
"step": 1490
},
{
"epoch": 1.4150943396226414,
"grad_norm": 0.019001835957169533,
"learning_rate": 5.858490566037737e-06,
"loss": 0.2058,
"step": 1500
},
{
"epoch": 1.4245283018867925,
"grad_norm": 0.047584012150764465,
"learning_rate": 5.764150943396227e-06,
"loss": 0.1632,
"step": 1510
},
{
"epoch": 1.4339622641509435,
"grad_norm": 0.02781420387327671,
"learning_rate": 5.669811320754718e-06,
"loss": 0.0018,
"step": 1520
},
{
"epoch": 1.4433962264150944,
"grad_norm": 0.03572264313697815,
"learning_rate": 5.575471698113208e-06,
"loss": 0.0184,
"step": 1530
},
{
"epoch": 1.4528301886792452,
"grad_norm": 0.021896662190556526,
"learning_rate": 5.4811320754716994e-06,
"loss": 0.155,
"step": 1540
},
{
"epoch": 1.4622641509433962,
"grad_norm": 0.024771416559815407,
"learning_rate": 5.38679245283019e-06,
"loss": 0.0018,
"step": 1550
},
{
"epoch": 1.4716981132075473,
"grad_norm": 3.849034547805786,
"learning_rate": 5.29245283018868e-06,
"loss": 0.1006,
"step": 1560
},
{
"epoch": 1.4811320754716981,
"grad_norm": 0.12136558443307877,
"learning_rate": 5.198113207547171e-06,
"loss": 0.0024,
"step": 1570
},
{
"epoch": 1.490566037735849,
"grad_norm": 0.029468955472111702,
"learning_rate": 5.103773584905661e-06,
"loss": 0.0021,
"step": 1580
},
{
"epoch": 1.5,
"grad_norm": 2.4301178455352783,
"learning_rate": 5.009433962264152e-06,
"loss": 0.0024,
"step": 1590
},
{
"epoch": 1.509433962264151,
"grad_norm": 0.024639485403895378,
"learning_rate": 4.915094339622642e-06,
"loss": 0.0019,
"step": 1600
},
{
"epoch": 1.5188679245283019,
"grad_norm": 5.811135768890381,
"learning_rate": 4.820754716981133e-06,
"loss": 0.0282,
"step": 1610
},
{
"epoch": 1.5283018867924527,
"grad_norm": 0.02190409228205681,
"learning_rate": 4.726415094339623e-06,
"loss": 0.0016,
"step": 1620
},
{
"epoch": 1.5377358490566038,
"grad_norm": 0.025405917316675186,
"learning_rate": 4.632075471698113e-06,
"loss": 0.0505,
"step": 1630
},
{
"epoch": 1.5471698113207548,
"grad_norm": 16.779964447021484,
"learning_rate": 4.537735849056604e-06,
"loss": 0.1169,
"step": 1640
},
{
"epoch": 1.5566037735849056,
"grad_norm": 43.401329040527344,
"learning_rate": 4.443396226415095e-06,
"loss": 0.0245,
"step": 1650
},
{
"epoch": 1.5660377358490565,
"grad_norm": 0.9246163964271545,
"learning_rate": 4.349056603773586e-06,
"loss": 0.0354,
"step": 1660
},
{
"epoch": 1.5754716981132075,
"grad_norm": 0.02293049544095993,
"learning_rate": 4.254716981132076e-06,
"loss": 0.0036,
"step": 1670
},
{
"epoch": 1.5849056603773586,
"grad_norm": 0.019884012639522552,
"learning_rate": 4.160377358490566e-06,
"loss": 0.0017,
"step": 1680
},
{
"epoch": 1.5943396226415094,
"grad_norm": 0.0297381728887558,
"learning_rate": 4.066037735849057e-06,
"loss": 0.002,
"step": 1690
},
{
"epoch": 1.6037735849056602,
"grad_norm": 0.01664115861058235,
"learning_rate": 3.9716981132075475e-06,
"loss": 0.0029,
"step": 1700
},
{
"epoch": 1.6132075471698113,
"grad_norm": 0.021111104637384415,
"learning_rate": 3.8773584905660386e-06,
"loss": 0.0019,
"step": 1710
},
{
"epoch": 1.6226415094339623,
"grad_norm": 5.916421890258789,
"learning_rate": 3.783018867924529e-06,
"loss": 0.0597,
"step": 1720
},
{
"epoch": 1.6320754716981132,
"grad_norm": 0.11948911845684052,
"learning_rate": 3.688679245283019e-06,
"loss": 0.0288,
"step": 1730
},
{
"epoch": 1.641509433962264,
"grad_norm": 0.029013920575380325,
"learning_rate": 3.5943396226415093e-06,
"loss": 0.0022,
"step": 1740
},
{
"epoch": 1.650943396226415,
"grad_norm": 0.021010151132941246,
"learning_rate": 3.5e-06,
"loss": 0.0021,
"step": 1750
},
{
"epoch": 1.6603773584905661,
"grad_norm": 0.020002691075205803,
"learning_rate": 3.4056603773584906e-06,
"loss": 0.0016,
"step": 1760
},
{
"epoch": 1.669811320754717,
"grad_norm": 0.02407264895737171,
"learning_rate": 3.3113207547169813e-06,
"loss": 0.0928,
"step": 1770
},
{
"epoch": 1.6792452830188678,
"grad_norm": 0.024595679715275764,
"learning_rate": 3.216981132075472e-06,
"loss": 0.0018,
"step": 1780
},
{
"epoch": 1.6886792452830188,
"grad_norm": 0.017810633406043053,
"learning_rate": 3.122641509433962e-06,
"loss": 0.1722,
"step": 1790
},
{
"epoch": 1.6981132075471699,
"grad_norm": 0.01612844318151474,
"learning_rate": 3.028301886792453e-06,
"loss": 0.1158,
"step": 1800
},
{
"epoch": 1.7075471698113207,
"grad_norm": 0.023075569421052933,
"learning_rate": 2.9339622641509435e-06,
"loss": 0.0016,
"step": 1810
},
{
"epoch": 1.7169811320754715,
"grad_norm": 0.01716834492981434,
"learning_rate": 2.839622641509434e-06,
"loss": 0.0015,
"step": 1820
},
{
"epoch": 1.7264150943396226,
"grad_norm": 0.0205362718552351,
"learning_rate": 2.745283018867925e-06,
"loss": 0.0017,
"step": 1830
},
{
"epoch": 1.7358490566037736,
"grad_norm": 0.023218706250190735,
"learning_rate": 2.6509433962264155e-06,
"loss": 0.0014,
"step": 1840
},
{
"epoch": 1.7452830188679245,
"grad_norm": 0.018329111859202385,
"learning_rate": 2.5566037735849057e-06,
"loss": 0.1204,
"step": 1850
},
{
"epoch": 1.7547169811320755,
"grad_norm": 0.019258936867117882,
"learning_rate": 2.4622641509433964e-06,
"loss": 0.0018,
"step": 1860
},
{
"epoch": 1.7641509433962264,
"grad_norm": 0.023526819422841072,
"learning_rate": 2.367924528301887e-06,
"loss": 0.0074,
"step": 1870
},
{
"epoch": 1.7735849056603774,
"grad_norm": 0.01679695025086403,
"learning_rate": 2.2735849056603777e-06,
"loss": 0.0014,
"step": 1880
},
{
"epoch": 1.7830188679245285,
"grad_norm": 0.014311902225017548,
"learning_rate": 2.1792452830188684e-06,
"loss": 0.0015,
"step": 1890
},
{
"epoch": 1.7924528301886793,
"grad_norm": 1.5082017183303833,
"learning_rate": 2.0849056603773586e-06,
"loss": 0.0708,
"step": 1900
},
{
"epoch": 1.8018867924528301,
"grad_norm": 52.86256408691406,
"learning_rate": 1.9905660377358493e-06,
"loss": 0.0666,
"step": 1910
},
{
"epoch": 1.8113207547169812,
"grad_norm": 0.020178191363811493,
"learning_rate": 1.89622641509434e-06,
"loss": 0.0694,
"step": 1920
},
{
"epoch": 1.8207547169811322,
"grad_norm": 0.01885703019797802,
"learning_rate": 1.8018867924528302e-06,
"loss": 0.0416,
"step": 1930
},
{
"epoch": 1.830188679245283,
"grad_norm": 0.021212713792920113,
"learning_rate": 1.7075471698113208e-06,
"loss": 0.0015,
"step": 1940
},
{
"epoch": 1.8396226415094339,
"grad_norm": 0.020716039463877678,
"learning_rate": 1.6132075471698113e-06,
"loss": 0.0446,
"step": 1950
},
{
"epoch": 1.849056603773585,
"grad_norm": 0.01979793794453144,
"learning_rate": 1.518867924528302e-06,
"loss": 0.0017,
"step": 1960
},
{
"epoch": 1.858490566037736,
"grad_norm": 0.029191287234425545,
"learning_rate": 1.4245283018867926e-06,
"loss": 0.0016,
"step": 1970
},
{
"epoch": 1.8679245283018868,
"grad_norm": 0.02217436581850052,
"learning_rate": 1.330188679245283e-06,
"loss": 0.0441,
"step": 1980
},
{
"epoch": 1.8773584905660377,
"grad_norm": 0.0228409580886364,
"learning_rate": 1.2358490566037737e-06,
"loss": 0.0016,
"step": 1990
},
{
"epoch": 1.8867924528301887,
"grad_norm": 0.022314254194498062,
"learning_rate": 1.1415094339622642e-06,
"loss": 0.1457,
"step": 2000
},
{
"epoch": 1.8962264150943398,
"grad_norm": 0.017337264493107796,
"learning_rate": 1.0471698113207548e-06,
"loss": 0.1144,
"step": 2010
},
{
"epoch": 1.9056603773584906,
"grad_norm": 0.021478446200489998,
"learning_rate": 9.528301886792454e-07,
"loss": 0.0014,
"step": 2020
},
{
"epoch": 1.9150943396226414,
"grad_norm": 0.021643251180648804,
"learning_rate": 8.584905660377358e-07,
"loss": 0.0284,
"step": 2030
},
{
"epoch": 1.9245283018867925,
"grad_norm": 0.02016671560704708,
"learning_rate": 7.641509433962264e-07,
"loss": 0.0482,
"step": 2040
},
{
"epoch": 1.9339622641509435,
"grad_norm": 0.019274510443210602,
"learning_rate": 6.698113207547171e-07,
"loss": 0.0016,
"step": 2050
},
{
"epoch": 1.9433962264150944,
"grad_norm": 0.014301498420536518,
"learning_rate": 5.754716981132076e-07,
"loss": 0.0045,
"step": 2060
},
{
"epoch": 1.9528301886792452,
"grad_norm": 0.01988300494849682,
"learning_rate": 4.811320754716982e-07,
"loss": 0.0015,
"step": 2070
},
{
"epoch": 1.9622641509433962,
"grad_norm": 0.02060701698064804,
"learning_rate": 3.867924528301887e-07,
"loss": 0.0044,
"step": 2080
},
{
"epoch": 1.9716981132075473,
"grad_norm": 41.62403106689453,
"learning_rate": 2.924528301886793e-07,
"loss": 0.1818,
"step": 2090
},
{
"epoch": 1.9811320754716981,
"grad_norm": 0.0649556890130043,
"learning_rate": 1.9811320754716982e-07,
"loss": 0.0016,
"step": 2100
},
{
"epoch": 1.990566037735849,
"grad_norm": 0.02280914969742298,
"learning_rate": 1.0377358490566039e-07,
"loss": 0.0414,
"step": 2110
},
{
"epoch": 2.0,
"grad_norm": 0.01854662224650383,
"learning_rate": 9.433962264150945e-09,
"loss": 0.0015,
"step": 2120
},
{
"epoch": 2.0,
"eval_loss": 0.06546525657176971,
"eval_runtime": 1.7009,
"eval_samples_per_second": 554.403,
"eval_steps_per_second": 69.374,
"step": 2120
}
],
"logging_steps": 10,
"max_steps": 2120,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4461837276794880.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}