{
"best_global_step": 1400,
"best_metric": 0.5561918616294861,
"best_model_checkpoint": ".../training_output/checkpoint-1400",
"epoch": 10.0,
"eval_steps": 50,
"global_step": 1420,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07042253521126761,
"grad_norm": 4.834911346435547,
"learning_rate": 6.338028169014085e-07,
"loss": 3.8016,
"step": 10
},
{
"epoch": 0.14084507042253522,
"grad_norm": 3.7942707538604736,
"learning_rate": 1.3380281690140844e-06,
"loss": 3.7424,
"step": 20
},
{
"epoch": 0.2112676056338028,
"grad_norm": 4.044854164123535,
"learning_rate": 2.0422535211267608e-06,
"loss": 3.6583,
"step": 30
},
{
"epoch": 0.28169014084507044,
"grad_norm": 4.419040679931641,
"learning_rate": 2.746478873239437e-06,
"loss": 3.7144,
"step": 40
},
{
"epoch": 0.352112676056338,
"grad_norm": 4.994226455688477,
"learning_rate": 3.4507042253521127e-06,
"loss": 3.5497,
"step": 50
},
{
"epoch": 0.352112676056338,
"eval_loss": 1.7768433094024658,
"eval_runtime": 10.0717,
"eval_samples_per_second": 95.217,
"eval_steps_per_second": 5.957,
"step": 50
},
{
"epoch": 0.4225352112676056,
"grad_norm": 4.968951225280762,
"learning_rate": 4.154929577464789e-06,
"loss": 3.6716,
"step": 60
},
{
"epoch": 0.49295774647887325,
"grad_norm": 4.8160014152526855,
"learning_rate": 4.859154929577465e-06,
"loss": 3.5206,
"step": 70
},
{
"epoch": 0.5633802816901409,
"grad_norm": 5.351637363433838,
"learning_rate": 5.563380281690142e-06,
"loss": 3.4553,
"step": 80
},
{
"epoch": 0.6338028169014085,
"grad_norm": 5.762838363647461,
"learning_rate": 6.267605633802818e-06,
"loss": 3.4572,
"step": 90
},
{
"epoch": 0.704225352112676,
"grad_norm": 5.674283504486084,
"learning_rate": 6.9718309859154935e-06,
"loss": 3.4715,
"step": 100
},
{
"epoch": 0.704225352112676,
"eval_loss": 1.704325556755066,
"eval_runtime": 9.3452,
"eval_samples_per_second": 102.619,
"eval_steps_per_second": 6.42,
"step": 100
},
{
"epoch": 0.7746478873239436,
"grad_norm": 5.651138782501221,
"learning_rate": 7.67605633802817e-06,
"loss": 3.4693,
"step": 110
},
{
"epoch": 0.8450704225352113,
"grad_norm": 5.866046905517578,
"learning_rate": 8.380281690140846e-06,
"loss": 3.482,
"step": 120
},
{
"epoch": 0.9154929577464789,
"grad_norm": 6.652071475982666,
"learning_rate": 9.084507042253522e-06,
"loss": 3.4296,
"step": 130
},
{
"epoch": 0.9859154929577465,
"grad_norm": 8.077320098876953,
"learning_rate": 9.788732394366198e-06,
"loss": 3.3627,
"step": 140
},
{
"epoch": 1.056338028169014,
"grad_norm": 6.8231329917907715,
"learning_rate": 9.945226917057903e-06,
"loss": 3.3338,
"step": 150
},
{
"epoch": 1.056338028169014,
"eval_loss": 1.6177195310592651,
"eval_runtime": 9.306,
"eval_samples_per_second": 103.052,
"eval_steps_per_second": 6.447,
"step": 150
},
{
"epoch": 1.1267605633802817,
"grad_norm": 7.225569725036621,
"learning_rate": 9.866979655712052e-06,
"loss": 3.2974,
"step": 160
},
{
"epoch": 1.1971830985915493,
"grad_norm": 7.667593479156494,
"learning_rate": 9.788732394366198e-06,
"loss": 3.3756,
"step": 170
},
{
"epoch": 1.267605633802817,
"grad_norm": 7.092339992523193,
"learning_rate": 9.710485133020345e-06,
"loss": 3.3302,
"step": 180
},
{
"epoch": 1.3380281690140845,
"grad_norm": 7.544447898864746,
"learning_rate": 9.632237871674493e-06,
"loss": 3.2841,
"step": 190
},
{
"epoch": 1.408450704225352,
"grad_norm": 8.606858253479004,
"learning_rate": 9.55399061032864e-06,
"loss": 3.14,
"step": 200
},
{
"epoch": 1.408450704225352,
"eval_loss": 1.5555429458618164,
"eval_runtime": 8.9418,
"eval_samples_per_second": 107.249,
"eval_steps_per_second": 6.71,
"step": 200
},
{
"epoch": 1.4788732394366197,
"grad_norm": 9.349486351013184,
"learning_rate": 9.475743348982786e-06,
"loss": 3.2447,
"step": 210
},
{
"epoch": 1.5492957746478875,
"grad_norm": 10.120262145996094,
"learning_rate": 9.397496087636934e-06,
"loss": 3.301,
"step": 220
},
{
"epoch": 1.619718309859155,
"grad_norm": 8.683691024780273,
"learning_rate": 9.31924882629108e-06,
"loss": 3.2738,
"step": 230
},
{
"epoch": 1.6901408450704225,
"grad_norm": 8.391075134277344,
"learning_rate": 9.241001564945227e-06,
"loss": 3.1474,
"step": 240
},
{
"epoch": 1.76056338028169,
"grad_norm": 9.799640655517578,
"learning_rate": 9.162754303599375e-06,
"loss": 3.1417,
"step": 250
},
{
"epoch": 1.76056338028169,
"eval_loss": 1.5090895891189575,
"eval_runtime": 9.059,
"eval_samples_per_second": 105.862,
"eval_steps_per_second": 6.623,
"step": 250
},
{
"epoch": 1.8309859154929577,
"grad_norm": 9.724505424499512,
"learning_rate": 9.084507042253522e-06,
"loss": 3.1931,
"step": 260
},
{
"epoch": 1.9014084507042255,
"grad_norm": 11.591618537902832,
"learning_rate": 9.006259780907669e-06,
"loss": 3.1649,
"step": 270
},
{
"epoch": 1.971830985915493,
"grad_norm": 9.90600299835205,
"learning_rate": 8.928012519561817e-06,
"loss": 3.2231,
"step": 280
},
{
"epoch": 2.0422535211267605,
"grad_norm": 10.291154861450195,
"learning_rate": 8.849765258215963e-06,
"loss": 3.1172,
"step": 290
},
{
"epoch": 2.112676056338028,
"grad_norm": 11.620966911315918,
"learning_rate": 8.77151799687011e-06,
"loss": 3.0193,
"step": 300
},
{
"epoch": 2.112676056338028,
"eval_loss": 1.437499761581421,
"eval_runtime": 8.9265,
"eval_samples_per_second": 107.433,
"eval_steps_per_second": 6.722,
"step": 300
},
{
"epoch": 2.183098591549296,
"grad_norm": 12.65719223022461,
"learning_rate": 8.693270735524258e-06,
"loss": 2.9451,
"step": 310
},
{
"epoch": 2.2535211267605635,
"grad_norm": 12.792698860168457,
"learning_rate": 8.615023474178405e-06,
"loss": 2.9669,
"step": 320
},
{
"epoch": 2.323943661971831,
"grad_norm": 12.682389259338379,
"learning_rate": 8.536776212832551e-06,
"loss": 2.9739,
"step": 330
},
{
"epoch": 2.3943661971830985,
"grad_norm": 12.96263313293457,
"learning_rate": 8.4585289514867e-06,
"loss": 2.8988,
"step": 340
},
{
"epoch": 2.464788732394366,
"grad_norm": 14.160724639892578,
"learning_rate": 8.380281690140846e-06,
"loss": 2.9009,
"step": 350
},
{
"epoch": 2.464788732394366,
"eval_loss": 1.3772871494293213,
"eval_runtime": 9.6484,
"eval_samples_per_second": 99.394,
"eval_steps_per_second": 6.219,
"step": 350
},
{
"epoch": 2.535211267605634,
"grad_norm": 14.379948616027832,
"learning_rate": 8.302034428794992e-06,
"loss": 2.8597,
"step": 360
},
{
"epoch": 2.6056338028169015,
"grad_norm": 15.024813652038574,
"learning_rate": 8.22378716744914e-06,
"loss": 2.8603,
"step": 370
},
{
"epoch": 2.676056338028169,
"grad_norm": 15.947714805603027,
"learning_rate": 8.145539906103287e-06,
"loss": 2.9262,
"step": 380
},
{
"epoch": 2.7464788732394365,
"grad_norm": 15.812085151672363,
"learning_rate": 8.067292644757434e-06,
"loss": 2.9301,
"step": 390
},
{
"epoch": 2.816901408450704,
"grad_norm": 15.922052383422852,
"learning_rate": 7.989045383411582e-06,
"loss": 2.9446,
"step": 400
},
{
"epoch": 2.816901408450704,
"eval_loss": 1.3113040924072266,
"eval_runtime": 8.9464,
"eval_samples_per_second": 107.194,
"eval_steps_per_second": 6.707,
"step": 400
},
{
"epoch": 2.887323943661972,
"grad_norm": 15.517271995544434,
"learning_rate": 7.910798122065728e-06,
"loss": 2.9103,
"step": 410
},
{
"epoch": 2.9577464788732395,
"grad_norm": 16.0577449798584,
"learning_rate": 7.832550860719875e-06,
"loss": 2.88,
"step": 420
},
{
"epoch": 3.028169014084507,
"grad_norm": 15.434414863586426,
"learning_rate": 7.754303599374023e-06,
"loss": 2.7008,
"step": 430
},
{
"epoch": 3.0985915492957745,
"grad_norm": 18.000938415527344,
"learning_rate": 7.67605633802817e-06,
"loss": 2.669,
"step": 440
},
{
"epoch": 3.169014084507042,
"grad_norm": 17.124902725219727,
"learning_rate": 7.597809076682316e-06,
"loss": 2.6466,
"step": 450
},
{
"epoch": 3.169014084507042,
"eval_loss": 1.2235872745513916,
"eval_runtime": 8.9632,
"eval_samples_per_second": 106.993,
"eval_steps_per_second": 6.694,
"step": 450
},
{
"epoch": 3.23943661971831,
"grad_norm": 18.728208541870117,
"learning_rate": 7.5195618153364636e-06,
"loss": 2.6583,
"step": 460
},
{
"epoch": 3.3098591549295775,
"grad_norm": 20.174840927124023,
"learning_rate": 7.441314553990611e-06,
"loss": 2.5912,
"step": 470
},
{
"epoch": 3.380281690140845,
"grad_norm": 19.468631744384766,
"learning_rate": 7.3630672926447575e-06,
"loss": 2.5551,
"step": 480
},
{
"epoch": 3.4507042253521125,
"grad_norm": 19.333127975463867,
"learning_rate": 7.284820031298905e-06,
"loss": 2.5759,
"step": 490
},
{
"epoch": 3.52112676056338,
"grad_norm": 20.050857543945312,
"learning_rate": 7.206572769953052e-06,
"loss": 2.6059,
"step": 500
},
{
"epoch": 3.52112676056338,
"eval_loss": 1.1633683443069458,
"eval_runtime": 8.9644,
"eval_samples_per_second": 106.979,
"eval_steps_per_second": 6.693,
"step": 500
},
{
"epoch": 3.591549295774648,
"grad_norm": 21.03053855895996,
"learning_rate": 7.128325508607199e-06,
"loss": 2.5128,
"step": 510
},
{
"epoch": 3.6619718309859155,
"grad_norm": 20.328510284423828,
"learning_rate": 7.050078247261346e-06,
"loss": 2.5397,
"step": 520
},
{
"epoch": 3.732394366197183,
"grad_norm": 21.760059356689453,
"learning_rate": 6.9718309859154935e-06,
"loss": 2.5198,
"step": 530
},
{
"epoch": 3.802816901408451,
"grad_norm": 20.767467498779297,
"learning_rate": 6.89358372456964e-06,
"loss": 2.4605,
"step": 540
},
{
"epoch": 3.873239436619718,
"grad_norm": 24.69249725341797,
"learning_rate": 6.815336463223787e-06,
"loss": 2.4911,
"step": 550
},
{
"epoch": 3.873239436619718,
"eval_loss": 1.0876926183700562,
"eval_runtime": 8.9503,
"eval_samples_per_second": 107.147,
"eval_steps_per_second": 6.704,
"step": 550
},
{
"epoch": 3.943661971830986,
"grad_norm": 21.451522827148438,
"learning_rate": 6.737089201877935e-06,
"loss": 2.445,
"step": 560
},
{
"epoch": 4.014084507042254,
"grad_norm": 21.80568504333496,
"learning_rate": 6.658841940532081e-06,
"loss": 2.4261,
"step": 570
},
{
"epoch": 4.084507042253521,
"grad_norm": 23.770599365234375,
"learning_rate": 6.580594679186229e-06,
"loss": 2.2965,
"step": 580
},
{
"epoch": 4.154929577464789,
"grad_norm": 22.17987060546875,
"learning_rate": 6.502347417840375e-06,
"loss": 2.1889,
"step": 590
},
{
"epoch": 4.225352112676056,
"grad_norm": 22.51678466796875,
"learning_rate": 6.424100156494523e-06,
"loss": 2.2193,
"step": 600
},
{
"epoch": 4.225352112676056,
"eval_loss": 1.049774408340454,
"eval_runtime": 8.9455,
"eval_samples_per_second": 107.205,
"eval_steps_per_second": 6.707,
"step": 600
},
{
"epoch": 4.295774647887324,
"grad_norm": 23.58041000366211,
"learning_rate": 6.345852895148671e-06,
"loss": 2.2024,
"step": 610
},
{
"epoch": 4.366197183098592,
"grad_norm": 24.69291877746582,
"learning_rate": 6.267605633802818e-06,
"loss": 2.2096,
"step": 620
},
{
"epoch": 4.436619718309859,
"grad_norm": 24.199907302856445,
"learning_rate": 6.189358372456965e-06,
"loss": 2.2598,
"step": 630
},
{
"epoch": 4.507042253521127,
"grad_norm": 23.92287826538086,
"learning_rate": 6.111111111111112e-06,
"loss": 2.2322,
"step": 640
},
{
"epoch": 4.577464788732394,
"grad_norm": 22.647499084472656,
"learning_rate": 6.0328638497652595e-06,
"loss": 2.1822,
"step": 650
},
{
"epoch": 4.577464788732394,
"eval_loss": 0.9610461592674255,
"eval_runtime": 8.9698,
"eval_samples_per_second": 106.915,
"eval_steps_per_second": 6.689,
"step": 650
},
{
"epoch": 4.647887323943662,
"grad_norm": 23.590543746948242,
"learning_rate": 5.954616588419406e-06,
"loss": 2.1621,
"step": 660
},
{
"epoch": 4.71830985915493,
"grad_norm": 23.479148864746094,
"learning_rate": 5.876369327073553e-06,
"loss": 2.1543,
"step": 670
},
{
"epoch": 4.788732394366197,
"grad_norm": 26.514415740966797,
"learning_rate": 5.798122065727701e-06,
"loss": 2.2376,
"step": 680
},
{
"epoch": 4.859154929577465,
"grad_norm": 25.98239517211914,
"learning_rate": 5.719874804381847e-06,
"loss": 2.1652,
"step": 690
},
{
"epoch": 4.929577464788732,
"grad_norm": 25.147550582885742,
"learning_rate": 5.641627543035995e-06,
"loss": 2.1194,
"step": 700
},
{
"epoch": 4.929577464788732,
"eval_loss": 0.8909062743186951,
"eval_runtime": 8.9807,
"eval_samples_per_second": 106.785,
"eval_steps_per_second": 6.681,
"step": 700
},
{
"epoch": 5.0,
"grad_norm": 24.74693489074707,
"learning_rate": 5.563380281690142e-06,
"loss": 2.0919,
"step": 710
},
{
"epoch": 5.070422535211268,
"grad_norm": 27.079837799072266,
"learning_rate": 5.4851330203442885e-06,
"loss": 1.9779,
"step": 720
},
{
"epoch": 5.140845070422535,
"grad_norm": 25.947982788085938,
"learning_rate": 5.406885758998436e-06,
"loss": 1.8619,
"step": 730
},
{
"epoch": 5.211267605633803,
"grad_norm": 27.102828979492188,
"learning_rate": 5.328638497652583e-06,
"loss": 1.9172,
"step": 740
},
{
"epoch": 5.28169014084507,
"grad_norm": 27.407745361328125,
"learning_rate": 5.25039123630673e-06,
"loss": 1.9355,
"step": 750
},
{
"epoch": 5.28169014084507,
"eval_loss": 0.8482040166854858,
"eval_runtime": 9.8318,
"eval_samples_per_second": 97.54,
"eval_steps_per_second": 6.103,
"step": 750
},
{
"epoch": 5.352112676056338,
"grad_norm": 24.58213233947754,
"learning_rate": 5.172143974960877e-06,
"loss": 1.9393,
"step": 760
},
{
"epoch": 5.422535211267606,
"grad_norm": 24.623889923095703,
"learning_rate": 5.093896713615024e-06,
"loss": 1.8727,
"step": 770
},
{
"epoch": 5.492957746478873,
"grad_norm": 26.396087646484375,
"learning_rate": 5.015649452269171e-06,
"loss": 1.9599,
"step": 780
},
{
"epoch": 5.563380281690141,
"grad_norm": 28.786396026611328,
"learning_rate": 4.9374021909233185e-06,
"loss": 1.8271,
"step": 790
},
{
"epoch": 5.633802816901408,
"grad_norm": 25.507366180419922,
"learning_rate": 4.859154929577465e-06,
"loss": 1.9065,
"step": 800
},
{
"epoch": 5.633802816901408,
"eval_loss": 0.8195229768753052,
"eval_runtime": 8.968,
"eval_samples_per_second": 106.935,
"eval_steps_per_second": 6.69,
"step": 800
},
{
"epoch": 5.704225352112676,
"grad_norm": 26.951005935668945,
"learning_rate": 4.780907668231612e-06,
"loss": 1.8742,
"step": 810
},
{
"epoch": 5.774647887323944,
"grad_norm": 27.19331169128418,
"learning_rate": 4.70266040688576e-06,
"loss": 1.8754,
"step": 820
},
{
"epoch": 5.845070422535211,
"grad_norm": 26.17990493774414,
"learning_rate": 4.624413145539906e-06,
"loss": 1.8412,
"step": 830
},
{
"epoch": 5.915492957746479,
"grad_norm": 28.198486328125,
"learning_rate": 4.546165884194054e-06,
"loss": 1.845,
"step": 840
},
{
"epoch": 5.985915492957746,
"grad_norm": 24.81572914123535,
"learning_rate": 4.467918622848201e-06,
"loss": 1.8035,
"step": 850
},
{
"epoch": 5.985915492957746,
"eval_loss": 0.7713097929954529,
"eval_runtime": 8.914,
"eval_samples_per_second": 107.583,
"eval_steps_per_second": 6.731,
"step": 850
},
{
"epoch": 6.056338028169014,
"grad_norm": 27.168418884277344,
"learning_rate": 4.3896713615023476e-06,
"loss": 1.7062,
"step": 860
},
{
"epoch": 6.126760563380282,
"grad_norm": 25.584774017333984,
"learning_rate": 4.311424100156495e-06,
"loss": 1.6908,
"step": 870
},
{
"epoch": 6.197183098591549,
"grad_norm": 27.289413452148438,
"learning_rate": 4.233176838810642e-06,
"loss": 1.6868,
"step": 880
},
{
"epoch": 6.267605633802817,
"grad_norm": 26.789995193481445,
"learning_rate": 4.154929577464789e-06,
"loss": 1.686,
"step": 890
},
{
"epoch": 6.338028169014084,
"grad_norm": 27.878765106201172,
"learning_rate": 4.076682316118936e-06,
"loss": 1.564,
"step": 900
},
{
"epoch": 6.338028169014084,
"eval_loss": 0.7355391383171082,
"eval_runtime": 8.9357,
"eval_samples_per_second": 107.322,
"eval_steps_per_second": 6.715,
"step": 900
},
{
"epoch": 6.408450704225352,
"grad_norm": 27.236083984375,
"learning_rate": 3.998435054773084e-06,
"loss": 1.7135,
"step": 910
},
{
"epoch": 6.47887323943662,
"grad_norm": 26.007850646972656,
"learning_rate": 3.92018779342723e-06,
"loss": 1.6614,
"step": 920
},
{
"epoch": 6.549295774647887,
"grad_norm": 26.08616065979004,
"learning_rate": 3.8419405320813775e-06,
"loss": 1.6442,
"step": 930
},
{
"epoch": 6.619718309859155,
"grad_norm": 26.021728515625,
"learning_rate": 3.7636932707355245e-06,
"loss": 1.6117,
"step": 940
},
{
"epoch": 6.690140845070422,
"grad_norm": 26.019577026367188,
"learning_rate": 3.6854460093896714e-06,
"loss": 1.6317,
"step": 950
},
{
"epoch": 6.690140845070422,
"eval_loss": 0.700791597366333,
"eval_runtime": 8.9228,
"eval_samples_per_second": 107.477,
"eval_steps_per_second": 6.724,
"step": 950
},
{
"epoch": 6.76056338028169,
"grad_norm": 25.988859176635742,
"learning_rate": 3.6071987480438188e-06,
"loss": 1.6589,
"step": 960
},
{
"epoch": 6.830985915492958,
"grad_norm": 26.973453521728516,
"learning_rate": 3.5289514866979657e-06,
"loss": 1.673,
"step": 970
},
{
"epoch": 6.901408450704225,
"grad_norm": 27.36475372314453,
"learning_rate": 3.4507042253521127e-06,
"loss": 1.6018,
"step": 980
},
{
"epoch": 6.971830985915493,
"grad_norm": 28.897504806518555,
"learning_rate": 3.37245696400626e-06,
"loss": 1.6291,
"step": 990
},
{
"epoch": 7.042253521126761,
"grad_norm": 26.3133544921875,
"learning_rate": 3.294209702660407e-06,
"loss": 1.5407,
"step": 1000
},
{
"epoch": 7.042253521126761,
"eval_loss": 0.6679695248603821,
"eval_runtime": 8.9256,
"eval_samples_per_second": 107.444,
"eval_steps_per_second": 6.722,
"step": 1000
},
{
"epoch": 7.112676056338028,
"grad_norm": 29.09581184387207,
"learning_rate": 3.215962441314554e-06,
"loss": 1.4842,
"step": 1010
},
{
"epoch": 7.183098591549296,
"grad_norm": 27.576128005981445,
"learning_rate": 3.137715179968701e-06,
"loss": 1.5063,
"step": 1020
},
{
"epoch": 7.253521126760563,
"grad_norm": 29.655752182006836,
"learning_rate": 3.0594679186228483e-06,
"loss": 1.5052,
"step": 1030
},
{
"epoch": 7.323943661971831,
"grad_norm": 25.104883193969727,
"learning_rate": 2.9812206572769952e-06,
"loss": 1.42,
"step": 1040
},
{
"epoch": 7.394366197183099,
"grad_norm": 27.621112823486328,
"learning_rate": 2.902973395931143e-06,
"loss": 1.4671,
"step": 1050
},
{
"epoch": 7.394366197183099,
"eval_loss": 0.6450517177581787,
"eval_runtime": 8.9546,
"eval_samples_per_second": 107.096,
"eval_steps_per_second": 6.7,
"step": 1050
},
{
"epoch": 7.464788732394366,
"grad_norm": 27.425331115722656,
"learning_rate": 2.82472613458529e-06,
"loss": 1.4879,
"step": 1060
},
{
"epoch": 7.535211267605634,
"grad_norm": 27.76177978515625,
"learning_rate": 2.746478873239437e-06,
"loss": 1.5012,
"step": 1070
},
{
"epoch": 7.605633802816901,
"grad_norm": 24.934165954589844,
"learning_rate": 2.6682316118935843e-06,
"loss": 1.4386,
"step": 1080
},
{
"epoch": 7.676056338028169,
"grad_norm": 28.298328399658203,
"learning_rate": 2.5899843505477313e-06,
"loss": 1.4544,
"step": 1090
},
{
"epoch": 7.746478873239437,
"grad_norm": 24.99042320251465,
"learning_rate": 2.5117370892018782e-06,
"loss": 1.4076,
"step": 1100
},
{
"epoch": 7.746478873239437,
"eval_loss": 0.6194617748260498,
"eval_runtime": 8.9294,
"eval_samples_per_second": 107.398,
"eval_steps_per_second": 6.719,
"step": 1100
},
{
"epoch": 7.816901408450704,
"grad_norm": 27.29831314086914,
"learning_rate": 2.433489827856025e-06,
"loss": 1.4838,
"step": 1110
},
{
"epoch": 7.887323943661972,
"grad_norm": 27.53099250793457,
"learning_rate": 2.3552425665101726e-06,
"loss": 1.4745,
"step": 1120
},
{
"epoch": 7.957746478873239,
"grad_norm": 27.09596824645996,
"learning_rate": 2.2769953051643195e-06,
"loss": 1.3781,
"step": 1130
},
{
"epoch": 8.028169014084508,
"grad_norm": 26.090456008911133,
"learning_rate": 2.1987480438184665e-06,
"loss": 1.422,
"step": 1140
},
{
"epoch": 8.098591549295774,
"grad_norm": 26.628990173339844,
"learning_rate": 2.120500782472614e-06,
"loss": 1.345,
"step": 1150
},
{
"epoch": 8.098591549295774,
"eval_loss": 0.603298008441925,
"eval_runtime": 8.923,
"eval_samples_per_second": 107.475,
"eval_steps_per_second": 6.724,
"step": 1150
},
{
"epoch": 8.169014084507042,
"grad_norm": 26.968416213989258,
"learning_rate": 2.0422535211267608e-06,
"loss": 1.4478,
"step": 1160
},
{
"epoch": 8.23943661971831,
"grad_norm": 28.169918060302734,
"learning_rate": 1.9640062597809077e-06,
"loss": 1.3111,
"step": 1170
},
{
"epoch": 8.309859154929578,
"grad_norm": 28.084226608276367,
"learning_rate": 1.885758998435055e-06,
"loss": 1.3754,
"step": 1180
},
{
"epoch": 8.380281690140846,
"grad_norm": 28.451416015625,
"learning_rate": 1.807511737089202e-06,
"loss": 1.3996,
"step": 1190
},
{
"epoch": 8.450704225352112,
"grad_norm": 27.99500846862793,
"learning_rate": 1.729264475743349e-06,
"loss": 1.3818,
"step": 1200
},
{
"epoch": 8.450704225352112,
"eval_loss": 0.5862457752227783,
"eval_runtime": 8.9231,
"eval_samples_per_second": 107.473,
"eval_steps_per_second": 6.724,
"step": 1200
},
{
"epoch": 8.52112676056338,
"grad_norm": 26.25287628173828,
"learning_rate": 1.6510172143974962e-06,
"loss": 1.348,
"step": 1210
},
{
"epoch": 8.591549295774648,
"grad_norm": 27.26688003540039,
"learning_rate": 1.5727699530516433e-06,
"loss": 1.3567,
"step": 1220
},
{
"epoch": 8.661971830985916,
"grad_norm": 25.22686767578125,
"learning_rate": 1.4945226917057903e-06,
"loss": 1.331,
"step": 1230
},
{
"epoch": 8.732394366197184,
"grad_norm": 27.34234619140625,
"learning_rate": 1.4162754303599375e-06,
"loss": 1.2977,
"step": 1240
},
{
"epoch": 8.80281690140845,
"grad_norm": 23.48381233215332,
"learning_rate": 1.3380281690140844e-06,
"loss": 1.2578,
"step": 1250
},
{
"epoch": 8.80281690140845,
"eval_loss": 0.5742074847221375,
"eval_runtime": 8.9308,
"eval_samples_per_second": 107.381,
"eval_steps_per_second": 6.718,
"step": 1250
},
{
"epoch": 8.873239436619718,
"grad_norm": 25.575672149658203,
"learning_rate": 1.2597809076682316e-06,
"loss": 1.2807,
"step": 1260
},
{
"epoch": 8.943661971830986,
"grad_norm": 27.154817581176758,
"learning_rate": 1.1815336463223787e-06,
"loss": 1.3597,
"step": 1270
},
{
"epoch": 9.014084507042254,
"grad_norm": 28.042072296142578,
"learning_rate": 1.103286384976526e-06,
"loss": 1.3292,
"step": 1280
},
{
"epoch": 9.084507042253522,
"grad_norm": 27.011009216308594,
"learning_rate": 1.0250391236306729e-06,
"loss": 1.2585,
"step": 1290
},
{
"epoch": 9.154929577464788,
"grad_norm": 27.294761657714844,
"learning_rate": 9.467918622848201e-07,
"loss": 1.2552,
"step": 1300
},
{
"epoch": 9.154929577464788,
"eval_loss": 0.5647178292274475,
"eval_runtime": 8.9528,
"eval_samples_per_second": 107.117,
"eval_steps_per_second": 6.702,
"step": 1300
},
{
"epoch": 9.225352112676056,
"grad_norm": 25.794416427612305,
"learning_rate": 8.685446009389673e-07,
"loss": 1.3339,
"step": 1310
},
{
"epoch": 9.295774647887324,
"grad_norm": 26.401464462280273,
"learning_rate": 7.902973395931143e-07,
"loss": 1.2872,
"step": 1320
},
{
"epoch": 9.366197183098592,
"grad_norm": 28.1634464263916,
"learning_rate": 7.120500782472614e-07,
"loss": 1.276,
"step": 1330
},
{
"epoch": 9.43661971830986,
"grad_norm": 26.156156539916992,
"learning_rate": 6.338028169014085e-07,
"loss": 1.2846,
"step": 1340
},
{
"epoch": 9.507042253521126,
"grad_norm": 26.216571807861328,
"learning_rate": 5.555555555555555e-07,
"loss": 1.2782,
"step": 1350
},
{
"epoch": 9.507042253521126,
"eval_loss": 0.5621166229248047,
"eval_runtime": 8.9665,
"eval_samples_per_second": 106.953,
"eval_steps_per_second": 6.692,
"step": 1350
},
{
"epoch": 9.577464788732394,
"grad_norm": 26.63737678527832,
"learning_rate": 4.773082942097027e-07,
"loss": 1.2883,
"step": 1360
},
{
"epoch": 9.647887323943662,
"grad_norm": 27.532459259033203,
"learning_rate": 3.990610328638498e-07,
"loss": 1.2515,
"step": 1370
},
{
"epoch": 9.71830985915493,
"grad_norm": 23.93533706665039,
"learning_rate": 3.208137715179969e-07,
"loss": 1.3023,
"step": 1380
},
{
"epoch": 9.788732394366198,
"grad_norm": 26.48802375793457,
"learning_rate": 2.42566510172144e-07,
"loss": 1.3053,
"step": 1390
},
{
"epoch": 9.859154929577464,
"grad_norm": 25.6018123626709,
"learning_rate": 1.643192488262911e-07,
"loss": 1.2556,
"step": 1400
},
{
"epoch": 9.859154929577464,
"eval_loss": 0.5561918616294861,
"eval_runtime": 9.3282,
"eval_samples_per_second": 102.806,
"eval_steps_per_second": 6.432,
"step": 1400
},
{
"epoch": 9.929577464788732,
"grad_norm": 26.17299461364746,
"learning_rate": 8.607198748043818e-08,
"loss": 1.2926,
"step": 1410
},
{
"epoch": 10.0,
"grad_norm": 28.140972137451172,
"learning_rate": 7.824726134585291e-09,
"loss": 1.2199,
"step": 1420
}
],
"logging_steps": 10,
"max_steps": 1420,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 350,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}