{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.0,
"eval_steps": 500,
"global_step": 11900,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03361344537815126,
"grad_norm": 1.2532932758331299,
"learning_rate": 4.966722689075631e-05,
"loss": 0.7069,
"step": 100
},
{
"epoch": 0.06722689075630252,
"grad_norm": 0.3335288465023041,
"learning_rate": 4.933109243697479e-05,
"loss": 0.0112,
"step": 200
},
{
"epoch": 0.10084033613445378,
"grad_norm": 0.21957238018512726,
"learning_rate": 4.899495798319328e-05,
"loss": 0.0075,
"step": 300
},
{
"epoch": 0.13445378151260504,
"grad_norm": 0.12609940767288208,
"learning_rate": 4.8658823529411766e-05,
"loss": 0.0063,
"step": 400
},
{
"epoch": 0.16806722689075632,
"grad_norm": 0.25657421350479126,
"learning_rate": 4.832268907563026e-05,
"loss": 0.0056,
"step": 500
},
{
"epoch": 0.20168067226890757,
"grad_norm": 0.5225902795791626,
"learning_rate": 4.7986554621848737e-05,
"loss": 0.0073,
"step": 600
},
{
"epoch": 0.23529411764705882,
"grad_norm": 0.3552115261554718,
"learning_rate": 4.765042016806723e-05,
"loss": 0.0049,
"step": 700
},
{
"epoch": 0.2689075630252101,
"grad_norm": 0.4207090139389038,
"learning_rate": 4.7314285714285714e-05,
"loss": 0.0043,
"step": 800
},
{
"epoch": 0.3025210084033613,
"grad_norm": 0.6347660422325134,
"learning_rate": 4.6978151260504207e-05,
"loss": 0.0039,
"step": 900
},
{
"epoch": 0.33613445378151263,
"grad_norm": 0.4249831438064575,
"learning_rate": 4.664201680672269e-05,
"loss": 0.0035,
"step": 1000
},
{
"epoch": 0.3697478991596639,
"grad_norm": 0.20205572247505188,
"learning_rate": 4.630588235294118e-05,
"loss": 0.0045,
"step": 1100
},
{
"epoch": 0.40336134453781514,
"grad_norm": 0.25402364134788513,
"learning_rate": 4.596974789915966e-05,
"loss": 0.0048,
"step": 1200
},
{
"epoch": 0.4369747899159664,
"grad_norm": 0.11454110592603683,
"learning_rate": 4.5633613445378155e-05,
"loss": 0.0046,
"step": 1300
},
{
"epoch": 0.47058823529411764,
"grad_norm": 0.29738855361938477,
"learning_rate": 4.529747899159664e-05,
"loss": 0.0084,
"step": 1400
},
{
"epoch": 0.5042016806722689,
"grad_norm": 0.048551127314567566,
"learning_rate": 4.4961344537815126e-05,
"loss": 0.0029,
"step": 1500
},
{
"epoch": 0.5378151260504201,
"grad_norm": 0.1416209638118744,
"learning_rate": 4.462521008403361e-05,
"loss": 0.0036,
"step": 1600
},
{
"epoch": 0.5714285714285714,
"grad_norm": 0.4617021083831787,
"learning_rate": 4.4289075630252104e-05,
"loss": 0.0029,
"step": 1700
},
{
"epoch": 0.6050420168067226,
"grad_norm": 0.25475579500198364,
"learning_rate": 4.395294117647059e-05,
"loss": 0.0031,
"step": 1800
},
{
"epoch": 0.6386554621848739,
"grad_norm": 0.23906832933425903,
"learning_rate": 4.361680672268908e-05,
"loss": 0.0032,
"step": 1900
},
{
"epoch": 0.6722689075630253,
"grad_norm": 0.2611936032772064,
"learning_rate": 4.328067226890756e-05,
"loss": 0.0037,
"step": 2000
},
{
"epoch": 0.7058823529411765,
"grad_norm": 0.04429571330547333,
"learning_rate": 4.294453781512605e-05,
"loss": 0.0028,
"step": 2100
},
{
"epoch": 0.7394957983193278,
"grad_norm": 0.47502580285072327,
"learning_rate": 4.260840336134454e-05,
"loss": 0.0032,
"step": 2200
},
{
"epoch": 0.773109243697479,
"grad_norm": 0.2212914377450943,
"learning_rate": 4.227226890756303e-05,
"loss": 0.0024,
"step": 2300
},
{
"epoch": 0.8067226890756303,
"grad_norm": 20.17454719543457,
"learning_rate": 4.1936134453781516e-05,
"loss": 0.0057,
"step": 2400
},
{
"epoch": 0.8403361344537815,
"grad_norm": 0.48056426644325256,
"learning_rate": 4.16e-05,
"loss": 0.0039,
"step": 2500
},
{
"epoch": 0.8739495798319328,
"grad_norm": 0.4135717451572418,
"learning_rate": 4.126386554621849e-05,
"loss": 0.0029,
"step": 2600
},
{
"epoch": 0.907563025210084,
"grad_norm": 0.10312103480100632,
"learning_rate": 4.092773109243698e-05,
"loss": 0.0027,
"step": 2700
},
{
"epoch": 0.9411764705882353,
"grad_norm": 0.10092929005622864,
"learning_rate": 4.0591596638655465e-05,
"loss": 0.003,
"step": 2800
},
{
"epoch": 0.9747899159663865,
"grad_norm": 0.42715680599212646,
"learning_rate": 4.025546218487395e-05,
"loss": 0.0036,
"step": 2900
},
{
"epoch": 1.0,
"eval_loss": 0.0022414932027459145,
"eval_runtime": 62.59,
"eval_samples_per_second": 124.908,
"eval_steps_per_second": 7.813,
"step": 2975
},
{
"epoch": 1.0084033613445378,
"grad_norm": 0.010128838941454887,
"learning_rate": 3.9919327731092436e-05,
"loss": 0.0025,
"step": 3000
},
{
"epoch": 1.0420168067226891,
"grad_norm": 0.05345134809613228,
"learning_rate": 3.958319327731093e-05,
"loss": 0.0014,
"step": 3100
},
{
"epoch": 1.0756302521008403,
"grad_norm": 0.030169779434800148,
"learning_rate": 3.9247058823529413e-05,
"loss": 0.0019,
"step": 3200
},
{
"epoch": 1.1092436974789917,
"grad_norm": 0.26832690834999084,
"learning_rate": 3.8910924369747906e-05,
"loss": 0.0017,
"step": 3300
},
{
"epoch": 1.1428571428571428,
"grad_norm": 0.0775144025683403,
"learning_rate": 3.8574789915966384e-05,
"loss": 0.002,
"step": 3400
},
{
"epoch": 1.1764705882352942,
"grad_norm": 0.07601425796747208,
"learning_rate": 3.823865546218488e-05,
"loss": 0.0017,
"step": 3500
},
{
"epoch": 1.2100840336134453,
"grad_norm": 0.23803076148033142,
"learning_rate": 3.790252100840336e-05,
"loss": 0.0021,
"step": 3600
},
{
"epoch": 1.2436974789915967,
"grad_norm": 0.014035782776772976,
"learning_rate": 3.7566386554621854e-05,
"loss": 0.0019,
"step": 3700
},
{
"epoch": 1.2773109243697478,
"grad_norm": 0.15406791865825653,
"learning_rate": 3.723025210084034e-05,
"loss": 0.002,
"step": 3800
},
{
"epoch": 1.3109243697478992,
"grad_norm": 0.014077406376600266,
"learning_rate": 3.6894117647058825e-05,
"loss": 0.0019,
"step": 3900
},
{
"epoch": 1.3445378151260505,
"grad_norm": 0.06503783166408539,
"learning_rate": 3.655798319327731e-05,
"loss": 0.0014,
"step": 4000
},
{
"epoch": 1.3781512605042017,
"grad_norm": 0.0875903069972992,
"learning_rate": 3.62218487394958e-05,
"loss": 0.0019,
"step": 4100
},
{
"epoch": 1.4117647058823528,
"grad_norm": 0.010527476668357849,
"learning_rate": 3.588571428571429e-05,
"loss": 0.0017,
"step": 4200
},
{
"epoch": 1.4453781512605042,
"grad_norm": 0.011578446254134178,
"learning_rate": 3.5549579831932774e-05,
"loss": 0.0024,
"step": 4300
},
{
"epoch": 1.4789915966386555,
"grad_norm": 0.0051996163092553616,
"learning_rate": 3.521344537815126e-05,
"loss": 0.0019,
"step": 4400
},
{
"epoch": 1.5126050420168067,
"grad_norm": 0.023098396137356758,
"learning_rate": 3.487731092436975e-05,
"loss": 0.0013,
"step": 4500
},
{
"epoch": 1.5462184873949578,
"grad_norm": 0.16987691819667816,
"learning_rate": 3.454117647058824e-05,
"loss": 0.0021,
"step": 4600
},
{
"epoch": 1.5798319327731094,
"grad_norm": 0.11584833264350891,
"learning_rate": 3.420504201680673e-05,
"loss": 0.0017,
"step": 4700
},
{
"epoch": 1.6134453781512605,
"grad_norm": 0.07376863062381744,
"learning_rate": 3.386890756302521e-05,
"loss": 0.0019,
"step": 4800
},
{
"epoch": 1.6470588235294117,
"grad_norm": 0.003893248736858368,
"learning_rate": 3.35327731092437e-05,
"loss": 0.0023,
"step": 4900
},
{
"epoch": 1.680672268907563,
"grad_norm": 0.11629138141870499,
"learning_rate": 3.3196638655462186e-05,
"loss": 0.002,
"step": 5000
},
{
"epoch": 1.7142857142857144,
"grad_norm": 0.09188009798526764,
"learning_rate": 3.286050420168068e-05,
"loss": 0.0017,
"step": 5100
},
{
"epoch": 1.7478991596638656,
"grad_norm": 0.11714625358581543,
"learning_rate": 3.2524369747899164e-05,
"loss": 0.0019,
"step": 5200
},
{
"epoch": 1.7815126050420167,
"grad_norm": 0.011511634103953838,
"learning_rate": 3.218823529411765e-05,
"loss": 0.0014,
"step": 5300
},
{
"epoch": 1.815126050420168,
"grad_norm": 0.025230418890714645,
"learning_rate": 3.1852100840336135e-05,
"loss": 0.0013,
"step": 5400
},
{
"epoch": 1.8487394957983194,
"grad_norm": 0.08280649781227112,
"learning_rate": 3.151596638655463e-05,
"loss": 0.0014,
"step": 5500
},
{
"epoch": 1.8823529411764706,
"grad_norm": 0.044068414717912674,
"learning_rate": 3.117983193277311e-05,
"loss": 0.0017,
"step": 5600
},
{
"epoch": 1.9159663865546217,
"grad_norm": 0.13378596305847168,
"learning_rate": 3.08436974789916e-05,
"loss": 0.0017,
"step": 5700
},
{
"epoch": 1.949579831932773,
"grad_norm": 0.08546321094036102,
"learning_rate": 3.0507563025210084e-05,
"loss": 0.0016,
"step": 5800
},
{
"epoch": 1.9831932773109244,
"grad_norm": 0.21406061947345734,
"learning_rate": 3.0171428571428572e-05,
"loss": 0.0017,
"step": 5900
},
{
"epoch": 2.0,
"eval_loss": 0.0019952901639044285,
"eval_runtime": 62.6295,
"eval_samples_per_second": 124.829,
"eval_steps_per_second": 7.808,
"step": 5950
},
{
"epoch": 2.0168067226890756,
"grad_norm": 0.12653397023677826,
"learning_rate": 2.983529411764706e-05,
"loss": 0.0012,
"step": 6000
},
{
"epoch": 2.0504201680672267,
"grad_norm": 0.021875187754631042,
"learning_rate": 2.949915966386555e-05,
"loss": 0.0011,
"step": 6100
},
{
"epoch": 2.0840336134453783,
"grad_norm": 0.02089652232825756,
"learning_rate": 2.9163025210084032e-05,
"loss": 0.0007,
"step": 6200
},
{
"epoch": 2.1176470588235294,
"grad_norm": 0.10652671009302139,
"learning_rate": 2.882689075630252e-05,
"loss": 0.0009,
"step": 6300
},
{
"epoch": 2.1512605042016806,
"grad_norm": 0.052155669778585434,
"learning_rate": 2.849075630252101e-05,
"loss": 0.001,
"step": 6400
},
{
"epoch": 2.184873949579832,
"grad_norm": 0.03689173236489296,
"learning_rate": 2.81546218487395e-05,
"loss": 0.0011,
"step": 6500
},
{
"epoch": 2.2184873949579833,
"grad_norm": 0.1453682780265808,
"learning_rate": 2.7818487394957988e-05,
"loss": 0.001,
"step": 6600
},
{
"epoch": 2.2521008403361344,
"grad_norm": 0.10738670825958252,
"learning_rate": 2.748235294117647e-05,
"loss": 0.0012,
"step": 6700
},
{
"epoch": 2.2857142857142856,
"grad_norm": 0.07871459424495697,
"learning_rate": 2.714621848739496e-05,
"loss": 0.0015,
"step": 6800
},
{
"epoch": 2.3193277310924367,
"grad_norm": 0.0664721354842186,
"learning_rate": 2.6810084033613448e-05,
"loss": 0.0011,
"step": 6900
},
{
"epoch": 2.3529411764705883,
"grad_norm": 0.0028383585158735514,
"learning_rate": 2.6473949579831937e-05,
"loss": 0.0012,
"step": 7000
},
{
"epoch": 2.3865546218487395,
"grad_norm": 0.06004633009433746,
"learning_rate": 2.613781512605042e-05,
"loss": 0.0011,
"step": 7100
},
{
"epoch": 2.4201680672268906,
"grad_norm": 0.1983553171157837,
"learning_rate": 2.5801680672268908e-05,
"loss": 0.0012,
"step": 7200
},
{
"epoch": 2.453781512605042,
"grad_norm": 0.10064397752285004,
"learning_rate": 2.5465546218487396e-05,
"loss": 0.0015,
"step": 7300
},
{
"epoch": 2.4873949579831933,
"grad_norm": 0.010177959688007832,
"learning_rate": 2.5129411764705885e-05,
"loss": 0.0012,
"step": 7400
},
{
"epoch": 2.5210084033613445,
"grad_norm": 0.0731307789683342,
"learning_rate": 2.479327731092437e-05,
"loss": 0.0013,
"step": 7500
},
{
"epoch": 2.5546218487394956,
"grad_norm": 0.015169462189078331,
"learning_rate": 2.445714285714286e-05,
"loss": 0.0014,
"step": 7600
},
{
"epoch": 2.588235294117647,
"grad_norm": 0.10259702801704407,
"learning_rate": 2.4121008403361345e-05,
"loss": 0.0009,
"step": 7700
},
{
"epoch": 2.6218487394957983,
"grad_norm": 0.11249709874391556,
"learning_rate": 2.3784873949579834e-05,
"loss": 0.0014,
"step": 7800
},
{
"epoch": 2.6554621848739495,
"grad_norm": 0.033951204270124435,
"learning_rate": 2.344873949579832e-05,
"loss": 0.0011,
"step": 7900
},
{
"epoch": 2.689075630252101,
"grad_norm": 0.07527955621480942,
"learning_rate": 2.311260504201681e-05,
"loss": 0.0011,
"step": 8000
},
{
"epoch": 2.722689075630252,
"grad_norm": 0.057067230343818665,
"learning_rate": 2.2776470588235297e-05,
"loss": 0.0008,
"step": 8100
},
{
"epoch": 2.7563025210084033,
"grad_norm": 0.028707563877105713,
"learning_rate": 2.2440336134453783e-05,
"loss": 0.0009,
"step": 8200
},
{
"epoch": 2.7899159663865545,
"grad_norm": 0.04694965109229088,
"learning_rate": 2.210420168067227e-05,
"loss": 0.0011,
"step": 8300
},
{
"epoch": 2.8235294117647056,
"grad_norm": 0.05145597457885742,
"learning_rate": 2.1768067226890757e-05,
"loss": 0.0009,
"step": 8400
},
{
"epoch": 2.857142857142857,
"grad_norm": 0.009829564020037651,
"learning_rate": 2.1431932773109246e-05,
"loss": 0.0007,
"step": 8500
},
{
"epoch": 2.8907563025210083,
"grad_norm": 0.017278891056776047,
"learning_rate": 2.109579831932773e-05,
"loss": 0.0007,
"step": 8600
},
{
"epoch": 2.92436974789916,
"grad_norm": 0.01149184163659811,
"learning_rate": 2.075966386554622e-05,
"loss": 0.0009,
"step": 8700
},
{
"epoch": 2.957983193277311,
"grad_norm": 0.024918945506215096,
"learning_rate": 2.042352941176471e-05,
"loss": 0.0007,
"step": 8800
},
{
"epoch": 2.991596638655462,
"grad_norm": 0.13216158747673035,
"learning_rate": 2.0087394957983195e-05,
"loss": 0.0007,
"step": 8900
},
{
"epoch": 3.0,
"eval_loss": 0.0018142093904316425,
"eval_runtime": 62.4784,
"eval_samples_per_second": 125.131,
"eval_steps_per_second": 7.827,
"step": 8925
},
{
"epoch": 3.0252100840336134,
"grad_norm": 0.006363982334733009,
"learning_rate": 1.9751260504201684e-05,
"loss": 0.0006,
"step": 9000
},
{
"epoch": 3.0588235294117645,
"grad_norm": 0.12958742678165436,
"learning_rate": 1.941512605042017e-05,
"loss": 0.0006,
"step": 9100
},
{
"epoch": 3.092436974789916,
"grad_norm": 0.00047478260239586234,
"learning_rate": 1.9078991596638658e-05,
"loss": 0.0004,
"step": 9200
},
{
"epoch": 3.1260504201680672,
"grad_norm": 0.1666508913040161,
"learning_rate": 1.8742857142857143e-05,
"loss": 0.0005,
"step": 9300
},
{
"epoch": 3.1596638655462184,
"grad_norm": 0.003551214002072811,
"learning_rate": 1.8406722689075632e-05,
"loss": 0.0007,
"step": 9400
},
{
"epoch": 3.19327731092437,
"grad_norm": 0.00195235013961792,
"learning_rate": 1.807058823529412e-05,
"loss": 0.0004,
"step": 9500
},
{
"epoch": 3.226890756302521,
"grad_norm": 0.004866756033152342,
"learning_rate": 1.7734453781512607e-05,
"loss": 0.0004,
"step": 9600
},
{
"epoch": 3.2605042016806722,
"grad_norm": 0.1616717129945755,
"learning_rate": 1.7398319327731096e-05,
"loss": 0.0005,
"step": 9700
},
{
"epoch": 3.2941176470588234,
"grad_norm": 0.1331503987312317,
"learning_rate": 1.706218487394958e-05,
"loss": 0.0005,
"step": 9800
},
{
"epoch": 3.327731092436975,
"grad_norm": 0.0034314363729208708,
"learning_rate": 1.672605042016807e-05,
"loss": 0.0006,
"step": 9900
},
{
"epoch": 3.361344537815126,
"grad_norm": 0.00794992409646511,
"learning_rate": 1.6389915966386555e-05,
"loss": 0.0003,
"step": 10000
},
{
"epoch": 3.3949579831932772,
"grad_norm": 0.003377394750714302,
"learning_rate": 1.6053781512605044e-05,
"loss": 0.0003,
"step": 10100
},
{
"epoch": 3.4285714285714284,
"grad_norm": 0.005109515506774187,
"learning_rate": 1.571764705882353e-05,
"loss": 0.0007,
"step": 10200
},
{
"epoch": 3.46218487394958,
"grad_norm": 0.08545704185962677,
"learning_rate": 1.538151260504202e-05,
"loss": 0.0007,
"step": 10300
},
{
"epoch": 3.495798319327731,
"grad_norm": 0.0017083921702578664,
"learning_rate": 1.5045378151260506e-05,
"loss": 0.0005,
"step": 10400
},
{
"epoch": 3.5294117647058822,
"grad_norm": 0.011194012127816677,
"learning_rate": 1.4709243697478991e-05,
"loss": 0.0007,
"step": 10500
},
{
"epoch": 3.5630252100840334,
"grad_norm": 0.20523475110530853,
"learning_rate": 1.437310924369748e-05,
"loss": 0.0003,
"step": 10600
},
{
"epoch": 3.596638655462185,
"grad_norm": 0.06024377420544624,
"learning_rate": 1.4036974789915966e-05,
"loss": 0.0007,
"step": 10700
},
{
"epoch": 3.630252100840336,
"grad_norm": 0.11052771657705307,
"learning_rate": 1.3700840336134455e-05,
"loss": 0.0005,
"step": 10800
},
{
"epoch": 3.6638655462184873,
"grad_norm": 0.2950091063976288,
"learning_rate": 1.3364705882352943e-05,
"loss": 0.0005,
"step": 10900
},
{
"epoch": 3.697478991596639,
"grad_norm": 0.016148166730999947,
"learning_rate": 1.3028571428571429e-05,
"loss": 0.0005,
"step": 11000
},
{
"epoch": 3.73109243697479,
"grad_norm": 0.15638086199760437,
"learning_rate": 1.2692436974789918e-05,
"loss": 0.0005,
"step": 11100
},
{
"epoch": 3.764705882352941,
"grad_norm": 0.00034360421705059707,
"learning_rate": 1.2356302521008405e-05,
"loss": 0.0005,
"step": 11200
},
{
"epoch": 3.7983193277310923,
"grad_norm": 0.026777559891343117,
"learning_rate": 1.2020168067226892e-05,
"loss": 0.0004,
"step": 11300
},
{
"epoch": 3.831932773109244,
"grad_norm": 0.038060788065195084,
"learning_rate": 1.168403361344538e-05,
"loss": 0.0005,
"step": 11400
},
{
"epoch": 3.865546218487395,
"grad_norm": 0.003074630396440625,
"learning_rate": 1.1347899159663867e-05,
"loss": 0.0004,
"step": 11500
},
{
"epoch": 3.899159663865546,
"grad_norm": 0.0007972168968990445,
"learning_rate": 1.1011764705882354e-05,
"loss": 0.0004,
"step": 11600
},
{
"epoch": 3.9327731092436977,
"grad_norm": 0.011879481375217438,
"learning_rate": 1.0675630252100841e-05,
"loss": 0.0005,
"step": 11700
},
{
"epoch": 3.966386554621849,
"grad_norm": 0.0010381491156294942,
"learning_rate": 1.0339495798319328e-05,
"loss": 0.0004,
"step": 11800
},
{
"epoch": 4.0,
"grad_norm": 0.0010164816631004214,
"learning_rate": 1.0003361344537815e-05,
"loss": 0.0003,
"step": 11900
},
{
"epoch": 4.0,
"eval_loss": 0.0016061929054558277,
"eval_runtime": 62.4482,
"eval_samples_per_second": 125.192,
"eval_steps_per_second": 7.83,
"step": 11900
}
],
"logging_steps": 100,
"max_steps": 14875,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1.71886155388416e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}