{
"best_global_step": 10000,
"best_metric": 0.8275137305036369,
"best_model_checkpoint": "./results/run-0/checkpoint-10000",
"epoch": 5.0,
"eval_steps": 500,
"global_step": 10000,
"is_hyper_param_search": true,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05,
"grad_norm": 2.039576768875122,
"learning_rate": 1.447537100215313e-06,
"loss": 0.0,
"step": 100
},
{
"epoch": 0.1,
"grad_norm": 4.460484504699707,
"learning_rate": 2.9096957873014878e-06,
"loss": 0.0821,
"step": 200
},
{
"epoch": 0.15,
"grad_norm": 26.84711456298828,
"learning_rate": 4.371854474387663e-06,
"loss": 0.0519,
"step": 300
},
{
"epoch": 0.2,
"grad_norm": 0.6646966934204102,
"learning_rate": 5.834013161473838e-06,
"loss": 0.0,
"step": 400
},
{
"epoch": 0.25,
"grad_norm": 1.8385223150253296,
"learning_rate": 7.296171848560012e-06,
"loss": 0.0,
"step": 500
},
{
"epoch": 0.3,
"grad_norm": 0.9234638810157776,
"learning_rate": 8.758330535646188e-06,
"loss": 0.0,
"step": 600
},
{
"epoch": 0.35,
"grad_norm": 38.24382400512695,
"learning_rate": 9.9933989317167e-06,
"loss": 0.0,
"step": 700
},
{
"epoch": 0.4,
"grad_norm": 42.83884811401367,
"learning_rate": 9.94162234536513e-06,
"loss": 0.0,
"step": 800
},
{
"epoch": 0.45,
"grad_norm": 0.8020399808883667,
"learning_rate": 9.889845759013559e-06,
"loss": 0.0,
"step": 900
},
{
"epoch": 0.5,
"grad_norm": 0.86091148853302,
"learning_rate": 9.838069172661988e-06,
"loss": 0.0,
"step": 1000
},
{
"epoch": 0.55,
"grad_norm": 0.16439934074878693,
"learning_rate": 9.786292586310415e-06,
"loss": 0.0,
"step": 1100
},
{
"epoch": 0.6,
"grad_norm": 2.752115488052368,
"learning_rate": 9.734515999958846e-06,
"loss": 0.0,
"step": 1200
},
{
"epoch": 0.65,
"grad_norm": 1.5777802467346191,
"learning_rate": 9.682739413607275e-06,
"loss": 0.0,
"step": 1300
},
{
"epoch": 0.7,
"grad_norm": 5.708035945892334,
"learning_rate": 9.630962827255703e-06,
"loss": 0.0598,
"step": 1400
},
{
"epoch": 0.75,
"grad_norm": 0.3160113990306854,
"learning_rate": 9.579186240904132e-06,
"loss": 0.0441,
"step": 1500
},
{
"epoch": 0.8,
"grad_norm": 1.2273201942443848,
"learning_rate": 9.527409654552563e-06,
"loss": 0.0,
"step": 1600
},
{
"epoch": 0.85,
"grad_norm": 29.215551376342773,
"learning_rate": 9.47563306820099e-06,
"loss": 0.0,
"step": 1700
},
{
"epoch": 0.9,
"grad_norm": 0.18806450068950653,
"learning_rate": 9.42385648184942e-06,
"loss": 0.0,
"step": 1800
},
{
"epoch": 0.95,
"grad_norm": 0.41723617911338806,
"learning_rate": 9.372079895497849e-06,
"loss": 0.0183,
"step": 1900
},
{
"epoch": 1.0,
"grad_norm": 1.9550623893737793,
"learning_rate": 9.32030330914628e-06,
"loss": 0.0,
"step": 2000
},
{
"epoch": 1.0,
"eval_economic_inequality_accuracy": 0.8047858942065491,
"eval_economic_inequality_f1": 0.7536914695213144,
"eval_economic_policy_benefits_accuracy": 0.8282828282828283,
"eval_economic_policy_benefits_f1": 0.7567090529234769,
"eval_ethnic_boundaries_accuracy": 0.9526143790849673,
"eval_ethnic_boundaries_f1": 0.9344984933861545,
"eval_language_policy_accuracy": 0.7058823529411765,
"eval_language_policy_f1": 0.6071428571428571,
"eval_loss": null,
"eval_mother_tongue_education_accuracy": 0.8888888888888888,
"eval_mother_tongue_education_f1": 0.837037037037037,
"eval_overall_accuracy": 0.8392311075242039,
"eval_overall_f1": 0.782435168791527,
"eval_religion_ethnic_identity_accuracy": 0.8549323017408124,
"eval_religion_ethnic_identity_f1": 0.8055321027383222,
"eval_runtime": 4.4874,
"eval_samples_per_second": 891.378,
"eval_steps_per_second": 55.711,
"step": 2000
},
{
"epoch": 1.05,
"grad_norm": 2.762986183166504,
"learning_rate": 9.268526722794707e-06,
"loss": 0.0,
"step": 2100
},
{
"epoch": 1.1,
"grad_norm": 0.5487369298934937,
"learning_rate": 9.216750136443136e-06,
"loss": 0.0,
"step": 2200
},
{
"epoch": 1.15,
"grad_norm": 4.76751184463501,
"learning_rate": 9.164973550091565e-06,
"loss": 0.0102,
"step": 2300
},
{
"epoch": 1.2,
"grad_norm": 2.681756019592285,
"learning_rate": 9.113196963739995e-06,
"loss": 0.0,
"step": 2400
},
{
"epoch": 1.25,
"grad_norm": 0.24124185740947723,
"learning_rate": 9.061420377388424e-06,
"loss": 0.0,
"step": 2500
},
{
"epoch": 1.3,
"grad_norm": 1.943154215812683,
"learning_rate": 9.009643791036853e-06,
"loss": 0.0,
"step": 2600
},
{
"epoch": 1.35,
"grad_norm": 0.28053101897239685,
"learning_rate": 8.957867204685282e-06,
"loss": 0.0347,
"step": 2700
},
{
"epoch": 1.4,
"grad_norm": 1.7952568531036377,
"learning_rate": 8.906090618333711e-06,
"loss": 0.4436,
"step": 2800
},
{
"epoch": 1.45,
"grad_norm": 3.859933614730835,
"learning_rate": 8.85431403198214e-06,
"loss": 0.0,
"step": 2900
},
{
"epoch": 1.5,
"grad_norm": 1.094464898109436,
"learning_rate": 8.80253744563057e-06,
"loss": 0.0,
"step": 3000
},
{
"epoch": 1.55,
"grad_norm": 0.5670285820960999,
"learning_rate": 8.750760859278999e-06,
"loss": 0.0,
"step": 3100
},
{
"epoch": 1.6,
"grad_norm": 2.8412418365478516,
"learning_rate": 8.698984272927428e-06,
"loss": 0.0,
"step": 3200
},
{
"epoch": 1.65,
"grad_norm": 65.82475280761719,
"learning_rate": 8.647207686575855e-06,
"loss": 0.0,
"step": 3300
},
{
"epoch": 1.7,
"grad_norm": 8.976615905761719,
"learning_rate": 8.595431100224286e-06,
"loss": 0.0048,
"step": 3400
},
{
"epoch": 1.75,
"grad_norm": 126.71548461914062,
"learning_rate": 8.543654513872716e-06,
"loss": 0.0,
"step": 3500
},
{
"epoch": 1.8,
"grad_norm": 0.595015823841095,
"learning_rate": 8.491877927521143e-06,
"loss": 0.0033,
"step": 3600
},
{
"epoch": 1.85,
"grad_norm": 3.98215651512146,
"learning_rate": 8.440101341169572e-06,
"loss": 0.1482,
"step": 3700
},
{
"epoch": 1.9,
"grad_norm": 3.0417540073394775,
"learning_rate": 8.388324754818003e-06,
"loss": 0.0031,
"step": 3800
},
{
"epoch": 1.95,
"grad_norm": 27.85649299621582,
"learning_rate": 8.33654816846643e-06,
"loss": 0.0589,
"step": 3900
},
{
"epoch": 2.0,
"grad_norm": 2.3094232082366943,
"learning_rate": 8.28477158211486e-06,
"loss": 0.0,
"step": 4000
},
{
"epoch": 2.0,
"eval_economic_inequality_accuracy": 0.8236775818639799,
"eval_economic_inequality_f1": 0.8161521311449569,
"eval_economic_policy_benefits_accuracy": 0.835016835016835,
"eval_economic_policy_benefits_f1": 0.81117677081242,
"eval_ethnic_boundaries_accuracy": 0.9477124183006536,
"eval_ethnic_boundaries_f1": 0.9421262334088456,
"eval_language_policy_accuracy": 0.7058823529411765,
"eval_language_policy_f1": 0.6071428571428571,
"eval_loss": null,
"eval_mother_tongue_education_accuracy": 0.7777777777777778,
"eval_mother_tongue_education_f1": 0.7407407407407408,
"eval_overall_accuracy": 0.8277126438976526,
"eval_overall_f1": 0.7960173130694123,
"eval_religion_ethnic_identity_accuracy": 0.8762088974854932,
"eval_religion_ethnic_identity_f1": 0.8587651451666537,
"eval_runtime": 4.4654,
"eval_samples_per_second": 895.775,
"eval_steps_per_second": 55.986,
"step": 4000
},
{
"epoch": 2.05,
"grad_norm": 8.547677040100098,
"learning_rate": 8.232994995763289e-06,
"loss": 0.0,
"step": 4100
},
{
"epoch": 2.1,
"grad_norm": 117.06266784667969,
"learning_rate": 8.181218409411718e-06,
"loss": 0.0,
"step": 4200
},
{
"epoch": 2.15,
"grad_norm": 0.19274334609508514,
"learning_rate": 8.129441823060147e-06,
"loss": 0.0,
"step": 4300
},
{
"epoch": 2.2,
"grad_norm": 1.0360257625579834,
"learning_rate": 8.077665236708576e-06,
"loss": 0.0,
"step": 4400
},
{
"epoch": 2.25,
"grad_norm": 0.08476172387599945,
"learning_rate": 8.025888650357006e-06,
"loss": 0.0,
"step": 4500
},
{
"epoch": 2.3,
"grad_norm": 0.08015169203281403,
"learning_rate": 7.974112064005435e-06,
"loss": 0.0,
"step": 4600
},
{
"epoch": 2.35,
"grad_norm": 0.22844159603118896,
"learning_rate": 7.922335477653864e-06,
"loss": 0.0006,
"step": 4700
},
{
"epoch": 2.4,
"grad_norm": 0.08015509694814682,
"learning_rate": 7.870558891302293e-06,
"loss": 0.0006,
"step": 4800
},
{
"epoch": 2.45,
"grad_norm": 5.426948547363281,
"learning_rate": 7.818782304950722e-06,
"loss": 0.0,
"step": 4900
},
{
"epoch": 2.5,
"grad_norm": 0.022730231285095215,
"learning_rate": 7.767005718599151e-06,
"loss": 0.0,
"step": 5000
},
{
"epoch": 2.55,
"grad_norm": 5.662137508392334,
"learning_rate": 7.71522913224758e-06,
"loss": 0.0,
"step": 5100
},
{
"epoch": 2.6,
"grad_norm": 0.05326724052429199,
"learning_rate": 7.66345254589601e-06,
"loss": 0.0,
"step": 5200
},
{
"epoch": 2.65,
"grad_norm": 2.7219042778015137,
"learning_rate": 7.611675959544438e-06,
"loss": 0.0003,
"step": 5300
},
{
"epoch": 2.7,
"grad_norm": 4.045983791351318,
"learning_rate": 7.559899373192868e-06,
"loss": 0.0,
"step": 5400
},
{
"epoch": 2.75,
"grad_norm": 0.17610976099967957,
"learning_rate": 7.5081227868412964e-06,
"loss": 0.0,
"step": 5500
},
{
"epoch": 2.8,
"grad_norm": 0.03921931982040405,
"learning_rate": 7.456346200489726e-06,
"loss": 0.0,
"step": 5600
},
{
"epoch": 2.85,
"grad_norm": 3.3493025302886963,
"learning_rate": 7.404569614138155e-06,
"loss": 0.0,
"step": 5700
},
{
"epoch": 2.9,
"grad_norm": 0.17839455604553223,
"learning_rate": 7.352793027786584e-06,
"loss": 0.0,
"step": 5800
},
{
"epoch": 2.95,
"grad_norm": 0.01533100288361311,
"learning_rate": 7.301016441435013e-06,
"loss": 0.0,
"step": 5900
},
{
"epoch": 3.0,
"grad_norm": 0.6168579459190369,
"learning_rate": 7.249239855083442e-06,
"loss": 0.0,
"step": 6000
},
{
"epoch": 3.0,
"eval_economic_inequality_accuracy": 0.818639798488665,
"eval_economic_inequality_f1": 0.8128041981528128,
"eval_economic_policy_benefits_accuracy": 0.8686868686868687,
"eval_economic_policy_benefits_f1": 0.849679269639744,
"eval_ethnic_boundaries_accuracy": 0.9379084967320261,
"eval_ethnic_boundaries_f1": 0.9377319200237987,
"eval_language_policy_accuracy": 0.6470588235294118,
"eval_language_policy_f1": 0.5673202614379085,
"eval_loss": null,
"eval_mother_tongue_education_accuracy": 0.7777777777777778,
"eval_mother_tongue_education_f1": 0.7407407407407408,
"eval_overall_accuracy": 0.8181454231515234,
"eval_overall_f1": 0.794560338168735,
"eval_religion_ethnic_identity_accuracy": 0.8588007736943907,
"eval_religion_ethnic_identity_f1": 0.8590856390174055,
"eval_runtime": 4.4722,
"eval_samples_per_second": 894.406,
"eval_steps_per_second": 55.9,
"step": 6000
},
{
"epoch": 3.05,
"grad_norm": 0.06797665357589722,
"learning_rate": 7.197463268731871e-06,
"loss": 0.0,
"step": 6100
},
{
"epoch": 3.1,
"grad_norm": 0.05142182484269142,
"learning_rate": 7.145686682380301e-06,
"loss": 0.0002,
"step": 6200
},
{
"epoch": 3.15,
"grad_norm": 0.04868694022297859,
"learning_rate": 7.09391009602873e-06,
"loss": 0.0,
"step": 6300
},
{
"epoch": 3.2,
"grad_norm": 0.026197949424386024,
"learning_rate": 7.042133509677158e-06,
"loss": 0.0005,
"step": 6400
},
{
"epoch": 3.25,
"grad_norm": 12.894441604614258,
"learning_rate": 6.990356923325587e-06,
"loss": 0.0,
"step": 6500
},
{
"epoch": 3.3,
"grad_norm": 4.21354341506958,
"learning_rate": 6.938580336974017e-06,
"loss": 0.0,
"step": 6600
},
{
"epoch": 3.35,
"grad_norm": 0.0193119365721941,
"learning_rate": 6.886803750622446e-06,
"loss": 0.0,
"step": 6700
},
{
"epoch": 3.4,
"grad_norm": 0.07511546462774277,
"learning_rate": 6.835027164270875e-06,
"loss": 0.0,
"step": 6800
},
{
"epoch": 3.45,
"grad_norm": 0.9280033111572266,
"learning_rate": 6.783250577919304e-06,
"loss": 0.0003,
"step": 6900
},
{
"epoch": 3.5,
"grad_norm": 108.68927001953125,
"learning_rate": 6.731473991567734e-06,
"loss": 0.0,
"step": 7000
},
{
"epoch": 3.55,
"grad_norm": 4.475680828094482,
"learning_rate": 6.679697405216162e-06,
"loss": 0.0,
"step": 7100
},
{
"epoch": 3.6,
"grad_norm": 0.040891651064157486,
"learning_rate": 6.6279208188645915e-06,
"loss": 0.0,
"step": 7200
},
{
"epoch": 3.65,
"grad_norm": 0.032305650413036346,
"learning_rate": 6.576144232513021e-06,
"loss": 0.0,
"step": 7300
},
{
"epoch": 3.7,
"grad_norm": 0.027986843138933182,
"learning_rate": 6.524367646161449e-06,
"loss": 0.0002,
"step": 7400
},
{
"epoch": 3.75,
"grad_norm": 0.023173924535512924,
"learning_rate": 6.472591059809879e-06,
"loss": 0.0,
"step": 7500
},
{
"epoch": 3.8,
"grad_norm": 0.10551729053258896,
"learning_rate": 6.420814473458308e-06,
"loss": 0.0,
"step": 7600
},
{
"epoch": 3.85,
"grad_norm": 4.631871223449707,
"learning_rate": 6.3690378871067366e-06,
"loss": 0.0,
"step": 7700
},
{
"epoch": 3.9,
"grad_norm": 0.09392645955085754,
"learning_rate": 6.317261300755166e-06,
"loss": 0.0,
"step": 7800
},
{
"epoch": 3.95,
"grad_norm": 0.32573947310447693,
"learning_rate": 6.265484714403596e-06,
"loss": 0.0,
"step": 7900
},
{
"epoch": 4.0,
"grad_norm": 0.012679875828325748,
"learning_rate": 6.213708128052024e-06,
"loss": 0.0,
"step": 8000
},
{
"epoch": 4.0,
"eval_economic_inequality_accuracy": 0.809823677581864,
"eval_economic_inequality_f1": 0.8053367188692558,
"eval_economic_policy_benefits_accuracy": 0.8451178451178452,
"eval_economic_policy_benefits_f1": 0.8262377279238239,
"eval_ethnic_boundaries_accuracy": 0.9379084967320261,
"eval_ethnic_boundaries_f1": 0.9368969810146281,
"eval_language_policy_accuracy": 0.7058823529411765,
"eval_language_policy_f1": 0.6071428571428571,
"eval_loss": null,
"eval_mother_tongue_education_accuracy": 0.8888888888888888,
"eval_mother_tongue_education_f1": 0.837037037037037,
"eval_overall_accuracy": 0.8436493204617509,
"eval_overall_f1": 0.8135939571355589,
"eval_religion_ethnic_identity_accuracy": 0.874274661508704,
"eval_religion_ethnic_identity_f1": 0.8689124208257524,
"eval_runtime": 4.4538,
"eval_samples_per_second": 898.1,
"eval_steps_per_second": 56.131,
"step": 8000
},
{
"epoch": 4.05,
"grad_norm": 0.08179322630167007,
"learning_rate": 6.161931541700453e-06,
"loss": 0.0,
"step": 8100
},
{
"epoch": 4.1,
"grad_norm": 1.8278212547302246,
"learning_rate": 6.1101549553488824e-06,
"loss": 0.0,
"step": 8200
},
{
"epoch": 4.15,
"grad_norm": 0.9412699341773987,
"learning_rate": 6.058378368997311e-06,
"loss": 0.0,
"step": 8300
},
{
"epoch": 4.2,
"grad_norm": 0.3116857707500458,
"learning_rate": 6.006601782645741e-06,
"loss": 0.0,
"step": 8400
},
{
"epoch": 4.25,
"grad_norm": 0.04464244097471237,
"learning_rate": 5.95482519629417e-06,
"loss": 0.0,
"step": 8500
},
{
"epoch": 4.3,
"grad_norm": 0.0,
"learning_rate": 5.903048609942598e-06,
"loss": 0.0,
"step": 8600
},
{
"epoch": 4.35,
"grad_norm": 0.011906923726201057,
"learning_rate": 5.8512720235910274e-06,
"loss": 0.0,
"step": 8700
},
{
"epoch": 4.4,
"grad_norm": 0.022279787808656693,
"learning_rate": 5.7994954372394575e-06,
"loss": 0.0002,
"step": 8800
},
{
"epoch": 4.45,
"grad_norm": 0.07576213777065277,
"learning_rate": 5.747718850887886e-06,
"loss": 0.0001,
"step": 8900
},
{
"epoch": 4.5,
"grad_norm": 0.02016213722527027,
"learning_rate": 5.695942264536315e-06,
"loss": 0.0,
"step": 9000
},
{
"epoch": 4.55,
"grad_norm": 0.07930700480937958,
"learning_rate": 5.644165678184744e-06,
"loss": 0.0,
"step": 9100
},
{
"epoch": 4.6,
"grad_norm": 0.016734231263399124,
"learning_rate": 5.592389091833174e-06,
"loss": 0.0,
"step": 9200
},
{
"epoch": 4.65,
"grad_norm": 0.2023557871580124,
"learning_rate": 5.5406125054816025e-06,
"loss": 0.0,
"step": 9300
},
{
"epoch": 4.7,
"grad_norm": 0.03384646028280258,
"learning_rate": 5.488835919130032e-06,
"loss": 0.0,
"step": 9400
},
{
"epoch": 4.75,
"grad_norm": 0.04355572536587715,
"learning_rate": 5.437059332778461e-06,
"loss": 0.0001,
"step": 9500
},
{
"epoch": 4.8,
"grad_norm": 0.02533441036939621,
"learning_rate": 5.385282746426889e-06,
"loss": 0.0001,
"step": 9600
},
{
"epoch": 4.85,
"grad_norm": 0.10049561411142349,
"learning_rate": 5.333506160075319e-06,
"loss": 0.0,
"step": 9700
},
{
"epoch": 4.9,
"grad_norm": 0.018155105412006378,
"learning_rate": 5.281729573723748e-06,
"loss": 0.0,
"step": 9800
},
{
"epoch": 4.95,
"grad_norm": 0.00853881984949112,
"learning_rate": 5.229952987372177e-06,
"loss": 0.0,
"step": 9900
},
{
"epoch": 5.0,
"grad_norm": 0.17767295241355896,
"learning_rate": 5.178176401020606e-06,
"loss": 0.0,
"step": 10000
},
{
"epoch": 5.0,
"eval_economic_inequality_accuracy": 0.8261964735516373,
"eval_economic_inequality_f1": 0.8121087395907841,
"eval_economic_policy_benefits_accuracy": 0.8653198653198653,
"eval_economic_policy_benefits_f1": 0.8471939977021603,
"eval_ethnic_boundaries_accuracy": 0.9362745098039216,
"eval_ethnic_boundaries_f1": 0.9365673542144131,
"eval_language_policy_accuracy": 0.7352941176470589,
"eval_language_policy_f1": 0.6572786690433748,
"eval_loss": null,
"eval_mother_tongue_education_accuracy": 0.8888888888888888,
"eval_mother_tongue_education_f1": 0.837037037037037,
"eval_overall_accuracy": 0.8553418707750738,
"eval_overall_f1": 0.8275137305036369,
"eval_religion_ethnic_identity_accuracy": 0.8800773694390716,
"eval_religion_ethnic_identity_f1": 0.874896585434052,
"eval_runtime": 4.4693,
"eval_samples_per_second": 895.0,
"eval_steps_per_second": 55.937,
"step": 10000
}
],
"logging_steps": 100,
"max_steps": 20000,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"stateful_callbacks": {
"EarlyStoppingCallback": {
"args": {
"early_stopping_patience": 2,
"early_stopping_threshold": 0.0
},
"attributes": {
"early_stopping_patience_counter": 0
}
},
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.105190825984e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": {
"gradient_accumulation_steps": 1,
"learning_rate": 1.0001165419669436e-05,
"num_train_epochs": 10,
"per_device_train_batch_size": 8,
"warmup_steps": 684,
"weight_decay": 0.07522641729243515
}
}