{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 122,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.00819672131147541,
"grad_norm": null,
"learning_rate": 0.0,
"loss": 1.3561,
"step": 1
},
{
"epoch": 0.01639344262295082,
"grad_norm": null,
"learning_rate": 0.0,
"loss": 1.1834,
"step": 2
},
{
"epoch": 0.02459016393442623,
"grad_norm": null,
"learning_rate": 0.0,
"loss": 1.4623,
"step": 3
},
{
"epoch": 0.03278688524590164,
"grad_norm": 15.98621940612793,
"learning_rate": 0.0,
"loss": 1.2439,
"step": 4
},
{
"epoch": 0.040983606557377046,
"grad_norm": null,
"learning_rate": 1e-05,
"loss": 1.1846,
"step": 5
},
{
"epoch": 0.04918032786885246,
"grad_norm": 11.108942031860352,
"learning_rate": 1e-05,
"loss": 1.3067,
"step": 6
},
{
"epoch": 0.05737704918032787,
"grad_norm": 43.746212005615234,
"learning_rate": 2e-05,
"loss": 1.1056,
"step": 7
},
{
"epoch": 0.06557377049180328,
"grad_norm": 23.63981819152832,
"learning_rate": 3e-05,
"loss": 1.0988,
"step": 8
},
{
"epoch": 0.07377049180327869,
"grad_norm": 13.135039329528809,
"learning_rate": 4e-05,
"loss": 0.9535,
"step": 9
},
{
"epoch": 0.08196721311475409,
"grad_norm": 26.240875244140625,
"learning_rate": 5e-05,
"loss": 0.8525,
"step": 10
},
{
"epoch": 0.09016393442622951,
"grad_norm": 29.70737075805664,
"learning_rate": 6e-05,
"loss": 0.7922,
"step": 11
},
{
"epoch": 0.09836065573770492,
"grad_norm": 30.103702545166016,
"learning_rate": 7e-05,
"loss": 0.835,
"step": 12
},
{
"epoch": 0.10655737704918032,
"grad_norm": 8.32728385925293,
"learning_rate": 8e-05,
"loss": 0.71,
"step": 13
},
{
"epoch": 0.11475409836065574,
"grad_norm": 42.42969512939453,
"learning_rate": 9e-05,
"loss": 0.6767,
"step": 14
},
{
"epoch": 0.12295081967213115,
"grad_norm": 48.355735778808594,
"learning_rate": 0.0001,
"loss": 0.9952,
"step": 15
},
{
"epoch": 0.13114754098360656,
"grad_norm": 12.929658889770508,
"learning_rate": 9.998033131915266e-05,
"loss": 0.6263,
"step": 16
},
{
"epoch": 0.13934426229508196,
"grad_norm": 33.13911437988281,
"learning_rate": 9.992134075089084e-05,
"loss": 0.6271,
"step": 17
},
{
"epoch": 0.14754098360655737,
"grad_norm": 18.275238037109375,
"learning_rate": 9.982307470588098e-05,
"loss": 0.6788,
"step": 18
},
{
"epoch": 0.1557377049180328,
"grad_norm": 21.644994735717773,
"learning_rate": 9.968561049466214e-05,
"loss": 0.7019,
"step": 19
},
{
"epoch": 0.16393442622950818,
"grad_norm": 13.200664520263672,
"learning_rate": 9.950905626682228e-05,
"loss": 0.4314,
"step": 20
},
{
"epoch": 0.1721311475409836,
"grad_norm": 30.9163875579834,
"learning_rate": 9.92935509259118e-05,
"loss": 0.6739,
"step": 21
},
{
"epoch": 0.18032786885245902,
"grad_norm": 13.788652420043945,
"learning_rate": 9.903926402016153e-05,
"loss": 0.4574,
"step": 22
},
{
"epoch": 0.1885245901639344,
"grad_norm": 9.37610149383545,
"learning_rate": 9.874639560909117e-05,
"loss": 0.504,
"step": 23
},
{
"epoch": 0.19672131147540983,
"grad_norm": 4.596352577209473,
"learning_rate": 9.841517610611309e-05,
"loss": 0.381,
"step": 24
},
{
"epoch": 0.20491803278688525,
"grad_norm": 34.62461853027344,
"learning_rate": 9.804586609725499e-05,
"loss": 0.5659,
"step": 25
},
{
"epoch": 0.21311475409836064,
"grad_norm": 41.479732513427734,
"learning_rate": 9.763875613614482e-05,
"loss": 0.8486,
"step": 26
},
{
"epoch": 0.22131147540983606,
"grad_norm": 42.0236930847168,
"learning_rate": 9.719416651541839e-05,
"loss": 1.0237,
"step": 27
},
{
"epoch": 0.22950819672131148,
"grad_norm": 13.166512489318848,
"learning_rate": 9.671244701472999e-05,
"loss": 0.4578,
"step": 28
},
{
"epoch": 0.23770491803278687,
"grad_norm": 9.905980110168457,
"learning_rate": 9.619397662556435e-05,
"loss": 0.2581,
"step": 29
},
{
"epoch": 0.2459016393442623,
"grad_norm": 15.204133033752441,
"learning_rate": 9.563916325306594e-05,
"loss": 0.3053,
"step": 30
},
{
"epoch": 0.2540983606557377,
"grad_norm": 35.76464080810547,
"learning_rate": 9.504844339512095e-05,
"loss": 0.6698,
"step": 31
},
{
"epoch": 0.26229508196721313,
"grad_norm": 38.28012466430664,
"learning_rate": 9.442228179894362e-05,
"loss": 0.6832,
"step": 32
},
{
"epoch": 0.27049180327868855,
"grad_norm": 53.34618377685547,
"learning_rate": 9.376117109543769e-05,
"loss": 0.6321,
"step": 33
},
{
"epoch": 0.2786885245901639,
"grad_norm": 37.23402786254883,
"learning_rate": 9.306563141162046e-05,
"loss": 0.7292,
"step": 34
},
{
"epoch": 0.28688524590163933,
"grad_norm": 29.553579330444336,
"learning_rate": 9.233620996141421e-05,
"loss": 0.4497,
"step": 35
},
{
"epoch": 0.29508196721311475,
"grad_norm": 9.313833236694336,
"learning_rate": 9.157348061512727e-05,
"loss": 0.4458,
"step": 36
},
{
"epoch": 0.30327868852459017,
"grad_norm": 9.01462459564209,
"learning_rate": 9.077804344796302e-05,
"loss": 0.3081,
"step": 37
},
{
"epoch": 0.3114754098360656,
"grad_norm": 13.8030424118042,
"learning_rate": 8.995052426791247e-05,
"loss": 0.3955,
"step": 38
},
{
"epoch": 0.319672131147541,
"grad_norm": 30.403034210205078,
"learning_rate": 8.90915741234015e-05,
"loss": 0.7952,
"step": 39
},
{
"epoch": 0.32786885245901637,
"grad_norm": 14.309679985046387,
"learning_rate": 8.820186879108038e-05,
"loss": 0.2402,
"step": 40
},
{
"epoch": 0.3360655737704918,
"grad_norm": 33.29140853881836,
"learning_rate": 8.728210824415827e-05,
"loss": 0.6867,
"step": 41
},
{
"epoch": 0.3442622950819672,
"grad_norm": 17.823509216308594,
"learning_rate": 8.633301610170135e-05,
"loss": 0.3311,
"step": 42
},
{
"epoch": 0.3524590163934426,
"grad_norm": 10.712453842163086,
"learning_rate": 8.535533905932738e-05,
"loss": 0.3251,
"step": 43
},
{
"epoch": 0.36065573770491804,
"grad_norm": 14.378042221069336,
"learning_rate": 8.434984630174509e-05,
"loss": 0.3781,
"step": 44
},
{
"epoch": 0.36885245901639346,
"grad_norm": 11.261249542236328,
"learning_rate": 8.33173288976002e-05,
"loss": 0.2272,
"step": 45
},
{
"epoch": 0.3770491803278688,
"grad_norm": 10.504888534545898,
"learning_rate": 8.225859917710439e-05,
"loss": 0.4378,
"step": 46
},
{
"epoch": 0.38524590163934425,
"grad_norm": 12.617971420288086,
"learning_rate": 8.117449009293668e-05,
"loss": 0.3306,
"step": 47
},
{
"epoch": 0.39344262295081966,
"grad_norm": 16.494400024414062,
"learning_rate": 8.006585456492029e-05,
"loss": 0.4195,
"step": 48
},
{
"epoch": 0.4016393442622951,
"grad_norm": 4.538206100463867,
"learning_rate": 7.89335648089903e-05,
"loss": 0.2288,
"step": 49
},
{
"epoch": 0.4098360655737705,
"grad_norm": 3.4017157554626465,
"learning_rate": 7.777851165098012e-05,
"loss": 0.1776,
"step": 50
},
{
"epoch": 0.4180327868852459,
"grad_norm": 6.894968509674072,
"learning_rate": 7.660160382576683e-05,
"loss": 0.2644,
"step": 51
},
{
"epoch": 0.4262295081967213,
"grad_norm": 7.190054893493652,
"learning_rate": 7.540376726232648e-05,
"loss": 0.2066,
"step": 52
},
{
"epoch": 0.4344262295081967,
"grad_norm": 33.77849578857422,
"learning_rate": 7.4185944355262e-05,
"loss": 0.4325,
"step": 53
},
{
"epoch": 0.4426229508196721,
"grad_norm": 16.263246536254883,
"learning_rate": 7.294909322337689e-05,
"loss": 0.2738,
"step": 54
},
{
"epoch": 0.45081967213114754,
"grad_norm": 17.391746520996094,
"learning_rate": 7.169418695587791e-05,
"loss": 0.3253,
"step": 55
},
{
"epoch": 0.45901639344262296,
"grad_norm": 20.385494232177734,
"learning_rate": 7.042221284679982e-05,
"loss": 0.3355,
"step": 56
},
{
"epoch": 0.4672131147540984,
"grad_norm": 25.4639892578125,
"learning_rate": 6.91341716182545e-05,
"loss": 0.5549,
"step": 57
},
{
"epoch": 0.47540983606557374,
"grad_norm": 18.658313751220703,
"learning_rate": 6.783107663311565e-05,
"loss": 0.2025,
"step": 58
},
{
"epoch": 0.48360655737704916,
"grad_norm": 20.296470642089844,
"learning_rate": 6.651395309775837e-05,
"loss": 0.5972,
"step": 59
},
{
"epoch": 0.4918032786885246,
"grad_norm": 21.115795135498047,
"learning_rate": 6.518383725548074e-05,
"loss": 0.4646,
"step": 60
},
{
"epoch": 0.5,
"grad_norm": 52.6274299621582,
"learning_rate": 6.384177557124247e-05,
"loss": 0.8483,
"step": 61
},
{
"epoch": 0.5081967213114754,
"grad_norm": 36.52357864379883,
"learning_rate": 6.248882390836135e-05,
"loss": 0.9086,
"step": 62
},
{
"epoch": 0.5163934426229508,
"grad_norm": 15.678121566772461,
"learning_rate": 6.112604669781572e-05,
"loss": 0.5953,
"step": 63
},
{
"epoch": 0.5245901639344263,
"grad_norm": 18.18658447265625,
"learning_rate": 5.9754516100806423e-05,
"loss": 0.4511,
"step": 64
},
{
"epoch": 0.5327868852459017,
"grad_norm": 5.188011646270752,
"learning_rate": 5.837531116523682e-05,
"loss": 0.3454,
"step": 65
},
{
"epoch": 0.5409836065573771,
"grad_norm": 17.617431640625,
"learning_rate": 5.698951697677498e-05,
"loss": 0.6944,
"step": 66
},
{
"epoch": 0.5491803278688525,
"grad_norm": 18.674060821533203,
"learning_rate": 5.559822380516539e-05,
"loss": 0.6018,
"step": 67
},
{
"epoch": 0.5573770491803278,
"grad_norm": 9.069574356079102,
"learning_rate": 5.420252624646238e-05,
"loss": 0.3661,
"step": 68
},
{
"epoch": 0.5655737704918032,
"grad_norm": 32.6479606628418,
"learning_rate": 5.2803522361859594e-05,
"loss": 0.4797,
"step": 69
},
{
"epoch": 0.5737704918032787,
"grad_norm": 5.975889205932617,
"learning_rate": 5.140231281379345e-05,
"loss": 0.1784,
"step": 70
},
{
"epoch": 0.5819672131147541,
"grad_norm": 5.846829414367676,
"learning_rate": 5e-05,
"loss": 0.2431,
"step": 71
},
{
"epoch": 0.5901639344262295,
"grad_norm": 11.340662956237793,
"learning_rate": 4.859768718620656e-05,
"loss": 0.3237,
"step": 72
},
{
"epoch": 0.5983606557377049,
"grad_norm": 9.923660278320312,
"learning_rate": 4.7196477638140404e-05,
"loss": 0.3903,
"step": 73
},
{
"epoch": 0.6065573770491803,
"grad_norm": 3.1883232593536377,
"learning_rate": 4.579747375353763e-05,
"loss": 0.248,
"step": 74
},
{
"epoch": 0.6147540983606558,
"grad_norm": 8.905538558959961,
"learning_rate": 4.4401776194834613e-05,
"loss": 0.1613,
"step": 75
},
{
"epoch": 0.6229508196721312,
"grad_norm": 19.726444244384766,
"learning_rate": 4.3010483023225045e-05,
"loss": 0.2736,
"step": 76
},
{
"epoch": 0.6311475409836066,
"grad_norm": 7.428593158721924,
"learning_rate": 4.162468883476319e-05,
"loss": 0.1273,
"step": 77
},
{
"epoch": 0.639344262295082,
"grad_norm": 9.217852592468262,
"learning_rate": 4.0245483899193595e-05,
"loss": 0.1887,
"step": 78
},
{
"epoch": 0.6475409836065574,
"grad_norm": 15.629423141479492,
"learning_rate": 3.887395330218429e-05,
"loss": 0.3924,
"step": 79
},
{
"epoch": 0.6557377049180327,
"grad_norm": 9.791216850280762,
"learning_rate": 3.7511176091638653e-05,
"loss": 0.3166,
"step": 80
},
{
"epoch": 0.6639344262295082,
"grad_norm": 8.492615699768066,
"learning_rate": 3.6158224428757535e-05,
"loss": 0.2088,
"step": 81
},
{
"epoch": 0.6721311475409836,
"grad_norm": 18.473737716674805,
"learning_rate": 3.4816162744519263e-05,
"loss": 0.4732,
"step": 82
},
{
"epoch": 0.680327868852459,
"grad_norm": 9.715383529663086,
"learning_rate": 3.3486046902241664e-05,
"loss": 0.5531,
"step": 83
},
{
"epoch": 0.6885245901639344,
"grad_norm": 13.272611618041992,
"learning_rate": 3.216892336688435e-05,
"loss": 0.1446,
"step": 84
},
{
"epoch": 0.6967213114754098,
"grad_norm": 6.765244960784912,
"learning_rate": 3.086582838174551e-05,
"loss": 0.0961,
"step": 85
},
{
"epoch": 0.7049180327868853,
"grad_norm": 13.067438125610352,
"learning_rate": 2.9577787153200197e-05,
"loss": 0.1131,
"step": 86
},
{
"epoch": 0.7131147540983607,
"grad_norm": 7.727385997772217,
"learning_rate": 2.8305813044122097e-05,
"loss": 0.4602,
"step": 87
},
{
"epoch": 0.7213114754098361,
"grad_norm": 10.270739555358887,
"learning_rate": 2.705090677662311e-05,
"loss": 0.4261,
"step": 88
},
{
"epoch": 0.7295081967213115,
"grad_norm": 3.8513522148132324,
"learning_rate": 2.581405564473801e-05,
"loss": 0.356,
"step": 89
},
{
"epoch": 0.7377049180327869,
"grad_norm": 17.689189910888672,
"learning_rate": 2.459623273767354e-05,
"loss": 0.5393,
"step": 90
},
{
"epoch": 0.7459016393442623,
"grad_norm": 14.543610572814941,
"learning_rate": 2.3398396174233178e-05,
"loss": 0.37,
"step": 91
},
{
"epoch": 0.7540983606557377,
"grad_norm": null,
"learning_rate": 2.2221488349019903e-05,
"loss": 0.8326,
"step": 92
},
{
"epoch": 0.7622950819672131,
"grad_norm": 19.77420997619629,
"learning_rate": 2.2221488349019903e-05,
"loss": 0.3247,
"step": 93
},
{
"epoch": 0.7704918032786885,
"grad_norm": 19.445480346679688,
"learning_rate": 2.1066435191009715e-05,
"loss": 0.4273,
"step": 94
},
{
"epoch": 0.7786885245901639,
"grad_norm": 49.049041748046875,
"learning_rate": 1.9934145435079702e-05,
"loss": 0.4686,
"step": 95
},
{
"epoch": 0.7868852459016393,
"grad_norm": 23.511159896850586,
"learning_rate": 1.8825509907063327e-05,
"loss": 0.3971,
"step": 96
},
{
"epoch": 0.7950819672131147,
"grad_norm": 10.313678741455078,
"learning_rate": 1.774140082289563e-05,
"loss": 0.399,
"step": 97
},
{
"epoch": 0.8032786885245902,
"grad_norm": 21.878589630126953,
"learning_rate": 1.6682671102399805e-05,
"loss": 0.3466,
"step": 98
},
{
"epoch": 0.8114754098360656,
"grad_norm": 10.881017684936523,
"learning_rate": 1.5650153698254916e-05,
"loss": 0.2696,
"step": 99
},
{
"epoch": 0.819672131147541,
"grad_norm": 5.622803211212158,
"learning_rate": 1.4644660940672627e-05,
"loss": 0.719,
"step": 100
},
{
"epoch": 0.8278688524590164,
"grad_norm": 20.312349319458008,
"learning_rate": 1.3666983898298657e-05,
"loss": 0.3302,
"step": 101
},
{
"epoch": 0.8360655737704918,
"grad_norm": 1.5690298080444336,
"learning_rate": 1.2717891755841722e-05,
"loss": 0.1621,
"step": 102
},
{
"epoch": 0.8442622950819673,
"grad_norm": 14.779884338378906,
"learning_rate": 1.1798131208919627e-05,
"loss": 0.4733,
"step": 103
},
{
"epoch": 0.8524590163934426,
"grad_norm": 18.317291259765625,
"learning_rate": 1.090842587659851e-05,
"loss": 0.327,
"step": 104
},
{
"epoch": 0.860655737704918,
"grad_norm": 5.87141227722168,
"learning_rate": 1.004947573208756e-05,
"loss": 0.4066,
"step": 105
},
{
"epoch": 0.8688524590163934,
"grad_norm": 8.89691162109375,
"learning_rate": 9.221956552036992e-06,
"loss": 0.1725,
"step": 106
},
{
"epoch": 0.8770491803278688,
"grad_norm": 22.22539520263672,
"learning_rate": 8.426519384872733e-06,
"loss": 0.4812,
"step": 107
},
{
"epoch": 0.8852459016393442,
"grad_norm": 16.512264251708984,
"learning_rate": 7.663790038585793e-06,
"loss": 0.2801,
"step": 108
},
{
"epoch": 0.8934426229508197,
"grad_norm": 11.542386054992676,
"learning_rate": 6.934368588379553e-06,
"loss": 0.3944,
"step": 109
},
{
"epoch": 0.9016393442622951,
"grad_norm": 2.5426712036132812,
"learning_rate": 6.238828904562316e-06,
"loss": 0.2072,
"step": 110
},
{
"epoch": 0.9098360655737705,
"grad_norm": 6.43900203704834,
"learning_rate": 5.577718201056392e-06,
"loss": 0.2337,
"step": 111
},
{
"epoch": 0.9180327868852459,
"grad_norm": 2.6949198246002197,
"learning_rate": 4.951556604879048e-06,
"loss": 0.2164,
"step": 112
},
{
"epoch": 0.9262295081967213,
"grad_norm": 12.75400161743164,
"learning_rate": 4.360836746934055e-06,
"loss": 0.2642,
"step": 113
},
{
"epoch": 0.9344262295081968,
"grad_norm": 7.314509868621826,
"learning_rate": 3.8060233744356633e-06,
"loss": 0.2681,
"step": 114
},
{
"epoch": 0.9426229508196722,
"grad_norm": 7.261117458343506,
"learning_rate": 3.2875529852700147e-06,
"loss": 0.1757,
"step": 115
},
{
"epoch": 0.9508196721311475,
"grad_norm": 10.839629173278809,
"learning_rate": 2.8058334845816213e-06,
"loss": 0.1706,
"step": 116
},
{
"epoch": 0.9590163934426229,
"grad_norm": 6.028248310089111,
"learning_rate": 2.361243863855184e-06,
"loss": 0.351,
"step": 117
},
{
"epoch": 0.9672131147540983,
"grad_norm": 5.811429023742676,
"learning_rate": 1.9541339027450256e-06,
"loss": 0.2582,
"step": 118
},
{
"epoch": 0.9754098360655737,
"grad_norm": 11.192886352539062,
"learning_rate": 1.584823893886933e-06,
"loss": 0.392,
"step": 119
},
{
"epoch": 0.9836065573770492,
"grad_norm": 11.7086820602417,
"learning_rate": 1.2536043909088191e-06,
"loss": 0.1863,
"step": 120
},
{
"epoch": 0.9918032786885246,
"grad_norm": 25.053173065185547,
"learning_rate": 9.607359798384785e-07,
"loss": 0.4225,
"step": 121
},
{
"epoch": 1.0,
"grad_norm": 9.306795120239258,
"learning_rate": 7.064490740882057e-07,
"loss": 0.1589,
"step": 122
}
],
"logging_steps": 1,
"max_steps": 122,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6324556423495680.0,
"train_batch_size": 32,
"trial_name": null,
"trial_params": null
}