{
"best_global_step": 300,
"best_metric": 0.09494102001190186,
"best_model_checkpoint": "/workspace/Paper/Llama-Factory-out-FINAL/checkpoint-300",
"epoch": 2.0,
"eval_steps": 50,
"global_step": 938,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.021333333333333333,
"grad_norm": 3.370833158493042,
"learning_rate": 9.574468085106384e-07,
"loss": 0.034,
"step": 10
},
{
"epoch": 0.042666666666666665,
"grad_norm": 2.170051097869873,
"learning_rate": 2.021276595744681e-06,
"loss": 0.0408,
"step": 20
},
{
"epoch": 0.064,
"grad_norm": 1.6947985887527466,
"learning_rate": 3.0851063829787237e-06,
"loss": 0.0315,
"step": 30
},
{
"epoch": 0.08533333333333333,
"grad_norm": 0.45860642194747925,
"learning_rate": 4.148936170212766e-06,
"loss": 0.0357,
"step": 40
},
{
"epoch": 0.10666666666666667,
"grad_norm": 2.4352638721466064,
"learning_rate": 5.212765957446809e-06,
"loss": 0.035,
"step": 50
},
{
"epoch": 0.10666666666666667,
"eval_loss": 0.10324329882860184,
"eval_runtime": 95.3623,
"eval_samples_per_second": 10.486,
"eval_steps_per_second": 1.049,
"step": 50
},
{
"epoch": 0.128,
"grad_norm": 0.7604093551635742,
"learning_rate": 6.276595744680851e-06,
"loss": 0.0268,
"step": 60
},
{
"epoch": 0.14933333333333335,
"grad_norm": 4.535794734954834,
"learning_rate": 7.340425531914894e-06,
"loss": 0.0411,
"step": 70
},
{
"epoch": 0.17066666666666666,
"grad_norm": 0.10152211785316467,
"learning_rate": 8.404255319148937e-06,
"loss": 0.0228,
"step": 80
},
{
"epoch": 0.192,
"grad_norm": 0.8933190703392029,
"learning_rate": 9.46808510638298e-06,
"loss": 0.0287,
"step": 90
},
{
"epoch": 0.21333333333333335,
"grad_norm": 1.6419641971588135,
"learning_rate": 9.999134070902206e-06,
"loss": 0.0209,
"step": 100
},
{
"epoch": 0.21333333333333335,
"eval_loss": 0.14628423750400543,
"eval_runtime": 94.6984,
"eval_samples_per_second": 10.56,
"eval_steps_per_second": 1.056,
"step": 100
},
{
"epoch": 0.23466666666666666,
"grad_norm": 8.57272720336914,
"learning_rate": 9.992208437615651e-06,
"loss": 0.031,
"step": 110
},
{
"epoch": 0.256,
"grad_norm": 2.0105576515197754,
"learning_rate": 9.97836676558346e-06,
"loss": 0.0218,
"step": 120
},
{
"epoch": 0.2773333333333333,
"grad_norm": 15.240336418151855,
"learning_rate": 9.957628230595527e-06,
"loss": 0.028,
"step": 130
},
{
"epoch": 0.2986666666666667,
"grad_norm": 3.8736119270324707,
"learning_rate": 9.930021563125204e-06,
"loss": 0.0254,
"step": 140
},
{
"epoch": 0.32,
"grad_norm": 1.5256679058074951,
"learning_rate": 9.895585008527075e-06,
"loss": 0.0392,
"step": 150
},
{
"epoch": 0.32,
"eval_loss": 0.11115244776010513,
"eval_runtime": 94.3352,
"eval_samples_per_second": 10.6,
"eval_steps_per_second": 1.06,
"step": 150
},
{
"epoch": 0.3413333333333333,
"grad_norm": 1.2751268148422241,
"learning_rate": 9.854366274053125e-06,
"loss": 0.0263,
"step": 160
},
{
"epoch": 0.3626666666666667,
"grad_norm": 0.8286654353141785,
"learning_rate": 9.806422462760687e-06,
"loss": 0.0349,
"step": 170
},
{
"epoch": 0.384,
"grad_norm": 0.5155846476554871,
"learning_rate": 9.751819994403802e-06,
"loss": 0.0281,
"step": 180
},
{
"epoch": 0.4053333333333333,
"grad_norm": 4.299561500549316,
"learning_rate": 9.690634513417487e-06,
"loss": 0.0263,
"step": 190
},
{
"epoch": 0.4266666666666667,
"grad_norm": 3.9921865463256836,
"learning_rate": 9.622950784122473e-06,
"loss": 0.0424,
"step": 200
},
{
"epoch": 0.4266666666666667,
"eval_loss": 0.11345715820789337,
"eval_runtime": 94.7429,
"eval_samples_per_second": 10.555,
"eval_steps_per_second": 1.055,
"step": 200
},
{
"epoch": 0.448,
"grad_norm": 3.4969747066497803,
"learning_rate": 9.548862573295552e-06,
"loss": 0.0405,
"step": 210
},
{
"epoch": 0.4693333333333333,
"grad_norm": 3.1925570964813232,
"learning_rate": 9.468472520268207e-06,
"loss": 0.0332,
"step": 220
},
{
"epoch": 0.49066666666666664,
"grad_norm": 1.0224530696868896,
"learning_rate": 9.38189199473352e-06,
"loss": 0.0355,
"step": 230
},
{
"epoch": 0.512,
"grad_norm": 0.4921875596046448,
"learning_rate": 9.289240942458322e-06,
"loss": 0.0345,
"step": 240
},
{
"epoch": 0.5333333333333333,
"grad_norm": 2.0238680839538574,
"learning_rate": 9.190647719114328e-06,
"loss": 0.031,
"step": 250
},
{
"epoch": 0.5333333333333333,
"eval_loss": 0.10441213101148605,
"eval_runtime": 95.2767,
"eval_samples_per_second": 10.496,
"eval_steps_per_second": 1.05,
"step": 250
},
{
"epoch": 0.5546666666666666,
"grad_norm": 1.4228683710098267,
"learning_rate": 9.086248912458484e-06,
"loss": 0.0301,
"step": 260
},
{
"epoch": 0.576,
"grad_norm": 7.124210357666016,
"learning_rate": 8.976189153108853e-06,
"loss": 0.0319,
"step": 270
},
{
"epoch": 0.5973333333333334,
"grad_norm": 3.1352102756500244,
"learning_rate": 8.860620914178188e-06,
"loss": 0.0309,
"step": 280
},
{
"epoch": 0.6186666666666667,
"grad_norm": 3.6215434074401855,
"learning_rate": 8.73970430004278e-06,
"loss": 0.0376,
"step": 290
},
{
"epoch": 0.64,
"grad_norm": 2.639646530151367,
"learning_rate": 8.613606824539198e-06,
"loss": 0.0338,
"step": 300
},
{
"epoch": 0.64,
"eval_loss": 0.09494102001190186,
"eval_runtime": 93.7305,
"eval_samples_per_second": 10.669,
"eval_steps_per_second": 1.067,
"step": 300
},
{
"epoch": 0.6613333333333333,
"grad_norm": 1.0701638460159302,
"learning_rate": 8.482503178896227e-06,
"loss": 0.0381,
"step": 310
},
{
"epoch": 0.6826666666666666,
"grad_norm": 1.0814051628112793,
"learning_rate": 8.34657498972347e-06,
"loss": 0.0301,
"step": 320
},
{
"epoch": 0.704,
"grad_norm": 0.67435222864151,
"learning_rate": 8.206010567391918e-06,
"loss": 0.0285,
"step": 330
},
{
"epoch": 0.7253333333333334,
"grad_norm": 1.270016074180603,
"learning_rate": 8.061004645155049e-06,
"loss": 0.0323,
"step": 340
},
{
"epoch": 0.7466666666666667,
"grad_norm": 1.92872953414917,
"learning_rate": 7.91175810937189e-06,
"loss": 0.0478,
"step": 350
},
{
"epoch": 0.7466666666666667,
"eval_loss": 0.11240354925394058,
"eval_runtime": 94.9322,
"eval_samples_per_second": 10.534,
"eval_steps_per_second": 1.053,
"step": 350
},
{
"epoch": 0.768,
"grad_norm": 0.8001224398612976,
"learning_rate": 7.758477721205767e-06,
"loss": 0.0328,
"step": 360
},
{
"epoch": 0.7893333333333333,
"grad_norm": 0.2665117084980011,
"learning_rate": 7.601375830184297e-06,
"loss": 0.0198,
"step": 370
},
{
"epoch": 0.8106666666666666,
"grad_norm": 0.7271229028701782,
"learning_rate": 7.4406700800174545e-06,
"loss": 0.0283,
"step": 380
},
{
"epoch": 0.832,
"grad_norm": 9.665359497070312,
"learning_rate": 7.276583107081243e-06,
"loss": 0.0354,
"step": 390
},
{
"epoch": 0.8533333333333334,
"grad_norm": 23.276309967041016,
"learning_rate": 7.109342231984698e-06,
"loss": 0.0306,
"step": 400
},
{
"epoch": 0.8533333333333334,
"eval_loss": 0.11211390793323517,
"eval_runtime": 95.1213,
"eval_samples_per_second": 10.513,
"eval_steps_per_second": 1.051,
"step": 400
},
{
"epoch": 0.8746666666666667,
"grad_norm": 0.7906249761581421,
"learning_rate": 6.939179144647516e-06,
"loss": 0.0298,
"step": 410
},
{
"epoch": 0.896,
"grad_norm": 0.3667755722999573,
"learning_rate": 6.766329583324582e-06,
"loss": 0.0286,
"step": 420
},
{
"epoch": 0.9173333333333333,
"grad_norm": 1.2041046619415283,
"learning_rate": 6.591033008022067e-06,
"loss": 0.0152,
"step": 430
},
{
"epoch": 0.9386666666666666,
"grad_norm": 2.856255531311035,
"learning_rate": 6.413532268757537e-06,
"loss": 0.0303,
"step": 440
},
{
"epoch": 0.96,
"grad_norm": 1.6765844821929932,
"learning_rate": 6.234073269123654e-06,
"loss": 0.0352,
"step": 450
},
{
"epoch": 0.96,
"eval_loss": 0.12221185117959976,
"eval_runtime": 94.6193,
"eval_samples_per_second": 10.569,
"eval_steps_per_second": 1.057,
"step": 450
},
{
"epoch": 0.9813333333333333,
"grad_norm": 1.552643060684204,
"learning_rate": 6.052904625621556e-06,
"loss": 0.0265,
"step": 460
},
{
"epoch": 1.0021333333333333,
"grad_norm": 1.4141130447387695,
"learning_rate": 5.870277323235871e-06,
"loss": 0.0333,
"step": 470
},
{
"epoch": 1.0234666666666667,
"grad_norm": 0.3539254665374756,
"learning_rate": 5.686444367728494e-06,
"loss": 0.0259,
"step": 480
},
{
"epoch": 1.0448,
"grad_norm": 0.9785445332527161,
"learning_rate": 5.501660435132871e-06,
"loss": 0.0318,
"step": 490
},
{
"epoch": 1.0661333333333334,
"grad_norm": 0.2237013727426529,
"learning_rate": 5.316181518934319e-06,
"loss": 0.0193,
"step": 500
},
{
"epoch": 1.0661333333333334,
"eval_loss": 0.14582864940166473,
"eval_runtime": 93.6363,
"eval_samples_per_second": 10.68,
"eval_steps_per_second": 1.068,
"step": 500
},
{
"epoch": 1.0874666666666666,
"grad_norm": 1.417123556137085,
"learning_rate": 5.130264575425225e-06,
"loss": 0.0224,
"step": 510
},
{
"epoch": 1.1088,
"grad_norm": 0.6007033586502075,
"learning_rate": 4.944167167726367e-06,
"loss": 0.0325,
"step": 520
},
{
"epoch": 1.1301333333333332,
"grad_norm": 0.901002049446106,
"learning_rate": 4.758147108967585e-06,
"loss": 0.0333,
"step": 530
},
{
"epoch": 1.1514666666666666,
"grad_norm": 0.7341068387031555,
"learning_rate": 4.572462105122078e-06,
"loss": 0.0251,
"step": 540
},
{
"epoch": 1.1728,
"grad_norm": 0.6192936897277832,
"learning_rate": 4.3873693979891705e-06,
"loss": 0.0332,
"step": 550
},
{
"epoch": 1.1728,
"eval_loss": 0.15217936038970947,
"eval_runtime": 93.0881,
"eval_samples_per_second": 10.743,
"eval_steps_per_second": 1.074,
"step": 550
},
{
"epoch": 1.1941333333333333,
"grad_norm": 1.448577880859375,
"learning_rate": 4.203125408820106e-06,
"loss": 0.0334,
"step": 560
},
{
"epoch": 1.2154666666666667,
"grad_norm": 0.5954573154449463,
"learning_rate": 4.019985383080632e-06,
"loss": 0.0213,
"step": 570
},
{
"epoch": 1.2368000000000001,
"grad_norm": 2.0211377143859863,
"learning_rate": 3.838203036842446e-06,
"loss": 0.0252,
"step": 580
},
{
"epoch": 1.2581333333333333,
"grad_norm": 6.006139278411865,
"learning_rate": 3.65803020529343e-06,
"loss": 0.0237,
"step": 590
},
{
"epoch": 1.2794666666666665,
"grad_norm": 1.2327594757080078,
"learning_rate": 3.4797164938536113e-06,
"loss": 0.0264,
"step": 600
},
{
"epoch": 1.2794666666666665,
"eval_loss": 0.125369593501091,
"eval_runtime": 93.3127,
"eval_samples_per_second": 10.717,
"eval_steps_per_second": 1.072,
"step": 600
},
{
"epoch": 1.3008,
"grad_norm": 0.6135037541389465,
"learning_rate": 3.3035089323801323e-06,
"loss": 0.0254,
"step": 610
},
{
"epoch": 1.3221333333333334,
"grad_norm": 0.7361701726913452,
"learning_rate": 3.1296516329403626e-06,
"loss": 0.0222,
"step": 620
},
{
"epoch": 1.3434666666666666,
"grad_norm": 6.391972064971924,
"learning_rate": 2.9583854516271815e-06,
"loss": 0.0206,
"step": 630
},
{
"epoch": 1.3648,
"grad_norm": 0.5487813949584961,
"learning_rate": 2.7899476548850046e-06,
"loss": 0.02,
"step": 640
},
{
"epoch": 1.3861333333333334,
"grad_norm": 0.12174206227064133,
"learning_rate": 2.624571590808781e-06,
"loss": 0.0246,
"step": 650
},
{
"epoch": 1.3861333333333334,
"eval_loss": 0.1312500536441803,
"eval_runtime": 94.5226,
"eval_samples_per_second": 10.579,
"eval_steps_per_second": 1.058,
"step": 650
},
{
"epoch": 1.4074666666666666,
"grad_norm": 4.603039264678955,
"learning_rate": 2.4624863658713383e-06,
"loss": 0.0271,
"step": 660
},
{
"epoch": 1.4288,
"grad_norm": 0.7074569463729858,
"learning_rate": 2.3039165275269214e-06,
"loss": 0.0234,
"step": 670
},
{
"epoch": 1.4501333333333333,
"grad_norm": 1.057719111442566,
"learning_rate": 2.1490817531306778e-06,
"loss": 0.016,
"step": 680
},
{
"epoch": 1.4714666666666667,
"grad_norm": 0.960046648979187,
"learning_rate": 1.99819654560496e-06,
"loss": 0.0303,
"step": 690
},
{
"epoch": 1.4928,
"grad_norm": 2.026841878890991,
"learning_rate": 1.851469936274174e-06,
"loss": 0.0201,
"step": 700
},
{
"epoch": 1.4928,
"eval_loss": 0.14220145344734192,
"eval_runtime": 92.0165,
"eval_samples_per_second": 10.868,
"eval_steps_per_second": 1.087,
"step": 700
},
{
"epoch": 1.5141333333333333,
"grad_norm": 0.7687983512878418,
"learning_rate": 1.7091051952797404e-06,
"loss": 0.0185,
"step": 710
},
{
"epoch": 1.5354666666666668,
"grad_norm": 0.5626060962677002,
"learning_rate": 1.571299549976456e-06,
"loss": 0.0251,
"step": 720
},
{
"epoch": 1.5568,
"grad_norm": 0.8809129595756531,
"learning_rate": 1.4382439117002938e-06,
"loss": 0.0257,
"step": 730
},
{
"epoch": 1.5781333333333334,
"grad_norm": 0.23133313655853271,
"learning_rate": 1.3101226112862232e-06,
"loss": 0.0183,
"step": 740
},
{
"epoch": 1.5994666666666668,
"grad_norm": 0.40013575553894043,
"learning_rate": 1.1871131437024291e-06,
"loss": 0.0151,
"step": 750
},
{
"epoch": 1.5994666666666668,
"eval_loss": 0.14588288962841034,
"eval_runtime": 92.6003,
"eval_samples_per_second": 10.799,
"eval_steps_per_second": 1.08,
"step": 750
},
{
"epoch": 1.6208,
"grad_norm": 4.253695487976074,
"learning_rate": 1.0693859221547115e-06,
"loss": 0.0228,
"step": 760
},
{
"epoch": 1.6421333333333332,
"grad_norm": 0.33811044692993164,
"learning_rate": 9.571040420017324e-07,
"loss": 0.0171,
"step": 770
},
{
"epoch": 1.6634666666666666,
"grad_norm": 1.7618757486343384,
"learning_rate": 8.504230548081499e-07,
"loss": 0.0289,
"step": 780
},
{
"epoch": 1.6848,
"grad_norm": 1.377174973487854,
"learning_rate": 7.4949075284868e-07,
"loss": 0.0274,
"step": 790
},
{
"epoch": 1.7061333333333333,
"grad_norm": 0.5281645655632019,
"learning_rate": 6.544469643616192e-07,
"loss": 0.0206,
"step": 800
},
{
"epoch": 1.7061333333333333,
"eval_loss": 0.14863522350788116,
"eval_runtime": 92.7258,
"eval_samples_per_second": 10.784,
"eval_steps_per_second": 1.078,
"step": 800
},
{
"epoch": 1.7274666666666667,
"grad_norm": 0.5536366701126099,
"learning_rate": 5.654233598354752e-07,
"loss": 0.0235,
"step": 810
},
{
"epoch": 1.7488000000000001,
"grad_norm": 0.9278081059455872,
"learning_rate": 4.825432695970673e-07,
"loss": 0.0378,
"step": 820
},
{
"epoch": 1.7701333333333333,
"grad_norm": 1.1352832317352295,
"learning_rate": 4.0592151295382464e-07,
"loss": 0.0225,
"step": 830
},
{
"epoch": 1.7914666666666665,
"grad_norm": 0.7254970669746399,
"learning_rate": 3.356642391269405e-07,
"loss": 0.0186,
"step": 840
},
{
"epoch": 1.8128,
"grad_norm": 0.21338777244091034,
"learning_rate": 2.7186878019580197e-07,
"loss": 0.0166,
"step": 850
},
{
"epoch": 1.8128,
"eval_loss": 0.14758259057998657,
"eval_runtime": 91.7262,
"eval_samples_per_second": 10.902,
"eval_steps_per_second": 1.09,
"step": 850
},
{
"epoch": 1.8341333333333334,
"grad_norm": 1.0462194681167603,
"learning_rate": 2.1462351625736677e-07,
"loss": 0.0133,
"step": 860
},
{
"epoch": 1.8554666666666666,
"grad_norm": 1.0201510190963745,
"learning_rate": 1.6400775298734017e-07,
"loss": 0.0315,
"step": 870
},
{
"epoch": 1.8768,
"grad_norm": 1.2303000688552856,
"learning_rate": 1.2009161177273742e-07,
"loss": 0.022,
"step": 880
},
{
"epoch": 1.8981333333333335,
"grad_norm": 0.4334850013256073,
"learning_rate": 8.293593256805842e-08,
"loss": 0.0233,
"step": 890
},
{
"epoch": 1.9194666666666667,
"grad_norm": 0.186690554022789,
"learning_rate": 5.259218960964874e-08,
"loss": 0.0189,
"step": 900
},
{
"epoch": 1.9194666666666667,
"eval_loss": 0.1466066986322403,
"eval_runtime": 93.9322,
"eval_samples_per_second": 10.646,
"eval_steps_per_second": 1.065,
"step": 900
},
{
"epoch": 1.9407999999999999,
"grad_norm": 0.8606347441673279,
"learning_rate": 2.9102420105009966e-08,
"loss": 0.0222,
"step": 910
},
{
"epoch": 1.9621333333333333,
"grad_norm": 0.2717781662940979,
"learning_rate": 1.249916599585954e-08,
"loss": 0.0229,
"step": 920
},
{
"epoch": 1.9834666666666667,
"grad_norm": 1.6033730506896973,
"learning_rate": 2.80542887561297e-09,
"loss": 0.0173,
"step": 930
}
],
"logging_steps": 10,
"max_steps": 938,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.267684534551839e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}