{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 10745,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.009306654257794323,
"grad_norm": 0.21737903356552124,
"learning_rate": 2.0000000000000003e-06,
"loss": 2.4302,
"step": 100
},
{
"epoch": 0.018613308515588647,
"grad_norm": 0.19527916610240936,
"learning_rate": 4.000000000000001e-06,
"loss": 2.4011,
"step": 200
},
{
"epoch": 0.02791996277338297,
"grad_norm": 0.23809583485126495,
"learning_rate": 6e-06,
"loss": 2.3287,
"step": 300
},
{
"epoch": 0.03722661703117729,
"grad_norm": 0.3224714696407318,
"learning_rate": 8.000000000000001e-06,
"loss": 2.25,
"step": 400
},
{
"epoch": 0.04653327128897161,
"grad_norm": 0.3738478720188141,
"learning_rate": 1e-05,
"loss": 2.2078,
"step": 500
},
{
"epoch": 0.05583992554676594,
"grad_norm": 0.3933287560939789,
"learning_rate": 1.2e-05,
"loss": 2.1489,
"step": 600
},
{
"epoch": 0.06514657980456026,
"grad_norm": 0.4187352657318115,
"learning_rate": 1.4e-05,
"loss": 2.1003,
"step": 700
},
{
"epoch": 0.07445323406235459,
"grad_norm": 0.5026760101318359,
"learning_rate": 1.6000000000000003e-05,
"loss": 2.0642,
"step": 800
},
{
"epoch": 0.08375988832014891,
"grad_norm": 0.44242504239082336,
"learning_rate": 1.8e-05,
"loss": 2.0519,
"step": 900
},
{
"epoch": 0.09306654257794322,
"grad_norm": 0.44066929817199707,
"learning_rate": 2e-05,
"loss": 2.0234,
"step": 1000
},
{
"epoch": 0.10237319683573755,
"grad_norm": 0.47815296053886414,
"learning_rate": 1.999480400829132e-05,
"loss": 1.9904,
"step": 1100
},
{
"epoch": 0.11167985109353187,
"grad_norm": 0.7336047291755676,
"learning_rate": 1.997922143283124e-05,
"loss": 1.9952,
"step": 1200
},
{
"epoch": 0.1209865053513262,
"grad_norm": 0.5518016219139099,
"learning_rate": 1.995326846700634e-05,
"loss": 1.993,
"step": 1300
},
{
"epoch": 0.13029315960912052,
"grad_norm": 0.5234813094139099,
"learning_rate": 1.9916972081095672e-05,
"loss": 1.9719,
"step": 1400
},
{
"epoch": 0.13959981386691483,
"grad_norm": 0.5788673758506775,
"learning_rate": 1.9870369994243283e-05,
"loss": 1.9328,
"step": 1500
},
{
"epoch": 0.14890646812470917,
"grad_norm": 0.4599522650241852,
"learning_rate": 1.981351063526055e-05,
"loss": 1.9526,
"step": 1600
},
{
"epoch": 0.15821312238250348,
"grad_norm": 0.5187841057777405,
"learning_rate": 1.9746453092299042e-05,
"loss": 1.9583,
"step": 1700
},
{
"epoch": 0.16751977664029782,
"grad_norm": 0.501110851764679,
"learning_rate": 1.9669267051446208e-05,
"loss": 1.9363,
"step": 1800
},
{
"epoch": 0.17682643089809214,
"grad_norm": 0.6504944562911987,
"learning_rate": 1.95820327243077e-05,
"loss": 1.9434,
"step": 1900
},
{
"epoch": 0.18613308515588645,
"grad_norm": 0.7284849286079407,
"learning_rate": 1.9484840764651624e-05,
"loss": 1.8943,
"step": 2000
},
{
"epoch": 0.19543973941368079,
"grad_norm": 0.4998358190059662,
"learning_rate": 1.9377792174201295e-05,
"loss": 1.9032,
"step": 2100
},
{
"epoch": 0.2047463936714751,
"grad_norm": 0.6522166728973389,
"learning_rate": 1.9260998197674385e-05,
"loss": 1.899,
"step": 2200
},
{
"epoch": 0.21405304792926944,
"grad_norm": 0.48694345355033875,
"learning_rate": 1.9134580207177625e-05,
"loss": 1.8996,
"step": 2300
},
{
"epoch": 0.22335970218706375,
"grad_norm": 0.5545830130577087,
"learning_rate": 1.899866957607711e-05,
"loss": 1.9032,
"step": 2400
},
{
"epoch": 0.23266635644485809,
"grad_norm": 0.4725993871688843,
"learning_rate": 1.88534075424753e-05,
"loss": 1.908,
"step": 2500
},
{
"epoch": 0.2419730107026524,
"grad_norm": 0.5502188801765442,
"learning_rate": 1.8698945062436633e-05,
"loss": 1.9072,
"step": 2600
},
{
"epoch": 0.25127966496044674,
"grad_norm": 0.4873671233654022,
"learning_rate": 1.8535442653114228e-05,
"loss": 1.9038,
"step": 2700
},
{
"epoch": 0.26058631921824105,
"grad_norm": 0.5511804819107056,
"learning_rate": 1.8363070225940714e-05,
"loss": 1.8881,
"step": 2800
},
{
"epoch": 0.26989297347603536,
"grad_norm": 0.5408052802085876,
"learning_rate": 1.8182006910056582e-05,
"loss": 1.8774,
"step": 2900
},
{
"epoch": 0.27919962773382967,
"grad_norm": 0.7886127829551697,
"learning_rate": 1.7992440866159443e-05,
"loss": 1.867,
"step": 3000
},
{
"epoch": 0.28850628199162404,
"grad_norm": 0.7290304899215698,
"learning_rate": 1.7794569090967763e-05,
"loss": 1.8672,
"step": 3100
},
{
"epoch": 0.29781293624941835,
"grad_norm": 0.41706952452659607,
"learning_rate": 1.7588597212502204e-05,
"loss": 1.871,
"step": 3200
},
{
"epoch": 0.30711959050721266,
"grad_norm": 0.5717401504516602,
"learning_rate": 1.73747392763973e-05,
"loss": 1.8407,
"step": 3300
},
{
"epoch": 0.31642624476500697,
"grad_norm": 0.5187168717384338,
"learning_rate": 1.715321752346563e-05,
"loss": 1.8762,
"step": 3400
},
{
"epoch": 0.3257328990228013,
"grad_norm": 0.3989369869232178,
"learning_rate": 1.6924262158745493e-05,
"loss": 1.8801,
"step": 3500
},
{
"epoch": 0.33503955328059565,
"grad_norm": 0.5155192017555237,
"learning_rate": 1.668811111227224e-05,
"loss": 1.8639,
"step": 3600
},
{
"epoch": 0.34434620753838996,
"grad_norm": 0.4534810781478882,
"learning_rate": 1.644500979182176e-05,
"loss": 1.8918,
"step": 3700
},
{
"epoch": 0.35365286179618427,
"grad_norm": 0.7457600235939026,
"learning_rate": 1.6195210827883146e-05,
"loss": 1.8822,
"step": 3800
},
{
"epoch": 0.3629595160539786,
"grad_norm": 0.448901891708374,
"learning_rate": 1.5938973811125493e-05,
"loss": 1.8462,
"step": 3900
},
{
"epoch": 0.3722661703117729,
"grad_norm": 0.5300151705741882,
"learning_rate": 1.5676565022631696e-05,
"loss": 1.86,
"step": 4000
},
{
"epoch": 0.38157282456956726,
"grad_norm": 0.44534504413604736,
"learning_rate": 1.5408257157179627e-05,
"loss": 1.8671,
"step": 4100
},
{
"epoch": 0.39087947882736157,
"grad_norm": 0.44823992252349854,
"learning_rate": 1.513432903985813e-05,
"loss": 1.8553,
"step": 4200
},
{
"epoch": 0.4001861330851559,
"grad_norm": 0.4523938298225403,
"learning_rate": 1.4855065336312482e-05,
"loss": 1.8587,
"step": 4300
},
{
"epoch": 0.4094927873429502,
"grad_norm": 0.4166322648525238,
"learning_rate": 1.4570756256920318e-05,
"loss": 1.834,
"step": 4400
},
{
"epoch": 0.41879944160074456,
"grad_norm": 0.4794185161590576,
"learning_rate": 1.4281697255205478e-05,
"loss": 1.8562,
"step": 4500
},
{
"epoch": 0.42810609585853887,
"grad_norm": 0.5009357333183289,
"learning_rate": 1.3988188720803213e-05,
"loss": 1.8414,
"step": 4600
},
{
"epoch": 0.4374127501163332,
"grad_norm": 0.5215742588043213,
"learning_rate": 1.3690535667295759e-05,
"loss": 1.827,
"step": 4700
},
{
"epoch": 0.4467194043741275,
"grad_norm": 0.49268630146980286,
"learning_rate": 1.338904741524273e-05,
"loss": 1.8466,
"step": 4800
},
{
"epoch": 0.4560260586319218,
"grad_norm": 0.4806290864944458,
"learning_rate": 1.3084037270735714e-05,
"loss": 1.8492,
"step": 4900
},
{
"epoch": 0.46533271288971617,
"grad_norm": 0.4720674157142639,
"learning_rate": 1.2775822199811097e-05,
"loss": 1.8495,
"step": 5000
},
{
"epoch": 0.4746393671475105,
"grad_norm": 0.5893864631652832,
"learning_rate": 1.2464722499059481e-05,
"loss": 1.8153,
"step": 5100
},
{
"epoch": 0.4839460214053048,
"grad_norm": 0.41443273425102234,
"learning_rate": 1.2151061462774006e-05,
"loss": 1.852,
"step": 5200
},
{
"epoch": 0.4932526756630991,
"grad_norm": 0.46977949142456055,
"learning_rate": 1.1835165046983436e-05,
"loss": 1.8144,
"step": 5300
},
{
"epoch": 0.5025593299208935,
"grad_norm": 0.41084080934524536,
"learning_rate": 1.1517361530719233e-05,
"loss": 1.815,
"step": 5400
},
{
"epoch": 0.5118659841786878,
"grad_norm": 0.4808320701122284,
"learning_rate": 1.1197981174868488e-05,
"loss": 1.8341,
"step": 5500
},
{
"epoch": 0.5211726384364821,
"grad_norm": 0.46916091442108154,
"learning_rate": 1.0877355878967391e-05,
"loss": 1.8478,
"step": 5600
},
{
"epoch": 0.5304792926942764,
"grad_norm": 0.5183443427085876,
"learning_rate": 1.0555818836291759e-05,
"loss": 1.834,
"step": 5700
},
{
"epoch": 0.5397859469520707,
"grad_norm": 0.4422902762889862,
"learning_rate": 1.0233704187603143e-05,
"loss": 1.7978,
"step": 5800
},
{
"epoch": 0.549092601209865,
"grad_norm": 0.5260526537895203,
"learning_rate": 9.911346673910318e-06,
"loss": 1.8229,
"step": 5900
},
{
"epoch": 0.5583992554676593,
"grad_norm": 0.4582798480987549,
"learning_rate": 9.589081288606952e-06,
"loss": 1.8066,
"step": 6000
},
{
"epoch": 0.5677059097254537,
"grad_norm": 0.4076642096042633,
"learning_rate": 9.267242929347052e-06,
"loss": 1.8012,
"step": 6100
},
{
"epoch": 0.5770125639832481,
"grad_norm": 0.467779278755188,
"learning_rate": 8.946166050019875e-06,
"loss": 1.8263,
"step": 6200
},
{
"epoch": 0.5863192182410424,
"grad_norm": 0.4323495030403137,
"learning_rate": 8.626184313185979e-06,
"loss": 1.8198,
"step": 6300
},
{
"epoch": 0.5956258724988367,
"grad_norm": 0.5493050217628479,
"learning_rate": 8.307630243335676e-06,
"loss": 1.8031,
"step": 6400
},
{
"epoch": 0.604932526756631,
"grad_norm": 0.49775922298431396,
"learning_rate": 7.990834881330098e-06,
"loss": 1.818,
"step": 6500
},
{
"epoch": 0.6142391810144253,
"grad_norm": 0.48729395866394043,
"learning_rate": 7.67612744038412e-06,
"loss": 1.8327,
"step": 6600
},
{
"epoch": 0.6235458352722196,
"grad_norm": 0.47022220492362976,
"learning_rate": 7.363834963948499e-06,
"loss": 1.8216,
"step": 6700
},
{
"epoch": 0.6328524895300139,
"grad_norm": 0.5118712186813354,
"learning_rate": 7.0542819858468895e-06,
"loss": 1.8131,
"step": 6800
},
{
"epoch": 0.6421591437878083,
"grad_norm": 0.5807639956474304,
"learning_rate": 6.747790193020808e-06,
"loss": 1.832,
"step": 6900
},
{
"epoch": 0.6514657980456026,
"grad_norm": 0.6032737493515015,
"learning_rate": 6.444678091233122e-06,
"loss": 1.8331,
"step": 7000
},
{
"epoch": 0.6607724523033969,
"grad_norm": 0.481652170419693,
"learning_rate": 6.145260674077363e-06,
"loss": 1.8386,
"step": 7100
},
{
"epoch": 0.6700791065611913,
"grad_norm": 0.446353554725647,
"learning_rate": 5.84984909563693e-06,
"loss": 1.8328,
"step": 7200
},
{
"epoch": 0.6793857608189856,
"grad_norm": 0.5481130480766296,
"learning_rate": 5.558750347134265e-06,
"loss": 1.8057,
"step": 7300
},
{
"epoch": 0.6886924150767799,
"grad_norm": 0.45337405800819397,
"learning_rate": 5.2722669379061e-06,
"loss": 1.8336,
"step": 7400
},
{
"epoch": 0.6979990693345742,
"grad_norm": 0.5786927342414856,
"learning_rate": 4.990696581036231e-06,
"loss": 1.8045,
"step": 7500
},
{
"epoch": 0.7073057235923685,
"grad_norm": 0.4723381996154785,
"learning_rate": 4.7143318839726035e-06,
"loss": 1.8324,
"step": 7600
},
{
"epoch": 0.7166123778501629,
"grad_norm": 0.477597713470459,
"learning_rate": 4.443460044450125e-06,
"loss": 1.8287,
"step": 7700
},
{
"epoch": 0.7259190321079572,
"grad_norm": 0.4698321223258972,
"learning_rate": 4.1783625520352435e-06,
"loss": 1.8065,
"step": 7800
},
{
"epoch": 0.7352256863657515,
"grad_norm": 0.6439311504364014,
"learning_rate": 3.9193148956024795e-06,
"loss": 1.8123,
"step": 7900
},
{
"epoch": 0.7445323406235458,
"grad_norm": 0.5482531189918518,
"learning_rate": 3.666586277046825e-06,
"loss": 1.8095,
"step": 8000
},
{
"epoch": 0.7538389948813402,
"grad_norm": 0.5213837623596191,
"learning_rate": 3.420439331529597e-06,
"loss": 1.8495,
"step": 8100
},
{
"epoch": 0.7631456491391345,
"grad_norm": 0.5174638032913208,
"learning_rate": 3.1811298545483937e-06,
"loss": 1.8022,
"step": 8200
},
{
"epoch": 0.7724523033969288,
"grad_norm": 0.504177987575531,
"learning_rate": 2.948906536114864e-06,
"loss": 1.7781,
"step": 8300
},
{
"epoch": 0.7817589576547231,
"grad_norm": 0.4790373146533966,
"learning_rate": 2.724010702316429e-06,
"loss": 1.8379,
"step": 8400
},
{
"epoch": 0.7910656119125175,
"grad_norm": 0.599861204624176,
"learning_rate": 2.506676064530641e-06,
"loss": 1.8089,
"step": 8500
},
{
"epoch": 0.8003722661703118,
"grad_norm": 0.45454996824264526,
"learning_rate": 2.2971284765526847e-06,
"loss": 1.8191,
"step": 8600
},
{
"epoch": 0.8096789204281061,
"grad_norm": 0.5365225076675415,
"learning_rate": 2.095585699888504e-06,
"loss": 1.8032,
"step": 8700
},
{
"epoch": 0.8189855746859004,
"grad_norm": 0.5229623913764954,
"learning_rate": 1.9022571774573995e-06,
"loss": 1.811,
"step": 8800
},
{
"epoch": 0.8282922289436947,
"grad_norm": 0.516040563583374,
"learning_rate": 1.7173438159392863e-06,
"loss": 1.7983,
"step": 8900
},
{
"epoch": 0.8375988832014891,
"grad_norm": 0.4820205271244049,
"learning_rate": 1.541037776992822e-06,
"loss": 1.814,
"step": 9000
},
{
"epoch": 0.8469055374592834,
"grad_norm": 0.4865548014640808,
"learning_rate": 1.373522277561321e-06,
"loss": 1.8004,
"step": 9100
},
{
"epoch": 0.8562121917170777,
"grad_norm": 0.4806259870529175,
"learning_rate": 1.214971399474002e-06,
"loss": 1.8003,
"step": 9200
},
{
"epoch": 0.865518845974872,
"grad_norm": 0.47025734186172485,
"learning_rate": 1.0655499085404587e-06,
"loss": 1.7906,
"step": 9300
},
{
"epoch": 0.8748255002326664,
"grad_norm": 0.5928367376327515,
"learning_rate": 9.254130833262876e-07,
"loss": 1.7938,
"step": 9400
},
{
"epoch": 0.8841321544904607,
"grad_norm": 0.4831337332725525,
"learning_rate": 7.94706553787864e-07,
"loss": 1.8095,
"step": 9500
},
{
"epoch": 0.893438808748255,
"grad_norm": 0.43040913343429565,
"learning_rate": 6.735661499339441e-07,
"loss": 1.8055,
"step": 9600
},
{
"epoch": 0.9027454630060493,
"grad_norm": 0.49869057536125183,
"learning_rate": 5.621177606713257e-07,
"loss": 1.7809,
"step": 9700
},
{
"epoch": 0.9120521172638436,
"grad_norm": 0.5046131014823914,
"learning_rate": 4.6047720298132205e-07,
"loss": 1.7748,
"step": 9800
},
{
"epoch": 0.9213587715216379,
"grad_norm": 0.5693614482879639,
"learning_rate": 3.687501015629369e-07,
"loss": 1.8394,
"step": 9900
},
{
"epoch": 0.9306654257794323,
"grad_norm": 0.4276902377605438,
"learning_rate": 2.8703177906785675e-07,
"loss": 1.8034,
"step": 10000
},
{
"epoch": 0.9399720800372267,
"grad_norm": 0.47486281394958496,
"learning_rate": 2.1540715704130745e-07,
"loss": 1.8187,
"step": 10100
},
{
"epoch": 0.949278734295021,
"grad_norm": 0.43235480785369873,
"learning_rate": 1.539506676717284e-07,
"loss": 1.8203,
"step": 10200
},
{
"epoch": 0.9585853885528153,
"grad_norm": 0.5110422968864441,
"learning_rate": 1.0272617644095928e-07,
"loss": 1.776,
"step": 10300
},
{
"epoch": 0.9678920428106096,
"grad_norm": 0.5000057220458984,
"learning_rate": 6.178691575534412e-08,
"loss": 1.8324,
"step": 10400
},
{
"epoch": 0.9771986970684039,
"grad_norm": 0.45896321535110474,
"learning_rate": 3.143030863470342e-08,
"loss": 1.8218,
"step": 10500
},
{
"epoch": 0.9865053513261982,
"grad_norm": 0.42800936102867126,
"learning_rate": 1.1074690469762284e-08,
"loss": 1.7944,
"step": 10600
},
{
"epoch": 0.9958120055839925,
"grad_norm": 0.4289446175098419,
"learning_rate": 1.0995469222141453e-09,
"loss": 1.8245,
"step": 10700
},
{
"epoch": 1.0,
"step": 10745,
"total_flos": 8.078554667920916e+17,
"train_loss": 1.8799324656930732,
"train_runtime": 2971.2666,
"train_samples_per_second": 28.928,
"train_steps_per_second": 3.616
}
],
"logging_steps": 100,
"max_steps": 10745,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 8.078554667920916e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}