{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 6689,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.014949917775452235,
"grad_norm": 0.09534981846809387,
"learning_rate": 6.666666666666667e-06,
"loss": 2.4036,
"step": 100
},
{
"epoch": 0.02989983555090447,
"grad_norm": 0.20187412202358246,
"learning_rate": 1.3333333333333333e-05,
"loss": 2.3891,
"step": 200
},
{
"epoch": 0.044849753326356705,
"grad_norm": 0.28210878372192383,
"learning_rate": 2e-05,
"loss": 2.338,
"step": 300
},
{
"epoch": 0.05979967110180894,
"grad_norm": 0.40970563888549805,
"learning_rate": 1.9987913057228085e-05,
"loss": 2.2689,
"step": 400
},
{
"epoch": 0.07474958887726117,
"grad_norm": 0.49385392665863037,
"learning_rate": 1.995168144774945e-05,
"loss": 2.1997,
"step": 500
},
{
"epoch": 0.08969950665271341,
"grad_norm": 0.523034393787384,
"learning_rate": 1.989139275744216e-05,
"loss": 2.2136,
"step": 600
},
{
"epoch": 0.10464942442816565,
"grad_norm": 0.5628554224967957,
"learning_rate": 1.9807192727496118e-05,
"loss": 2.1828,
"step": 700
},
{
"epoch": 0.11959934220361788,
"grad_norm": 0.6401134133338928,
"learning_rate": 1.9699284902099995e-05,
"loss": 2.1685,
"step": 800
},
{
"epoch": 0.1345492599790701,
"grad_norm": 0.6661084890365601,
"learning_rate": 1.9567930136395833e-05,
"loss": 2.1815,
"step": 900
},
{
"epoch": 0.14949917775452234,
"grad_norm": 0.6044815182685852,
"learning_rate": 1.941344596589081e-05,
"loss": 2.1246,
"step": 1000
},
{
"epoch": 0.16444909552997458,
"grad_norm": 0.8036034107208252,
"learning_rate": 1.923620583885054e-05,
"loss": 2.17,
"step": 1100
},
{
"epoch": 0.17939901330542682,
"grad_norm": 0.7904087901115417,
"learning_rate": 1.9036638213529502e-05,
"loss": 2.1114,
"step": 1200
},
{
"epoch": 0.19434893108087906,
"grad_norm": 0.7112478613853455,
"learning_rate": 1.8815225522420974e-05,
"loss": 2.0999,
"step": 1300
},
{
"epoch": 0.2092988488563313,
"grad_norm": 0.7812981605529785,
"learning_rate": 1.857250300603024e-05,
"loss": 2.1258,
"step": 1400
},
{
"epoch": 0.22424876663178353,
"grad_norm": 0.7886794805526733,
"learning_rate": 1.8309057418990312e-05,
"loss": 2.0669,
"step": 1500
},
{
"epoch": 0.23919868440723577,
"grad_norm": 1.2216062545776367,
"learning_rate": 1.8025525611648004e-05,
"loss": 2.0778,
"step": 1600
},
{
"epoch": 0.254148602182688,
"grad_norm": 0.6833907961845398,
"learning_rate": 1.772259299054919e-05,
"loss": 2.0822,
"step": 1700
},
{
"epoch": 0.2690985199581402,
"grad_norm": 0.7946938276290894,
"learning_rate": 1.740099186154486e-05,
"loss": 2.0679,
"step": 1800
},
{
"epoch": 0.28404843773359245,
"grad_norm": 0.8658699989318848,
"learning_rate": 1.706149965952335e-05,
"loss": 2.083,
"step": 1900
},
{
"epoch": 0.2989983555090447,
"grad_norm": 0.9359118342399597,
"learning_rate": 1.6704937069048125e-05,
"loss": 2.0376,
"step": 2000
},
{
"epoch": 0.3139482732844969,
"grad_norm": 0.8539633750915527,
"learning_rate": 1.6332166040444325e-05,
"loss": 2.0303,
"step": 2100
},
{
"epoch": 0.32889819105994916,
"grad_norm": 1.2063894271850586,
"learning_rate": 1.5944087706129897e-05,
"loss": 2.0159,
"step": 2200
},
{
"epoch": 0.3438481088354014,
"grad_norm": 0.9335761666297913,
"learning_rate": 1.554164020222842e-05,
"loss": 2.0396,
"step": 2300
},
{
"epoch": 0.35879802661085364,
"grad_norm": 1.0571390390396118,
"learning_rate": 1.512579640072957e-05,
"loss": 2.0499,
"step": 2400
},
{
"epoch": 0.3737479443863059,
"grad_norm": 0.9539825916290283,
"learning_rate": 1.4697561557679496e-05,
"loss": 2.075,
"step": 2500
},
{
"epoch": 0.3886978621617581,
"grad_norm": 0.9199327230453491,
"learning_rate": 1.4257970883086375e-05,
"loss": 2.0483,
"step": 2600
},
{
"epoch": 0.40364777993721035,
"grad_norm": 0.8960707783699036,
"learning_rate": 1.3808087038415587e-05,
"loss": 2.0356,
"step": 2700
},
{
"epoch": 0.4185976977126626,
"grad_norm": 0.9793922305107117,
"learning_rate": 1.3348997567724035e-05,
"loss": 2.0384,
"step": 2800
},
{
"epoch": 0.4335476154881148,
"grad_norm": 0.9594945907592773,
"learning_rate": 1.2881812268643615e-05,
"loss": 2.0479,
"step": 2900
},
{
"epoch": 0.44849753326356706,
"grad_norm": 0.8938071131706238,
"learning_rate": 1.2407660509569089e-05,
"loss": 1.9737,
"step": 3000
},
{
"epoch": 0.4634474510390193,
"grad_norm": 0.9922735691070557,
"learning_rate": 1.192768849953589e-05,
"loss": 2.0189,
"step": 3100
},
{
"epoch": 0.47839736881447154,
"grad_norm": 1.1176081895828247,
"learning_rate": 1.1443056517387501e-05,
"loss": 2.0092,
"step": 3200
},
{
"epoch": 0.4933472865899238,
"grad_norm": 1.1819413900375366,
"learning_rate": 1.0954936106930646e-05,
"loss": 1.952,
"step": 3300
},
{
"epoch": 0.508297204365376,
"grad_norm": 1.1036977767944336,
"learning_rate": 1.0464507244858733e-05,
"loss": 1.9881,
"step": 3400
},
{
"epoch": 0.5232471221408282,
"grad_norm": 0.8991079330444336,
"learning_rate": 9.97295548828967e-06,
"loss": 2.0025,
"step": 3500
},
{
"epoch": 0.5381970399162804,
"grad_norm": 1.0416226387023926,
"learning_rate": 9.481469108813673e-06,
"loss": 1.9782,
"step": 3600
},
{
"epoch": 0.5531469576917327,
"grad_norm": 1.1091338396072388,
"learning_rate": 8.991236219979127e-06,
"loss": 1.955,
"step": 3700
},
{
"epoch": 0.5680968754671849,
"grad_norm": 1.1093144416809082,
"learning_rate": 8.503441905160476e-06,
"loss": 1.9671,
"step": 3800
},
{
"epoch": 0.5830467932426372,
"grad_norm": 1.1423636674880981,
"learning_rate": 8.019265352751264e-06,
"loss": 1.9745,
"step": 3900
},
{
"epoch": 0.5979967110180894,
"grad_norm": 1.053029179573059,
"learning_rate": 7.539877005607577e-06,
"loss": 1.9616,
"step": 4000
},
{
"epoch": 0.6129466287935417,
"grad_norm": 1.101280689239502,
"learning_rate": 7.066435731632912e-06,
"loss": 1.9893,
"step": 4100
},
{
"epoch": 0.6278965465689939,
"grad_norm": 0.8910824060440063,
"learning_rate": 6.600086022344145e-06,
"loss": 1.9976,
"step": 4200
},
{
"epoch": 0.6428464643444461,
"grad_norm": 0.9344325661659241,
"learning_rate": 6.141955226190851e-06,
"loss": 1.9563,
"step": 4300
},
{
"epoch": 0.6577963821198983,
"grad_norm": 1.0802608728408813,
"learning_rate": 5.69315082331606e-06,
"loss": 2.0009,
"step": 4400
},
{
"epoch": 0.6727462998953506,
"grad_norm": 1.1256142854690552,
"learning_rate": 5.2547577483464396e-06,
"loss": 1.9526,
"step": 4500
},
{
"epoch": 0.6876962176708028,
"grad_norm": 0.9710372090339661,
"learning_rate": 4.8278357676837474e-06,
"loss": 2.046,
"step": 4600
},
{
"epoch": 0.7026461354462551,
"grad_norm": 1.6460671424865723,
"learning_rate": 4.4134169176376416e-06,
"loss": 1.982,
"step": 4700
},
{
"epoch": 0.7175960532217073,
"grad_norm": 1.0602085590362549,
"learning_rate": 4.0125030095929505e-06,
"loss": 2.0024,
"step": 4800
},
{
"epoch": 0.7325459709971596,
"grad_norm": 0.9644867181777954,
"learning_rate": 3.6260632082422733e-06,
"loss": 1.9592,
"step": 4900
},
{
"epoch": 0.7474958887726117,
"grad_norm": 1.122348427772522,
"learning_rate": 3.255031688738354e-06,
"loss": 1.9671,
"step": 5000
},
{
"epoch": 0.7624458065480639,
"grad_norm": 1.3032220602035522,
"learning_rate": 2.9003053784297598e-06,
"loss": 1.9744,
"step": 5100
},
{
"epoch": 0.7773957243235162,
"grad_norm": 1.0730985403060913,
"learning_rate": 2.5627417886389614e-06,
"loss": 1.9526,
"step": 5200
},
{
"epoch": 0.7923456420989684,
"grad_norm": 0.9929338097572327,
"learning_rate": 2.2431569417243016e-06,
"loss": 2.016,
"step": 5300
},
{
"epoch": 0.8072955598744207,
"grad_norm": 0.9761891961097717,
"learning_rate": 1.942323398436864e-06,
"loss": 1.9794,
"step": 5400
},
{
"epoch": 0.8222454776498729,
"grad_norm": 1.0812045335769653,
"learning_rate": 1.660968390340969e-06,
"loss": 1.9758,
"step": 5500
},
{
"epoch": 0.8371953954253252,
"grad_norm": 1.8594285249710083,
"learning_rate": 1.3997720618128995e-06,
"loss": 1.9317,
"step": 5600
},
{
"epoch": 0.8521453132007774,
"grad_norm": 0.884612500667572,
"learning_rate": 1.1593658258676953e-06,
"loss": 1.9349,
"step": 5700
},
{
"epoch": 0.8670952309762296,
"grad_norm": 1.1157561540603638,
"learning_rate": 9.403308377885268e-07,
"loss": 2.0075,
"step": 5800
},
{
"epoch": 0.8820451487516818,
"grad_norm": 0.8770403861999512,
"learning_rate": 7.43196590248586e-07,
"loss": 1.9874,
"step": 5900
},
{
"epoch": 0.8969950665271341,
"grad_norm": 0.9389327764511108,
"learning_rate": 5.684396333215559e-07,
"loss": 1.9794,
"step": 6000
},
{
"epoch": 0.9119449843025863,
"grad_norm": 1.3195427656173706,
"learning_rate": 4.1648242247490753e-07,
"loss": 1.973,
"step": 6100
},
{
"epoch": 0.9268949020780386,
"grad_norm": 1.1389738321304321,
"learning_rate": 2.876922973308993e-07,
"loss": 1.9679,
"step": 6200
},
{
"epoch": 0.9418448198534908,
"grad_norm": 0.9646668434143066,
"learning_rate": 1.8238059366397222e-07,
"loss": 1.9918,
"step": 6300
},
{
"epoch": 0.9567947376289431,
"grad_norm": 1.3664929866790771,
"learning_rate": 1.0080189078121138e-07,
"loss": 1.9648,
"step": 6400
},
{
"epoch": 0.9717446554043953,
"grad_norm": 1.1920166015625,
"learning_rate": 4.3153396105249935e-08,
"loss": 2.0209,
"step": 6500
},
{
"epoch": 0.9866945731798475,
"grad_norm": 1.018936276435852,
"learning_rate": 9.57446844729315e-09,
"loss": 1.9294,
"step": 6600
},
{
"epoch": 1.0,
"step": 6689,
"total_flos": 1.215516124053504e+17,
"train_loss": 2.0444058272706354,
"train_runtime": 2120.799,
"train_samples_per_second": 6.308,
"train_steps_per_second": 3.154
}
],
"logging_steps": 100,
"max_steps": 6689,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.215516124053504e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}