{
"best_global_step": 6522,
"best_metric": 3.4418444633483887,
"best_model_checkpoint": "sindhibert_session6r/checkpoint-6522",
"epoch": 2.0,
"eval_steps": 3261,
"global_step": 6522,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.030672494440610383,
"grad_norm": 5.580650806427002,
"learning_rate": 1.2659846547314581e-06,
"loss": 14.62664794921875,
"step": 100
},
{
"epoch": 0.061344988881220766,
"grad_norm": 5.809163570404053,
"learning_rate": 2.544757033248082e-06,
"loss": 14.634923095703124,
"step": 200
},
{
"epoch": 0.09201748332183114,
"grad_norm": 5.849374294281006,
"learning_rate": 3.8235294117647055e-06,
"loss": 14.635355224609375,
"step": 300
},
{
"epoch": 0.12268997776244153,
"grad_norm": 5.751659870147705,
"learning_rate": 4.999978994815915e-06,
"loss": 14.59581298828125,
"step": 400
},
{
"epoch": 0.1533624722030519,
"grad_norm": 5.633106708526611,
"learning_rate": 4.996172776747675e-06,
"loss": 14.598475341796876,
"step": 500
},
{
"epoch": 0.18403496664366228,
"grad_norm": 5.498394012451172,
"learning_rate": 4.9858139123513936e-06,
"loss": 14.56541259765625,
"step": 600
},
{
"epoch": 0.2147074610842727,
"grad_norm": 5.918303489685059,
"learning_rate": 4.968929594446166e-06,
"loss": 14.57242919921875,
"step": 700
},
{
"epoch": 0.24537995552488306,
"grad_norm": 5.2229814529418945,
"learning_rate": 4.945564145670534e-06,
"loss": 14.53744873046875,
"step": 800
},
{
"epoch": 0.27605244996549344,
"grad_norm": 5.516529560089111,
"learning_rate": 4.9157789021321375e-06,
"loss": 14.531802978515625,
"step": 900
},
{
"epoch": 0.3067249444061038,
"grad_norm": 5.460958003997803,
"learning_rate": 4.879652052395696e-06,
"loss": 14.5186328125,
"step": 1000
},
{
"epoch": 0.3373974388467142,
"grad_norm": 5.609668731689453,
"learning_rate": 4.837278432231982e-06,
"loss": 14.51820068359375,
"step": 1100
},
{
"epoch": 0.36806993328732457,
"grad_norm": 5.466991424560547,
"learning_rate": 4.788769275666605e-06,
"loss": 14.477803955078125,
"step": 1200
},
{
"epoch": 0.398742427727935,
"grad_norm": 5.586793899536133,
"learning_rate": 4.7342519229820996e-06,
"loss": 14.506885986328125,
"step": 1300
},
{
"epoch": 0.4294149221685454,
"grad_norm": 5.488733768463135,
"learning_rate": 4.673869486439846e-06,
"loss": 14.464423828125,
"step": 1400
},
{
"epoch": 0.46008741660915575,
"grad_norm": 5.8738017082214355,
"learning_rate": 4.60778047459934e-06,
"loss": 14.470579833984376,
"step": 1500
},
{
"epoch": 0.49075991104976613,
"grad_norm": 5.592001438140869,
"learning_rate": 4.536158376220971e-06,
"loss": 14.42775146484375,
"step": 1600
},
{
"epoch": 0.5214324054903765,
"grad_norm": 5.235401153564453,
"learning_rate": 4.459191204844634e-06,
"loss": 14.42791259765625,
"step": 1700
},
{
"epoch": 0.5521048999309869,
"grad_norm": 5.959557056427002,
"learning_rate": 4.377081005239665e-06,
"loss": 14.444658203125,
"step": 1800
},
{
"epoch": 0.5827773943715973,
"grad_norm": 5.362053394317627,
"learning_rate": 4.2900433230217155e-06,
"loss": 14.463519287109374,
"step": 1900
},
{
"epoch": 0.6134498888122076,
"grad_norm": 5.445187568664551,
"learning_rate": 4.198306638828863e-06,
"loss": 14.472896728515625,
"step": 2000
},
{
"epoch": 0.644122383252818,
"grad_norm": 5.700764179229736,
"learning_rate": 4.102111768542294e-06,
"loss": 14.44870849609375,
"step": 2100
},
{
"epoch": 0.6747948776934284,
"grad_norm": 5.197051525115967,
"learning_rate": 4.001711231126012e-06,
"loss": 14.42983642578125,
"step": 2200
},
{
"epoch": 0.7054673721340388,
"grad_norm": 5.519675254821777,
"learning_rate": 3.897368585745059e-06,
"loss": 14.4178271484375,
"step": 2300
},
{
"epoch": 0.7361398665746491,
"grad_norm": 5.8170647621154785,
"learning_rate": 3.789357739902342e-06,
"loss": 14.38358642578125,
"step": 2400
},
{
"epoch": 0.7668123610152595,
"grad_norm": 5.513352394104004,
"learning_rate": 3.6779622304102957e-06,
"loss": 14.41911376953125,
"step": 2500
},
{
"epoch": 0.79748485545587,
"grad_norm": 5.644667625427246,
"learning_rate": 3.5634744790848436e-06,
"loss": 14.40310791015625,
"step": 2600
},
{
"epoch": 0.8281573498964804,
"grad_norm": 5.676363945007324,
"learning_rate": 3.4461950251155457e-06,
"loss": 14.40308349609375,
"step": 2700
},
{
"epoch": 0.8588298443370908,
"grad_norm": 5.251427173614502,
"learning_rate": 3.326431736127017e-06,
"loss": 14.39386474609375,
"step": 2800
},
{
"epoch": 0.8895023387777011,
"grad_norm": 5.631777286529541,
"learning_rate": 3.2044990000026366e-06,
"loss": 14.374044189453125,
"step": 2900
},
{
"epoch": 0.9201748332183115,
"grad_norm": 5.5782670974731445,
"learning_rate": 3.080716899592065e-06,
"loss": 14.37978515625,
"step": 3000
},
{
"epoch": 0.9508473276589219,
"grad_norm": 5.784708023071289,
"learning_rate": 2.9554103724690526e-06,
"loss": 14.3670703125,
"step": 3100
},
{
"epoch": 0.9815198220995323,
"grad_norm": 5.501222610473633,
"learning_rate": 2.8289083579452043e-06,
"loss": 14.374014892578124,
"step": 3200
},
{
"epoch": 1.0,
"eval_loss": 3.45804762840271,
"eval_runtime": 13.1436,
"eval_samples_per_second": 641.376,
"eval_steps_per_second": 10.043,
"step": 3261
},
{
"epoch": 1.011962272831838,
"grad_norm": 5.622377872467041,
"learning_rate": 2.7015429335788844e-06,
"loss": 14.245347900390625,
"step": 3300
},
{
"epoch": 1.0426347672724485,
"grad_norm": 5.506319522857666,
"learning_rate": 2.57364844344598e-06,
"loss": 14.36246826171875,
"step": 3400
},
{
"epoch": 1.0733072617130588,
"grad_norm": 5.29400634765625,
"learning_rate": 2.4455606204608764e-06,
"loss": 14.3725537109375,
"step": 3500
},
{
"epoch": 1.1039797561536693,
"grad_norm": 5.597475528717041,
"learning_rate": 2.3176157050516264e-06,
"loss": 14.357423095703124,
"step": 3600
},
{
"epoch": 1.1346522505942795,
"grad_norm": 5.25772762298584,
"learning_rate": 2.1901495625028606e-06,
"loss": 14.315875244140624,
"step": 3700
},
{
"epoch": 1.16532474503489,
"grad_norm": 5.585110187530518,
"learning_rate": 2.063496801283472e-06,
"loss": 14.327174072265626,
"step": 3800
},
{
"epoch": 1.1959972394755003,
"grad_norm": 5.402249813079834,
"learning_rate": 1.9379898946735452e-06,
"loss": 14.34656494140625,
"step": 3900
},
{
"epoch": 1.2266697339161108,
"grad_norm": 5.390650749206543,
"learning_rate": 1.8139583079963144e-06,
"loss": 14.36371826171875,
"step": 4000
},
{
"epoch": 1.257342228356721,
"grad_norm": 5.242497444152832,
"learning_rate": 1.6917276337462466e-06,
"loss": 14.323082275390625,
"step": 4100
},
{
"epoch": 1.2880147227973315,
"grad_norm": 5.234506130218506,
"learning_rate": 1.5716187368835971e-06,
"loss": 14.337457275390625,
"step": 4200
},
{
"epoch": 1.3186872172379418,
"grad_norm": 5.626885890960693,
"learning_rate": 1.4539469125391031e-06,
"loss": 14.354864501953125,
"step": 4300
},
{
"epoch": 1.3493597116785523,
"grad_norm": 5.311978816986084,
"learning_rate": 1.3390210583399215e-06,
"loss": 14.32729248046875,
"step": 4400
},
{
"epoch": 1.3800322061191626,
"grad_norm": 5.375194072723389,
"learning_rate": 1.2271428635294922e-06,
"loss": 14.337626953125,
"step": 4500
},
{
"epoch": 1.410704700559773,
"grad_norm": 5.57642936706543,
"learning_rate": 1.118606017009978e-06,
"loss": 14.334080810546874,
"step": 4600
},
{
"epoch": 1.4413771950003835,
"grad_norm": 5.510963439941406,
"learning_rate": 1.0136954363862126e-06,
"loss": 14.33389404296875,
"step": 4700
},
{
"epoch": 1.4720496894409938,
"grad_norm": 5.4500041007995605,
"learning_rate": 9.126865200349847e-07,
"loss": 14.31758056640625,
"step": 4800
},
{
"epoch": 1.502722183881604,
"grad_norm": 5.399460792541504,
"learning_rate": 8.158444241630245e-07,
"loss": 14.280606689453125,
"step": 4900
},
{
"epoch": 1.5333946783222145,
"grad_norm": 5.639800071716309,
"learning_rate": 7.234233667514868e-07,
"loss": 14.342408447265624,
"step": 5000
},
{
"epoch": 1.564067172762825,
"grad_norm": 5.429558277130127,
"learning_rate": 6.356659602141116e-07,
"loss": 14.314246826171875,
"step": 5100
},
{
"epoch": 1.5947396672034353,
"grad_norm": 5.131410598754883,
"learning_rate": 5.528025745209039e-07,
"loss": 14.32427978515625,
"step": 5200
},
{
"epoch": 1.6254121616440456,
"grad_norm": 5.480471611022949,
"learning_rate": 4.7505073245916774e-07,
"loss": 14.32928466796875,
"step": 5300
},
{
"epoch": 1.656084656084656,
"grad_norm": 5.693525791168213,
"learning_rate": 4.026145386193914e-07,
"loss": 14.34359375,
"step": 5400
},
{
"epoch": 1.6867571505252665,
"grad_norm": 5.723335266113281,
"learning_rate": 3.3568414360493075e-07,
"loss": 14.31550537109375,
"step": 5500
},
{
"epoch": 1.7174296449658768,
"grad_norm": 5.319007873535156,
"learning_rate": 2.7443524487199333e-07,
"loss": 14.32018310546875,
"step": 5600
},
{
"epoch": 1.7481021394064873,
"grad_norm": 5.462660312652588,
"learning_rate": 2.1902862551024112e-07,
"loss": 14.333775634765624,
"step": 5700
},
{
"epoch": 1.7787746338470978,
"grad_norm": 5.3777546882629395,
"learning_rate": 1.6960973217476778e-07,
"loss": 14.32505126953125,
"step": 5800
},
{
"epoch": 1.809447128287708,
"grad_norm": 5.701692581176758,
"learning_rate": 1.2630829327738987e-07,
"loss": 14.33883056640625,
"step": 5900
},
{
"epoch": 1.8401196227283183,
"grad_norm": 5.285277843475342,
"learning_rate": 8.92379784395514e-08,
"loss": 14.3292626953125,
"step": 6000
},
{
"epoch": 1.8707921171689288,
"grad_norm": 5.558449745178223,
"learning_rate": 5.849610010078499e-08,
"loss": 14.3488134765625,
"step": 6100
},
{
"epoch": 1.9014646116095393,
"grad_norm": 5.662689685821533,
"learning_rate": 3.416335806604443e-08,
"loss": 14.28878173828125,
"step": 6200
},
{
"epoch": 1.9321371060501495,
"grad_norm": 5.507394790649414,
"learning_rate": 1.630362766248256e-08,
"loss": 14.3447216796875,
"step": 6300
},
{
"epoch": 1.9628096004907598,
"grad_norm": 5.682081699371338,
"learning_rate": 4.963792061784822e-09,
"loss": 14.32853515625,
"step": 6400
},
{
"epoch": 1.9934820949313703,
"grad_norm": 5.3612589836120605,
"learning_rate": 1.736192082227306e-10,
"loss": 14.27859130859375,
"step": 6500
},
{
"epoch": 2.0,
"eval_loss": 3.4418444633483887,
"eval_runtime": 13.1653,
"eval_samples_per_second": 640.317,
"eval_steps_per_second": 10.026,
"step": 6522
}
],
"logging_steps": 100,
"max_steps": 6522,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 3261,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.393516896094925e+17,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}