{
"best_global_step": 3261,
"best_metric": 3.45804762840271,
"best_model_checkpoint": "sindhibert_session6r/checkpoint-3261",
"epoch": 1.0,
"eval_steps": 3261,
"global_step": 3261,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.030672494440610383,
"grad_norm": 5.580650806427002,
"learning_rate": 1.2659846547314581e-06,
"loss": 14.62664794921875,
"step": 100
},
{
"epoch": 0.061344988881220766,
"grad_norm": 5.809163570404053,
"learning_rate": 2.544757033248082e-06,
"loss": 14.634923095703124,
"step": 200
},
{
"epoch": 0.09201748332183114,
"grad_norm": 5.849374294281006,
"learning_rate": 3.8235294117647055e-06,
"loss": 14.635355224609375,
"step": 300
},
{
"epoch": 0.12268997776244153,
"grad_norm": 5.751659870147705,
"learning_rate": 4.999978994815915e-06,
"loss": 14.59581298828125,
"step": 400
},
{
"epoch": 0.1533624722030519,
"grad_norm": 5.633106708526611,
"learning_rate": 4.996172776747675e-06,
"loss": 14.598475341796876,
"step": 500
},
{
"epoch": 0.18403496664366228,
"grad_norm": 5.498394012451172,
"learning_rate": 4.9858139123513936e-06,
"loss": 14.56541259765625,
"step": 600
},
{
"epoch": 0.2147074610842727,
"grad_norm": 5.918303489685059,
"learning_rate": 4.968929594446166e-06,
"loss": 14.57242919921875,
"step": 700
},
{
"epoch": 0.24537995552488306,
"grad_norm": 5.2229814529418945,
"learning_rate": 4.945564145670534e-06,
"loss": 14.53744873046875,
"step": 800
},
{
"epoch": 0.27605244996549344,
"grad_norm": 5.516529560089111,
"learning_rate": 4.9157789021321375e-06,
"loss": 14.531802978515625,
"step": 900
},
{
"epoch": 0.3067249444061038,
"grad_norm": 5.460958003997803,
"learning_rate": 4.879652052395696e-06,
"loss": 14.5186328125,
"step": 1000
},
{
"epoch": 0.3373974388467142,
"grad_norm": 5.609668731689453,
"learning_rate": 4.837278432231982e-06,
"loss": 14.51820068359375,
"step": 1100
},
{
"epoch": 0.36806993328732457,
"grad_norm": 5.466991424560547,
"learning_rate": 4.788769275666605e-06,
"loss": 14.477803955078125,
"step": 1200
},
{
"epoch": 0.398742427727935,
"grad_norm": 5.586793899536133,
"learning_rate": 4.7342519229820996e-06,
"loss": 14.506885986328125,
"step": 1300
},
{
"epoch": 0.4294149221685454,
"grad_norm": 5.488733768463135,
"learning_rate": 4.673869486439846e-06,
"loss": 14.464423828125,
"step": 1400
},
{
"epoch": 0.46008741660915575,
"grad_norm": 5.8738017082214355,
"learning_rate": 4.60778047459934e-06,
"loss": 14.470579833984376,
"step": 1500
},
{
"epoch": 0.49075991104976613,
"grad_norm": 5.592001438140869,
"learning_rate": 4.536158376220971e-06,
"loss": 14.42775146484375,
"step": 1600
},
{
"epoch": 0.5214324054903765,
"grad_norm": 5.235401153564453,
"learning_rate": 4.459191204844634e-06,
"loss": 14.42791259765625,
"step": 1700
},
{
"epoch": 0.5521048999309869,
"grad_norm": 5.959557056427002,
"learning_rate": 4.377081005239665e-06,
"loss": 14.444658203125,
"step": 1800
},
{
"epoch": 0.5827773943715973,
"grad_norm": 5.362053394317627,
"learning_rate": 4.2900433230217155e-06,
"loss": 14.463519287109374,
"step": 1900
},
{
"epoch": 0.6134498888122076,
"grad_norm": 5.445187568664551,
"learning_rate": 4.198306638828863e-06,
"loss": 14.472896728515625,
"step": 2000
},
{
"epoch": 0.644122383252818,
"grad_norm": 5.700764179229736,
"learning_rate": 4.102111768542294e-06,
"loss": 14.44870849609375,
"step": 2100
},
{
"epoch": 0.6747948776934284,
"grad_norm": 5.197051525115967,
"learning_rate": 4.001711231126012e-06,
"loss": 14.42983642578125,
"step": 2200
},
{
"epoch": 0.7054673721340388,
"grad_norm": 5.519675254821777,
"learning_rate": 3.897368585745059e-06,
"loss": 14.4178271484375,
"step": 2300
},
{
"epoch": 0.7361398665746491,
"grad_norm": 5.8170647621154785,
"learning_rate": 3.789357739902342e-06,
"loss": 14.38358642578125,
"step": 2400
},
{
"epoch": 0.7668123610152595,
"grad_norm": 5.513352394104004,
"learning_rate": 3.6779622304102957e-06,
"loss": 14.41911376953125,
"step": 2500
},
{
"epoch": 0.79748485545587,
"grad_norm": 5.644667625427246,
"learning_rate": 3.5634744790848436e-06,
"loss": 14.40310791015625,
"step": 2600
},
{
"epoch": 0.8281573498964804,
"grad_norm": 5.676363945007324,
"learning_rate": 3.4461950251155457e-06,
"loss": 14.40308349609375,
"step": 2700
},
{
"epoch": 0.8588298443370908,
"grad_norm": 5.251427173614502,
"learning_rate": 3.326431736127017e-06,
"loss": 14.39386474609375,
"step": 2800
},
{
"epoch": 0.8895023387777011,
"grad_norm": 5.631777286529541,
"learning_rate": 3.2044990000026366e-06,
"loss": 14.374044189453125,
"step": 2900
},
{
"epoch": 0.9201748332183115,
"grad_norm": 5.5782670974731445,
"learning_rate": 3.080716899592065e-06,
"loss": 14.37978515625,
"step": 3000
},
{
"epoch": 0.9508473276589219,
"grad_norm": 5.784708023071289,
"learning_rate": 2.9554103724690526e-06,
"loss": 14.3670703125,
"step": 3100
},
{
"epoch": 0.9815198220995323,
"grad_norm": 5.501222610473633,
"learning_rate": 2.8289083579452043e-06,
"loss": 14.374014892578124,
"step": 3200
},
{
"epoch": 1.0,
"eval_loss": 3.45804762840271,
"eval_runtime": 13.1436,
"eval_samples_per_second": 641.376,
"eval_steps_per_second": 10.043,
"step": 3261
}
],
"logging_steps": 100,
"max_steps": 6522,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 3261,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.1967584480474624e+17,
"train_batch_size": 64,
"trial_name": null,
"trial_params": null
}