{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"eval_steps": 400,
"global_step": 7015,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07127583749109052,
"grad_norm": 3.4722585678100586,
"learning_rate": 4.8823948681397006e-05,
"loss": 0.4528,
"step": 100
},
{
"epoch": 0.14255167498218105,
"grad_norm": 4.105640411376953,
"learning_rate": 4.76360180565455e-05,
"loss": 0.4275,
"step": 200
},
{
"epoch": 0.21382751247327156,
"grad_norm": 3.594306468963623,
"learning_rate": 4.644808743169399e-05,
"loss": 0.4413,
"step": 300
},
{
"epoch": 0.2851033499643621,
"grad_norm": 2.822234630584717,
"learning_rate": 4.5260156806842485e-05,
"loss": 0.4088,
"step": 400
},
{
"epoch": 0.2851033499643621,
"eval_bleu": 53.90945744364011,
"eval_chrf": 76.49653340048762,
"eval_loss": 1.2396413087844849,
"eval_runtime": 45.5827,
"eval_samples_per_second": 9.302,
"eval_steps_per_second": 1.163,
"step": 400
},
{
"epoch": 0.3563791874554526,
"grad_norm": 1.6471904516220093,
"learning_rate": 4.407222618199097e-05,
"loss": 0.401,
"step": 500
},
{
"epoch": 0.4276550249465431,
"grad_norm": 1.9229692220687866,
"learning_rate": 4.288429555713946e-05,
"loss": 0.4401,
"step": 600
},
{
"epoch": 0.4989308624376336,
"grad_norm": 4.6807146072387695,
"learning_rate": 4.1696364932287956e-05,
"loss": 0.458,
"step": 700
},
{
"epoch": 0.5702066999287242,
"grad_norm": 6.216049671173096,
"learning_rate": 4.050843430743645e-05,
"loss": 0.4244,
"step": 800
},
{
"epoch": 0.5702066999287242,
"eval_bleu": 53.985067919574576,
"eval_chrf": 76.4778045864617,
"eval_loss": 1.2613844871520996,
"eval_runtime": 45.1467,
"eval_samples_per_second": 9.392,
"eval_steps_per_second": 1.174,
"step": 800
},
{
"epoch": 0.6414825374198146,
"grad_norm": 2.3269271850585938,
"learning_rate": 3.932050368258494e-05,
"loss": 0.5048,
"step": 900
},
{
"epoch": 0.7127583749109052,
"grad_norm": 1.9887986183166504,
"learning_rate": 3.813257305773343e-05,
"loss": 0.4283,
"step": 1000
},
{
"epoch": 0.7840342124019958,
"grad_norm": 2.3376266956329346,
"learning_rate": 3.694464243288192e-05,
"loss": 0.4426,
"step": 1100
},
{
"epoch": 0.8553100498930862,
"grad_norm": 6.204941749572754,
"learning_rate": 3.5756711808030413e-05,
"loss": 0.496,
"step": 1200
},
{
"epoch": 0.8553100498930862,
"eval_bleu": 54.35826381811427,
"eval_chrf": 76.58829965762467,
"eval_loss": 1.2592484951019287,
"eval_runtime": 46.2675,
"eval_samples_per_second": 9.164,
"eval_steps_per_second": 1.146,
"step": 1200
},
{
"epoch": 0.9265858873841768,
"grad_norm": 6.887669086456299,
"learning_rate": 3.4568781183178906e-05,
"loss": 0.486,
"step": 1300
},
{
"epoch": 0.9978617248752673,
"grad_norm": 4.209754943847656,
"learning_rate": 3.33808505583274e-05,
"loss": 0.5017,
"step": 1400
},
{
"epoch": 1.0691375623663577,
"grad_norm": 0.9429085850715637,
"learning_rate": 3.2192919933475885e-05,
"loss": 0.4126,
"step": 1500
},
{
"epoch": 1.1404133998574484,
"grad_norm": 3.9917092323303223,
"learning_rate": 3.100498930862438e-05,
"loss": 0.4931,
"step": 1600
},
{
"epoch": 1.1404133998574484,
"eval_bleu": 54.607724883250675,
"eval_chrf": 76.84759656839454,
"eval_loss": 1.253143310546875,
"eval_runtime": 44.8458,
"eval_samples_per_second": 9.455,
"eval_steps_per_second": 1.182,
"step": 1600
},
{
"epoch": 1.2116892373485388,
"grad_norm": 4.448713779449463,
"learning_rate": 2.981705868377287e-05,
"loss": 0.4985,
"step": 1700
},
{
"epoch": 1.2829650748396293,
"grad_norm": 4.838741779327393,
"learning_rate": 2.862912805892136e-05,
"loss": 0.5203,
"step": 1800
},
{
"epoch": 1.3542409123307197,
"grad_norm": 5.198791980743408,
"learning_rate": 2.7441197434069853e-05,
"loss": 0.5296,
"step": 1900
},
{
"epoch": 1.4255167498218104,
"grad_norm": 4.123748779296875,
"learning_rate": 2.6253266809218342e-05,
"loss": 0.5622,
"step": 2000
},
{
"epoch": 1.4255167498218104,
"eval_bleu": 54.551652859378116,
"eval_chrf": 77.10128262017952,
"eval_loss": 1.2271041870117188,
"eval_runtime": 45.7524,
"eval_samples_per_second": 9.267,
"eval_steps_per_second": 1.158,
"step": 2000
},
{
"epoch": 1.4967925873129009,
"grad_norm": 5.228442668914795,
"learning_rate": 2.506533618436683e-05,
"loss": 0.5832,
"step": 2100
},
{
"epoch": 1.5680684248039913,
"grad_norm": 2.6589853763580322,
"learning_rate": 2.3877405559515324e-05,
"loss": 0.5352,
"step": 2200
},
{
"epoch": 1.639344262295082,
"grad_norm": 4.812443256378174,
"learning_rate": 2.2689474934663817e-05,
"loss": 0.5949,
"step": 2300
},
{
"epoch": 1.7106200997861725,
"grad_norm": 3.439000368118286,
"learning_rate": 2.1501544309812307e-05,
"loss": 0.6057,
"step": 2400
},
{
"epoch": 1.7106200997861725,
"eval_bleu": 55.315616609389814,
"eval_chrf": 77.2954024298945,
"eval_loss": 1.2040876150131226,
"eval_runtime": 46.0789,
"eval_samples_per_second": 9.202,
"eval_steps_per_second": 1.15,
"step": 2400
},
{
"epoch": 1.781895937277263,
"grad_norm": 7.226606369018555,
"learning_rate": 2.03136136849608e-05,
"loss": 0.6565,
"step": 2500
},
{
"epoch": 1.8531717747683536,
"grad_norm": 2.998560667037964,
"learning_rate": 1.912568306010929e-05,
"loss": 0.6565,
"step": 2600
},
{
"epoch": 1.924447612259444,
"grad_norm": 7.860637187957764,
"learning_rate": 1.7937752435257782e-05,
"loss": 0.6673,
"step": 2700
},
{
"epoch": 1.9957234497505345,
"grad_norm": 3.4744679927825928,
"learning_rate": 1.6749821810406275e-05,
"loss": 0.6896,
"step": 2800
},
{
"epoch": 1.9957234497505345,
"eval_bleu": 54.289161444971484,
"eval_chrf": 76.92398542441326,
"eval_loss": 1.1615335941314697,
"eval_runtime": 44.9642,
"eval_samples_per_second": 9.43,
"eval_steps_per_second": 1.179,
"step": 2800
},
{
"epoch": 2.066999287241625,
"grad_norm": 2.9694347381591797,
"learning_rate": 1.5561891185554764e-05,
"loss": 0.6495,
"step": 2900
},
{
"epoch": 2.1382751247327154,
"grad_norm": 5.613991737365723,
"learning_rate": 1.4373960560703257e-05,
"loss": 0.721,
"step": 3000
},
{
"epoch": 2.209550962223806,
"grad_norm": 4.432507514953613,
"learning_rate": 1.3186029935851746e-05,
"loss": 0.61,
"step": 3100
},
{
"epoch": 2.2808267997148968,
"grad_norm": 3.720311403274536,
"learning_rate": 1.1998099311000239e-05,
"loss": 0.6916,
"step": 3200
},
{
"epoch": 2.2808267997148968,
"eval_bleu": 54.50245782197904,
"eval_chrf": 76.54392547225733,
"eval_loss": 1.1484729051589966,
"eval_runtime": 46.1795,
"eval_samples_per_second": 9.182,
"eval_steps_per_second": 1.148,
"step": 3200
},
{
"epoch": 2.352102637205987,
"grad_norm": 4.093074798583984,
"learning_rate": 1.0810168686148728e-05,
"loss": 0.6521,
"step": 3300
},
{
"epoch": 2.4233784746970777,
"grad_norm": 3.274430274963379,
"learning_rate": 9.62223806129722e-06,
"loss": 0.6732,
"step": 3400
},
{
"epoch": 2.4946543121881684,
"grad_norm": 4.472115516662598,
"learning_rate": 8.434307436445712e-06,
"loss": 0.6547,
"step": 3500
},
{
"epoch": 2.5659301496792586,
"grad_norm": 6.019277095794678,
"learning_rate": 7.246376811594203e-06,
"loss": 0.6725,
"step": 3600
},
{
"epoch": 2.5659301496792586,
"eval_bleu": 54.62227679885763,
"eval_chrf": 76.83109982664264,
"eval_loss": 1.1441200971603394,
"eval_runtime": 45.9414,
"eval_samples_per_second": 9.229,
"eval_steps_per_second": 1.154,
"step": 3600
},
{
"epoch": 2.6372059871703493,
"grad_norm": 4.059388637542725,
"learning_rate": 6.0584461867426945e-06,
"loss": 0.6782,
"step": 3700
},
{
"epoch": 2.7084818246614395,
"grad_norm": 7.349984169006348,
"learning_rate": 4.870515561891186e-06,
"loss": 0.7001,
"step": 3800
},
{
"epoch": 2.77975766215253,
"grad_norm": 5.185818672180176,
"learning_rate": 3.682584937039677e-06,
"loss": 0.667,
"step": 3900
},
{
"epoch": 2.851033499643621,
"grad_norm": 4.289496898651123,
"learning_rate": 2.4946543121881682e-06,
"loss": 0.7136,
"step": 4000
},
{
"epoch": 2.851033499643621,
"eval_bleu": 54.4356613408887,
"eval_chrf": 76.90152284424269,
"eval_loss": 1.137866497039795,
"eval_runtime": 44.6351,
"eval_samples_per_second": 9.499,
"eval_steps_per_second": 1.187,
"step": 4000
},
{
"epoch": 2.9223093371347115,
"grad_norm": 2.778644323348999,
"learning_rate": 1.3067236873366596e-06,
"loss": 0.6648,
"step": 4100
},
{
"epoch": 2.9935851746258018,
"grad_norm": 6.11302375793457,
"learning_rate": 1.1879306248515088e-07,
"loss": 0.6361,
"step": 4200
},
{
"epoch": 3.0648610121168924,
"grad_norm": 4.8953704833984375,
"learning_rate": 1.9358517462580187e-05,
"loss": 0.6203,
"step": 4300
},
{
"epoch": 3.1361368496079827,
"grad_norm": 4.9622979164123535,
"learning_rate": 1.864575908766928e-05,
"loss": 0.6883,
"step": 4400
},
{
"epoch": 3.1361368496079827,
"eval_bleu": 54.4615551796715,
"eval_chrf": 76.75255355595692,
"eval_loss": 1.152636170387268,
"eval_runtime": 45.8574,
"eval_samples_per_second": 9.246,
"eval_steps_per_second": 1.156,
"step": 4400
},
{
"epoch": 3.2074126870990733,
"grad_norm": 5.924427509307861,
"learning_rate": 1.7933000712758377e-05,
"loss": 0.6534,
"step": 4500
},
{
"epoch": 3.278688524590164,
"grad_norm": 3.3319082260131836,
"learning_rate": 1.722024233784747e-05,
"loss": 0.6561,
"step": 4600
},
{
"epoch": 3.3499643620812547,
"grad_norm": 5.027530193328857,
"learning_rate": 1.6507483962936566e-05,
"loss": 0.6614,
"step": 4700
},
{
"epoch": 3.421240199572345,
"grad_norm": 12.282238006591797,
"learning_rate": 1.579472558802566e-05,
"loss": 0.6871,
"step": 4800
},
{
"epoch": 3.421240199572345,
"eval_bleu": 54.65121394864807,
"eval_chrf": 76.99318341492122,
"eval_loss": 1.1458663940429688,
"eval_runtime": 46.7948,
"eval_samples_per_second": 9.061,
"eval_steps_per_second": 1.133,
"step": 4800
},
{
"epoch": 3.4925160370634356,
"grad_norm": 4.480852127075195,
"learning_rate": 1.5081967213114755e-05,
"loss": 0.6364,
"step": 4900
},
{
"epoch": 3.563791874554526,
"grad_norm": 3.2055609226226807,
"learning_rate": 1.4369208838203848e-05,
"loss": 0.6718,
"step": 5000
},
{
"epoch": 3.6350677120456165,
"grad_norm": 3.66184663772583,
"learning_rate": 1.3656450463292945e-05,
"loss": 0.6814,
"step": 5100
},
{
"epoch": 3.706343549536707,
"grad_norm": 4.526830196380615,
"learning_rate": 1.294369208838204e-05,
"loss": 0.6839,
"step": 5200
},
{
"epoch": 3.706343549536707,
"eval_bleu": 54.43538330740564,
"eval_chrf": 76.83467170225417,
"eval_loss": 1.1387040615081787,
"eval_runtime": 47.0575,
"eval_samples_per_second": 9.01,
"eval_steps_per_second": 1.126,
"step": 5200
},
{
"epoch": 3.7776193870277974,
"grad_norm": 3.954864501953125,
"learning_rate": 1.2230933713471134e-05,
"loss": 0.7204,
"step": 5300
},
{
"epoch": 3.848895224518888,
"grad_norm": 4.9757280349731445,
"learning_rate": 1.1518175338560229e-05,
"loss": 0.6839,
"step": 5400
},
{
"epoch": 3.920171062009979,
"grad_norm": 4.421995639801025,
"learning_rate": 1.0805416963649323e-05,
"loss": 0.6234,
"step": 5500
},
{
"epoch": 3.991446899501069,
"grad_norm": 2.944256067276001,
"learning_rate": 1.0092658588738418e-05,
"loss": 0.7029,
"step": 5600
},
{
"epoch": 3.991446899501069,
"eval_bleu": 54.47230508466166,
"eval_chrf": 76.7475667588551,
"eval_loss": 1.143419861793518,
"eval_runtime": 46.7827,
"eval_samples_per_second": 9.063,
"eval_steps_per_second": 1.133,
"step": 5600
},
{
"epoch": 4.06272273699216,
"grad_norm": 4.3462114334106445,
"learning_rate": 9.379900213827513e-06,
"loss": 0.6416,
"step": 5700
},
{
"epoch": 4.13399857448325,
"grad_norm": 5.6380815505981445,
"learning_rate": 8.667141838916607e-06,
"loss": 0.6659,
"step": 5800
},
{
"epoch": 4.205274411974341,
"grad_norm": 3.9297401905059814,
"learning_rate": 7.954383464005702e-06,
"loss": 0.6263,
"step": 5900
},
{
"epoch": 4.276550249465431,
"grad_norm": 3.566382884979248,
"learning_rate": 7.2416250890947975e-06,
"loss": 0.688,
"step": 6000
},
{
"epoch": 4.276550249465431,
"eval_bleu": 54.58523394049827,
"eval_chrf": 76.97146289458047,
"eval_loss": 1.1411081552505493,
"eval_runtime": 47.418,
"eval_samples_per_second": 8.942,
"eval_steps_per_second": 1.118,
"step": 6000
},
{
"epoch": 4.3478260869565215,
"grad_norm": 3.055271625518799,
"learning_rate": 6.528866714183891e-06,
"loss": 0.6621,
"step": 6100
},
{
"epoch": 4.419101924447612,
"grad_norm": 5.575135231018066,
"learning_rate": 5.816108339272987e-06,
"loss": 0.6598,
"step": 6200
},
{
"epoch": 4.490377761938703,
"grad_norm": 2.131157159805298,
"learning_rate": 5.1033499643620815e-06,
"loss": 0.6608,
"step": 6300
},
{
"epoch": 4.5616535994297935,
"grad_norm": 5.105737209320068,
"learning_rate": 4.390591589451176e-06,
"loss": 0.6396,
"step": 6400
},
{
"epoch": 4.5616535994297935,
"eval_bleu": 54.33271899871316,
"eval_chrf": 76.81831179228314,
"eval_loss": 1.1372572183609009,
"eval_runtime": 47.3318,
"eval_samples_per_second": 8.958,
"eval_steps_per_second": 1.12,
"step": 6400
},
{
"epoch": 4.632929436920884,
"grad_norm": 4.760735988616943,
"learning_rate": 3.677833214540271e-06,
"loss": 0.6339,
"step": 6500
},
{
"epoch": 4.704205274411974,
"grad_norm": 3.7660269737243652,
"learning_rate": 2.9650748396293655e-06,
"loss": 0.63,
"step": 6600
},
{
"epoch": 4.775481111903065,
"grad_norm": 5.468283176422119,
"learning_rate": 2.2523164647184606e-06,
"loss": 0.6319,
"step": 6700
},
{
"epoch": 4.846756949394155,
"grad_norm": 11.634507179260254,
"learning_rate": 1.5395580898075552e-06,
"loss": 0.6241,
"step": 6800
},
{
"epoch": 4.846756949394155,
"eval_bleu": 54.55812484028709,
"eval_chrf": 76.97250388922113,
"eval_loss": 1.136724829673767,
"eval_runtime": 46.6989,
"eval_samples_per_second": 9.079,
"eval_steps_per_second": 1.135,
"step": 6800
},
{
"epoch": 4.918032786885246,
"grad_norm": 3.7019028663635254,
"learning_rate": 8.2679971489665e-07,
"loss": 0.6544,
"step": 6900
},
{
"epoch": 4.989308624376337,
"grad_norm": 3.0245063304901123,
"learning_rate": 1.1404133998574484e-07,
"loss": 0.7076,
"step": 7000
}
],
"logging_steps": 100,
"max_steps": 7015,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 400,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.92057231015936e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}