{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"eval_steps": 500,
"global_step": 21930,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11399908800729594,
"grad_norm": 1.4812531471252441,
"learning_rate": 1.9544915640674875e-05,
"loss": 0.1287,
"step": 500
},
{
"epoch": 0.22799817601459188,
"grad_norm": 2.198870897293091,
"learning_rate": 1.9088919288645692e-05,
"loss": 0.1045,
"step": 1000
},
{
"epoch": 0.34199726402188785,
"grad_norm": 1.3517733812332153,
"learning_rate": 1.863292293661651e-05,
"loss": 0.0895,
"step": 1500
},
{
"epoch": 0.45599635202918376,
"grad_norm": 1.352580189704895,
"learning_rate": 1.8176926584587324e-05,
"loss": 0.0766,
"step": 2000
},
{
"epoch": 0.5699954400364797,
"grad_norm": 1.960016131401062,
"learning_rate": 1.772093023255814e-05,
"loss": 0.0713,
"step": 2500
},
{
"epoch": 0.6839945280437757,
"grad_norm": 0.5876455903053284,
"learning_rate": 1.7264933880528958e-05,
"loss": 0.0657,
"step": 3000
},
{
"epoch": 0.7979936160510716,
"grad_norm": 0.9299254417419434,
"learning_rate": 1.6808937528499772e-05,
"loss": 0.0623,
"step": 3500
},
{
"epoch": 0.9119927040583675,
"grad_norm": 1.3892526626586914,
"learning_rate": 1.635294117647059e-05,
"loss": 0.0621,
"step": 4000
},
{
"epoch": 1.0259917920656634,
"grad_norm": 1.7030184268951416,
"learning_rate": 1.5896944824441403e-05,
"loss": 0.0585,
"step": 4500
},
{
"epoch": 1.1399908800729595,
"grad_norm": 0.9616603255271912,
"learning_rate": 1.5440948472412224e-05,
"loss": 0.0448,
"step": 5000
},
{
"epoch": 1.2539899680802553,
"grad_norm": 1.245125412940979,
"learning_rate": 1.4984952120383038e-05,
"loss": 0.0429,
"step": 5500
},
{
"epoch": 1.3679890560875512,
"grad_norm": 1.2846943140029907,
"learning_rate": 1.4528955768353854e-05,
"loss": 0.0408,
"step": 6000
},
{
"epoch": 1.4819881440948472,
"grad_norm": 1.0010948181152344,
"learning_rate": 1.407295941632467e-05,
"loss": 0.0386,
"step": 6500
},
{
"epoch": 1.5959872321021433,
"grad_norm": 1.1298205852508545,
"learning_rate": 1.3616963064295488e-05,
"loss": 0.0364,
"step": 7000
},
{
"epoch": 1.7099863201094392,
"grad_norm": 0.5932161211967468,
"learning_rate": 1.3160966712266304e-05,
"loss": 0.0344,
"step": 7500
},
{
"epoch": 1.823985408116735,
"grad_norm": 1.195202112197876,
"learning_rate": 1.270497036023712e-05,
"loss": 0.033,
"step": 8000
},
{
"epoch": 1.937984496124031,
"grad_norm": 1.2507325410842896,
"learning_rate": 1.2248974008207935e-05,
"loss": 0.0321,
"step": 8500
},
{
"epoch": 2.0519835841313268,
"grad_norm": 2.5148258209228516,
"learning_rate": 1.1792977656178753e-05,
"loss": 0.0278,
"step": 9000
},
{
"epoch": 2.165982672138623,
"grad_norm": 0.7329283356666565,
"learning_rate": 1.1336981304149568e-05,
"loss": 0.0233,
"step": 9500
},
{
"epoch": 2.279981760145919,
"grad_norm": 0.42915332317352295,
"learning_rate": 1.0880984952120384e-05,
"loss": 0.0226,
"step": 10000
},
{
"epoch": 2.3939808481532148,
"grad_norm": 0.7090346813201904,
"learning_rate": 1.04249886000912e-05,
"loss": 0.0223,
"step": 10500
},
{
"epoch": 2.5079799361605106,
"grad_norm": 0.6480665802955627,
"learning_rate": 9.968992248062017e-06,
"loss": 0.0204,
"step": 11000
},
{
"epoch": 2.621979024167807,
"grad_norm": 1.7029985189437866,
"learning_rate": 9.512995896032832e-06,
"loss": 0.0221,
"step": 11500
},
{
"epoch": 2.7359781121751023,
"grad_norm": 0.9722331166267395,
"learning_rate": 9.056999544003648e-06,
"loss": 0.0208,
"step": 12000
},
{
"epoch": 2.8499772001823986,
"grad_norm": 0.3082011938095093,
"learning_rate": 8.601003191974465e-06,
"loss": 0.0204,
"step": 12500
},
{
"epoch": 2.9639762881896945,
"grad_norm": 0.33128827810287476,
"learning_rate": 8.145006839945281e-06,
"loss": 0.0207,
"step": 13000
},
{
"epoch": 3.0779753761969904,
"grad_norm": 0.7698410749435425,
"learning_rate": 7.689010487916098e-06,
"loss": 0.0171,
"step": 13500
},
{
"epoch": 3.191974464204286,
"grad_norm": 0.7875366806983948,
"learning_rate": 7.233014135886913e-06,
"loss": 0.0156,
"step": 14000
},
{
"epoch": 3.305973552211582,
"grad_norm": 0.6971263289451599,
"learning_rate": 6.77701778385773e-06,
"loss": 0.0157,
"step": 14500
},
{
"epoch": 3.4199726402188784,
"grad_norm": 0.464100182056427,
"learning_rate": 6.321021431828546e-06,
"loss": 0.0143,
"step": 15000
},
{
"epoch": 3.5339717282261742,
"grad_norm": 0.5803436636924744,
"learning_rate": 5.8650250797993626e-06,
"loss": 0.0147,
"step": 15500
},
{
"epoch": 3.64797081623347,
"grad_norm": 0.7879688739776611,
"learning_rate": 5.409028727770178e-06,
"loss": 0.0137,
"step": 16000
},
{
"epoch": 3.761969904240766,
"grad_norm": 1.0888534784317017,
"learning_rate": 4.953032375740995e-06,
"loss": 0.0139,
"step": 16500
},
{
"epoch": 3.875968992248062,
"grad_norm": 0.3893286883831024,
"learning_rate": 4.49703602371181e-06,
"loss": 0.0137,
"step": 17000
},
{
"epoch": 3.989968080255358,
"grad_norm": 0.4917721748352051,
"learning_rate": 4.041039671682627e-06,
"loss": 0.0136,
"step": 17500
},
{
"epoch": 4.1039671682626535,
"grad_norm": 1.25238835811615,
"learning_rate": 3.585043319653443e-06,
"loss": 0.0117,
"step": 18000
},
{
"epoch": 4.21796625626995,
"grad_norm": 0.9064317941665649,
"learning_rate": 3.129046967624259e-06,
"loss": 0.0108,
"step": 18500
},
{
"epoch": 4.331965344277246,
"grad_norm": 0.4522368311882019,
"learning_rate": 2.6730506155950754e-06,
"loss": 0.0105,
"step": 19000
},
{
"epoch": 4.4459644322845415,
"grad_norm": 1.0190762281417847,
"learning_rate": 2.217054263565892e-06,
"loss": 0.0108,
"step": 19500
},
{
"epoch": 4.559963520291838,
"grad_norm": 0.23909200727939606,
"learning_rate": 1.7610579115367079e-06,
"loss": 0.0114,
"step": 20000
},
{
"epoch": 4.673962608299133,
"grad_norm": 0.8679990768432617,
"learning_rate": 1.3050615595075241e-06,
"loss": 0.0105,
"step": 20500
},
{
"epoch": 4.7879616963064295,
"grad_norm": 0.4421948790550232,
"learning_rate": 8.490652074783402e-07,
"loss": 0.0109,
"step": 21000
},
{
"epoch": 4.901960784313726,
"grad_norm": 1.2229384183883667,
"learning_rate": 3.930688554491564e-07,
"loss": 0.0103,
"step": 21500
}
],
"logging_steps": 500,
"max_steps": 21930,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2352410932543488.0,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}