{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 8.6665325513378,
"eval_steps": 5000,
"global_step": 70000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.12,
"learning_rate": 9.958730551772523e-05,
"loss": 1.4035,
"step": 1000
},
{
"epoch": 0.25,
"learning_rate": 9.917461103545046e-05,
"loss": 1.332,
"step": 2000
},
{
"epoch": 0.37,
"learning_rate": 9.87619165531757e-05,
"loss": 1.2981,
"step": 3000
},
{
"epoch": 0.5,
"learning_rate": 9.834922207090091e-05,
"loss": 1.272,
"step": 4000
},
{
"epoch": 0.62,
"learning_rate": 9.793652758862615e-05,
"loss": 1.2501,
"step": 5000
},
{
"epoch": 0.62,
"eval_loss": 1.1940463781356812,
"eval_runtime": 120.3265,
"eval_samples_per_second": 134.692,
"eval_steps_per_second": 16.838,
"step": 5000
},
{
"epoch": 0.74,
"learning_rate": 9.752383310635137e-05,
"loss": 1.2328,
"step": 6000
},
{
"epoch": 0.87,
"learning_rate": 9.71111386240766e-05,
"loss": 1.208,
"step": 7000
},
{
"epoch": 0.99,
"learning_rate": 9.669844414180183e-05,
"loss": 1.1886,
"step": 8000
},
{
"epoch": 1.11,
"learning_rate": 9.628574965952706e-05,
"loss": 1.182,
"step": 9000
},
{
"epoch": 1.24,
"learning_rate": 9.587305517725228e-05,
"loss": 1.1761,
"step": 10000
},
{
"epoch": 1.24,
"eval_loss": 1.1365982294082642,
"eval_runtime": 124.7536,
"eval_samples_per_second": 129.912,
"eval_steps_per_second": 16.24,
"step": 10000
},
{
"epoch": 1.36,
"learning_rate": 9.546036069497752e-05,
"loss": 1.1594,
"step": 11000
},
{
"epoch": 1.49,
"learning_rate": 9.504766621270275e-05,
"loss": 1.1449,
"step": 12000
},
{
"epoch": 1.61,
"learning_rate": 9.463497173042796e-05,
"loss": 1.1387,
"step": 13000
},
{
"epoch": 1.73,
"learning_rate": 9.422227724815321e-05,
"loss": 1.1318,
"step": 14000
},
{
"epoch": 1.86,
"learning_rate": 9.380958276587842e-05,
"loss": 1.1222,
"step": 15000
},
{
"epoch": 1.86,
"eval_loss": 1.103798270225525,
"eval_runtime": 119.9985,
"eval_samples_per_second": 135.06,
"eval_steps_per_second": 16.884,
"step": 15000
},
{
"epoch": 1.98,
"learning_rate": 9.339688828360365e-05,
"loss": 1.1164,
"step": 16000
},
{
"epoch": 2.1,
"learning_rate": 9.298419380132888e-05,
"loss": 1.1073,
"step": 17000
},
{
"epoch": 2.23,
"learning_rate": 9.257149931905411e-05,
"loss": 1.1026,
"step": 18000
},
{
"epoch": 2.35,
"learning_rate": 9.215880483677933e-05,
"loss": 1.0957,
"step": 19000
},
{
"epoch": 2.48,
"learning_rate": 9.174611035450457e-05,
"loss": 1.0859,
"step": 20000
},
{
"epoch": 2.48,
"eval_loss": 1.0691076517105103,
"eval_runtime": 120.0736,
"eval_samples_per_second": 134.976,
"eval_steps_per_second": 16.873,
"step": 20000
},
{
"epoch": 2.6,
"learning_rate": 9.133341587222979e-05,
"loss": 1.0856,
"step": 21000
},
{
"epoch": 2.72,
"learning_rate": 9.092072138995502e-05,
"loss": 1.0769,
"step": 22000
},
{
"epoch": 2.85,
"learning_rate": 9.050802690768025e-05,
"loss": 1.0768,
"step": 23000
},
{
"epoch": 2.97,
"learning_rate": 9.009533242540548e-05,
"loss": 1.0671,
"step": 24000
},
{
"epoch": 3.09,
"learning_rate": 8.968263794313071e-05,
"loss": 1.0703,
"step": 25000
},
{
"epoch": 3.09,
"eval_loss": 1.0342341661453247,
"eval_runtime": 142.3338,
"eval_samples_per_second": 113.866,
"eval_steps_per_second": 14.234,
"step": 25000
},
{
"epoch": 3.22,
"learning_rate": 8.926994346085594e-05,
"loss": 1.0535,
"step": 26000
},
{
"epoch": 3.34,
"learning_rate": 8.885724897858117e-05,
"loss": 1.0537,
"step": 27000
},
{
"epoch": 3.47,
"learning_rate": 8.844455449630638e-05,
"loss": 1.0502,
"step": 28000
},
{
"epoch": 3.59,
"learning_rate": 8.803186001403163e-05,
"loss": 1.0401,
"step": 29000
},
{
"epoch": 3.71,
"learning_rate": 8.761916553175684e-05,
"loss": 1.0461,
"step": 30000
},
{
"epoch": 3.71,
"eval_loss": 1.0171171426773071,
"eval_runtime": 155.2187,
"eval_samples_per_second": 104.414,
"eval_steps_per_second": 13.053,
"step": 30000
},
{
"epoch": 3.84,
"learning_rate": 8.720647104948207e-05,
"loss": 1.0362,
"step": 31000
},
{
"epoch": 3.96,
"learning_rate": 8.67937765672073e-05,
"loss": 1.0332,
"step": 32000
},
{
"epoch": 4.09,
"learning_rate": 8.638108208493253e-05,
"loss": 1.0293,
"step": 33000
},
{
"epoch": 4.21,
"learning_rate": 8.596838760265775e-05,
"loss": 1.0286,
"step": 34000
},
{
"epoch": 4.33,
"learning_rate": 8.555569312038299e-05,
"loss": 1.025,
"step": 35000
},
{
"epoch": 4.33,
"eval_loss": 0.9936763048171997,
"eval_runtime": 119.72,
"eval_samples_per_second": 135.374,
"eval_steps_per_second": 16.923,
"step": 35000
},
{
"epoch": 4.46,
"learning_rate": 8.514299863810822e-05,
"loss": 1.0184,
"step": 36000
},
{
"epoch": 4.58,
"learning_rate": 8.473030415583344e-05,
"loss": 1.0136,
"step": 37000
},
{
"epoch": 4.7,
"learning_rate": 8.431760967355867e-05,
"loss": 1.0103,
"step": 38000
},
{
"epoch": 4.83,
"learning_rate": 8.39049151912839e-05,
"loss": 1.0081,
"step": 39000
},
{
"epoch": 4.95,
"learning_rate": 8.349222070900913e-05,
"loss": 1.0063,
"step": 40000
},
{
"epoch": 4.95,
"eval_loss": 0.9914126396179199,
"eval_runtime": 176.7148,
"eval_samples_per_second": 91.713,
"eval_steps_per_second": 11.465,
"step": 40000
},
{
"epoch": 5.08,
"learning_rate": 8.307952622673436e-05,
"loss": 1.003,
"step": 41000
},
{
"epoch": 5.2,
"learning_rate": 8.266683174445959e-05,
"loss": 0.9947,
"step": 42000
},
{
"epoch": 5.32,
"learning_rate": 8.22541372621848e-05,
"loss": 0.999,
"step": 43000
},
{
"epoch": 5.45,
"learning_rate": 8.184144277991003e-05,
"loss": 0.9987,
"step": 44000
},
{
"epoch": 5.57,
"learning_rate": 8.142874829763526e-05,
"loss": 0.988,
"step": 45000
},
{
"epoch": 5.57,
"eval_loss": 0.9876496195793152,
"eval_runtime": 149.4553,
"eval_samples_per_second": 108.44,
"eval_steps_per_second": 13.556,
"step": 45000
},
{
"epoch": 5.69,
"learning_rate": 8.101605381536049e-05,
"loss": 0.9899,
"step": 46000
},
{
"epoch": 5.82,
"learning_rate": 8.060335933308572e-05,
"loss": 0.9876,
"step": 47000
},
{
"epoch": 5.94,
"learning_rate": 8.019066485081095e-05,
"loss": 0.9889,
"step": 48000
},
{
"epoch": 6.07,
"learning_rate": 7.977797036853618e-05,
"loss": 0.9807,
"step": 49000
},
{
"epoch": 6.19,
"learning_rate": 7.93652758862614e-05,
"loss": 0.971,
"step": 50000
},
{
"epoch": 6.19,
"eval_loss": 0.9691153764724731,
"eval_runtime": 155.3931,
"eval_samples_per_second": 104.297,
"eval_steps_per_second": 13.038,
"step": 50000
},
{
"epoch": 6.31,
"learning_rate": 7.895258140398664e-05,
"loss": 0.9796,
"step": 51000
},
{
"epoch": 6.44,
"learning_rate": 7.853988692171186e-05,
"loss": 0.9784,
"step": 52000
},
{
"epoch": 6.56,
"learning_rate": 7.812719243943709e-05,
"loss": 0.9682,
"step": 53000
},
{
"epoch": 6.69,
"learning_rate": 7.771449795716232e-05,
"loss": 0.9609,
"step": 54000
},
{
"epoch": 6.81,
"learning_rate": 7.730180347488755e-05,
"loss": 0.9704,
"step": 55000
},
{
"epoch": 6.81,
"eval_loss": 0.9535136222839355,
"eval_runtime": 160.7447,
"eval_samples_per_second": 100.824,
"eval_steps_per_second": 12.604,
"step": 55000
},
{
"epoch": 6.93,
"learning_rate": 7.688910899261278e-05,
"loss": 0.965,
"step": 56000
},
{
"epoch": 7.06,
"learning_rate": 7.6476414510338e-05,
"loss": 0.9574,
"step": 57000
},
{
"epoch": 7.18,
"learning_rate": 7.606372002806322e-05,
"loss": 0.9565,
"step": 58000
},
{
"epoch": 7.3,
"learning_rate": 7.565102554578845e-05,
"loss": 0.9514,
"step": 59000
},
{
"epoch": 7.43,
"learning_rate": 7.52383310635137e-05,
"loss": 0.951,
"step": 60000
},
{
"epoch": 7.43,
"eval_loss": 0.9359220266342163,
"eval_runtime": 124.2464,
"eval_samples_per_second": 130.442,
"eval_steps_per_second": 16.306,
"step": 60000
},
{
"epoch": 7.55,
"learning_rate": 7.482563658123891e-05,
"loss": 0.9477,
"step": 61000
},
{
"epoch": 7.68,
"learning_rate": 7.441294209896414e-05,
"loss": 0.9581,
"step": 62000
},
{
"epoch": 7.8,
"learning_rate": 7.400024761668937e-05,
"loss": 0.948,
"step": 63000
},
{
"epoch": 7.92,
"learning_rate": 7.35875531344146e-05,
"loss": 0.9439,
"step": 64000
},
{
"epoch": 8.05,
"learning_rate": 7.317485865213982e-05,
"loss": 0.9447,
"step": 65000
},
{
"epoch": 8.05,
"eval_loss": 0.9360346794128418,
"eval_runtime": 160.0152,
"eval_samples_per_second": 101.284,
"eval_steps_per_second": 12.661,
"step": 65000
},
{
"epoch": 8.17,
"learning_rate": 7.276216416986506e-05,
"loss": 0.9377,
"step": 66000
},
{
"epoch": 8.29,
"learning_rate": 7.234946968759027e-05,
"loss": 0.9455,
"step": 67000
},
{
"epoch": 8.42,
"learning_rate": 7.19367752053155e-05,
"loss": 0.9332,
"step": 68000
},
{
"epoch": 8.54,
"learning_rate": 7.152408072304073e-05,
"loss": 0.9374,
"step": 69000
},
{
"epoch": 8.67,
"learning_rate": 7.111138624076596e-05,
"loss": 0.9512,
"step": 70000
},
{
"epoch": 8.67,
"eval_loss": 0.930946946144104,
"eval_runtime": 132.6824,
"eval_samples_per_second": 122.149,
"eval_steps_per_second": 15.27,
"step": 70000
}
],
"logging_steps": 1000,
"max_steps": 242310,
"num_input_tokens_seen": 0,
"num_train_epochs": 30,
"save_steps": 500,
"total_flos": 3.5013441751675136e+17,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}