{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9221556886227544,
"eval_steps": 500,
"global_step": 30,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.09580838323353294,
"grad_norm": 6.092118256865555,
"learning_rate": 2.6666666666666667e-05,
"loss": 0.9282,
"step": 1
},
{
"epoch": 0.19161676646706588,
"grad_norm": 6.0711467727842345,
"learning_rate": 5.333333333333333e-05,
"loss": 0.9226,
"step": 2
},
{
"epoch": 0.2874251497005988,
"grad_norm": 6.905663723425974,
"learning_rate": 8e-05,
"loss": 0.9058,
"step": 3
},
{
"epoch": 0.38323353293413176,
"grad_norm": 8.1023220637387,
"learning_rate": 7.972953430967773e-05,
"loss": 0.961,
"step": 4
},
{
"epoch": 0.47904191616766467,
"grad_norm": 7.64825728038495,
"learning_rate": 7.892179482319297e-05,
"loss": 0.9535,
"step": 5
},
{
"epoch": 0.5748502994011976,
"grad_norm": 7.401853508914865,
"learning_rate": 7.758770483143634e-05,
"loss": 0.8516,
"step": 6
},
{
"epoch": 0.6706586826347305,
"grad_norm": 5.104892027037074,
"learning_rate": 7.57453056129365e-05,
"loss": 0.8037,
"step": 7
},
{
"epoch": 0.7664670658682635,
"grad_norm": 17.072975173094324,
"learning_rate": 7.341951245651747e-05,
"loss": 0.7848,
"step": 8
},
{
"epoch": 0.8622754491017964,
"grad_norm": 77.57628644353116,
"learning_rate": 7.064177772475912e-05,
"loss": 0.9585,
"step": 9
},
{
"epoch": 0.9580838323353293,
"grad_norm": 855.0346126388312,
"learning_rate": 6.744966551474936e-05,
"loss": 2.4753,
"step": 10
},
{
"epoch": 1.0778443113772456,
"grad_norm": 32.70725612303202,
"learning_rate": 6.388634366811146e-05,
"loss": 1.6476,
"step": 11
},
{
"epoch": 1.1736526946107784,
"grad_norm": 4.481076690988672,
"learning_rate": 6.000000000000001e-05,
"loss": 0.839,
"step": 12
},
{
"epoch": 1.2694610778443114,
"grad_norm": 1.9558613995916376,
"learning_rate": 5.584319064156628e-05,
"loss": 0.7537,
"step": 13
},
{
"epoch": 1.3652694610778444,
"grad_norm": 2.372911388804447,
"learning_rate": 5.1472129308443616e-05,
"loss": 0.7187,
"step": 14
},
{
"epoch": 1.4610778443113772,
"grad_norm": 1.25855371429305,
"learning_rate": 4.694592710667723e-05,
"loss": 0.6989,
"step": 15
},
{
"epoch": 1.55688622754491,
"grad_norm": 1.5943802076498133,
"learning_rate": 4.2325793156419035e-05,
"loss": 0.6719,
"step": 16
},
{
"epoch": 1.6526946107784433,
"grad_norm": 1.3815244957013615,
"learning_rate": 3.767420684358097e-05,
"loss": 0.6559,
"step": 17
},
{
"epoch": 1.748502994011976,
"grad_norm": 0.9766584468675148,
"learning_rate": 3.305407289332279e-05,
"loss": 0.6433,
"step": 18
},
{
"epoch": 1.8443113772455089,
"grad_norm": 1.1460122230416192,
"learning_rate": 2.8527870691556404e-05,
"loss": 0.6438,
"step": 19
},
{
"epoch": 1.9401197604790419,
"grad_norm": 0.8803600726680791,
"learning_rate": 2.4156809358433728e-05,
"loss": 0.6276,
"step": 20
},
{
"epoch": 2.059880239520958,
"grad_norm": 1.2634200332675958,
"learning_rate": 2.0000000000000012e-05,
"loss": 1.1023,
"step": 21
},
{
"epoch": 2.155688622754491,
"grad_norm": 0.6983275924487673,
"learning_rate": 1.6113656331888563e-05,
"loss": 0.6023,
"step": 22
},
{
"epoch": 2.251497005988024,
"grad_norm": 0.7061752632283301,
"learning_rate": 1.2550334485250661e-05,
"loss": 0.6064,
"step": 23
},
{
"epoch": 2.3473053892215567,
"grad_norm": 0.5099830797999658,
"learning_rate": 9.358222275240884e-06,
"loss": 0.5896,
"step": 24
},
{
"epoch": 2.44311377245509,
"grad_norm": 0.5764311304808725,
"learning_rate": 6.58048754348255e-06,
"loss": 0.5967,
"step": 25
},
{
"epoch": 2.538922155688623,
"grad_norm": 0.49769875157579624,
"learning_rate": 4.254694387063514e-06,
"loss": 0.5787,
"step": 26
},
{
"epoch": 2.6347305389221556,
"grad_norm": 0.41906641581877,
"learning_rate": 2.4122951685636674e-06,
"loss": 0.5932,
"step": 27
},
{
"epoch": 2.730538922155689,
"grad_norm": 0.33140868867277057,
"learning_rate": 1.0782051768070477e-06,
"loss": 0.5794,
"step": 28
},
{
"epoch": 2.8263473053892216,
"grad_norm": 0.29806851256209266,
"learning_rate": 2.704656903222791e-07,
"loss": 0.5819,
"step": 29
},
{
"epoch": 2.9221556886227544,
"grad_norm": 0.2754279265834988,
"learning_rate": 0.0,
"loss": 0.5796,
"step": 30
},
{
"epoch": 2.9221556886227544,
"step": 30,
"total_flos": 7.719883132832317e+17,
"train_loss": 0.8285216629505158,
"train_runtime": 4905.9512,
"train_samples_per_second": 3.25,
"train_steps_per_second": 0.006
}
],
"logging_steps": 1.0,
"max_steps": 30,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 7.719883132832317e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}