{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 2.9880159786950733,
"eval_steps": 500,
"global_step": 561,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.05326231691078562,
"grad_norm": 10.043872386995645,
"learning_rate": 5e-06,
"loss": 0.9027,
"step": 10
},
{
"epoch": 0.10652463382157124,
"grad_norm": 3.5873792834266096,
"learning_rate": 5e-06,
"loss": 0.812,
"step": 20
},
{
"epoch": 0.15978695073235685,
"grad_norm": 1.6541921693779105,
"learning_rate": 5e-06,
"loss": 0.7822,
"step": 30
},
{
"epoch": 0.21304926764314247,
"grad_norm": 2.9676058520395694,
"learning_rate": 5e-06,
"loss": 0.7606,
"step": 40
},
{
"epoch": 0.2663115845539281,
"grad_norm": 1.2283778691151885,
"learning_rate": 5e-06,
"loss": 0.7332,
"step": 50
},
{
"epoch": 0.3195739014647137,
"grad_norm": 1.5118726910174631,
"learning_rate": 5e-06,
"loss": 0.7221,
"step": 60
},
{
"epoch": 0.37283621837549935,
"grad_norm": 1.4995166965541817,
"learning_rate": 5e-06,
"loss": 0.7165,
"step": 70
},
{
"epoch": 0.42609853528628494,
"grad_norm": 1.1573105641307837,
"learning_rate": 5e-06,
"loss": 0.708,
"step": 80
},
{
"epoch": 0.4793608521970706,
"grad_norm": 0.6329039589127555,
"learning_rate": 5e-06,
"loss": 0.6902,
"step": 90
},
{
"epoch": 0.5326231691078562,
"grad_norm": 0.6967482378462392,
"learning_rate": 5e-06,
"loss": 0.6936,
"step": 100
},
{
"epoch": 0.5858854860186418,
"grad_norm": 1.0258384410770847,
"learning_rate": 5e-06,
"loss": 0.6849,
"step": 110
},
{
"epoch": 0.6391478029294274,
"grad_norm": 0.6790507748606548,
"learning_rate": 5e-06,
"loss": 0.6991,
"step": 120
},
{
"epoch": 0.6924101198402131,
"grad_norm": 0.6426377105957765,
"learning_rate": 5e-06,
"loss": 0.6802,
"step": 130
},
{
"epoch": 0.7456724367509987,
"grad_norm": 1.2825548485478584,
"learning_rate": 5e-06,
"loss": 0.6927,
"step": 140
},
{
"epoch": 0.7989347536617842,
"grad_norm": 1.2061088462661143,
"learning_rate": 5e-06,
"loss": 0.6811,
"step": 150
},
{
"epoch": 0.8521970705725699,
"grad_norm": 0.6711727236495032,
"learning_rate": 5e-06,
"loss": 0.6874,
"step": 160
},
{
"epoch": 0.9054593874833555,
"grad_norm": 0.5700786999189973,
"learning_rate": 5e-06,
"loss": 0.689,
"step": 170
},
{
"epoch": 0.9587217043941412,
"grad_norm": 0.6424611481692418,
"learning_rate": 5e-06,
"loss": 0.6759,
"step": 180
},
{
"epoch": 0.996005326231691,
"eval_loss": 0.6789066195487976,
"eval_runtime": 66.7611,
"eval_samples_per_second": 75.763,
"eval_steps_per_second": 0.599,
"step": 187
},
{
"epoch": 1.0119840213049267,
"grad_norm": 0.7982749587499836,
"learning_rate": 5e-06,
"loss": 0.6589,
"step": 190
},
{
"epoch": 1.0652463382157125,
"grad_norm": 0.7591350142806961,
"learning_rate": 5e-06,
"loss": 0.6137,
"step": 200
},
{
"epoch": 1.118508655126498,
"grad_norm": 0.7399944259795829,
"learning_rate": 5e-06,
"loss": 0.6147,
"step": 210
},
{
"epoch": 1.1717709720372835,
"grad_norm": 0.7128707470380614,
"learning_rate": 5e-06,
"loss": 0.6169,
"step": 220
},
{
"epoch": 1.2250332889480693,
"grad_norm": 0.6390434495921663,
"learning_rate": 5e-06,
"loss": 0.6171,
"step": 230
},
{
"epoch": 1.2782956058588548,
"grad_norm": 0.6835653985742169,
"learning_rate": 5e-06,
"loss": 0.6168,
"step": 240
},
{
"epoch": 1.3315579227696404,
"grad_norm": 0.7382391212184931,
"learning_rate": 5e-06,
"loss": 0.6265,
"step": 250
},
{
"epoch": 1.3848202396804261,
"grad_norm": 0.5665114450124694,
"learning_rate": 5e-06,
"loss": 0.6193,
"step": 260
},
{
"epoch": 1.4380825565912116,
"grad_norm": 0.5296907389070354,
"learning_rate": 5e-06,
"loss": 0.622,
"step": 270
},
{
"epoch": 1.4913448735019974,
"grad_norm": 0.546717125266996,
"learning_rate": 5e-06,
"loss": 0.6239,
"step": 280
},
{
"epoch": 1.544607190412783,
"grad_norm": 0.6531122146180807,
"learning_rate": 5e-06,
"loss": 0.6211,
"step": 290
},
{
"epoch": 1.5978695073235687,
"grad_norm": 0.5744613077225782,
"learning_rate": 5e-06,
"loss": 0.6193,
"step": 300
},
{
"epoch": 1.6511318242343542,
"grad_norm": 0.6725000329350213,
"learning_rate": 5e-06,
"loss": 0.6157,
"step": 310
},
{
"epoch": 1.7043941411451398,
"grad_norm": 0.5467410656311016,
"learning_rate": 5e-06,
"loss": 0.6153,
"step": 320
},
{
"epoch": 1.7576564580559255,
"grad_norm": 0.6575779238804216,
"learning_rate": 5e-06,
"loss": 0.6183,
"step": 330
},
{
"epoch": 1.810918774966711,
"grad_norm": 0.6389106370178078,
"learning_rate": 5e-06,
"loss": 0.6213,
"step": 340
},
{
"epoch": 1.8641810918774966,
"grad_norm": 0.5812488086714392,
"learning_rate": 5e-06,
"loss": 0.616,
"step": 350
},
{
"epoch": 1.9174434087882823,
"grad_norm": 0.662384247335092,
"learning_rate": 5e-06,
"loss": 0.6046,
"step": 360
},
{
"epoch": 1.9707057256990679,
"grad_norm": 0.5577067863493724,
"learning_rate": 5e-06,
"loss": 0.6133,
"step": 370
},
{
"epoch": 1.9973368841544608,
"eval_loss": 0.672442615032196,
"eval_runtime": 66.0876,
"eval_samples_per_second": 76.535,
"eval_steps_per_second": 0.605,
"step": 375
},
{
"epoch": 2.0239680426098534,
"grad_norm": 0.914487856356494,
"learning_rate": 5e-06,
"loss": 0.5838,
"step": 380
},
{
"epoch": 2.077230359520639,
"grad_norm": 1.0411320535838575,
"learning_rate": 5e-06,
"loss": 0.5542,
"step": 390
},
{
"epoch": 2.130492676431425,
"grad_norm": 0.7322670502543674,
"learning_rate": 5e-06,
"loss": 0.5535,
"step": 400
},
{
"epoch": 2.1837549933422102,
"grad_norm": 0.6759246932223949,
"learning_rate": 5e-06,
"loss": 0.5567,
"step": 410
},
{
"epoch": 2.237017310252996,
"grad_norm": 0.6733943048111352,
"learning_rate": 5e-06,
"loss": 0.5531,
"step": 420
},
{
"epoch": 2.2902796271637818,
"grad_norm": 0.5804880093973978,
"learning_rate": 5e-06,
"loss": 0.5543,
"step": 430
},
{
"epoch": 2.343541944074567,
"grad_norm": 0.6550606815593909,
"learning_rate": 5e-06,
"loss": 0.5548,
"step": 440
},
{
"epoch": 2.396804260985353,
"grad_norm": 0.7005642588379877,
"learning_rate": 5e-06,
"loss": 0.56,
"step": 450
},
{
"epoch": 2.4500665778961386,
"grad_norm": 0.6514987724499162,
"learning_rate": 5e-06,
"loss": 0.5553,
"step": 460
},
{
"epoch": 2.5033288948069243,
"grad_norm": 0.598180037970062,
"learning_rate": 5e-06,
"loss": 0.5552,
"step": 470
},
{
"epoch": 2.5565912117177096,
"grad_norm": 0.6716187874030446,
"learning_rate": 5e-06,
"loss": 0.5629,
"step": 480
},
{
"epoch": 2.6098535286284954,
"grad_norm": 0.6417933456363174,
"learning_rate": 5e-06,
"loss": 0.5632,
"step": 490
},
{
"epoch": 2.6631158455392807,
"grad_norm": 0.5930259103461313,
"learning_rate": 5e-06,
"loss": 0.5608,
"step": 500
},
{
"epoch": 2.7163781624500665,
"grad_norm": 0.5696407448709899,
"learning_rate": 5e-06,
"loss": 0.5576,
"step": 510
},
{
"epoch": 2.7696404793608522,
"grad_norm": 0.6956493623428277,
"learning_rate": 5e-06,
"loss": 0.5514,
"step": 520
},
{
"epoch": 2.822902796271638,
"grad_norm": 0.5861405184973314,
"learning_rate": 5e-06,
"loss": 0.5556,
"step": 530
},
{
"epoch": 2.8761651131824233,
"grad_norm": 0.7349516316012534,
"learning_rate": 5e-06,
"loss": 0.5602,
"step": 540
},
{
"epoch": 2.929427430093209,
"grad_norm": 0.7210951093860754,
"learning_rate": 5e-06,
"loss": 0.558,
"step": 550
},
{
"epoch": 2.982689747003995,
"grad_norm": 0.6051360012000235,
"learning_rate": 5e-06,
"loss": 0.5585,
"step": 560
},
{
"epoch": 2.9880159786950733,
"eval_loss": 0.6807268261909485,
"eval_runtime": 63.8046,
"eval_samples_per_second": 79.273,
"eval_steps_per_second": 0.627,
"step": 561
},
{
"epoch": 2.9880159786950733,
"step": 561,
"total_flos": 939277135380480.0,
"train_loss": 0.6320187783071276,
"train_runtime": 9641.2734,
"train_samples_per_second": 29.899,
"train_steps_per_second": 0.058
}
],
"logging_steps": 10,
"max_steps": 561,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 939277135380480.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}