{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.01860176716788095,
"eval_steps": 3,
"global_step": 30,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0006200589055960316,
"grad_norm": 0.18599237501621246,
"learning_rate": 4.000000000000001e-06,
"loss": 0.9319,
"step": 1
},
{
"epoch": 0.0012401178111920632,
"grad_norm": 0.09086523950099945,
"learning_rate": 8.000000000000001e-06,
"loss": 0.6463,
"step": 2
},
{
"epoch": 0.0018601767167880949,
"grad_norm": 0.09325356036424637,
"learning_rate": 1.2e-05,
"loss": 0.575,
"step": 3
},
{
"epoch": 0.0018601767167880949,
"eval_loss": 0.7272388935089111,
"eval_runtime": 33.9288,
"eval_samples_per_second": 2.947,
"eval_steps_per_second": 2.947,
"step": 3
},
{
"epoch": 0.0024802356223841263,
"grad_norm": 0.09707680344581604,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.7038,
"step": 4
},
{
"epoch": 0.0031002945279801583,
"grad_norm": 0.10810782760381699,
"learning_rate": 2e-05,
"loss": 0.5471,
"step": 5
},
{
"epoch": 0.0037203534335761897,
"grad_norm": 0.13030411303043365,
"learning_rate": 1.9200000000000003e-05,
"loss": 0.6735,
"step": 6
},
{
"epoch": 0.0037203534335761897,
"eval_loss": 0.7256982922554016,
"eval_runtime": 33.9528,
"eval_samples_per_second": 2.945,
"eval_steps_per_second": 2.945,
"step": 6
},
{
"epoch": 0.004340412339172222,
"grad_norm": 0.12336976826190948,
"learning_rate": 1.8400000000000003e-05,
"loss": 0.654,
"step": 7
},
{
"epoch": 0.004960471244768253,
"grad_norm": 0.16046489775180817,
"learning_rate": 1.76e-05,
"loss": 0.6301,
"step": 8
},
{
"epoch": 0.005580530150364285,
"grad_norm": 0.11589628458023071,
"learning_rate": 1.6800000000000002e-05,
"loss": 0.5326,
"step": 9
},
{
"epoch": 0.005580530150364285,
"eval_loss": 0.723076343536377,
"eval_runtime": 34.5303,
"eval_samples_per_second": 2.896,
"eval_steps_per_second": 2.896,
"step": 9
},
{
"epoch": 0.0062005890559603165,
"grad_norm": 0.12244553864002228,
"learning_rate": 1.6000000000000003e-05,
"loss": 0.8055,
"step": 10
},
{
"epoch": 0.0068206479615563476,
"grad_norm": 0.10270956158638,
"learning_rate": 1.5200000000000002e-05,
"loss": 0.5492,
"step": 11
},
{
"epoch": 0.0074407068671523795,
"grad_norm": 0.14439155161380768,
"learning_rate": 1.4400000000000001e-05,
"loss": 0.8579,
"step": 12
},
{
"epoch": 0.0074407068671523795,
"eval_loss": 0.7205619215965271,
"eval_runtime": 33.7297,
"eval_samples_per_second": 2.965,
"eval_steps_per_second": 2.965,
"step": 12
},
{
"epoch": 0.008060765772748411,
"grad_norm": 0.1271936148405075,
"learning_rate": 1.3600000000000002e-05,
"loss": 0.6923,
"step": 13
},
{
"epoch": 0.008680824678344443,
"grad_norm": 0.092640720307827,
"learning_rate": 1.2800000000000001e-05,
"loss": 0.5983,
"step": 14
},
{
"epoch": 0.009300883583940475,
"grad_norm": 0.1161455512046814,
"learning_rate": 1.2e-05,
"loss": 0.6386,
"step": 15
},
{
"epoch": 0.009300883583940475,
"eval_loss": 0.7178443074226379,
"eval_runtime": 33.5878,
"eval_samples_per_second": 2.977,
"eval_steps_per_second": 2.977,
"step": 15
},
{
"epoch": 0.009920942489536505,
"grad_norm": 0.09220079332590103,
"learning_rate": 1.1200000000000001e-05,
"loss": 0.4665,
"step": 16
},
{
"epoch": 0.010541001395132537,
"grad_norm": 0.08713295310735703,
"learning_rate": 1.04e-05,
"loss": 0.6114,
"step": 17
},
{
"epoch": 0.01116106030072857,
"grad_norm": 0.11913106590509415,
"learning_rate": 9.600000000000001e-06,
"loss": 0.5703,
"step": 18
},
{
"epoch": 0.01116106030072857,
"eval_loss": 0.7152560949325562,
"eval_runtime": 33.4806,
"eval_samples_per_second": 2.987,
"eval_steps_per_second": 2.987,
"step": 18
},
{
"epoch": 0.011781119206324601,
"grad_norm": 0.11837327480316162,
"learning_rate": 8.8e-06,
"loss": 0.6049,
"step": 19
},
{
"epoch": 0.012401178111920633,
"grad_norm": 0.08001928776502609,
"learning_rate": 8.000000000000001e-06,
"loss": 0.4691,
"step": 20
},
{
"epoch": 0.013021237017516665,
"grad_norm": 0.11291633546352386,
"learning_rate": 7.2000000000000005e-06,
"loss": 0.6752,
"step": 21
},
{
"epoch": 0.013021237017516665,
"eval_loss": 0.7128345370292664,
"eval_runtime": 34.2622,
"eval_samples_per_second": 2.919,
"eval_steps_per_second": 2.919,
"step": 21
},
{
"epoch": 0.013641295923112695,
"grad_norm": 0.1115325391292572,
"learning_rate": 6.4000000000000006e-06,
"loss": 0.5874,
"step": 22
},
{
"epoch": 0.014261354828708727,
"grad_norm": 0.1305752545595169,
"learning_rate": 5.600000000000001e-06,
"loss": 0.7733,
"step": 23
},
{
"epoch": 0.014881413734304759,
"grad_norm": 0.10779058188199997,
"learning_rate": 4.800000000000001e-06,
"loss": 0.6217,
"step": 24
},
{
"epoch": 0.014881413734304759,
"eval_loss": 0.7108609080314636,
"eval_runtime": 34.6034,
"eval_samples_per_second": 2.89,
"eval_steps_per_second": 2.89,
"step": 24
},
{
"epoch": 0.01550147263990079,
"grad_norm": 0.11457020789384842,
"learning_rate": 4.000000000000001e-06,
"loss": 0.5838,
"step": 25
},
{
"epoch": 0.016121531545496823,
"grad_norm": 0.1093108132481575,
"learning_rate": 3.2000000000000003e-06,
"loss": 0.504,
"step": 26
},
{
"epoch": 0.016741590451092855,
"grad_norm": 0.11828811466693878,
"learning_rate": 2.4000000000000003e-06,
"loss": 0.5179,
"step": 27
},
{
"epoch": 0.016741590451092855,
"eval_loss": 0.7094972133636475,
"eval_runtime": 34.2275,
"eval_samples_per_second": 2.922,
"eval_steps_per_second": 2.922,
"step": 27
},
{
"epoch": 0.017361649356688887,
"grad_norm": 0.09534204006195068,
"learning_rate": 1.6000000000000001e-06,
"loss": 0.4988,
"step": 28
},
{
"epoch": 0.01798170826228492,
"grad_norm": 0.12317203730344772,
"learning_rate": 8.000000000000001e-07,
"loss": 0.6945,
"step": 29
},
{
"epoch": 0.01860176716788095,
"grad_norm": 0.13696961104869843,
"learning_rate": 0.0,
"loss": 0.5378,
"step": 30
},
{
"epoch": 0.01860176716788095,
"eval_loss": 0.7089178562164307,
"eval_runtime": 33.3831,
"eval_samples_per_second": 2.996,
"eval_steps_per_second": 2.996,
"step": 30
}
],
"logging_steps": 1,
"max_steps": 30,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 3,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 4.029866007950131e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}