{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.01860176716788095,
"eval_steps": 3,
"global_step": 30,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0006200589055960316,
"grad_norm": 0.7607094645500183,
"learning_rate": 4.000000000000001e-06,
"loss": 1.1893,
"step": 1
},
{
"epoch": 0.0012401178111920632,
"grad_norm": 0.3518824279308319,
"learning_rate": 8.000000000000001e-06,
"loss": 0.9752,
"step": 2
},
{
"epoch": 0.0018601767167880949,
"grad_norm": 0.4024870693683624,
"learning_rate": 1.2e-05,
"loss": 0.8467,
"step": 3
},
{
"epoch": 0.0018601767167880949,
"eval_loss": 1.0757174491882324,
"eval_runtime": 26.414,
"eval_samples_per_second": 3.786,
"eval_steps_per_second": 3.786,
"step": 3
},
{
"epoch": 0.0024802356223841263,
"grad_norm": 0.36653462052345276,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.0056,
"step": 4
},
{
"epoch": 0.0031002945279801583,
"grad_norm": 0.47098514437675476,
"learning_rate": 2e-05,
"loss": 0.8545,
"step": 5
},
{
"epoch": 0.0037203534335761897,
"grad_norm": 0.5030912160873413,
"learning_rate": 1.9200000000000003e-05,
"loss": 0.9553,
"step": 6
},
{
"epoch": 0.0037203534335761897,
"eval_loss": 1.0662517547607422,
"eval_runtime": 26.7124,
"eval_samples_per_second": 3.744,
"eval_steps_per_second": 3.744,
"step": 6
},
{
"epoch": 0.004340412339172222,
"grad_norm": 0.4823940694332123,
"learning_rate": 1.8400000000000003e-05,
"loss": 0.9266,
"step": 7
},
{
"epoch": 0.004960471244768253,
"grad_norm": 0.5647093653678894,
"learning_rate": 1.76e-05,
"loss": 1.0013,
"step": 8
},
{
"epoch": 0.005580530150364285,
"grad_norm": 0.4058099687099457,
"learning_rate": 1.6800000000000002e-05,
"loss": 0.9206,
"step": 9
},
{
"epoch": 0.005580530150364285,
"eval_loss": 1.0476710796356201,
"eval_runtime": 26.7708,
"eval_samples_per_second": 3.735,
"eval_steps_per_second": 3.735,
"step": 9
},
{
"epoch": 0.0062005890559603165,
"grad_norm": 0.44608208537101746,
"learning_rate": 1.6000000000000003e-05,
"loss": 1.0937,
"step": 10
},
{
"epoch": 0.0068206479615563476,
"grad_norm": 0.7088199853897095,
"learning_rate": 1.5200000000000002e-05,
"loss": 0.8915,
"step": 11
},
{
"epoch": 0.0074407068671523795,
"grad_norm": 0.5858705639839172,
"learning_rate": 1.4400000000000001e-05,
"loss": 1.186,
"step": 12
},
{
"epoch": 0.0074407068671523795,
"eval_loss": 1.0301631689071655,
"eval_runtime": 27.1639,
"eval_samples_per_second": 3.681,
"eval_steps_per_second": 3.681,
"step": 12
},
{
"epoch": 0.008060765772748411,
"grad_norm": 0.5912883281707764,
"learning_rate": 1.3600000000000002e-05,
"loss": 1.0082,
"step": 13
},
{
"epoch": 0.008680824678344443,
"grad_norm": 0.42219021916389465,
"learning_rate": 1.2800000000000001e-05,
"loss": 0.865,
"step": 14
},
{
"epoch": 0.009300883583940475,
"grad_norm": 0.4253869652748108,
"learning_rate": 1.2e-05,
"loss": 0.8849,
"step": 15
},
{
"epoch": 0.009300883583940475,
"eval_loss": 1.012740135192871,
"eval_runtime": 27.032,
"eval_samples_per_second": 3.699,
"eval_steps_per_second": 3.699,
"step": 15
},
{
"epoch": 0.009920942489536505,
"grad_norm": 0.5621253252029419,
"learning_rate": 1.1200000000000001e-05,
"loss": 0.8289,
"step": 16
},
{
"epoch": 0.010541001395132537,
"grad_norm": 0.37459954619407654,
"learning_rate": 1.04e-05,
"loss": 0.9139,
"step": 17
},
{
"epoch": 0.01116106030072857,
"grad_norm": 0.448284387588501,
"learning_rate": 9.600000000000001e-06,
"loss": 0.8571,
"step": 18
},
{
"epoch": 0.01116106030072857,
"eval_loss": 0.99790358543396,
"eval_runtime": 26.9342,
"eval_samples_per_second": 3.713,
"eval_steps_per_second": 3.713,
"step": 18
},
{
"epoch": 0.011781119206324601,
"grad_norm": 0.5347204208374023,
"learning_rate": 8.8e-06,
"loss": 0.8396,
"step": 19
},
{
"epoch": 0.012401178111920633,
"grad_norm": 0.36863428354263306,
"learning_rate": 8.000000000000001e-06,
"loss": 0.6704,
"step": 20
},
{
"epoch": 0.013021237017516665,
"grad_norm": 0.4834197163581848,
"learning_rate": 7.2000000000000005e-06,
"loss": 0.9445,
"step": 21
},
{
"epoch": 0.013021237017516665,
"eval_loss": 0.9843659996986389,
"eval_runtime": 27.0084,
"eval_samples_per_second": 3.703,
"eval_steps_per_second": 3.703,
"step": 21
},
{
"epoch": 0.013641295923112695,
"grad_norm": 0.5544454455375671,
"learning_rate": 6.4000000000000006e-06,
"loss": 0.9225,
"step": 22
},
{
"epoch": 0.014261354828708727,
"grad_norm": 0.6251612901687622,
"learning_rate": 5.600000000000001e-06,
"loss": 1.0261,
"step": 23
},
{
"epoch": 0.014881413734304759,
"grad_norm": 0.5012588500976562,
"learning_rate": 4.800000000000001e-06,
"loss": 0.8526,
"step": 24
},
{
"epoch": 0.014881413734304759,
"eval_loss": 0.9725684523582458,
"eval_runtime": 26.7432,
"eval_samples_per_second": 3.739,
"eval_steps_per_second": 3.739,
"step": 24
},
{
"epoch": 0.01550147263990079,
"grad_norm": 0.5470224618911743,
"learning_rate": 4.000000000000001e-06,
"loss": 0.9036,
"step": 25
},
{
"epoch": 0.016121531545496823,
"grad_norm": 0.7129680514335632,
"learning_rate": 3.2000000000000003e-06,
"loss": 0.7822,
"step": 26
},
{
"epoch": 0.016741590451092855,
"grad_norm": 0.6765540838241577,
"learning_rate": 2.4000000000000003e-06,
"loss": 0.7982,
"step": 27
},
{
"epoch": 0.016741590451092855,
"eval_loss": 0.965038537979126,
"eval_runtime": 27.034,
"eval_samples_per_second": 3.699,
"eval_steps_per_second": 3.699,
"step": 27
},
{
"epoch": 0.017361649356688887,
"grad_norm": 0.6408655643463135,
"learning_rate": 1.6000000000000001e-06,
"loss": 0.6792,
"step": 28
},
{
"epoch": 0.01798170826228492,
"grad_norm": 0.7210974097251892,
"learning_rate": 8.000000000000001e-07,
"loss": 0.9673,
"step": 29
},
{
"epoch": 0.01860176716788095,
"grad_norm": 0.6966854929924011,
"learning_rate": 0.0,
"loss": 0.7089,
"step": 30
},
{
"epoch": 0.01860176716788095,
"eval_loss": 0.9616574048995972,
"eval_runtime": 27.0027,
"eval_samples_per_second": 3.703,
"eval_steps_per_second": 3.703,
"step": 30
}
],
"logging_steps": 1,
"max_steps": 30,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 3,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 3.42368069566464e+16,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}