{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 9.955555555555556,
"eval_steps": 50,
"global_step": 280,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.35555555555555557,
"grad_norm": 7.329732894897461,
"learning_rate": 3.571428571428572e-05,
"loss": 0.6531,
"step": 10
},
{
"epoch": 0.7111111111111111,
"grad_norm": 8.706786155700684,
"learning_rate": 7.142857142857143e-05,
"loss": 0.4865,
"step": 20
},
{
"epoch": 1.0666666666666667,
"grad_norm": 4.010909080505371,
"learning_rate": 9.998445910004082e-05,
"loss": 0.4045,
"step": 30
},
{
"epoch": 1.4222222222222223,
"grad_norm": 6.512356281280518,
"learning_rate": 9.944154131125642e-05,
"loss": 0.265,
"step": 40
},
{
"epoch": 1.7777777777777777,
"grad_norm": 5.565410137176514,
"learning_rate": 9.81312123475006e-05,
"loss": 0.2705,
"step": 50
},
{
"epoch": 1.7777777777777777,
"eval_accuracy": 0.9,
"eval_loss": 0.23454897105693817,
"eval_runtime": 3.5018,
"eval_samples_per_second": 28.556,
"eval_steps_per_second": 7.139,
"step": 50
},
{
"epoch": 2.1333333333333333,
"grad_norm": 2.5253825187683105,
"learning_rate": 9.607381059352038e-05,
"loss": 0.1875,
"step": 60
},
{
"epoch": 2.488888888888889,
"grad_norm": 5.666724681854248,
"learning_rate": 9.330127018922194e-05,
"loss": 0.0772,
"step": 70
},
{
"epoch": 2.8444444444444446,
"grad_norm": 7.440430164337158,
"learning_rate": 8.985662536114613e-05,
"loss": 0.1589,
"step": 80
},
{
"epoch": 3.2,
"grad_norm": 1.9519225358963013,
"learning_rate": 8.579334246298593e-05,
"loss": 0.0214,
"step": 90
},
{
"epoch": 3.5555555555555554,
"grad_norm": 0.20509658753871918,
"learning_rate": 8.117449009293668e-05,
"loss": 0.0559,
"step": 100
},
{
"epoch": 3.5555555555555554,
"eval_accuracy": 0.88,
"eval_loss": 0.6940509676933289,
"eval_runtime": 3.5087,
"eval_samples_per_second": 28.5,
"eval_steps_per_second": 7.125,
"step": 100
},
{
"epoch": 3.911111111111111,
"grad_norm": 1.2840397357940674,
"learning_rate": 7.60717601689749e-05,
"loss": 0.0057,
"step": 110
},
{
"epoch": 4.266666666666667,
"grad_norm": 1.4887982606887817,
"learning_rate": 7.056435515653059e-05,
"loss": 0.0182,
"step": 120
},
{
"epoch": 4.622222222222222,
"grad_norm": 0.001216607284732163,
"learning_rate": 6.473775872054521e-05,
"loss": 0.0006,
"step": 130
},
{
"epoch": 4.977777777777778,
"grad_norm": 11.006356239318848,
"learning_rate": 5.868240888334653e-05,
"loss": 0.0709,
"step": 140
},
{
"epoch": 5.333333333333333,
"grad_norm": 0.008083873428404331,
"learning_rate": 5.249229428303486e-05,
"loss": 0.0063,
"step": 150
},
{
"epoch": 5.333333333333333,
"eval_accuracy": 0.89,
"eval_loss": 0.9227009415626526,
"eval_runtime": 3.5005,
"eval_samples_per_second": 28.567,
"eval_steps_per_second": 7.142,
"step": 150
},
{
"epoch": 5.688888888888889,
"grad_norm": 0.06849438697099686,
"learning_rate": 4.626349532067879e-05,
"loss": 0.0024,
"step": 160
},
{
"epoch": 6.044444444444444,
"grad_norm": 0.0030830150935798883,
"learning_rate": 4.0092692840030134e-05,
"loss": 0.0004,
"step": 170
},
{
"epoch": 6.4,
"grad_norm": 0.00013647924060933292,
"learning_rate": 3.4075667487415785e-05,
"loss": 0.0001,
"step": 180
},
{
"epoch": 6.7555555555555555,
"grad_norm": 0.004931746982038021,
"learning_rate": 2.8305813044122097e-05,
"loss": 0.0,
"step": 190
},
{
"epoch": 7.111111111111111,
"grad_norm": 0.00014362263027578592,
"learning_rate": 2.2872686806712035e-05,
"loss": 0.0115,
"step": 200
},
{
"epoch": 7.111111111111111,
"eval_accuracy": 0.88,
"eval_loss": 1.2607765197753906,
"eval_runtime": 3.5114,
"eval_samples_per_second": 28.479,
"eval_steps_per_second": 7.12,
"step": 200
},
{
"epoch": 7.466666666666667,
"grad_norm": 0.00021143814956303686,
"learning_rate": 1.7860619515673033e-05,
"loss": 0.0028,
"step": 210
},
{
"epoch": 7.822222222222222,
"grad_norm": 0.0045998492278158665,
"learning_rate": 1.3347406408508695e-05,
"loss": 0.0001,
"step": 220
},
{
"epoch": 8.177777777777777,
"grad_norm": 0.009200682863593102,
"learning_rate": 9.403099714207175e-06,
"loss": 0.0,
"step": 230
},
{
"epoch": 8.533333333333333,
"grad_norm": 3.014660478584119e-06,
"learning_rate": 6.088921331488568e-06,
"loss": 0.0,
"step": 240
},
{
"epoch": 8.88888888888889,
"grad_norm": 9.906059131026268e-05,
"learning_rate": 3.4563125677897932e-06,
"loss": 0.0025,
"step": 250
},
{
"epoch": 8.88888888888889,
"eval_accuracy": 0.89,
"eval_loss": 1.2526733875274658,
"eval_runtime": 3.504,
"eval_samples_per_second": 28.539,
"eval_steps_per_second": 7.135,
"step": 250
},
{
"epoch": 9.244444444444444,
"grad_norm": 4.950571747031063e-05,
"learning_rate": 1.5461356885461075e-06,
"loss": 0.0,
"step": 260
},
{
"epoch": 9.6,
"grad_norm": 0.0026052340399473906,
"learning_rate": 3.8803966999139684e-07,
"loss": 0.0,
"step": 270
},
{
"epoch": 9.955555555555556,
"grad_norm": 0.0014641884481534362,
"learning_rate": 0.0,
"loss": 0.0013,
"step": 280
},
{
"epoch": 9.955555555555556,
"step": 280,
"total_flos": 0.0,
"train_loss": 0.09654608053981195,
"train_runtime": 1003.5439,
"train_samples_per_second": 8.968,
"train_steps_per_second": 0.279
}
],
"logging_steps": 10,
"max_steps": 280,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}