llamaMerge / trainer_state.json
{
  "best_metric": 0.12014812231063843,
  "best_model_checkpoint": "./model/checkpoints/checkpoint-100",
  "epoch": 1.1594202898550725,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1855072463768116,
      "grad_norm": 0.9348331689834595,
      "learning_rate": 9.62962962962963e-05,
      "loss": 1.7376,
      "step": 16
    },
    {
      "epoch": 0.3710144927536232,
      "grad_norm": 0.2084011733531952,
      "learning_rate": 8.148148148148148e-05,
      "loss": 0.2418,
      "step": 32
    },
    {
      "epoch": 0.5565217391304348,
      "grad_norm": 0.2130679488182068,
      "learning_rate": 6.666666666666667e-05,
      "loss": 0.1875,
      "step": 48
    },
    {
      "epoch": 0.5797101449275363,
      "eval_loss": 0.16450659930706024,
      "eval_model_preparation_time": 0.0017,
      "eval_runtime": 107.7969,
      "eval_samples_per_second": 1.707,
      "eval_steps_per_second": 0.213,
      "step": 50
    },
    {
      "epoch": 0.7420289855072464,
      "grad_norm": 0.2075987011194229,
      "learning_rate": 5.185185185185185e-05,
      "loss": 0.1725,
      "step": 64
    },
    {
      "epoch": 0.927536231884058,
      "grad_norm": 0.19163866341114044,
      "learning_rate": 3.7037037037037037e-05,
      "loss": 0.1437,
      "step": 80
    },
    {
      "epoch": 1.1130434782608696,
      "grad_norm": 0.19873079657554626,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.138,
      "step": 96
    },
    {
      "epoch": 1.1594202898550725,
      "eval_loss": 0.12014812231063843,
      "eval_model_preparation_time": 0.0017,
      "eval_runtime": 15.2427,
      "eval_samples_per_second": 12.071,
      "eval_steps_per_second": 1.509,
      "step": 100
    }
  ],
  "logging_steps": 16,
  "max_steps": 120,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.2500271180693504e+16,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}
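
For context: trainer_state.json is the progress log that the Hugging Face Transformers Trainer writes alongside each checkpoint. Its log_history interleaves training entries (keyed by "loss") with evaluation entries (keyed by "eval_loss"). Below is a minimal sketch of how to inspect this file using only the standard library; the relative path is an assumption, so point it at wherever the file actually lives.

```python
import json

# Load the Trainer state. The path is an assumption; adjust as needed.
with open("trainer_state.json") as f:
    state = json.load(f)

# Training logs carry "loss"; evaluation logs carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

print(f"best checkpoint: {state['best_model_checkpoint']} "
      f"(eval_loss={state['best_metric']:.4f})")

# Print the training-loss curve with the linear LR decay.
for e in train_logs:
    print(f"step {e['step']:>3}  epoch {e['epoch']:.3f}  "
          f"loss {e['loss']:.4f}  lr {e['learning_rate']:.2e}")

# Print the eval checkpoints (every 50 steps per "eval_steps").
for e in eval_logs:
    print(f"step {e['step']:>3}  eval_loss {e['eval_loss']:.4f}")
```

Run against the state above, this shows the training loss falling from 1.7376 at step 16 to 0.138 at step 96, with the best eval_loss (0.1201) recorded at checkpoint-100.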