FR-GPT2small / checkpoint-500 / trainer_state.json
xiulinyang's picture
Adding model checkpoints and config files
d42d53d
{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 11.0225,
"global_step": 500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 4.9999999999999996e-06,
"loss": 10.9692,
"step": 1
},
{
"epoch": 1.01,
"learning_rate": 0.00025,
"loss": 8.063,
"step": 50
},
{
"epoch": 2.01,
"learning_rate": 0.0005,
"loss": 4.9432,
"step": 100
},
{
"epoch": 3.02,
"learning_rate": 0.0005833333333333333,
"loss": 3.9936,
"step": 150
},
{
"epoch": 4.02,
"learning_rate": 0.0005555555555555556,
"loss": 3.7529,
"step": 200
},
{
"epoch": 5.03,
"learning_rate": 0.0005277777777777777,
"loss": 3.6008,
"step": 250
},
{
"epoch": 6.04,
"learning_rate": 0.0005,
"loss": 3.3332,
"step": 300
},
{
"epoch": 8.01,
"learning_rate": 0.00047222222222222224,
"loss": 3.089,
"step": 350
},
{
"epoch": 9.01,
"learning_rate": 0.00044444444444444436,
"loss": 2.8687,
"step": 400
},
{
"epoch": 10.02,
"learning_rate": 0.00041666666666666664,
"loss": 2.7458,
"step": 450
},
{
"epoch": 11.02,
"learning_rate": 0.00038888888888888887,
"loss": 2.6512,
"step": 500
}
],
"max_steps": 1200,
"num_train_epochs": 9223372036854775807,
"total_flos": 1.35776223756288e+17,
"trial_name": null,
"trial_params": null
}