{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 42.00666666666667,
"global_step": 1100,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 4.9999999999999996e-06,
"loss": 11.1259,
"step": 1
},
{
"epoch": 1.02,
"learning_rate": 0.00025,
"loss": 8.8129,
"step": 50
},
{
"epoch": 3.02,
"learning_rate": 0.0005,
"loss": 7.1869,
"step": 100
},
{
"epoch": 5.02,
"learning_rate": 0.0005833333333333333,
"loss": 6.5568,
"step": 150
},
{
"epoch": 7.01,
"learning_rate": 0.0005555555555555556,
"loss": 6.0674,
"step": 200
},
{
"epoch": 9.01,
"learning_rate": 0.0005277777777777777,
"loss": 5.7599,
"step": 250
},
{
"epoch": 11.01,
"learning_rate": 0.0005,
"loss": 5.5578,
"step": 300
},
{
"epoch": 13.01,
"learning_rate": 0.00047222222222222224,
"loss": 5.3958,
"step": 350
},
{
"epoch": 15.01,
"learning_rate": 0.00044444444444444436,
"loss": 5.2326,
"step": 400
},
{
"epoch": 17.01,
"learning_rate": 0.00041666666666666664,
"loss": 5.0814,
"step": 450
},
{
"epoch": 19.0,
"learning_rate": 0.00038888888888888887,
"loss": 4.9099,
"step": 500
},
{
"epoch": 21.0,
"learning_rate": 0.0003611111111111111,
"loss": 4.7405,
"step": 550
},
{
"epoch": 23.0,
"learning_rate": 0.0003333333333333333,
"loss": 4.5977,
"step": 600
},
{
"epoch": 24.02,
"learning_rate": 0.00030555555555555555,
"loss": 4.4226,
"step": 650
},
{
"epoch": 26.02,
"learning_rate": 0.0002777777777777778,
"loss": 4.3734,
"step": 700
},
{
"epoch": 28.02,
"learning_rate": 0.00025,
"loss": 4.2801,
"step": 750
},
{
"epoch": 30.02,
"learning_rate": 0.00022222222222222218,
"loss": 4.195,
"step": 800
},
{
"epoch": 32.02,
"learning_rate": 0.00019444444444444443,
"loss": 4.117,
"step": 850
},
{
"epoch": 34.01,
"learning_rate": 0.00016666666666666666,
"loss": 4.0469,
"step": 900
},
{
"epoch": 36.01,
"learning_rate": 0.0001388888888888889,
"loss": 3.9843,
"step": 950
},
{
"epoch": 38.01,
"learning_rate": 0.00011111111111111109,
"loss": 3.9288,
"step": 1000
},
{
"epoch": 38.01,
"eval_loss": 5.421392917633057,
"eval_runtime": 5.2963,
"eval_samples_per_second": 16.238,
"eval_steps_per_second": 1.133,
"step": 1000
},
{
"epoch": 38.01,
"eval_/scratch/ykyao/projects/multilingual-LM/training/multilingual_dataset.py_loss": 5.421392917633057,
"eval_/scratch/ykyao/projects/multilingual-LM/training/multilingual_dataset.py_ppl": 226.1939727440261,
"eval_/scratch/ykyao/projects/multilingual-LM/training/multilingual_dataset.py_runtime": 5.2963,
"eval_/scratch/ykyao/projects/multilingual-LM/training/multilingual_dataset.py_samples_per_second": 16.238,
"step": 1000
},
{
"epoch": 40.01,
"learning_rate": 8.333333333333333e-05,
"loss": 3.8811,
"step": 1050
},
{
"epoch": 42.01,
"learning_rate": 5.5555555555555545e-05,
"loss": 3.8399,
"step": 1100
}
],
"max_steps": 1200,
"num_train_epochs": 9223372036854775807,
"total_flos": 3.01211183480832e+17,
"trial_name": null,
"trial_params": null
}