{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 44.01,
"global_step": 1200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0,
"learning_rate": 4.9999999999999996e-06,
"loss": 11.0188,
"step": 1
},
{
"epoch": 1.02,
"learning_rate": 0.00025,
"loss": 8.1559,
"step": 50
},
{
"epoch": 3.02,
"learning_rate": 0.0005,
"loss": 6.8029,
"step": 100
},
{
"epoch": 5.01,
"learning_rate": 0.0005833333333333333,
"loss": 6.7121,
"step": 150
},
{
"epoch": 7.01,
"learning_rate": 0.0005555555555555556,
"loss": 6.6454,
"step": 200
},
{
"epoch": 9.01,
"learning_rate": 0.0005277777777777777,
"loss": 6.5652,
"step": 250
},
{
"epoch": 11.0,
"learning_rate": 0.0005,
"loss": 6.3824,
"step": 300
},
{
"epoch": 12.02,
"learning_rate": 0.00047222222222222224,
"loss": 6.0627,
"step": 350
},
{
"epoch": 14.02,
"learning_rate": 0.00044444444444444436,
"loss": 5.9225,
"step": 400
},
{
"epoch": 16.02,
"learning_rate": 0.00041666666666666664,
"loss": 5.7523,
"step": 450
},
{
"epoch": 18.01,
"learning_rate": 0.00038888888888888887,
"loss": 5.6018,
"step": 500
},
{
"epoch": 20.01,
"learning_rate": 0.0003611111111111111,
"loss": 5.4548,
"step": 550
},
{
"epoch": 22.0,
"learning_rate": 0.0003333333333333333,
"loss": 5.3217,
"step": 600
},
{
"epoch": 24.0,
"learning_rate": 0.00030555555555555555,
"loss": 5.2073,
"step": 650
},
{
"epoch": 25.02,
"learning_rate": 0.0002777777777777778,
"loss": 5.0416,
"step": 700
},
{
"epoch": 27.02,
"learning_rate": 0.00025,
"loss": 5.0168,
"step": 750
},
{
"epoch": 29.01,
"learning_rate": 0.00022222222222222218,
"loss": 4.9351,
"step": 800
},
{
"epoch": 31.01,
"learning_rate": 0.00019444444444444443,
"loss": 4.8656,
"step": 850
},
{
"epoch": 33.01,
"learning_rate": 0.00016666666666666666,
"loss": 4.8022,
"step": 900
},
{
"epoch": 35.0,
"learning_rate": 0.0001388888888888889,
"loss": 4.7449,
"step": 950
},
{
"epoch": 37.0,
"learning_rate": 0.00011111111111111109,
"loss": 4.693,
"step": 1000
},
{
"epoch": 37.0,
"eval_loss": 5.544437885284424,
"eval_runtime": 2.7184,
"eval_samples_per_second": 36.786,
"eval_steps_per_second": 2.575,
"step": 1000
},
{
"epoch": 37.0,
"eval_/local/xiulyang/mission-impossible-language-models/training/babylm_dataset.py_loss": 5.544437885284424,
"eval_/local/xiulyang/mission-impossible-language-models/training/babylm_dataset.py_ppl": 255.81074283810784,
"eval_/local/xiulyang/mission-impossible-language-models/training/babylm_dataset.py_runtime": 2.7184,
"eval_/local/xiulyang/mission-impossible-language-models/training/babylm_dataset.py_samples_per_second": 36.786,
"step": 1000
},
{
"epoch": 38.02,
"learning_rate": 8.333333333333333e-05,
"loss": 4.5903,
"step": 1050
},
{
"epoch": 40.02,
"learning_rate": 5.5555555555555545e-05,
"loss": 4.6115,
"step": 1100
},
{
"epoch": 42.01,
"learning_rate": 2.7777777777777772e-05,
"loss": 4.5829,
"step": 1150
},
{
"epoch": 44.01,
"learning_rate": 0.0,
"loss": 4.5627,
"step": 1200
}
],
"max_steps": 1200,
"num_train_epochs": 9223372036854775807,
"total_flos": 3.2843363254272e+17,
"trial_name": null,
"trial_params": null
}