{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.45,
"eval_steps": 100,
"global_step": 900,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0125,
"grad_norm": 0.14092598855495453,
"learning_rate": 0.0004,
"loss": 1.1529,
"step": 25
},
{
"epoch": 0.025,
"grad_norm": 0.14281609654426575,
"learning_rate": 0.0004998852503731983,
"loss": 1.0472,
"step": 50
},
{
"epoch": 0.0375,
"grad_norm": 0.24040694534778595,
"learning_rate": 0.0004993848168027977,
"loss": 0.8532,
"step": 75
},
{
"epoch": 0.05,
"grad_norm": 0.14735238254070282,
"learning_rate": 0.0004984880506341147,
"loss": 0.9761,
"step": 100
},
{
"epoch": 0.05,
"eval_loss": 0.9448406100273132,
"eval_runtime": 322.186,
"eval_samples_per_second": 3.405,
"eval_steps_per_second": 0.056,
"step": 100
},
{
"epoch": 0.0625,
"grad_norm": 0.1359478235244751,
"learning_rate": 0.0004971963770447935,
"loss": 0.9568,
"step": 125
},
{
"epoch": 0.075,
"grad_norm": 0.17497147619724274,
"learning_rate": 0.0004955118488155782,
"loss": 0.7111,
"step": 150
},
{
"epoch": 0.0875,
"grad_norm": 0.14382271468639374,
"learning_rate": 0.0004934371430679492,
"loss": 0.9413,
"step": 175
},
{
"epoch": 0.1,
"grad_norm": 0.16079047322273254,
"learning_rate": 0.0004909755570095319,
"loss": 0.8981,
"step": 200
},
{
"epoch": 0.1,
"eval_loss": 0.9723050594329834,
"eval_runtime": 322.7576,
"eval_samples_per_second": 3.399,
"eval_steps_per_second": 0.056,
"step": 200
},
{
"epoch": 0.1125,
"grad_norm": 0.14729101955890656,
"learning_rate": 0.0004881310026940389,
"loss": 0.6378,
"step": 225
},
{
"epoch": 0.125,
"grad_norm": 0.15030288696289062,
"learning_rate": 0.0004849080008040734,
"loss": 0.9271,
"step": 250
},
{
"epoch": 0.1375,
"grad_norm": 0.1613348424434662,
"learning_rate": 0.00048131167346667446,
"loss": 0.8457,
"step": 275
},
{
"epoch": 0.15,
"grad_norm": 0.15532569587230682,
"learning_rate": 0.00047734773611302284,
"loss": 0.603,
"step": 300
},
{
"epoch": 0.15,
"eval_loss": 1.006589651107788,
"eval_runtime": 323.2105,
"eval_samples_per_second": 3.394,
"eval_steps_per_second": 0.056,
"step": 300
},
{
"epoch": 0.1625,
"grad_norm": 0.16015686094760895,
"learning_rate": 0.0004730224883952422,
"loss": 0.9036,
"step": 325
},
{
"epoch": 0.175,
"grad_norm": 0.15767253935337067,
"learning_rate": 0.0004683428041747334,
"loss": 0.8283,
"step": 350
},
{
"epoch": 0.1875,
"grad_norm": 0.17757417261600494,
"learning_rate": 0.0004633161205979517,
"loss": 0.5945,
"step": 375
},
{
"epoch": 0.2,
"grad_norm": 0.17248600721359253,
"learning_rate": 0.0004579504262769877,
"loss": 0.8655,
"step": 400
},
{
"epoch": 0.2,
"eval_loss": 1.0158599615097046,
"eval_runtime": 323.8034,
"eval_samples_per_second": 3.388,
"eval_steps_per_second": 0.056,
"step": 400
},
{
"epoch": 0.2125,
"grad_norm": 0.17826460301876068,
"learning_rate": 0.0004522542485937369,
"loss": 0.8079,
"step": 425
},
{
"epoch": 0.225,
"grad_norm": 0.19307631254196167,
"learning_rate": 0.00044623664014783386,
"loss": 0.5737,
"step": 450
},
{
"epoch": 0.2375,
"grad_norm": 0.1877959966659546,
"learning_rate": 0.00043990716436988924,
"loss": 0.8605,
"step": 475
},
{
"epoch": 0.25,
"grad_norm": 0.15268854796886444,
"learning_rate": 0.0004332758803228925,
"loss": 0.7674,
"step": 500
},
{
"epoch": 0.25,
"eval_loss": 1.0431231260299683,
"eval_runtime": 323.1376,
"eval_samples_per_second": 3.395,
"eval_steps_per_second": 0.056,
"step": 500
},
{
"epoch": 0.2625,
"grad_norm": 0.16773808002471924,
"learning_rate": 0.00042635332671593575,
"loss": 0.5884,
"step": 525
},
{
"epoch": 0.275,
"grad_norm": 0.15766142308712006,
"learning_rate": 0.00041915050515566445,
"loss": 0.8178,
"step": 550
},
{
"epoch": 0.2875,
"grad_norm": 0.16790153086185455,
"learning_rate": 0.00041167886266207167,
"loss": 0.7797,
"step": 575
},
{
"epoch": 0.3,
"grad_norm": 0.15149210393428802,
"learning_rate": 0.0004039502734764241,
"loss": 0.7334,
"step": 600
},
{
"epoch": 0.3,
"eval_loss": 1.0655592679977417,
"eval_runtime": 321.6139,
"eval_samples_per_second": 3.411,
"eval_steps_per_second": 0.056,
"step": 600
},
{
"epoch": 0.3125,
"grad_norm": 0.15414434671401978,
"learning_rate": 0.0003959770201902294,
"loss": 0.744,
"step": 625
},
{
"epoch": 0.325,
"grad_norm": 0.1529635190963745,
"learning_rate": 0.0003877717742252371,
"loss": 0.6345,
"step": 650
},
{
"epoch": 0.3375,
"grad_norm": 0.16185611486434937,
"learning_rate": 0.00037934757569549495,
"loss": 0.7354,
"step": 675
},
{
"epoch": 0.35,
"grad_norm": 0.16656433045864105,
"learning_rate": 0.00037071781268346345,
"loss": 0.7455,
"step": 700
},
{
"epoch": 0.35,
"eval_loss": 1.0835301876068115,
"eval_runtime": 322.4593,
"eval_samples_per_second": 3.402,
"eval_steps_per_second": 0.056,
"step": 700
},
{
"epoch": 0.3625,
"grad_norm": 0.15590643882751465,
"learning_rate": 0.00036189619996312495,
"loss": 0.5972,
"step": 725
},
{
"epoch": 0.375,
"grad_norm": 0.1837926208972931,
"learning_rate": 0.00035289675720390174,
"loss": 0.7592,
"step": 750
},
{
"epoch": 0.3875,
"grad_norm": 0.1620703488588333,
"learning_rate": 0.00034373378669002105,
"loss": 0.736,
"step": 775
},
{
"epoch": 0.4,
"grad_norm": 0.16613048315048218,
"learning_rate": 0.00033442185059073706,
"loss": 0.564,
"step": 800
},
{
"epoch": 0.4,
"eval_loss": 1.096523642539978,
"eval_runtime": 321.7553,
"eval_samples_per_second": 3.409,
"eval_steps_per_second": 0.056,
"step": 800
},
{
"epoch": 0.4125,
"grad_norm": 0.1587529182434082,
"learning_rate": 0.00032497574781753367,
"loss": 0.7598,
"step": 825
},
{
"epoch": 0.425,
"grad_norm": 0.16457463800907135,
"learning_rate": 0.000315410490505086,
"loss": 0.7292,
"step": 850
},
{
"epoch": 0.4375,
"grad_norm": 0.16429653763771057,
"learning_rate": 0.0003057412801533589,
"loss": 0.5329,
"step": 875
},
{
"epoch": 0.45,
"grad_norm": 0.16424661874771118,
"learning_rate": 0.0002959834834687587,
"loss": 0.7785,
"step": 900
},
{
"epoch": 0.45,
"eval_loss": 1.0958806276321411,
"eval_runtime": 322.3489,
"eval_samples_per_second": 3.403,
"eval_steps_per_second": 0.056,
"step": 900
}
],
"logging_steps": 25,
"max_steps": 2000,
"num_input_tokens_seen": 0,
"num_train_epochs": 9223372036854775807,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 9.07138389025751e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}