{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9996631862579993,
"eval_steps": 500,
"global_step": 371,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.002694509936005389,
"grad_norm": 12.76484489440918,
"learning_rate": 5.263157894736842e-06,
"loss": 3.5442,
"step": 1
},
{
"epoch": 0.02694509936005389,
"grad_norm": 1.225385308265686,
"learning_rate": 5.2631578947368424e-05,
"loss": 3.4136,
"step": 10
},
{
"epoch": 0.05389019872010778,
"grad_norm": 0.22818362712860107,
"learning_rate": 0.00010526315789473685,
"loss": 1.5345,
"step": 20
},
{
"epoch": 0.08083529808016167,
"grad_norm": 0.11801251769065857,
"learning_rate": 0.00015789473684210527,
"loss": 1.4693,
"step": 30
},
{
"epoch": 0.10778039744021556,
"grad_norm": 0.12770982086658478,
"learning_rate": 0.0001987987987987988,
"loss": 1.3288,
"step": 40
},
{
"epoch": 0.13472549680026946,
"grad_norm": 0.1819867640733719,
"learning_rate": 0.00019279279279279282,
"loss": 1.2308,
"step": 50
},
{
"epoch": 0.16167059616032334,
"grad_norm": 0.09357380867004395,
"learning_rate": 0.0001867867867867868,
"loss": 1.0072,
"step": 60
},
{
"epoch": 0.18861569552037724,
"grad_norm": 0.07331203669309616,
"learning_rate": 0.00018078078078078078,
"loss": 0.9502,
"step": 70
},
{
"epoch": 0.21556079488043112,
"grad_norm": 0.045368436723947525,
"learning_rate": 0.00017477477477477476,
"loss": 0.9393,
"step": 80
},
{
"epoch": 0.24250589424048502,
"grad_norm": 0.04616931825876236,
"learning_rate": 0.00016876876876876877,
"loss": 0.9084,
"step": 90
},
{
"epoch": 0.2694509936005389,
"grad_norm": 0.04786905273795128,
"learning_rate": 0.00016276276276276275,
"loss": 0.9046,
"step": 100
},
{
"epoch": 0.29639609296059277,
"grad_norm": 0.03197158873081207,
"learning_rate": 0.00015675675675675676,
"loss": 0.8554,
"step": 110
},
{
"epoch": 0.3233411923206467,
"grad_norm": 0.02612409181892872,
"learning_rate": 0.00015075075075075077,
"loss": 0.833,
"step": 120
},
{
"epoch": 0.3502862916807006,
"grad_norm": 0.028117205947637558,
"learning_rate": 0.00014474474474474475,
"loss": 0.8364,
"step": 130
},
{
"epoch": 0.3772313910407545,
"grad_norm": 0.018981853500008583,
"learning_rate": 0.00013873873873873876,
"loss": 0.8246,
"step": 140
},
{
"epoch": 0.40417649040080833,
"grad_norm": 0.04023918882012367,
"learning_rate": 0.00013273273273273274,
"loss": 0.8511,
"step": 150
},
{
"epoch": 0.43112158976086223,
"grad_norm": 0.03793207183480263,
"learning_rate": 0.00012672672672672675,
"loss": 0.8217,
"step": 160
},
{
"epoch": 0.45806668912091614,
"grad_norm": 0.02019081450998783,
"learning_rate": 0.00012072072072072073,
"loss": 0.8001,
"step": 170
},
{
"epoch": 0.48501178848097004,
"grad_norm": 0.031807586550712585,
"learning_rate": 0.00011471471471471471,
"loss": 0.8019,
"step": 180
},
{
"epoch": 0.5119568878410239,
"grad_norm": 0.02062528021633625,
"learning_rate": 0.0001087087087087087,
"loss": 0.8047,
"step": 190
},
{
"epoch": 0.5389019872010778,
"grad_norm": 0.03914797678589821,
"learning_rate": 0.0001027027027027027,
"loss": 0.8178,
"step": 200
},
{
"epoch": 0.5658470865611317,
"grad_norm": 0.04694194346666336,
"learning_rate": 9.66966966966967e-05,
"loss": 0.7934,
"step": 210
},
{
"epoch": 0.5927921859211855,
"grad_norm": 0.023888826370239258,
"learning_rate": 9.069069069069069e-05,
"loss": 0.7794,
"step": 220
},
{
"epoch": 0.6197372852812395,
"grad_norm": 0.034731291234493256,
"learning_rate": 8.468468468468469e-05,
"loss": 0.7799,
"step": 230
},
{
"epoch": 0.6466823846412934,
"grad_norm": 0.022078100591897964,
"learning_rate": 7.867867867867868e-05,
"loss": 0.7779,
"step": 240
},
{
"epoch": 0.6736274840013473,
"grad_norm": 0.04586014896631241,
"learning_rate": 7.267267267267268e-05,
"loss": 0.7906,
"step": 250
},
{
"epoch": 0.7005725833614012,
"grad_norm": 0.04206414520740509,
"learning_rate": 6.666666666666667e-05,
"loss": 0.7768,
"step": 260
},
{
"epoch": 0.727517682721455,
"grad_norm": 0.024432417005300522,
"learning_rate": 6.0660660660660665e-05,
"loss": 0.7691,
"step": 270
},
{
"epoch": 0.754462782081509,
"grad_norm": 0.03391208499670029,
"learning_rate": 5.465465465465466e-05,
"loss": 0.7629,
"step": 280
},
{
"epoch": 0.7814078814415628,
"grad_norm": 0.018352922052145004,
"learning_rate": 4.8648648648648654e-05,
"loss": 0.7625,
"step": 290
},
{
"epoch": 0.8083529808016167,
"grad_norm": 0.04901178553700447,
"learning_rate": 4.264264264264264e-05,
"loss": 0.7778,
"step": 300
},
{
"epoch": 0.8352980801616706,
"grad_norm": 0.0407499223947525,
"learning_rate": 3.663663663663664e-05,
"loss": 0.7657,
"step": 310
},
{
"epoch": 0.8622431795217245,
"grad_norm": 0.031635917723178864,
"learning_rate": 3.063063063063063e-05,
"loss": 0.7621,
"step": 320
},
{
"epoch": 0.8891882788817784,
"grad_norm": 0.020033176988363266,
"learning_rate": 2.4624624624624627e-05,
"loss": 0.7571,
"step": 330
},
{
"epoch": 0.9161333782418323,
"grad_norm": 0.045810870826244354,
"learning_rate": 1.8618618618618618e-05,
"loss": 0.7539,
"step": 340
},
{
"epoch": 0.9430784776018861,
"grad_norm": 0.07599364966154099,
"learning_rate": 1.2612612612612611e-05,
"loss": 0.7684,
"step": 350
},
{
"epoch": 0.9700235769619401,
"grad_norm": 0.01792909763753414,
"learning_rate": 6.606606606606607e-06,
"loss": 0.7643,
"step": 360
},
{
"epoch": 0.9969686763219939,
"grad_norm": 0.02676345966756344,
"learning_rate": 6.006006006006006e-07,
"loss": 0.7515,
"step": 370
}
],
"logging_steps": 10,
"max_steps": 371,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 2.331323573248983e+18,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}