{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.9979188345473466,
"eval_steps": 500,
"global_step": 480,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04162330905306972,
"grad_norm": 0.7745468616485596,
"learning_rate": 2e-05,
"loss": 1.7706,
"step": 10
},
{
"epoch": 0.08324661810613944,
"grad_norm": 0.5561801791191101,
"learning_rate": 4e-05,
"loss": 1.6448,
"step": 20
},
{
"epoch": 0.12486992715920915,
"grad_norm": 0.23404787480831146,
"learning_rate": 6e-05,
"loss": 1.4716,
"step": 30
},
{
"epoch": 0.16649323621227888,
"grad_norm": 0.18818368017673492,
"learning_rate": 8e-05,
"loss": 1.3672,
"step": 40
},
{
"epoch": 0.2081165452653486,
"grad_norm": 0.18688304722309113,
"learning_rate": 0.0001,
"loss": 1.2657,
"step": 50
},
{
"epoch": 0.2497398543184183,
"grad_norm": 0.1951465606689453,
"learning_rate": 0.00012,
"loss": 1.125,
"step": 60
},
{
"epoch": 0.29136316337148804,
"grad_norm": 0.1746235191822052,
"learning_rate": 0.00014,
"loss": 1.1003,
"step": 70
},
{
"epoch": 0.33298647242455776,
"grad_norm": 0.20920881628990173,
"learning_rate": 0.00016,
"loss": 1.0207,
"step": 80
},
{
"epoch": 0.37460978147762747,
"grad_norm": 0.22199565172195435,
"learning_rate": 0.00018,
"loss": 0.9757,
"step": 90
},
{
"epoch": 0.4162330905306972,
"grad_norm": 0.18666373193264008,
"learning_rate": 0.0002,
"loss": 0.9414,
"step": 100
},
{
"epoch": 0.4578563995837669,
"grad_norm": 0.2139161080121994,
"learning_rate": 0.000199658449300667,
"loss": 0.9025,
"step": 110
},
{
"epoch": 0.4994797086368366,
"grad_norm": 0.19018647074699402,
"learning_rate": 0.00019863613034027224,
"loss": 0.904,
"step": 120
},
{
"epoch": 0.5411030176899063,
"grad_norm": 0.28649064898490906,
"learning_rate": 0.00019694002659393305,
"loss": 0.9177,
"step": 130
},
{
"epoch": 0.5827263267429761,
"grad_norm": 0.21059155464172363,
"learning_rate": 0.00019458172417006347,
"loss": 0.8929,
"step": 140
},
{
"epoch": 0.6243496357960457,
"grad_norm": 0.21605370938777924,
"learning_rate": 0.00019157733266550575,
"loss": 0.8692,
"step": 150
},
{
"epoch": 0.6659729448491155,
"grad_norm": 0.204218327999115,
"learning_rate": 0.0001879473751206489,
"loss": 0.8661,
"step": 160
},
{
"epoch": 0.7075962539021852,
"grad_norm": 0.20825326442718506,
"learning_rate": 0.00018371664782625287,
"loss": 0.8508,
"step": 170
},
{
"epoch": 0.7492195629552549,
"grad_norm": 0.21748915314674377,
"learning_rate": 0.00017891405093963938,
"loss": 0.8912,
"step": 180
},
{
"epoch": 0.7908428720083247,
"grad_norm": 0.21721281111240387,
"learning_rate": 0.00017357239106731317,
"loss": 0.8368,
"step": 190
},
{
"epoch": 0.8324661810613944,
"grad_norm": 0.2268160879611969,
"learning_rate": 0.00016772815716257412,
"loss": 0.8407,
"step": 200
},
{
"epoch": 0.8740894901144641,
"grad_norm": 0.25088444352149963,
"learning_rate": 0.0001614212712689668,
"loss": 0.8206,
"step": 210
},
{
"epoch": 0.9157127991675338,
"grad_norm": 0.2083650529384613,
"learning_rate": 0.00015469481581224272,
"loss": 0.8191,
"step": 220
},
{
"epoch": 0.9573361082206036,
"grad_norm": 0.20936889946460724,
"learning_rate": 0.00014759473930370736,
"loss": 0.8103,
"step": 230
},
{
"epoch": 0.9989594172736732,
"grad_norm": 0.2282739132642746,
"learning_rate": 0.00014016954246529696,
"loss": 0.7974,
"step": 240
},
{
"epoch": 1.0405827263267429,
"grad_norm": 0.22600172460079193,
"learning_rate": 0.00013246994692046836,
"loss": 0.847,
"step": 250
},
{
"epoch": 1.0822060353798126,
"grad_norm": 0.22329868376255035,
"learning_rate": 0.00012454854871407994,
"loss": 0.8268,
"step": 260
},
{
"epoch": 1.1238293444328824,
"grad_norm": 0.24019195139408112,
"learning_rate": 0.00011645945902807341,
"loss": 0.7869,
"step": 270
},
{
"epoch": 1.1654526534859522,
"grad_norm": 0.24571700394153595,
"learning_rate": 0.00010825793454723325,
"loss": 0.7997,
"step": 280
},
{
"epoch": 1.207075962539022,
"grad_norm": 0.24582228064537048,
"learning_rate": 0.0001,
"loss": 0.8127,
"step": 290
},
{
"epoch": 1.2486992715920915,
"grad_norm": 0.23523323237895966,
"learning_rate": 9.174206545276677e-05,
"loss": 0.7951,
"step": 300
},
{
"epoch": 1.2903225806451613,
"grad_norm": 0.2251071184873581,
"learning_rate": 8.35405409719266e-05,
"loss": 0.7968,
"step": 310
},
{
"epoch": 1.331945889698231,
"grad_norm": 0.27596697211265564,
"learning_rate": 7.54514512859201e-05,
"loss": 0.8039,
"step": 320
},
{
"epoch": 1.3735691987513008,
"grad_norm": 0.2557406723499298,
"learning_rate": 6.753005307953167e-05,
"loss": 0.8214,
"step": 330
},
{
"epoch": 1.4151925078043703,
"grad_norm": 0.24903537333011627,
"learning_rate": 5.983045753470308e-05,
"loss": 0.7744,
"step": 340
},
{
"epoch": 1.45681581685744,
"grad_norm": 0.24901647865772247,
"learning_rate": 5.240526069629265e-05,
"loss": 0.7886,
"step": 350
},
{
"epoch": 1.4984391259105099,
"grad_norm": 0.23960117995738983,
"learning_rate": 4.530518418775733e-05,
"loss": 0.7917,
"step": 360
},
{
"epoch": 1.5400624349635796,
"grad_norm": 0.2526036202907562,
"learning_rate": 3.857872873103322e-05,
"loss": 0.7811,
"step": 370
},
{
"epoch": 1.5816857440166494,
"grad_norm": 0.24781571328639984,
"learning_rate": 3.227184283742591e-05,
"loss": 0.769,
"step": 380
},
{
"epoch": 1.6233090530697192,
"grad_norm": 0.24783489108085632,
"learning_rate": 2.6427608932686843e-05,
"loss": 0.7746,
"step": 390
},
{
"epoch": 1.6649323621227887,
"grad_norm": 0.2527744472026825,
"learning_rate": 2.1085949060360654e-05,
"loss": 0.7518,
"step": 400
},
{
"epoch": 1.7065556711758585,
"grad_norm": 0.24453039467334747,
"learning_rate": 1.6283352173747145e-05,
"loss": 0.7763,
"step": 410
},
{
"epoch": 1.748178980228928,
"grad_norm": 0.2481396645307541,
"learning_rate": 1.2052624879351104e-05,
"loss": 0.7949,
"step": 420
},
{
"epoch": 1.7898022892819978,
"grad_norm": 0.2535879611968994,
"learning_rate": 8.422667334494249e-06,
"loss": 0.781,
"step": 430
},
{
"epoch": 1.8314255983350676,
"grad_norm": 0.24758380651474,
"learning_rate": 5.418275829936537e-06,
"loss": 0.7376,
"step": 440
},
{
"epoch": 1.8730489073881373,
"grad_norm": 0.25179538130760193,
"learning_rate": 3.059973406066963e-06,
"loss": 0.7679,
"step": 450
},
{
"epoch": 1.914672216441207,
"grad_norm": 0.2668040990829468,
"learning_rate": 1.3638696597277679e-06,
"loss": 0.7577,
"step": 460
},
{
"epoch": 1.9562955254942769,
"grad_norm": 0.2509586215019226,
"learning_rate": 3.415506993330153e-07,
"loss": 0.7456,
"step": 470
},
{
"epoch": 1.9979188345473466,
"grad_norm": 0.25124892592430115,
"learning_rate": 0.0,
"loss": 0.7852,
"step": 480
}
],
"logging_steps": 10,
"max_steps": 480,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 6.682197897510912e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}