{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9989594172736732,
"eval_steps": 500,
"global_step": 240,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.04162330905306972,
"grad_norm": 0.7745468616485596,
"learning_rate": 2e-05,
"loss": 1.7706,
"step": 10
},
{
"epoch": 0.08324661810613944,
"grad_norm": 0.5561801791191101,
"learning_rate": 4e-05,
"loss": 1.6448,
"step": 20
},
{
"epoch": 0.12486992715920915,
"grad_norm": 0.23404787480831146,
"learning_rate": 6e-05,
"loss": 1.4716,
"step": 30
},
{
"epoch": 0.16649323621227888,
"grad_norm": 0.18818368017673492,
"learning_rate": 8e-05,
"loss": 1.3672,
"step": 40
},
{
"epoch": 0.2081165452653486,
"grad_norm": 0.18688304722309113,
"learning_rate": 0.0001,
"loss": 1.2657,
"step": 50
},
{
"epoch": 0.2497398543184183,
"grad_norm": 0.1951465606689453,
"learning_rate": 0.00012,
"loss": 1.125,
"step": 60
},
{
"epoch": 0.29136316337148804,
"grad_norm": 0.1746235191822052,
"learning_rate": 0.00014,
"loss": 1.1003,
"step": 70
},
{
"epoch": 0.33298647242455776,
"grad_norm": 0.20920881628990173,
"learning_rate": 0.00016,
"loss": 1.0207,
"step": 80
},
{
"epoch": 0.37460978147762747,
"grad_norm": 0.22199565172195435,
"learning_rate": 0.00018,
"loss": 0.9757,
"step": 90
},
{
"epoch": 0.4162330905306972,
"grad_norm": 0.18666373193264008,
"learning_rate": 0.0002,
"loss": 0.9414,
"step": 100
},
{
"epoch": 0.4578563995837669,
"grad_norm": 0.2139161080121994,
"learning_rate": 0.000199658449300667,
"loss": 0.9025,
"step": 110
},
{
"epoch": 0.4994797086368366,
"grad_norm": 0.19018647074699402,
"learning_rate": 0.00019863613034027224,
"loss": 0.904,
"step": 120
},
{
"epoch": 0.5411030176899063,
"grad_norm": 0.28649064898490906,
"learning_rate": 0.00019694002659393305,
"loss": 0.9177,
"step": 130
},
{
"epoch": 0.5827263267429761,
"grad_norm": 0.21059155464172363,
"learning_rate": 0.00019458172417006347,
"loss": 0.8929,
"step": 140
},
{
"epoch": 0.6243496357960457,
"grad_norm": 0.21605370938777924,
"learning_rate": 0.00019157733266550575,
"loss": 0.8692,
"step": 150
},
{
"epoch": 0.6659729448491155,
"grad_norm": 0.204218327999115,
"learning_rate": 0.0001879473751206489,
"loss": 0.8661,
"step": 160
},
{
"epoch": 0.7075962539021852,
"grad_norm": 0.20825326442718506,
"learning_rate": 0.00018371664782625287,
"loss": 0.8508,
"step": 170
},
{
"epoch": 0.7492195629552549,
"grad_norm": 0.21748915314674377,
"learning_rate": 0.00017891405093963938,
"loss": 0.8912,
"step": 180
},
{
"epoch": 0.7908428720083247,
"grad_norm": 0.21721281111240387,
"learning_rate": 0.00017357239106731317,
"loss": 0.8368,
"step": 190
},
{
"epoch": 0.8324661810613944,
"grad_norm": 0.2268160879611969,
"learning_rate": 0.00016772815716257412,
"loss": 0.8407,
"step": 200
},
{
"epoch": 0.8740894901144641,
"grad_norm": 0.25088444352149963,
"learning_rate": 0.0001614212712689668,
"loss": 0.8206,
"step": 210
},
{
"epoch": 0.9157127991675338,
"grad_norm": 0.2083650529384613,
"learning_rate": 0.00015469481581224272,
"loss": 0.8191,
"step": 220
},
{
"epoch": 0.9573361082206036,
"grad_norm": 0.20936889946460724,
"learning_rate": 0.00014759473930370736,
"loss": 0.8103,
"step": 230
},
{
"epoch": 0.9989594172736732,
"grad_norm": 0.2282739132642746,
"learning_rate": 0.00014016954246529696,
"loss": 0.7974,
"step": 240
}
],
"logging_steps": 10,
"max_steps": 480,
"num_input_tokens_seen": 0,
"num_train_epochs": 2,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3.3445792601604096e+17,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}