{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.8741258741258742,
"eval_steps": 500,
"global_step": 5500,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.01589319771137953,
"grad_norm": 0.9081700444221497,
"learning_rate": 0.0002,
"loss": 2.3142,
"step": 100
},
{
"epoch": 0.03178639542275906,
"grad_norm": 0.8463758826255798,
"learning_rate": 0.0002,
"loss": 2.0015,
"step": 200
},
{
"epoch": 0.04767959313413859,
"grad_norm": 1.0305041074752808,
"learning_rate": 0.0002,
"loss": 1.9429,
"step": 300
},
{
"epoch": 0.06357279084551812,
"grad_norm": 0.48407527804374695,
"learning_rate": 0.0002,
"loss": 1.9406,
"step": 400
},
{
"epoch": 0.07946598855689765,
"grad_norm": 0.9855284690856934,
"learning_rate": 0.0002,
"loss": 1.79,
"step": 500
},
{
"epoch": 0.09535918626827718,
"grad_norm": 0.8263546228408813,
"learning_rate": 0.0002,
"loss": 1.7586,
"step": 600
},
{
"epoch": 0.11125238397965671,
"grad_norm": 1.258915901184082,
"learning_rate": 0.0002,
"loss": 1.628,
"step": 700
},
{
"epoch": 0.12714558169103624,
"grad_norm": 0.7160557508468628,
"learning_rate": 0.0002,
"loss": 1.4786,
"step": 800
},
{
"epoch": 0.14303877940241577,
"grad_norm": 2.1789512634277344,
"learning_rate": 0.0002,
"loss": 1.4301,
"step": 900
},
{
"epoch": 0.1589319771137953,
"grad_norm": 0.34466424584388733,
"learning_rate": 0.0002,
"loss": 1.5137,
"step": 1000
},
{
"epoch": 0.17482517482517482,
"grad_norm": 1.9239227771759033,
"learning_rate": 0.0002,
"loss": 1.4945,
"step": 1100
},
{
"epoch": 0.19071837253655435,
"grad_norm": 1.7776683568954468,
"learning_rate": 0.0002,
"loss": 1.3702,
"step": 1200
},
{
"epoch": 0.2066115702479339,
"grad_norm": 1.5118937492370605,
"learning_rate": 0.0002,
"loss": 1.4384,
"step": 1300
},
{
"epoch": 0.22250476795931343,
"grad_norm": 1.504011869430542,
"learning_rate": 0.0002,
"loss": 1.3728,
"step": 1400
},
{
"epoch": 0.23839796567069294,
"grad_norm": 0.8850716352462769,
"learning_rate": 0.0002,
"loss": 1.4082,
"step": 1500
},
{
"epoch": 0.25429116338207247,
"grad_norm": 0.7327104210853577,
"learning_rate": 0.0002,
"loss": 1.3342,
"step": 1600
},
{
"epoch": 0.270184361093452,
"grad_norm": 0.9595212936401367,
"learning_rate": 0.0002,
"loss": 1.3882,
"step": 1700
},
{
"epoch": 0.28607755880483154,
"grad_norm": 2.5830607414245605,
"learning_rate": 0.0002,
"loss": 1.2326,
"step": 1800
},
{
"epoch": 0.3019707565162111,
"grad_norm": 1.852844476699829,
"learning_rate": 0.0002,
"loss": 1.3595,
"step": 1900
},
{
"epoch": 0.3178639542275906,
"grad_norm": 1.6929068565368652,
"learning_rate": 0.0002,
"loss": 1.0967,
"step": 2000
},
{
"epoch": 0.3337571519389701,
"grad_norm": 1.2370538711547852,
"learning_rate": 0.0002,
"loss": 1.3132,
"step": 2100
},
{
"epoch": 0.34965034965034963,
"grad_norm": 1.0512717962265015,
"learning_rate": 0.0002,
"loss": 1.2205,
"step": 2200
},
{
"epoch": 0.36554354736172917,
"grad_norm": 2.0815775394439697,
"learning_rate": 0.0002,
"loss": 1.2684,
"step": 2300
},
{
"epoch": 0.3814367450731087,
"grad_norm": 1.7328189611434937,
"learning_rate": 0.0002,
"loss": 1.0058,
"step": 2400
},
{
"epoch": 0.39732994278448824,
"grad_norm": 1.8121742010116577,
"learning_rate": 0.0002,
"loss": 1.1701,
"step": 2500
},
{
"epoch": 0.4132231404958678,
"grad_norm": 1.3292349576950073,
"learning_rate": 0.0002,
"loss": 1.1467,
"step": 2600
},
{
"epoch": 0.4291163382072473,
"grad_norm": 1.1031920909881592,
"learning_rate": 0.0002,
"loss": 1.2143,
"step": 2700
},
{
"epoch": 0.44500953591862685,
"grad_norm": 1.3371531963348389,
"learning_rate": 0.0002,
"loss": 0.996,
"step": 2800
},
{
"epoch": 0.46090273363000633,
"grad_norm": 0.6710326671600342,
"learning_rate": 0.0002,
"loss": 0.9845,
"step": 2900
},
{
"epoch": 0.47679593134138587,
"grad_norm": 0.930266797542572,
"learning_rate": 0.0002,
"loss": 1.0894,
"step": 3000
},
{
"epoch": 0.4926891290527654,
"grad_norm": 1.5724022388458252,
"learning_rate": 0.0002,
"loss": 1.0488,
"step": 3100
},
{
"epoch": 0.5085823267641449,
"grad_norm": 1.4082521200180054,
"learning_rate": 0.0002,
"loss": 1.1167,
"step": 3200
},
{
"epoch": 0.5244755244755245,
"grad_norm": 1.9330273866653442,
"learning_rate": 0.0002,
"loss": 0.9845,
"step": 3300
},
{
"epoch": 0.540368722186904,
"grad_norm": 2.368753433227539,
"learning_rate": 0.0002,
"loss": 1.0145,
"step": 3400
},
{
"epoch": 0.5562619198982836,
"grad_norm": 0.16658253967761993,
"learning_rate": 0.0002,
"loss": 0.9638,
"step": 3500
},
{
"epoch": 0.5721551176096631,
"grad_norm": 1.6113061904907227,
"learning_rate": 0.0002,
"loss": 1.0362,
"step": 3600
},
{
"epoch": 0.5880483153210426,
"grad_norm": 1.9637274742126465,
"learning_rate": 0.0002,
"loss": 0.9143,
"step": 3700
},
{
"epoch": 0.6039415130324222,
"grad_norm": 1.396763801574707,
"learning_rate": 0.0002,
"loss": 0.7685,
"step": 3800
},
{
"epoch": 0.6198347107438017,
"grad_norm": 2.6856513023376465,
"learning_rate": 0.0002,
"loss": 0.9068,
"step": 3900
},
{
"epoch": 0.6357279084551812,
"grad_norm": 0.7980265617370605,
"learning_rate": 0.0002,
"loss": 0.9764,
"step": 4000
},
{
"epoch": 0.6516211061665607,
"grad_norm": 3.0948915481567383,
"learning_rate": 0.0002,
"loss": 1.0757,
"step": 4100
},
{
"epoch": 0.6675143038779402,
"grad_norm": 0.8271145820617676,
"learning_rate": 0.0002,
"loss": 0.8772,
"step": 4200
},
{
"epoch": 0.6834075015893197,
"grad_norm": 1.1177769899368286,
"learning_rate": 0.0002,
"loss": 0.8533,
"step": 4300
},
{
"epoch": 0.6993006993006993,
"grad_norm": 1.5585882663726807,
"learning_rate": 0.0002,
"loss": 0.7309,
"step": 4400
},
{
"epoch": 0.7151938970120788,
"grad_norm": 0.3275369107723236,
"learning_rate": 0.0002,
"loss": 0.8906,
"step": 4500
},
{
"epoch": 0.7310870947234583,
"grad_norm": 0.05096842721104622,
"learning_rate": 0.0002,
"loss": 0.9252,
"step": 4600
},
{
"epoch": 0.7469802924348379,
"grad_norm": 1.699520230293274,
"learning_rate": 0.0002,
"loss": 0.816,
"step": 4700
},
{
"epoch": 0.7628734901462174,
"grad_norm": 1.1933566331863403,
"learning_rate": 0.0002,
"loss": 0.9015,
"step": 4800
},
{
"epoch": 0.778766687857597,
"grad_norm": 2.1289737224578857,
"learning_rate": 0.0002,
"loss": 0.659,
"step": 4900
},
{
"epoch": 0.7946598855689765,
"grad_norm": 0.6902903318405151,
"learning_rate": 0.0002,
"loss": 0.6742,
"step": 5000
},
{
"epoch": 0.810553083280356,
"grad_norm": 2.0536046028137207,
"learning_rate": 0.0002,
"loss": 0.7554,
"step": 5100
},
{
"epoch": 0.8264462809917356,
"grad_norm": 0.2782207429409027,
"learning_rate": 0.0002,
"loss": 0.7318,
"step": 5200
},
{
"epoch": 0.8423394787031151,
"grad_norm": 0.13541792333126068,
"learning_rate": 0.0002,
"loss": 0.7749,
"step": 5300
},
{
"epoch": 0.8582326764144946,
"grad_norm": 2.2065443992614746,
"learning_rate": 0.0002,
"loss": 0.7728,
"step": 5400
},
{
"epoch": 0.8741258741258742,
"grad_norm": 1.3952250480651855,
"learning_rate": 0.0002,
"loss": 0.8359,
"step": 5500
}
],
"logging_steps": 100,
"max_steps": 6292,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.8051153354752e+16,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}