{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 4.763267740011926,
"eval_steps": 500,
"global_step": 2000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.11926058437686345,
"grad_norm": 8.05271053314209,
"learning_rate": 7.35e-06,
"loss": 5.4953,
"step": 50
},
{
"epoch": 0.2385211687537269,
"grad_norm": 2.7420473098754883,
"learning_rate": 1.485e-05,
"loss": 1.2318,
"step": 100
},
{
"epoch": 0.35778175313059035,
"grad_norm": 1.7430616617202759,
"learning_rate": 2.235e-05,
"loss": 0.7734,
"step": 150
},
{
"epoch": 0.4770423375074538,
"grad_norm": 1.5376101732254028,
"learning_rate": 2.985e-05,
"loss": 0.6646,
"step": 200
},
{
"epoch": 0.5963029218843172,
"grad_norm": 1.6270289421081543,
"learning_rate": 2.9226315789473687e-05,
"loss": 0.6334,
"step": 250
},
{
"epoch": 0.7155635062611807,
"grad_norm": 1.8452305793762207,
"learning_rate": 2.8436842105263156e-05,
"loss": 0.6103,
"step": 300
},
{
"epoch": 0.8348240906380441,
"grad_norm": 1.8059115409851074,
"learning_rate": 2.7647368421052632e-05,
"loss": 0.6186,
"step": 350
},
{
"epoch": 0.9540846750149076,
"grad_norm": 1.6886439323425293,
"learning_rate": 2.6857894736842105e-05,
"loss": 0.6045,
"step": 400
},
{
"epoch": 1.071556350626118,
"grad_norm": 1.4987449645996094,
"learning_rate": 2.6068421052631578e-05,
"loss": 0.5941,
"step": 450
},
{
"epoch": 1.1908169350029816,
"grad_norm": 1.5386848449707031,
"learning_rate": 2.527894736842105e-05,
"loss": 0.5678,
"step": 500
},
{
"epoch": 1.310077519379845,
"grad_norm": 1.5747556686401367,
"learning_rate": 2.4489473684210527e-05,
"loss": 0.5671,
"step": 550
},
{
"epoch": 1.4293381037567083,
"grad_norm": 1.5854023694992065,
"learning_rate": 2.37e-05,
"loss": 0.5376,
"step": 600
},
{
"epoch": 1.5485986881335718,
"grad_norm": 1.4520505666732788,
"learning_rate": 2.2910526315789473e-05,
"loss": 0.5632,
"step": 650
},
{
"epoch": 1.6678592725104353,
"grad_norm": 1.4415792226791382,
"learning_rate": 2.2121052631578946e-05,
"loss": 0.5629,
"step": 700
},
{
"epoch": 1.7871198568872988,
"grad_norm": 1.4224036931991577,
"learning_rate": 2.1331578947368422e-05,
"loss": 0.5647,
"step": 750
},
{
"epoch": 1.906380441264162,
"grad_norm": 1.4895819425582886,
"learning_rate": 2.0542105263157895e-05,
"loss": 0.54,
"step": 800
},
{
"epoch": 2.0238521168753727,
"grad_norm": 1.5508358478546143,
"learning_rate": 1.9752631578947368e-05,
"loss": 0.5695,
"step": 850
},
{
"epoch": 2.143112701252236,
"grad_norm": 1.5374252796173096,
"learning_rate": 1.896315789473684e-05,
"loss": 0.5228,
"step": 900
},
{
"epoch": 2.2623732856290997,
"grad_norm": 1.639708161354065,
"learning_rate": 1.8173684210526317e-05,
"loss": 0.5512,
"step": 950
},
{
"epoch": 2.381633870005963,
"grad_norm": 1.6390520334243774,
"learning_rate": 1.738421052631579e-05,
"loss": 0.5252,
"step": 1000
},
{
"epoch": 2.500894454382826,
"grad_norm": 1.4625619649887085,
"learning_rate": 1.6594736842105263e-05,
"loss": 0.5339,
"step": 1050
},
{
"epoch": 2.62015503875969,
"grad_norm": 1.3197258710861206,
"learning_rate": 1.5805263157894735e-05,
"loss": 0.5479,
"step": 1100
},
{
"epoch": 2.739415623136553,
"grad_norm": 1.4443845748901367,
"learning_rate": 1.5015789473684212e-05,
"loss": 0.5134,
"step": 1150
},
{
"epoch": 2.8586762075134167,
"grad_norm": 1.7365626096725464,
"learning_rate": 1.4226315789473685e-05,
"loss": 0.5253,
"step": 1200
},
{
"epoch": 2.97793679189028,
"grad_norm": 1.7469673156738281,
"learning_rate": 1.343684210526316e-05,
"loss": 0.5452,
"step": 1250
},
{
"epoch": 3.095408467501491,
"grad_norm": 1.7984752655029297,
"learning_rate": 1.2647368421052632e-05,
"loss": 0.5178,
"step": 1300
},
{
"epoch": 3.2146690518783543,
"grad_norm": 1.5190192461013794,
"learning_rate": 1.1857894736842105e-05,
"loss": 0.5261,
"step": 1350
},
{
"epoch": 3.3339296362552178,
"grad_norm": 1.5508211851119995,
"learning_rate": 1.106842105263158e-05,
"loss": 0.5435,
"step": 1400
},
{
"epoch": 3.4531902206320813,
"grad_norm": 1.8733484745025635,
"learning_rate": 1.0278947368421052e-05,
"loss": 0.5001,
"step": 1450
},
{
"epoch": 3.5724508050089447,
"grad_norm": 1.6355196237564087,
"learning_rate": 9.489473684210527e-06,
"loss": 0.5005,
"step": 1500
},
{
"epoch": 3.691711389385808,
"grad_norm": 1.238028645515442,
"learning_rate": 8.7e-06,
"loss": 0.5164,
"step": 1550
},
{
"epoch": 3.8109719737626713,
"grad_norm": 1.8155537843704224,
"learning_rate": 7.910526315789474e-06,
"loss": 0.5049,
"step": 1600
},
{
"epoch": 3.9302325581395348,
"grad_norm": 1.6747583150863647,
"learning_rate": 7.121052631578948e-06,
"loss": 0.5099,
"step": 1650
},
{
"epoch": 4.047704233750745,
"grad_norm": 1.4803907871246338,
"learning_rate": 6.331578947368422e-06,
"loss": 0.5148,
"step": 1700
},
{
"epoch": 4.166964818127608,
"grad_norm": 1.571410059928894,
"learning_rate": 5.542105263157895e-06,
"loss": 0.5128,
"step": 1750
},
{
"epoch": 4.286225402504472,
"grad_norm": 1.606655478477478,
"learning_rate": 4.752631578947368e-06,
"loss": 0.5116,
"step": 1800
},
{
"epoch": 4.405485986881335,
"grad_norm": 1.6239967346191406,
"learning_rate": 3.963157894736842e-06,
"loss": 0.5068,
"step": 1850
},
{
"epoch": 4.524746571258199,
"grad_norm": 1.3790518045425415,
"learning_rate": 3.173684210526316e-06,
"loss": 0.4962,
"step": 1900
},
{
"epoch": 4.644007155635062,
"grad_norm": 1.2910724878311157,
"learning_rate": 2.38421052631579e-06,
"loss": 0.5172,
"step": 1950
},
{
"epoch": 4.763267740011926,
"grad_norm": 2.057995557785034,
"learning_rate": 1.5947368421052633e-06,
"loss": 0.5007,
"step": 2000
}
],
"logging_steps": 50,
"max_steps": 2100,
"num_input_tokens_seen": 0,
"num_train_epochs": 5,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 1043619221864448.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}