{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.0,
"eval_steps": 500,
"global_step": 38197,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.013090033248684452,
"grad_norm": 0.4367656707763672,
"learning_rate": 1.991273311167544e-05,
"loss": 1.32,
"step": 500
},
{
"epoch": 0.026180066497368905,
"grad_norm": 0.39482298493385315,
"learning_rate": 1.9825466223350874e-05,
"loss": 0.0594,
"step": 1000
},
{
"epoch": 0.039270099746053354,
"grad_norm": 0.47824403643608093,
"learning_rate": 1.9738199335026312e-05,
"loss": 0.0497,
"step": 1500
},
{
"epoch": 0.05236013299473781,
"grad_norm": 0.3971627652645111,
"learning_rate": 1.965093244670175e-05,
"loss": 0.043,
"step": 2000
},
{
"epoch": 0.06545016624342226,
"grad_norm": 0.5000212788581848,
"learning_rate": 1.9563665558377188e-05,
"loss": 0.0374,
"step": 2500
},
{
"epoch": 0.07854019949210671,
"grad_norm": 0.9078471064567566,
"learning_rate": 1.9476398670052623e-05,
"loss": 0.0357,
"step": 3000
},
{
"epoch": 0.09163023274079116,
"grad_norm": 0.35205739736557007,
"learning_rate": 1.938913178172806e-05,
"loss": 0.0317,
"step": 3500
},
{
"epoch": 0.10472026598947562,
"grad_norm": 0.19642357528209686,
"learning_rate": 1.9301864893403495e-05,
"loss": 0.0304,
"step": 4000
},
{
"epoch": 0.11781029923816007,
"grad_norm": 0.5134682655334473,
"learning_rate": 1.9214598005078933e-05,
"loss": 0.0281,
"step": 4500
},
{
"epoch": 0.13090033248684452,
"grad_norm": 0.23212824761867523,
"learning_rate": 1.912733111675437e-05,
"loss": 0.0274,
"step": 5000
},
{
"epoch": 0.14399036573552898,
"grad_norm": 0.4254980981349945,
"learning_rate": 1.904006422842981e-05,
"loss": 0.0247,
"step": 5500
},
{
"epoch": 0.15708039898421342,
"grad_norm": 0.4024842083454132,
"learning_rate": 1.8952797340105244e-05,
"loss": 0.0235,
"step": 6000
},
{
"epoch": 0.17017043223289788,
"grad_norm": 0.40947026014328003,
"learning_rate": 1.8865530451780682e-05,
"loss": 0.0235,
"step": 6500
},
{
"epoch": 0.1832604654815823,
"grad_norm": 0.366158127784729,
"learning_rate": 1.877826356345612e-05,
"loss": 0.0225,
"step": 7000
},
{
"epoch": 0.19635049873026678,
"grad_norm": 0.4678824841976166,
"learning_rate": 1.8690996675131558e-05,
"loss": 0.0219,
"step": 7500
},
{
"epoch": 0.20944053197895124,
"grad_norm": 0.21004918217658997,
"learning_rate": 1.8603729786806996e-05,
"loss": 0.0198,
"step": 8000
},
{
"epoch": 0.22253056522763567,
"grad_norm": 0.2192375510931015,
"learning_rate": 1.851646289848243e-05,
"loss": 0.0201,
"step": 8500
},
{
"epoch": 0.23562059847632014,
"grad_norm": 0.15161575376987457,
"learning_rate": 1.842919601015787e-05,
"loss": 0.0192,
"step": 9000
},
{
"epoch": 0.24871063172500457,
"grad_norm": 0.3270639181137085,
"learning_rate": 1.8341929121833303e-05,
"loss": 0.0186,
"step": 9500
},
{
"epoch": 0.26180066497368903,
"grad_norm": 0.25447696447372437,
"learning_rate": 1.825466223350874e-05,
"loss": 0.0182,
"step": 10000
},
{
"epoch": 0.2748906982223735,
"grad_norm": 0.1797029674053192,
"learning_rate": 1.816739534518418e-05,
"loss": 0.0182,
"step": 10500
},
{
"epoch": 0.28798073147105796,
"grad_norm": 0.4422529637813568,
"learning_rate": 1.8080128456859617e-05,
"loss": 0.0175,
"step": 11000
},
{
"epoch": 0.30107076471974237,
"grad_norm": 0.19019261002540588,
"learning_rate": 1.7992861568535052e-05,
"loss": 0.0159,
"step": 11500
},
{
"epoch": 0.31416079796842683,
"grad_norm": 0.12262561917304993,
"learning_rate": 1.790559468021049e-05,
"loss": 0.0162,
"step": 12000
},
{
"epoch": 0.3272508312171113,
"grad_norm": 0.5514086484909058,
"learning_rate": 1.7818327791885924e-05,
"loss": 0.0159,
"step": 12500
},
{
"epoch": 0.34034086446579576,
"grad_norm": 0.3397659957408905,
"learning_rate": 1.7731060903561362e-05,
"loss": 0.0163,
"step": 13000
},
{
"epoch": 0.3534308977144802,
"grad_norm": 0.3542526364326477,
"learning_rate": 1.76437940152368e-05,
"loss": 0.0161,
"step": 13500
},
{
"epoch": 0.3665209309631646,
"grad_norm": 0.22688041627407074,
"learning_rate": 1.7556527126912238e-05,
"loss": 0.0155,
"step": 14000
},
{
"epoch": 0.3796109642118491,
"grad_norm": 0.20115447044372559,
"learning_rate": 1.7469260238587673e-05,
"loss": 0.0148,
"step": 14500
},
{
"epoch": 0.39270099746053355,
"grad_norm": 0.09773228317499161,
"learning_rate": 1.738199335026311e-05,
"loss": 0.0149,
"step": 15000
},
{
"epoch": 0.405791030709218,
"grad_norm": 0.41297146677970886,
"learning_rate": 1.729472646193855e-05,
"loss": 0.0146,
"step": 15500
},
{
"epoch": 0.4188810639579025,
"grad_norm": 0.6495208740234375,
"learning_rate": 1.7207459573613983e-05,
"loss": 0.0136,
"step": 16000
},
{
"epoch": 0.4319710972065869,
"grad_norm": 0.35930490493774414,
"learning_rate": 1.712019268528942e-05,
"loss": 0.0137,
"step": 16500
},
{
"epoch": 0.44506113045527135,
"grad_norm": 0.22953346371650696,
"learning_rate": 1.703292579696486e-05,
"loss": 0.0139,
"step": 17000
},
{
"epoch": 0.4581511637039558,
"grad_norm": 0.41518378257751465,
"learning_rate": 1.6945658908640297e-05,
"loss": 0.0135,
"step": 17500
},
{
"epoch": 0.4712411969526403,
"grad_norm": 0.2171572744846344,
"learning_rate": 1.6858392020315732e-05,
"loss": 0.0129,
"step": 18000
},
{
"epoch": 0.48433123020132474,
"grad_norm": 0.2897777557373047,
"learning_rate": 1.677112513199117e-05,
"loss": 0.0122,
"step": 18500
},
{
"epoch": 0.49742126345000914,
"grad_norm": 0.4209305942058563,
"learning_rate": 1.6683858243666608e-05,
"loss": 0.0128,
"step": 19000
},
{
"epoch": 0.5105112966986937,
"grad_norm": 0.08220311999320984,
"learning_rate": 1.6596591355342046e-05,
"loss": 0.0123,
"step": 19500
},
{
"epoch": 0.5236013299473781,
"grad_norm": 0.09847331047058105,
"learning_rate": 1.650932446701748e-05,
"loss": 0.0123,
"step": 20000
},
{
"epoch": 0.5366913631960625,
"grad_norm": 0.41798558831214905,
"learning_rate": 1.642205757869292e-05,
"loss": 0.0128,
"step": 20500
},
{
"epoch": 0.549781396444747,
"grad_norm": 0.2177186757326126,
"learning_rate": 1.6334790690368353e-05,
"loss": 0.0124,
"step": 21000
},
{
"epoch": 0.5628714296934314,
"grad_norm": 0.16467055678367615,
"learning_rate": 1.624752380204379e-05,
"loss": 0.0118,
"step": 21500
},
{
"epoch": 0.5759614629421159,
"grad_norm": 0.3392176330089569,
"learning_rate": 1.616025691371923e-05,
"loss": 0.0121,
"step": 22000
},
{
"epoch": 0.5890514961908003,
"grad_norm": 0.14208835363388062,
"learning_rate": 1.6072990025394667e-05,
"loss": 0.0118,
"step": 22500
},
{
"epoch": 0.6021415294394847,
"grad_norm": 0.18248602747917175,
"learning_rate": 1.5985723137070105e-05,
"loss": 0.0118,
"step": 23000
},
{
"epoch": 0.6152315626881693,
"grad_norm": 0.32215237617492676,
"learning_rate": 1.589845624874554e-05,
"loss": 0.0111,
"step": 23500
},
{
"epoch": 0.6283215959368537,
"grad_norm": 0.35770946741104126,
"learning_rate": 1.5811189360420978e-05,
"loss": 0.0111,
"step": 24000
},
{
"epoch": 0.6414116291855382,
"grad_norm": 0.21091119945049286,
"learning_rate": 1.5723922472096412e-05,
"loss": 0.0115,
"step": 24500
},
{
"epoch": 0.6545016624342226,
"grad_norm": 0.215097576379776,
"learning_rate": 1.563665558377185e-05,
"loss": 0.0107,
"step": 25000
},
{
"epoch": 0.667591695682907,
"grad_norm": 0.15477751195430756,
"learning_rate": 1.554938869544729e-05,
"loss": 0.0105,
"step": 25500
},
{
"epoch": 0.6806817289315915,
"grad_norm": 0.1576426774263382,
"learning_rate": 1.5462121807122726e-05,
"loss": 0.0105,
"step": 26000
},
{
"epoch": 0.6937717621802759,
"grad_norm": 0.19675689935684204,
"learning_rate": 1.537485491879816e-05,
"loss": 0.0096,
"step": 26500
},
{
"epoch": 0.7068617954289604,
"grad_norm": 0.480955570936203,
"learning_rate": 1.52875880304736e-05,
"loss": 0.0104,
"step": 27000
},
{
"epoch": 0.7199518286776448,
"grad_norm": 0.2651515603065491,
"learning_rate": 1.5200321142149035e-05,
"loss": 0.0106,
"step": 27500
},
{
"epoch": 0.7330418619263293,
"grad_norm": 0.21407313644886017,
"learning_rate": 1.5113054253824473e-05,
"loss": 0.0102,
"step": 28000
},
{
"epoch": 0.7461318951750138,
"grad_norm": 0.13915963470935822,
"learning_rate": 1.502578736549991e-05,
"loss": 0.0098,
"step": 28500
},
{
"epoch": 0.7592219284236982,
"grad_norm": 0.09625498950481415,
"learning_rate": 1.4938520477175348e-05,
"loss": 0.0099,
"step": 29000
},
{
"epoch": 0.7723119616723827,
"grad_norm": 0.26766207814216614,
"learning_rate": 1.4851253588850782e-05,
"loss": 0.01,
"step": 29500
},
{
"epoch": 0.7854019949210671,
"grad_norm": 0.03729819133877754,
"learning_rate": 1.476398670052622e-05,
"loss": 0.0092,
"step": 30000
},
{
"epoch": 0.7984920281697515,
"grad_norm": 0.19327211380004883,
"learning_rate": 1.4676719812201658e-05,
"loss": 0.0094,
"step": 30500
},
{
"epoch": 0.811582061418436,
"grad_norm": 0.27896180748939514,
"learning_rate": 1.4589452923877094e-05,
"loss": 0.01,
"step": 31000
},
{
"epoch": 0.8246720946671204,
"grad_norm": 0.1490613967180252,
"learning_rate": 1.4502186035552532e-05,
"loss": 0.0092,
"step": 31500
},
{
"epoch": 0.837762127915805,
"grad_norm": 0.22722095251083374,
"learning_rate": 1.4414919147227969e-05,
"loss": 0.0093,
"step": 32000
},
{
"epoch": 0.8508521611644894,
"grad_norm": 0.2942313849925995,
"learning_rate": 1.4327652258903407e-05,
"loss": 0.0098,
"step": 32500
},
{
"epoch": 0.8639421944131738,
"grad_norm": 0.37654176354408264,
"learning_rate": 1.4240385370578841e-05,
"loss": 0.0095,
"step": 33000
},
{
"epoch": 0.8770322276618583,
"grad_norm": 0.21543939411640167,
"learning_rate": 1.415311848225428e-05,
"loss": 0.0097,
"step": 33500
},
{
"epoch": 0.8901222609105427,
"grad_norm": 0.4608267843723297,
"learning_rate": 1.4065851593929716e-05,
"loss": 0.0093,
"step": 34000
},
{
"epoch": 0.9032122941592272,
"grad_norm": 0.1784515380859375,
"learning_rate": 1.3978584705605154e-05,
"loss": 0.009,
"step": 34500
},
{
"epoch": 0.9163023274079116,
"grad_norm": 0.3353884518146515,
"learning_rate": 1.389131781728059e-05,
"loss": 0.009,
"step": 35000
},
{
"epoch": 0.929392360656596,
"grad_norm": 0.2330337017774582,
"learning_rate": 1.3804050928956028e-05,
"loss": 0.009,
"step": 35500
},
{
"epoch": 0.9424823939052805,
"grad_norm": 0.32975077629089355,
"learning_rate": 1.3716784040631464e-05,
"loss": 0.0092,
"step": 36000
},
{
"epoch": 0.955572427153965,
"grad_norm": 0.03280099481344223,
"learning_rate": 1.3629517152306902e-05,
"loss": 0.0084,
"step": 36500
},
{
"epoch": 0.9686624604026495,
"grad_norm": 0.22344884276390076,
"learning_rate": 1.3542250263982337e-05,
"loss": 0.0089,
"step": 37000
},
{
"epoch": 0.9817524936513339,
"grad_norm": 0.361147403717041,
"learning_rate": 1.3454983375657775e-05,
"loss": 0.0084,
"step": 37500
},
{
"epoch": 0.9948425269000183,
"grad_norm": 0.2099611908197403,
"learning_rate": 1.3367716487333213e-05,
"loss": 0.0088,
"step": 38000
},
{
"epoch": 1.0,
"eval_loss": 0.005731215700507164,
"eval_runtime": 2297.4971,
"eval_samples_per_second": 133.003,
"eval_steps_per_second": 16.625,
"step": 38197
}
],
"logging_steps": 500,
"max_steps": 114591,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 2.328987484422144e+16,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}