{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.2998581715033821,
"eval_steps": 500,
"global_step": 23829,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.027274710888064588,
"grad_norm": 8.132363319396973,
"learning_rate": 1.976e-05,
"loss": 3.5991,
"step": 500
},
{
"epoch": 0.054549421776129176,
"grad_norm": 7.099025249481201,
"learning_rate": 1.9818702290076336e-05,
"loss": 2.8754,
"step": 1000
},
{
"epoch": 0.08182413266419376,
"grad_norm": 7.820577621459961,
"learning_rate": 1.963520258367587e-05,
"loss": 2.6895,
"step": 1500
},
{
"epoch": 0.10909884355225835,
"grad_norm": 5.448477268218994,
"learning_rate": 1.94517028772754e-05,
"loss": 2.6494,
"step": 2000
},
{
"epoch": 0.13637355444032292,
"grad_norm": 6.391147613525391,
"learning_rate": 1.926820317087493e-05,
"loss": 2.5544,
"step": 2500
},
{
"epoch": 0.16364826532838753,
"grad_norm": 5.117228031158447,
"learning_rate": 1.908470346447446e-05,
"loss": 2.5431,
"step": 3000
},
{
"epoch": 0.1909229762164521,
"grad_norm": 4.08992338180542,
"learning_rate": 1.890120375807399e-05,
"loss": 2.5359,
"step": 3500
},
{
"epoch": 0.2181976871045167,
"grad_norm": 3.984097957611084,
"learning_rate": 1.871770405167352e-05,
"loss": 2.4834,
"step": 4000
},
{
"epoch": 0.24547239799258128,
"grad_norm": 3.8590118885040283,
"learning_rate": 1.853420434527305e-05,
"loss": 2.4899,
"step": 4500
},
{
"epoch": 0.27274710888064585,
"grad_norm": 4.541988372802734,
"learning_rate": 1.835070463887258e-05,
"loss": 2.4608,
"step": 5000
},
{
"epoch": 0.30002181976871045,
"grad_norm": 4.910805702209473,
"learning_rate": 1.816720493247211e-05,
"loss": 2.402,
"step": 5500
},
{
"epoch": 0.32729653065677505,
"grad_norm": 4.220065116882324,
"learning_rate": 1.798370522607164e-05,
"loss": 2.4483,
"step": 6000
},
{
"epoch": 0.3545712415448396,
"grad_norm": 3.9413557052612305,
"learning_rate": 1.780020551967117e-05,
"loss": 2.4271,
"step": 6500
},
{
"epoch": 0.3818459524329042,
"grad_norm": 2.890425443649292,
"learning_rate": 1.76167058132707e-05,
"loss": 2.3884,
"step": 7000
},
{
"epoch": 0.4091206633209688,
"grad_norm": 3.186286211013794,
"learning_rate": 1.743320610687023e-05,
"loss": 2.364,
"step": 7500
},
{
"epoch": 0.4363953742090334,
"grad_norm": 3.4042603969573975,
"learning_rate": 1.7249706400469762e-05,
"loss": 2.4374,
"step": 8000
},
{
"epoch": 0.46367008509709795,
"grad_norm": 2.714256763458252,
"learning_rate": 1.7066206694069292e-05,
"loss": 2.3535,
"step": 8500
},
{
"epoch": 0.49094479598516255,
"grad_norm": 3.9176571369171143,
"learning_rate": 1.6882706987668822e-05,
"loss": 2.379,
"step": 9000
},
{
"epoch": 0.5182195068732272,
"grad_norm": 3.3017940521240234,
"learning_rate": 1.669920728126835e-05,
"loss": 2.3246,
"step": 9500
},
{
"epoch": 0.5454942177612917,
"grad_norm": 3.9203879833221436,
"learning_rate": 1.651570757486788e-05,
"loss": 2.3452,
"step": 10000
},
{
"epoch": 0.5727689286493564,
"grad_norm": 3.474233627319336,
"learning_rate": 1.6332207868467413e-05,
"loss": 2.3192,
"step": 10500
},
{
"epoch": 0.6000436395374209,
"grad_norm": 3.47390079498291,
"learning_rate": 1.6148708162066943e-05,
"loss": 2.3775,
"step": 11000
},
{
"epoch": 0.6273183504254854,
"grad_norm": 3.5392141342163086,
"learning_rate": 1.5965208455666473e-05,
"loss": 2.3436,
"step": 11500
},
{
"epoch": 0.6545930613135501,
"grad_norm": 3.213120698928833,
"learning_rate": 1.5782075748678803e-05,
"loss": 2.2958,
"step": 12000
},
{
"epoch": 0.6818677722016147,
"grad_norm": 3.356034278869629,
"learning_rate": 1.5598576042278333e-05,
"loss": 2.3013,
"step": 12500
},
{
"epoch": 0.7091424830896792,
"grad_norm": 2.832211494445801,
"learning_rate": 1.5415076335877863e-05,
"loss": 2.3138,
"step": 13000
},
{
"epoch": 0.7364171939777439,
"grad_norm": 3.0613503456115723,
"learning_rate": 1.5231576629477394e-05,
"loss": 2.302,
"step": 13500
},
{
"epoch": 0.7636919048658084,
"grad_norm": 3.324085235595703,
"learning_rate": 1.5048076923076924e-05,
"loss": 2.3072,
"step": 14000
},
{
"epoch": 0.7909666157538731,
"grad_norm": 3.129357099533081,
"learning_rate": 1.4864577216676454e-05,
"loss": 2.3137,
"step": 14500
},
{
"epoch": 0.8182413266419376,
"grad_norm": 3.2217562198638916,
"learning_rate": 1.4681444509688784e-05,
"loss": 2.2899,
"step": 15000
},
{
"epoch": 0.8455160375300022,
"grad_norm": 3.6490132808685303,
"learning_rate": 1.4497944803288314e-05,
"loss": 2.3015,
"step": 15500
},
{
"epoch": 0.8727907484180668,
"grad_norm": 3.53132700920105,
"learning_rate": 1.4314445096887846e-05,
"loss": 2.2479,
"step": 16000
},
{
"epoch": 0.9000654593061314,
"grad_norm": 2.9956769943237305,
"learning_rate": 1.4130945390487376e-05,
"loss": 2.2878,
"step": 16500
},
{
"epoch": 0.9273401701941959,
"grad_norm": 3.027482271194458,
"learning_rate": 1.3947445684086906e-05,
"loss": 2.2656,
"step": 17000
},
{
"epoch": 0.9546148810822606,
"grad_norm": 2.76747465133667,
"learning_rate": 1.3763945977686436e-05,
"loss": 2.3085,
"step": 17500
},
{
"epoch": 0.9818895919703251,
"grad_norm": 3.367910385131836,
"learning_rate": 1.3580446271285968e-05,
"loss": 2.2726,
"step": 18000
},
{
"epoch": 1.0091643028583897,
"grad_norm": 3.19476056098938,
"learning_rate": 1.3396946564885498e-05,
"loss": 2.2459,
"step": 18500
},
{
"epoch": 1.0364390137464543,
"grad_norm": 2.947768211364746,
"learning_rate": 1.3213446858485028e-05,
"loss": 2.2225,
"step": 19000
},
{
"epoch": 1.063713724634519,
"grad_norm": 3.5084404945373535,
"learning_rate": 1.3030314151497358e-05,
"loss": 2.2222,
"step": 19500
},
{
"epoch": 1.0909884355225834,
"grad_norm": 3.5933291912078857,
"learning_rate": 1.284681444509689e-05,
"loss": 2.2169,
"step": 20000
},
{
"epoch": 1.118263146410648,
"grad_norm": 3.1414079666137695,
"learning_rate": 1.266331473869642e-05,
"loss": 2.2104,
"step": 20500
},
{
"epoch": 1.1455378572987127,
"grad_norm": 3.1075944900512695,
"learning_rate": 1.247981503229595e-05,
"loss": 2.1874,
"step": 21000
},
{
"epoch": 1.1728125681867771,
"grad_norm": 3.304654598236084,
"learning_rate": 1.229668232530828e-05,
"loss": 2.2282,
"step": 21500
},
{
"epoch": 1.2000872790748418,
"grad_norm": 2.92022705078125,
"learning_rate": 1.211318261890781e-05,
"loss": 2.2337,
"step": 22000
},
{
"epoch": 1.2273619899629065,
"grad_norm": 3.450542449951172,
"learning_rate": 1.1929682912507342e-05,
"loss": 2.1784,
"step": 22500
},
{
"epoch": 1.254636700850971,
"grad_norm": 2.8238916397094727,
"learning_rate": 1.1746183206106872e-05,
"loss": 2.2287,
"step": 23000
},
{
"epoch": 1.2819114117390356,
"grad_norm": 2.935835123062134,
"learning_rate": 1.1563050499119202e-05,
"loss": 2.2062,
"step": 23500
}
],
"logging_steps": 500,
"max_steps": 54996,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 1833,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 6226327830528000.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}