{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 1.999054075358663,
"eval_steps": 500,
"global_step": 25360,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03941352672237112,
"grad_norm": 9.434146881103516,
"learning_rate": 1.9840000000000003e-05,
"loss": 3.5972,
"step": 500
},
{
"epoch": 0.07882705344474224,
"grad_norm": 8.484919548034668,
"learning_rate": 1.973587517972203e-05,
"loss": 3.0731,
"step": 1000
},
{
"epoch": 0.11824058016711335,
"grad_norm": 8.578750610351562,
"learning_rate": 1.9469620320570853e-05,
"loss": 2.9624,
"step": 1500
},
{
"epoch": 0.15765410688948447,
"grad_norm": 6.2477827072143555,
"learning_rate": 1.920336546141967e-05,
"loss": 2.8351,
"step": 2000
},
{
"epoch": 0.19706763361185559,
"grad_norm": 6.6669602394104,
"learning_rate": 1.8937110602268492e-05,
"loss": 2.817,
"step": 2500
},
{
"epoch": 0.2364811603342267,
"grad_norm": 4.857083797454834,
"learning_rate": 1.8670855743117314e-05,
"loss": 2.7781,
"step": 3000
},
{
"epoch": 0.2758946870565978,
"grad_norm": 5.961228847503662,
"learning_rate": 1.8404600883966135e-05,
"loss": 2.7681,
"step": 3500
},
{
"epoch": 0.31530821377896895,
"grad_norm": 3.896094560623169,
"learning_rate": 1.8138346024814953e-05,
"loss": 2.6833,
"step": 4000
},
{
"epoch": 0.35472174050134003,
"grad_norm": 4.19826078414917,
"learning_rate": 1.7872091165663774e-05,
"loss": 2.7317,
"step": 4500
},
{
"epoch": 0.39413526722371117,
"grad_norm": 4.363683223724365,
"learning_rate": 1.7605836306512596e-05,
"loss": 2.6456,
"step": 5000
},
{
"epoch": 0.4335487939460823,
"grad_norm": 4.608914852142334,
"learning_rate": 1.7339581447361414e-05,
"loss": 2.6575,
"step": 5500
},
{
"epoch": 0.4729623206684534,
"grad_norm": 5.031886577606201,
"learning_rate": 1.707332658821024e-05,
"loss": 2.6423,
"step": 6000
},
{
"epoch": 0.5123758473908245,
"grad_norm": 4.503154277801514,
"learning_rate": 1.6807071729059057e-05,
"loss": 2.6062,
"step": 6500
},
{
"epoch": 0.5517893741131956,
"grad_norm": 3.5765438079833984,
"learning_rate": 1.654134937962618e-05,
"loss": 2.6487,
"step": 7000
},
{
"epoch": 0.5912029008355668,
"grad_norm": 3.3804619312286377,
"learning_rate": 1.6275094520475e-05,
"loss": 2.6378,
"step": 7500
},
{
"epoch": 0.6306164275579379,
"grad_norm": 4.173604965209961,
"learning_rate": 1.600883966132382e-05,
"loss": 2.5821,
"step": 8000
},
{
"epoch": 0.670029954280309,
"grad_norm": 3.0175459384918213,
"learning_rate": 1.5743117311890943e-05,
"loss": 2.5756,
"step": 8500
},
{
"epoch": 0.7094434810026801,
"grad_norm": 4.371723651885986,
"learning_rate": 1.5476862452739764e-05,
"loss": 2.5365,
"step": 9000
},
{
"epoch": 0.7488570077250513,
"grad_norm": 3.797525644302368,
"learning_rate": 1.5210607593588584e-05,
"loss": 2.5734,
"step": 9500
},
{
"epoch": 0.7882705344474223,
"grad_norm": 2.7907519340515137,
"learning_rate": 1.4944352734437404e-05,
"loss": 2.5801,
"step": 10000
},
{
"epoch": 0.8276840611697934,
"grad_norm": 3.6856374740600586,
"learning_rate": 1.4678097875286225e-05,
"loss": 2.5198,
"step": 10500
},
{
"epoch": 0.8670975878921646,
"grad_norm": 3.692336320877075,
"learning_rate": 1.4411843016135045e-05,
"loss": 2.5294,
"step": 11000
},
{
"epoch": 0.9065111146145357,
"grad_norm": 3.4679853916168213,
"learning_rate": 1.4145588156983866e-05,
"loss": 2.5818,
"step": 11500
},
{
"epoch": 0.9459246413369068,
"grad_norm": 3.318463087081909,
"learning_rate": 1.3879333297832686e-05,
"loss": 2.5145,
"step": 12000
},
{
"epoch": 0.985338168059278,
"grad_norm": 3.9721946716308594,
"learning_rate": 1.361361094839981e-05,
"loss": 2.5793,
"step": 12500
},
{
"epoch": 1.024751694781649,
"grad_norm": 4.008026123046875,
"learning_rate": 1.3347356089248631e-05,
"loss": 2.4809,
"step": 13000
},
{
"epoch": 1.0641652215040203,
"grad_norm": 2.818800926208496,
"learning_rate": 1.308110123009745e-05,
"loss": 2.4303,
"step": 13500
},
{
"epoch": 1.1035787482263912,
"grad_norm": 2.989525556564331,
"learning_rate": 1.281484637094627e-05,
"loss": 2.4062,
"step": 14000
},
{
"epoch": 1.1429922749487624,
"grad_norm": 3.570216178894043,
"learning_rate": 1.2548591511795092e-05,
"loss": 2.4559,
"step": 14500
},
{
"epoch": 1.1824058016711336,
"grad_norm": 3.4571452140808105,
"learning_rate": 1.2282336652643912e-05,
"loss": 2.4451,
"step": 15000
},
{
"epoch": 1.2218193283935046,
"grad_norm": 3.5280067920684814,
"learning_rate": 1.2016081793492733e-05,
"loss": 2.4488,
"step": 15500
},
{
"epoch": 1.2612328551158758,
"grad_norm": 3.444591999053955,
"learning_rate": 1.1749826934341553e-05,
"loss": 2.4949,
"step": 16000
},
{
"epoch": 1.3006463818382468,
"grad_norm": 3.746109962463379,
"learning_rate": 1.1484104584908674e-05,
"loss": 2.4157,
"step": 16500
},
{
"epoch": 1.340059908560618,
"grad_norm": 3.2765095233917236,
"learning_rate": 1.1217849725757498e-05,
"loss": 2.442,
"step": 17000
},
{
"epoch": 1.3794734352829892,
"grad_norm": 3.1070899963378906,
"learning_rate": 1.0951594866606317e-05,
"loss": 2.4619,
"step": 17500
},
{
"epoch": 1.4188869620053604,
"grad_norm": 3.6844067573547363,
"learning_rate": 1.0685340007455135e-05,
"loss": 2.4091,
"step": 18000
},
{
"epoch": 1.4583004887277313,
"grad_norm": 3.408829927444458,
"learning_rate": 1.0419085148303958e-05,
"loss": 2.4427,
"step": 18500
},
{
"epoch": 1.4977140154501025,
"grad_norm": 3.1087682247161865,
"learning_rate": 1.0152830289152778e-05,
"loss": 2.4273,
"step": 19000
},
{
"epoch": 1.5371275421724735,
"grad_norm": 3.229764223098755,
"learning_rate": 9.886575430001598e-06,
"loss": 2.4041,
"step": 19500
},
{
"epoch": 1.5765410688948447,
"grad_norm": 2.389808416366577,
"learning_rate": 9.620853080568721e-06,
"loss": 2.4336,
"step": 20000
},
{
"epoch": 1.6159545956172159,
"grad_norm": 3.6460657119750977,
"learning_rate": 9.354598221417541e-06,
"loss": 2.4151,
"step": 20500
},
{
"epoch": 1.655368122339587,
"grad_norm": 3.6286749839782715,
"learning_rate": 9.088343362266362e-06,
"loss": 2.4395,
"step": 21000
},
{
"epoch": 1.694781649061958,
"grad_norm": 3.662971258163452,
"learning_rate": 8.822088503115182e-06,
"loss": 2.4143,
"step": 21500
},
{
"epoch": 1.734195175784329,
"grad_norm": 3.5085411071777344,
"learning_rate": 8.555833643964004e-06,
"loss": 2.4443,
"step": 22000
},
{
"epoch": 1.7736087025067002,
"grad_norm": 2.7148663997650146,
"learning_rate": 8.289578784812823e-06,
"loss": 2.3879,
"step": 22500
},
{
"epoch": 1.8130222292290714,
"grad_norm": 3.083752155303955,
"learning_rate": 8.023323925661643e-06,
"loss": 2.3917,
"step": 23000
},
{
"epoch": 1.8524357559514426,
"grad_norm": 3.1884474754333496,
"learning_rate": 7.757069066510464e-06,
"loss": 2.3965,
"step": 23500
},
{
"epoch": 1.8918492826738138,
"grad_norm": 3.8930094242095947,
"learning_rate": 7.490814207359285e-06,
"loss": 2.3829,
"step": 24000
},
{
"epoch": 1.9312628093961848,
"grad_norm": 3.560189723968506,
"learning_rate": 7.225091857926408e-06,
"loss": 2.3792,
"step": 24500
},
{
"epoch": 1.9706763361185557,
"grad_norm": 3.2734220027923584,
"learning_rate": 6.958836998775228e-06,
"loss": 2.3858,
"step": 25000
}
],
"logging_steps": 500,
"max_steps": 38058,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 1268,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 6626235285504000.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}