{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 7.7947882736156355,
"eval_steps": 200,
"global_step": 600,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.13029315960912052,
"grad_norm": 8.268109321594238,
"learning_rate": 1.1249999999999999e-05,
"loss": 8.118,
"step": 10
},
{
"epoch": 0.26058631921824105,
"grad_norm": 1.9696224927902222,
"learning_rate": 2.3749999999999998e-05,
"loss": 6.7018,
"step": 20
},
{
"epoch": 0.39087947882736157,
"grad_norm": 1.5390442609786987,
"learning_rate": 3.625e-05,
"loss": 5.7973,
"step": 30
},
{
"epoch": 0.5211726384364821,
"grad_norm": 1.8240673542022705,
"learning_rate": 4.875e-05,
"loss": 4.3519,
"step": 40
},
{
"epoch": 0.6514657980456026,
"grad_norm": 1.5940146446228027,
"learning_rate": 6.125e-05,
"loss": 2.7139,
"step": 50
},
{
"epoch": 0.7817589576547231,
"grad_norm": 1.8101792335510254,
"learning_rate": 7.374999999999999e-05,
"loss": 1.319,
"step": 60
},
{
"epoch": 0.9120521172638436,
"grad_norm": 1.423922061920166,
"learning_rate": 7.49486075532965e-05,
"loss": 0.8324,
"step": 70
},
{
"epoch": 1.0390879478827362,
"grad_norm": 1.1624470949172974,
"learning_rate": 7.477113549054449e-05,
"loss": 0.6864,
"step": 80
},
{
"epoch": 1.1693811074918568,
"grad_norm": 0.8140671253204346,
"learning_rate": 7.446754970319223e-05,
"loss": 0.6267,
"step": 90
},
{
"epoch": 1.2996742671009773,
"grad_norm": 0.9221881628036499,
"learning_rate": 7.403887742944632e-05,
"loss": 0.4194,
"step": 100
},
{
"epoch": 1.4299674267100977,
"grad_norm": 0.6408695578575134,
"learning_rate": 7.348656916058809e-05,
"loss": 0.3262,
"step": 110
},
{
"epoch": 1.5602605863192183,
"grad_norm": 0.5344031453132629,
"learning_rate": 7.281249373297033e-05,
"loss": 0.3679,
"step": 120
},
{
"epoch": 1.6905537459283386,
"grad_norm": 0.6845048666000366,
"learning_rate": 7.201893200446651e-05,
"loss": 0.3101,
"step": 130
},
{
"epoch": 1.8208469055374592,
"grad_norm": 0.5909339189529419,
"learning_rate": 7.110856913676987e-05,
"loss": 0.2775,
"step": 140
},
{
"epoch": 1.9511400651465798,
"grad_norm": 0.4650987684726715,
"learning_rate": 7.008448550965612e-05,
"loss": 0.275,
"step": 150
},
{
"epoch": 2.0781758957654723,
"grad_norm": 0.444742351770401,
"learning_rate": 6.89501462979534e-05,
"loss": 0.2893,
"step": 160
},
{
"epoch": 2.208469055374593,
"grad_norm": 0.42899981141090393,
"learning_rate": 6.770938974648747e-05,
"loss": 0.213,
"step": 170
},
{
"epoch": 2.3387622149837135,
"grad_norm": 0.4496869444847107,
"learning_rate": 6.636641418267628e-05,
"loss": 0.3031,
"step": 180
},
{
"epoch": 2.469055374592834,
"grad_norm": 0.5703123807907104,
"learning_rate": 6.492576381071889e-05,
"loss": 0.2808,
"step": 190
},
{
"epoch": 2.5993485342019547,
"grad_norm": 0.43668872117996216,
"learning_rate": 6.339231333544685e-05,
"loss": 0.3289,
"step": 200
},
{
"epoch": 2.729641693811075,
"grad_norm": 0.3623226583003998,
"learning_rate": 6.177125146786621e-05,
"loss": 0.2122,
"step": 210
},
{
"epoch": 2.8599348534201954,
"grad_norm": 0.35421496629714966,
"learning_rate": 6.0068063368201814e-05,
"loss": 0.2559,
"step": 220
},
{
"epoch": 2.990228013029316,
"grad_norm": 0.38672634959220886,
"learning_rate": 5.828851208585143e-05,
"loss": 0.2479,
"step": 230
},
{
"epoch": 3.1172638436482085,
"grad_norm": 0.2925184369087219,
"learning_rate": 5.643861905905087e-05,
"loss": 0.2245,
"step": 240
},
{
"epoch": 3.247557003257329,
"grad_norm": 0.3759796619415283,
"learning_rate": 5.4524643740232996e-05,
"loss": 0.184,
"step": 250
},
{
"epoch": 3.3778501628664497,
"grad_norm": 0.6650795340538025,
"learning_rate": 5.255306241602209e-05,
"loss": 0.1786,
"step": 260
},
{
"epoch": 3.5081433224755703,
"grad_norm": 0.31284600496292114,
"learning_rate": 5.053054629352992e-05,
"loss": 0.2012,
"step": 270
},
{
"epoch": 3.6384364820846904,
"grad_norm": 0.48433321714401245,
"learning_rate": 4.8463938927102625e-05,
"loss": 0.1715,
"step": 280
},
{
"epoch": 3.768729641693811,
"grad_norm": 0.41222795844078064,
"learning_rate": 4.6360233061899054e-05,
"loss": 0.17,
"step": 290
},
{
"epoch": 3.8990228013029316,
"grad_norm": 0.34344053268432617,
"learning_rate": 4.422654697265465e-05,
"loss": 0.1903,
"step": 300
},
{
"epoch": 4.026058631921824,
"grad_norm": 0.25037407875061035,
"learning_rate": 4.2070100377693023e-05,
"loss": 0.2105,
"step": 310
},
{
"epoch": 4.156351791530945,
"grad_norm": 0.402792364358902,
"learning_rate": 3.9898190009684456e-05,
"loss": 0.1178,
"step": 320
},
{
"epoch": 4.286644951140065,
"grad_norm": 0.5768423080444336,
"learning_rate": 3.7718164925812265e-05,
"loss": 0.1553,
"step": 330
},
{
"epoch": 4.416938110749186,
"grad_norm": 0.3002632260322571,
"learning_rate": 3.55374016408896e-05,
"loss": 0.1381,
"step": 340
},
{
"epoch": 4.547231270358306,
"grad_norm": 0.45716574788093567,
"learning_rate": 3.3363279167568306e-05,
"loss": 0.1134,
"step": 350
},
{
"epoch": 4.677524429967427,
"grad_norm": 0.47437748312950134,
"learning_rate": 3.120315404809586e-05,
"loss": 0.1067,
"step": 360
},
{
"epoch": 4.807817589576548,
"grad_norm": 0.680506706237793,
"learning_rate": 2.9064335462105067e-05,
"loss": 0.1564,
"step": 370
},
{
"epoch": 4.938110749185668,
"grad_norm": 0.41097283363342285,
"learning_rate": 2.6954060494663847e-05,
"loss": 0.108,
"step": 380
},
{
"epoch": 5.06514657980456,
"grad_norm": 0.48278936743736267,
"learning_rate": 2.4879469648270256e-05,
"loss": 0.123,
"step": 390
},
{
"epoch": 5.195439739413681,
"grad_norm": 0.7354271411895752,
"learning_rate": 2.2847582681652236e-05,
"loss": 0.0881,
"step": 400
},
{
"epoch": 5.3257328990228014,
"grad_norm": 0.40792593359947205,
"learning_rate": 2.0865274857126165e-05,
"loss": 0.0718,
"step": 410
},
{
"epoch": 5.456026058631922,
"grad_norm": 0.44886234402656555,
"learning_rate": 1.8939253676885473e-05,
"loss": 0.0942,
"step": 420
},
{
"epoch": 5.586319218241043,
"grad_norm": 0.432784765958786,
"learning_rate": 1.7076036186936484e-05,
"loss": 0.0876,
"step": 430
},
{
"epoch": 5.716612377850163,
"grad_norm": 0.20283620059490204,
"learning_rate": 1.52819269254776e-05,
"loss": 0.0416,
"step": 440
},
{
"epoch": 5.846905537459284,
"grad_norm": 0.47396963834762573,
"learning_rate": 1.3562996590337701e-05,
"loss": 0.0797,
"step": 450
},
{
"epoch": 5.977198697068404,
"grad_norm": 0.3657645285129547,
"learning_rate": 1.192506149765631e-05,
"loss": 0.0981,
"step": 460
},
{
"epoch": 6.1042345276872965,
"grad_norm": 0.34181857109069824,
"learning_rate": 1.0373663901310948e-05,
"loss": 0.044,
"step": 470
},
{
"epoch": 6.234527687296417,
"grad_norm": 0.484397828578949,
"learning_rate": 8.91405323968433e-06,
"loss": 0.0605,
"step": 480
},
{
"epoch": 6.364820846905538,
"grad_norm": 0.3345278799533844,
"learning_rate": 7.551168373226526e-06,
"loss": 0.0381,
"step": 490
},
{
"epoch": 6.495114006514658,
"grad_norm": 0.2914145290851593,
"learning_rate": 6.289620872914588e-06,
"loss": 0.0447,
"step": 500
},
{
"epoch": 6.625407166123779,
"grad_norm": 0.4786616563796997,
"learning_rate": 5.133679416156135e-06,
"loss": 0.0375,
"step": 510
},
{
"epoch": 6.755700325732899,
"grad_norm": 0.2902482748031616,
"learning_rate": 4.0872553429362075e-06,
"loss": 0.0727,
"step": 520
},
{
"epoch": 6.88599348534202,
"grad_norm": 0.28187865018844604,
"learning_rate": 3.153889421080841e-06,
"loss": 0.0685,
"step": 530
},
{
"epoch": 7.013029315960912,
"grad_norm": 0.24184636771678925,
"learning_rate": 2.3367398654195707e-06,
"loss": 0.055,
"step": 540
},
{
"epoch": 7.143322475570033,
"grad_norm": 0.364844411611557,
"learning_rate": 1.638571651386171e-06,
"loss": 0.0447,
"step": 550
},
{
"epoch": 7.273615635179153,
"grad_norm": 0.24921606481075287,
"learning_rate": 1.061747159216994e-06,
"loss": 0.0323,
"step": 560
},
{
"epoch": 7.403908794788274,
"grad_norm": 0.3031994700431824,
"learning_rate": 6.082181804040465e-07,
"loss": 0.0491,
"step": 570
},
{
"epoch": 7.534201954397394,
"grad_norm": 0.21111436188220978,
"learning_rate": 2.795193134504256e-07,
"loss": 0.0327,
"step": 580
},
{
"epoch": 7.664495114006515,
"grad_norm": 0.37282687425613403,
"learning_rate": 7.67627712747243e-08,
"loss": 0.049,
"step": 590
},
{
"epoch": 7.7947882736156355,
"grad_norm": 0.3026382029056549,
"learning_rate": 6.346178345911779e-10,
"loss": 0.0405,
"step": 600
}
],
"logging_steps": 10,
"max_steps": 600,
"num_input_tokens_seen": 0,
"num_train_epochs": 8,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1.804936549180113e+18,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}