{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.00507000970431545,
  "eval_steps": 500,
  "global_step": 32,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0001584378032598578,
      "grad_norm": 6.941703031770885e-05,
      "learning_rate": 2.9998415465061005e-05,
      "loss": 0.0,
      "loss/policy_avg": 9.534414857625961e-08,
      "objective/entropy": 66.7407455444336,
      "objective/kl": 0.0,
      "objective/rlhf_reward": 1.9394512176513672,
      "objective/scores": 1.9393310546875,
      "policy/approxkl_avg": 0.0,
      "policy/clipfrac_avg": 0.0,
      "policy/entropy_avg": 0.501089334487915,
      "step": 1,
      "timer/calc_advantages": 1.838552713394165,
      "timer/calc_loss": 0.718826174736023,
      "timer/get_reward": 0.5057681202888489,
      "timer/training_step": 5.023805141448975,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.0,
      "val/ratio_var": null
    },
    {
      "epoch": 0.0003168756065197156,
      "grad_norm": 8.962108612060547,
      "learning_rate": 2.999683093012201e-05,
      "loss": 0.0079,
      "loss/policy_avg": 0.007912077941000462,
      "objective/entropy": 58.23992919921875,
      "objective/kl": 0.006753697991371155,
      "objective/rlhf_reward": 2.58967924118042,
      "objective/scores": 2.59033203125,
      "policy/approxkl_avg": 0.143972247838974,
      "policy/clipfrac_avg": 0.376953125,
      "policy/entropy_avg": 0.437211811542511,
      "step": 2,
      "timer/calc_advantages": 1.693800687789917,
      "timer/calc_loss": 0.6173452734947205,
      "timer/get_reward": 0.431838721036911,
      "timer/training_step": 4.488475322723389,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.0006933212280273,
      "val/ratio_var": null
    },
    {
      "epoch": 0.0004753134097795734,
      "grad_norm": 8.80933666229248,
      "learning_rate": 2.9995246395183014e-05,
      "loss": 0.0087,
      "loss/policy_avg": 0.008671796880662441,
      "objective/entropy": 60.747886657714844,
      "objective/kl": 0.19268979132175446,
      "objective/rlhf_reward": 2.797236442565918,
      "objective/scores": 2.8162841796875,
      "policy/approxkl_avg": 0.22207684814929962,
      "policy/clipfrac_avg": 0.369140625,
      "policy/entropy_avg": 0.47127196192741394,
      "step": 3,
      "timer/calc_advantages": 2.117703437805176,
      "timer/calc_loss": 0.8592336773872375,
      "timer/get_reward": 0.5989187955856323,
      "timer/training_step": 5.804616928100586,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 0.9997344613075256,
      "val/ratio_var": null
    },
    {
      "epoch": 0.0006337512130394313,
      "grad_norm": 15.397004127502441,
      "learning_rate": 2.999366186024402e-05,
      "loss": 0.0181,
      "loss/policy_avg": 0.018090050667524338,
      "objective/entropy": 59.366294860839844,
      "objective/kl": 0.16402865946292877,
      "objective/rlhf_reward": 2.7970707416534424,
      "objective/scores": 2.8134765625,
      "policy/approxkl_avg": 0.30915290117263794,
      "policy/clipfrac_avg": 0.41796875,
      "policy/entropy_avg": 0.4661800265312195,
      "step": 4,
      "timer/calc_advantages": 2.018901824951172,
      "timer/calc_loss": 0.8049512505531311,
      "timer/get_reward": 0.5547392964363098,
      "timer/training_step": 5.506021022796631,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.0003275871276855,
      "val/ratio_var": null
    },
    {
      "epoch": 0.000792189016299289,
      "grad_norm": 6.137847231002524e-05,
      "learning_rate": 2.9992077325305024e-05,
      "loss": 0.0,
      "loss/policy_avg": 1.0040821507573128e-07,
      "objective/entropy": 61.850738525390625,
      "objective/kl": 0.0,
      "objective/rlhf_reward": 2.3658785820007324,
      "objective/scores": 2.36578369140625,
      "policy/approxkl_avg": 0.0,
      "policy/clipfrac_avg": 0.0,
      "policy/entropy_avg": 0.48403242230415344,
      "step": 5,
      "timer/calc_advantages": 1.889434576034546,
      "timer/calc_loss": 0.7489800453186035,
      "timer/get_reward": 0.5218558311462402,
      "timer/training_step": 5.19569206237793,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.0,
      "val/ratio_var": null
    },
    {
      "epoch": 0.0009506268195591468,
      "grad_norm": 3.129560947418213,
      "learning_rate": 2.9990492790366028e-05,
      "loss": 0.002,
      "loss/policy_avg": 0.002019597217440605,
      "objective/entropy": 64.70206451416016,
      "objective/kl": -0.011846143752336502,
      "objective/rlhf_reward": 2.4429714679718018,
      "objective/scores": 2.44146728515625,
      "policy/approxkl_avg": 0.05796036496758461,
      "policy/clipfrac_avg": 0.224609375,
      "policy/entropy_avg": 0.4975966215133667,
      "step": 6,
      "timer/calc_advantages": 1.939255714416504,
      "timer/calc_loss": 0.7629643082618713,
      "timer/get_reward": 0.5286913514137268,
      "timer/training_step": 5.261943340301514,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.0005271434783936,
      "val/ratio_var": null
    },
    {
      "epoch": 0.0011090646228190046,
      "grad_norm": 4.28013277053833,
      "learning_rate": 2.9988908255427033e-05,
      "loss": 0.0037,
      "loss/policy_avg": 0.0036908092442899942,
      "objective/entropy": 68.02294158935547,
      "objective/kl": 0.15080219507217407,
      "objective/rlhf_reward": 3.149240016937256,
      "objective/scores": 3.1646728515625,
      "policy/approxkl_avg": 0.11497651040554047,
      "policy/clipfrac_avg": 0.25,
      "policy/entropy_avg": 0.5161693692207336,
      "step": 7,
      "timer/calc_advantages": 2.2946014404296875,
      "timer/calc_loss": 0.96665358543396,
      "timer/get_reward": 0.6638086438179016,
      "timer/training_step": 6.369439125061035,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 0.99953293800354,
      "val/ratio_var": null
    },
    {
      "epoch": 0.0012675024260788625,
      "grad_norm": 8.370040893554688,
      "learning_rate": 2.9987323720488037e-05,
      "loss": 0.0078,
      "loss/policy_avg": 0.00783943198621273,
      "objective/entropy": 66.24089813232422,
      "objective/kl": 0.0030039921402931213,
      "objective/rlhf_reward": 3.1140072345733643,
      "objective/scores": 3.1142501831054688,
      "policy/approxkl_avg": 0.13499276340007782,
      "policy/clipfrac_avg": 0.345703125,
      "policy/entropy_avg": 0.4966353476047516,
      "step": 8,
      "timer/calc_advantages": 2.4299309253692627,
      "timer/calc_loss": 1.046256422996521,
      "timer/get_reward": 0.7162653207778931,
      "timer/training_step": 6.773098945617676,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.0011930465698242,
      "val/ratio_var": null
    },
    {
      "epoch": 0.0014259402293387202,
      "grad_norm": 5.985812094877474e-05,
      "learning_rate": 2.9985739185549042e-05,
      "loss": 0.0,
      "loss/policy_avg": 1.3085082173347473e-07,
      "objective/entropy": 57.91233825683594,
      "objective/kl": 0.0,
      "objective/rlhf_reward": 2.9793567657470703,
      "objective/scores": 2.9791259765625,
      "policy/approxkl_avg": 0.0,
      "policy/clipfrac_avg": 0.0,
      "policy/entropy_avg": 0.4229130744934082,
      "step": 9,
      "timer/calc_advantages": 2.2175822257995605,
      "timer/calc_loss": 0.940474271774292,
      "timer/get_reward": 0.6452049016952515,
      "timer/training_step": 6.192902565002441,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.0,
      "val/ratio_var": null
    },
    {
      "epoch": 0.001584378032598578,
      "grad_norm": 2.061887502670288,
      "learning_rate": 2.9984154650610046e-05,
      "loss": 0.0012,
      "loss/policy_avg": 0.00118393381126225,
      "objective/entropy": 57.78638458251953,
      "objective/kl": 0.016596710309386253,
      "objective/rlhf_reward": 2.493161916732788,
      "objective/scores": 2.494873046875,
      "policy/approxkl_avg": 0.04327381029725075,
      "policy/clipfrac_avg": 0.220703125,
      "policy/entropy_avg": 0.45412811636924744,
      "step": 10,
      "timer/calc_advantages": 1.9425208568572998,
      "timer/calc_loss": 0.7499428987503052,
      "timer/get_reward": 0.5237178802490234,
      "timer/training_step": 5.241105556488037,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.0002281665802002,
      "val/ratio_var": null
    },
    {
      "epoch": 0.0017428158358584358,
      "grad_norm": 3.437352180480957,
      "learning_rate": 2.998257011567105e-05,
      "loss": 0.003,
      "loss/policy_avg": 0.003013810608536005,
      "objective/entropy": 62.60158920288086,
      "objective/kl": 0.04900990426540375,
      "objective/rlhf_reward": 3.26180362701416,
      "objective/scores": 3.2667236328125,
      "policy/approxkl_avg": 0.08036807179450989,
      "policy/clipfrac_avg": 0.2734375,
      "policy/entropy_avg": 0.49370765686035156,
      "step": 11,
      "timer/calc_advantages": 2.010678768157959,
      "timer/calc_loss": 0.8044981956481934,
      "timer/get_reward": 0.5571302771568298,
      "timer/training_step": 5.486995220184326,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.0001283884048462,
      "val/ratio_var": null
    },
    {
      "epoch": 0.0019012536391182935,
      "grad_norm": 3.6680727005004883,
      "learning_rate": 2.9980985580732055e-05,
      "loss": 0.0026,
      "loss/policy_avg": 0.002624134300276637,
      "objective/entropy": 70.5216064453125,
      "objective/kl": 0.039649978280067444,
      "objective/rlhf_reward": 1.9454692602157593,
      "objective/scores": 1.94927978515625,
      "policy/approxkl_avg": 0.07172106206417084,
      "policy/clipfrac_avg": 0.34375,
      "policy/entropy_avg": 0.5483217239379883,
      "step": 12,
      "timer/calc_advantages": 1.708000898361206,
      "timer/calc_loss": 0.6391787528991699,
      "timer/get_reward": 0.4455040693283081,
      "timer/training_step": 4.587984085083008,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.0003334283828735,
      "val/ratio_var": null
    },
    {
      "epoch": 0.0020596914423781513,
      "grad_norm": 5.551970753003843e-05,
      "learning_rate": 2.997940104579306e-05,
      "loss": 0.0,
      "loss/policy_avg": 6.216578185558319e-08,
      "objective/entropy": 58.013153076171875,
      "objective/kl": 0.0,
      "objective/rlhf_reward": 1.9868602752685547,
      "objective/scores": 1.9864273071289062,
      "policy/approxkl_avg": 0.0,
      "policy/clipfrac_avg": 0.0,
      "policy/entropy_avg": 0.44822999835014343,
      "step": 13,
      "timer/calc_advantages": 2.0904719829559326,
      "timer/calc_loss": 0.8480838537216187,
      "timer/get_reward": 0.587557315826416,
      "timer/training_step": 5.7307024002075195,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.0,
      "val/ratio_var": null
    },
    {
      "epoch": 0.002218129245638009,
      "grad_norm": 2.7803940773010254,
      "learning_rate": 2.9977816510854064e-05,
      "loss": 0.0015,
      "loss/policy_avg": 0.0014500978868454695,
      "objective/entropy": 69.96898651123047,
      "objective/kl": 0.06745042651891708,
      "objective/rlhf_reward": 2.469928741455078,
      "objective/scores": 2.477020263671875,
      "policy/approxkl_avg": 0.053128380328416824,
      "policy/clipfrac_avg": 0.2734375,
      "policy/entropy_avg": 0.5138126015663147,
      "step": 14,
      "timer/calc_advantages": 1.917910099029541,
      "timer/calc_loss": 0.7225819230079651,
      "timer/get_reward": 0.5053159594535828,
      "timer/training_step": 5.0998334884643555,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 0.9998296499252319,
      "val/ratio_var": null
    },
    {
      "epoch": 0.002376567048897867,
      "grad_norm": 2.532649278640747,
      "learning_rate": 2.997623197591507e-05,
      "loss": 0.0018,
      "loss/policy_avg": 0.0017822147347033024,
      "objective/entropy": 58.39247131347656,
      "objective/kl": -0.011621108278632164,
      "objective/rlhf_reward": 2.397026538848877,
      "objective/scores": 2.39556884765625,
      "policy/approxkl_avg": 0.05379398912191391,
      "policy/clipfrac_avg": 0.248046875,
      "policy/entropy_avg": 0.4527440071105957,
      "step": 15,
      "timer/calc_advantages": 2.565062999725342,
      "timer/calc_loss": 1.1112425327301025,
      "timer/get_reward": 0.7521659731864929,
      "timer/training_step": 7.156440734863281,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.0004982948303223,
      "val/ratio_var": null
    },
    {
      "epoch": 0.002535004852157725,
      "grad_norm": 5.747053623199463,
      "learning_rate": 2.9974647440976074e-05,
      "loss": 0.0039,
      "loss/policy_avg": 0.0039013477507978678,
      "objective/entropy": 58.41150665283203,
      "objective/kl": -0.03636426478624344,
      "objective/rlhf_reward": 2.3745107650756836,
      "objective/scores": 2.370819091796875,
      "policy/approxkl_avg": 0.07460636645555496,
      "policy/clipfrac_avg": 0.310546875,
      "policy/entropy_avg": 0.4354715943336487,
      "step": 16,
      "timer/calc_advantages": 2.135810136795044,
      "timer/calc_loss": 0.8651329278945923,
      "timer/get_reward": 0.5979053378105164,
      "timer/training_step": 5.837425231933594,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.0008955001831055,
      "val/ratio_var": null
    },
    {
      "epoch": 0.0026934426554175825,
      "grad_norm": 6.287374708335847e-05,
      "learning_rate": 2.9973062906037078e-05,
      "loss": 0.0,
      "loss/policy_avg": 9.161794878309593e-08,
      "objective/entropy": 71.59750366210938,
      "objective/kl": 0.0,
      "objective/rlhf_reward": 1.8474715948104858,
      "objective/scores": 1.847381591796875,
      "policy/approxkl_avg": 0.0,
      "policy/clipfrac_avg": 0.0,
      "policy/entropy_avg": 0.547500729560852,
      "step": 17,
      "timer/calc_advantages": 1.8003900051116943,
      "timer/calc_loss": 0.6777751445770264,
      "timer/get_reward": 0.47231921553611755,
      "timer/training_step": 4.813199996948242,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.0,
      "val/ratio_var": null
    },
    {
      "epoch": 0.0028518804586774404,
      "grad_norm": 3.230647563934326,
      "learning_rate": 2.9971478371098086e-05,
      "loss": 0.0015,
      "loss/policy_avg": 0.001512572169303894,
      "objective/entropy": 65.72537994384766,
      "objective/kl": -0.01090591587126255,
      "objective/rlhf_reward": 2.5540518760681152,
      "objective/scores": 2.5531005859375,
      "policy/approxkl_avg": 0.045434024184942245,
      "policy/clipfrac_avg": 0.306640625,
      "policy/entropy_avg": 0.49296021461486816,
      "step": 18,
      "timer/calc_advantages": 2.015744686126709,
      "timer/calc_loss": 0.798870325088501,
      "timer/get_reward": 0.5532949566841125,
      "timer/training_step": 5.489305019378662,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.000435471534729,
      "val/ratio_var": null
    },
    {
      "epoch": 0.0030103182619372983,
      "grad_norm": 3.198871374130249,
      "learning_rate": 2.996989383615909e-05,
      "loss": 0.0019,
      "loss/policy_avg": 0.001899047987535596,
      "objective/entropy": 57.27036666870117,
      "objective/kl": 0.023365572094917297,
      "objective/rlhf_reward": 2.107551336288452,
      "objective/scores": 2.1098785400390625,
      "policy/approxkl_avg": 0.05755549669265747,
      "policy/clipfrac_avg": 0.287109375,
      "policy/entropy_avg": 0.4354955852031708,
      "step": 19,
      "timer/calc_advantages": 1.9864205121994019,
      "timer/calc_loss": 0.770275354385376,
      "timer/get_reward": 0.5395585894584656,
      "timer/training_step": 5.340071678161621,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.0002330541610718,
      "val/ratio_var": null
    },
    {
      "epoch": 0.003168756065197156,
      "grad_norm": 3.0474605560302734,
      "learning_rate": 2.9968309301220095e-05,
      "loss": 0.0023,
      "loss/policy_avg": 0.0022917778696864843,
      "objective/entropy": 63.32637023925781,
      "objective/kl": 0.09173699468374252,
      "objective/rlhf_reward": 2.2212512493133545,
      "objective/scores": 2.23046875,
      "policy/approxkl_avg": 0.07603129744529724,
      "policy/clipfrac_avg": 0.28125,
      "policy/entropy_avg": 0.4984070062637329,
      "step": 20,
      "timer/calc_advantages": 2.401278257369995,
      "timer/calc_loss": 1.0309439897537231,
      "timer/get_reward": 0.7035059928894043,
      "timer/training_step": 6.705676555633545,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 0.9999362230300903,
      "val/ratio_var": null
    },
    {
      "epoch": 0.0033271938684570138,
      "grad_norm": 5.2485152991721407e-05,
      "learning_rate": 2.99667247662811e-05,
      "loss": 0.0,
      "loss/policy_avg": 1.1641532182693481e-07,
      "objective/entropy": 63.780208587646484,
      "objective/kl": 0.0,
      "objective/rlhf_reward": 2.867950916290283,
      "objective/scores": 2.8680419921875,
      "policy/approxkl_avg": 0.0,
      "policy/clipfrac_avg": 0.0,
      "policy/entropy_avg": 0.4937428832054138,
      "step": 21,
      "timer/calc_advantages": 1.9973487854003906,
      "timer/calc_loss": 0.799564003944397,
      "timer/get_reward": 0.5565282106399536,
      "timer/training_step": 5.468988418579102,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.0,
      "val/ratio_var": null
    },
    {
      "epoch": 0.0034856316717168717,
      "grad_norm": 1.7639113664627075,
      "learning_rate": 2.9965140231342104e-05,
      "loss": 0.0008,
      "loss/policy_avg": 0.0008156594703905284,
      "objective/entropy": 53.04450225830078,
      "objective/kl": -0.02643105760216713,
      "objective/rlhf_reward": 3.207425355911255,
      "objective/scores": 3.205322265625,
      "policy/approxkl_avg": 0.033180996775627136,
      "policy/clipfrac_avg": 0.1875,
      "policy/entropy_avg": 0.40510252118110657,
      "step": 22,
      "timer/calc_advantages": 1.910763144493103,
      "timer/calc_loss": 0.7438158392906189,
      "timer/get_reward": 0.5141557455062866,
      "timer/training_step": 5.157877445220947,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.0004686117172241,
      "val/ratio_var": null
    },
    {
      "epoch": 0.0036440694749767296,
      "grad_norm": 2.1950137615203857,
      "learning_rate": 2.996355569640311e-05,
      "loss": 0.0009,
      "loss/policy_avg": 0.0009003256564028561,
      "objective/entropy": 62.4653205871582,
      "objective/kl": 0.001978884916752577,
      "objective/rlhf_reward": 2.033841848373413,
      "objective/scores": 2.0343017578125,
      "policy/approxkl_avg": 0.035770971328020096,
      "policy/clipfrac_avg": 0.25390625,
      "policy/entropy_avg": 0.47088196873664856,
      "step": 23,
      "timer/calc_advantages": 1.9022271633148193,
      "timer/calc_loss": 0.73871910572052,
      "timer/get_reward": 0.5146778225898743,
      "timer/training_step": 5.153467178344727,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.000369668006897,
      "val/ratio_var": null
    },
    {
      "epoch": 0.003802507278236587,
      "grad_norm": 3.9163379669189453,
      "learning_rate": 2.9961971161464113e-05,
      "loss": 0.0024,
      "loss/policy_avg": 0.0024069277569651604,
      "objective/entropy": 74.16690826416016,
      "objective/kl": -0.011084027588367462,
      "objective/rlhf_reward": 2.49955415725708,
      "objective/scores": 2.498291015625,
      "policy/approxkl_avg": 0.07602480053901672,
      "policy/clipfrac_avg": 0.37109375,
      "policy/entropy_avg": 0.5638661980628967,
      "step": 24,
      "timer/calc_advantages": 2.4937758445739746,
      "timer/calc_loss": 1.0552998781204224,
      "timer/get_reward": 0.7326261401176453,
      "timer/training_step": 6.899857521057129,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.0005831718444824,
      "val/ratio_var": null
    },
    {
      "epoch": 0.0039609450814964454,
      "grad_norm": 6.367493915604427e-05,
      "learning_rate": 2.9960386626525118e-05,
      "loss": 0.0,
      "loss/policy_avg": 9.551877155900002e-08,
      "objective/entropy": 63.46808624267578,
      "objective/kl": 0.0,
      "objective/rlhf_reward": 2.20194149017334,
      "objective/scores": 2.2020263671875,
      "policy/approxkl_avg": 0.0,
      "policy/clipfrac_avg": 0.0,
      "policy/entropy_avg": 0.49982160329818726,
      "step": 25,
      "timer/calc_advantages": 1.9283984899520874,
      "timer/calc_loss": 0.8220177888870239,
      "timer/get_reward": 0.5712042450904846,
      "timer/training_step": 5.475875377655029,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.0,
      "val/ratio_var": null
    },
    {
      "epoch": 0.0041193828847563025,
      "grad_norm": 2.6384997367858887,
      "learning_rate": 2.9958802091586123e-05,
      "loss": 0.0015,
      "loss/policy_avg": 0.001533568138256669,
      "objective/entropy": 63.77919006347656,
      "objective/kl": -0.0701940730214119,
      "objective/rlhf_reward": 2.2986488342285156,
      "objective/scores": 2.291473388671875,
      "policy/approxkl_avg": 0.04308713600039482,
      "policy/clipfrac_avg": 0.255859375,
      "policy/entropy_avg": 0.4809061884880066,
      "step": 26,
      "timer/calc_advantages": 2.43847918510437,
      "timer/calc_loss": 1.1137644052505493,
      "timer/get_reward": 0.7613001465797424,
      "timer/training_step": 7.051676273345947,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.0008255243301392,
      "val/ratio_var": null
    },
    {
      "epoch": 0.00427782068801616,
      "grad_norm": 4.069683074951172,
      "learning_rate": 2.9957217556647127e-05,
      "loss": 0.0031,
      "loss/policy_avg": 0.0031379496213048697,
      "objective/entropy": 59.70353698730469,
      "objective/kl": 0.030867144465446472,
      "objective/rlhf_reward": 3.1007046699523926,
      "objective/scores": 3.103759765625,
      "policy/approxkl_avg": 0.08930616080760956,
      "policy/clipfrac_avg": 0.296875,
      "policy/entropy_avg": 0.45782268047332764,
      "step": 27,
      "timer/calc_advantages": 1.9838676452636719,
      "timer/calc_loss": 0.8435078263282776,
      "timer/get_reward": 0.5804767608642578,
      "timer/training_step": 5.597771167755127,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.0001940727233887,
      "val/ratio_var": null
    },
    {
      "epoch": 0.004436258491276018,
      "grad_norm": 3.3112289905548096,
      "learning_rate": 2.995563302170813e-05,
      "loss": 0.0021,
      "loss/policy_avg": 0.0020556009840220213,
      "objective/entropy": 59.56258773803711,
      "objective/kl": 0.01491690892726183,
      "objective/rlhf_reward": 2.9051923751831055,
      "objective/scores": 2.9068603515625,
      "policy/approxkl_avg": 0.056783340871334076,
      "policy/clipfrac_avg": 0.318359375,
      "policy/entropy_avg": 0.4829668402671814,
      "step": 28,
      "timer/calc_advantages": 2.4110093116760254,
      "timer/calc_loss": 1.0960487127304077,
      "timer/get_reward": 0.7531113624572754,
      "timer/training_step": 6.956838607788086,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.000368356704712,
      "val/ratio_var": null
    },
    {
      "epoch": 0.004594696294535876,
      "grad_norm": 4.160216121817939e-05,
      "learning_rate": 2.9954048486769136e-05,
      "loss": 0.0,
      "loss/policy_avg": 7.753260433673859e-08,
      "objective/entropy": 61.242225646972656,
      "objective/kl": 0.0,
      "objective/rlhf_reward": 2.0377540588378906,
      "objective/scores": 2.037841796875,
      "policy/approxkl_avg": 0.0,
      "policy/clipfrac_avg": 0.0,
      "policy/entropy_avg": 0.49786943197250366,
      "step": 29,
      "timer/calc_advantages": 1.305631160736084,
      "timer/calc_loss": 0.46132537722587585,
      "timer/get_reward": 0.32864800095558167,
      "timer/training_step": 3.542546272277832,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.0,
      "val/ratio_var": null
    },
    {
      "epoch": 0.004753134097795734,
      "grad_norm": 2.1457343101501465,
      "learning_rate": 2.995246395183014e-05,
      "loss": 0.0008,
      "loss/policy_avg": 0.0008310286793857813,
      "objective/entropy": 60.64550018310547,
      "objective/kl": 0.0053929053246974945,
      "objective/rlhf_reward": 2.9084725379943848,
      "objective/scores": 2.908935546875,
      "policy/approxkl_avg": 0.03600907325744629,
      "policy/clipfrac_avg": 0.236328125,
      "policy/entropy_avg": 0.4702727794647217,
      "step": 30,
      "timer/calc_advantages": 1.9878807067871094,
      "timer/calc_loss": 0.8536985516548157,
      "timer/get_reward": 0.5830812454223633,
      "timer/training_step": 5.61873722076416,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.0002671480178833,
      "val/ratio_var": null
    },
    {
      "epoch": 0.004911571901055592,
      "grad_norm": 7.465836048126221,
      "learning_rate": 2.9950879416891145e-05,
      "loss": 0.0027,
      "loss/policy_avg": 0.0026657686103135347,
      "objective/entropy": 55.14857482910156,
      "objective/kl": 0.03210607171058655,
      "objective/rlhf_reward": 3.1992266178131104,
      "objective/scores": 3.20263671875,
      "policy/approxkl_avg": 0.06627106666564941,
      "policy/clipfrac_avg": 0.263671875,
      "policy/entropy_avg": 0.4340878129005432,
      "step": 31,
      "timer/calc_advantages": 2.075843095779419,
      "timer/calc_loss": 0.8940950036048889,
      "timer/get_reward": 0.6188414096832275,
      "timer/training_step": 5.896753311157227,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.0001206398010254,
      "val/ratio_var": null
    },
    {
      "epoch": 0.00507000970431545,
      "grad_norm": 4.614927768707275,
      "learning_rate": 2.9949294881952146e-05,
      "loss": 0.0036,
      "loss/policy_avg": 0.003638375783339143,
      "objective/entropy": 63.83076477050781,
      "objective/kl": -0.020551415160298347,
      "objective/rlhf_reward": 2.031364917755127,
      "objective/scores": 2.029052734375,
      "policy/approxkl_avg": 0.08366862684488297,
      "policy/clipfrac_avg": 0.283203125,
      "policy/entropy_avg": 0.49368801712989807,
      "step": 32,
      "timer/calc_advantages": 1.9352046251296997,
      "timer/calc_loss": 0.7866313457489014,
      "timer/get_reward": 0.5527838468551636,
      "timer/training_step": 5.347667694091797,
      "val/num_eos_tokens": 0.0,
      "val/ratio": 1.0006513595581055,
      "val/ratio_var": null
    }
  ],
  "logging_steps": 1,
  "max_steps": 18933,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 8,
  "total_flos": 1.6683704308432896e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}