{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.1909307875894988,
  "eval_steps": 10,
  "global_step": 80,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "clip_ratio": 0.0,
      "completion_length": 1075.9285888671875,
      "epoch": 0.002386634844868735,
      "grad_norm": 0.25947600514173863,
      "kl": 0.0,
      "learning_rate": 7.692307692307692e-08,
      "loss": -0.0203,
      "reward": 1.1196562051773071,
      "reward_std": 0.6183667778968811,
      "rewards/": 4.645899772644043,
      "rewards/math_compute_score": 0.2380952388048172,
      "step": 1
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 827.4761962890625,
      "epoch": 0.00477326968973747,
      "grad_norm": 0.34697911338033505,
      "kl": 0.0,
      "learning_rate": 1.5384615384615385e-07,
      "loss": 0.1454,
      "reward": 1.1186012029647827,
      "reward_std": 0.9155174493789673,
      "rewards/": 4.069196701049805,
      "rewards/math_compute_score": 0.380952388048172,
      "step": 2
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 720.5714721679688,
      "epoch": 0.007159904534606206,
      "grad_norm": 0.32782371953159684,
      "kl": 1.9550323486328125e-05,
      "learning_rate": 2.3076923076923078e-07,
      "loss": 0.1558,
      "reward": 0.7518136501312256,
      "reward_std": 0.8834309577941895,
      "rewards/": 3.473353862762451,
      "rewards/math_compute_score": 0.0714285746216774,
      "step": 3
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 1059.40478515625,
      "epoch": 0.00954653937947494,
      "grad_norm": 0.2765682789441865,
      "kl": 2.300739288330078e-05,
      "learning_rate": 3.076923076923077e-07,
      "loss": 0.2423,
      "reward": 0.8247302770614624,
      "reward_std": 0.8945653438568115,
      "rewards/": 3.456984758377075,
      "rewards/math_compute_score": 0.1666666716337204,
      "step": 4
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 960.7380981445312,
      "epoch": 0.011933174224343675,
      "grad_norm": 0.33895220525238917,
      "kl": 4.792213439941406e-05,
      "learning_rate": 3.8461538461538463e-07,
      "loss": 0.1198,
      "reward": 0.4260934293270111,
      "reward_std": 1.0363324880599976,
      "rewards/": 2.797133684158325,
      "rewards/math_compute_score": -0.1666666716337204,
      "step": 5
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 1122.1190185546875,
      "epoch": 0.014319809069212411,
      "grad_norm": 0.627130910533,
      "kl": 1.6808509826660156e-05,
      "learning_rate": 4.6153846153846156e-07,
      "loss": 0.1927,
      "reward": 0.5224749445915222,
      "reward_std": 1.1910170316696167,
      "rewards/": 2.231422185897827,
      "rewards/math_compute_score": 0.095238097012043,
      "step": 6
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 1001.7857055664062,
      "epoch": 0.016706443914081145,
      "grad_norm": 0.403975286781731,
      "kl": 4.9114227294921875e-05,
      "learning_rate": 5.384615384615384e-07,
      "loss": 0.0551,
      "reward": 0.82244473695755,
      "reward_std": 0.849882960319519,
      "rewards/": 4.207461357116699,
      "rewards/math_compute_score": -0.02380952425301075,
      "step": 7
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 1074.1190185546875,
      "epoch": 0.01909307875894988,
      "grad_norm": 0.2627348199882925,
      "kl": 4.673004150390625e-05,
      "learning_rate": 6.153846153846154e-07,
      "loss": 0.1452,
      "reward": 0.9726702570915222,
      "reward_std": 0.9153575301170349,
      "rewards/": 4.291922569274902,
      "rewards/math_compute_score": 0.1428571492433548,
      "step": 8
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 696.3095092773438,
      "epoch": 0.021479713603818614,
      "grad_norm": 0.33742159571899366,
      "kl": 2.8848648071289062e-05,
      "learning_rate": 6.923076923076922e-07,
      "loss": 0.0265,
      "reward": 0.965959906578064,
      "reward_std": 0.6284574270248413,
      "rewards/": 3.115513563156128,
      "rewards/math_compute_score": 0.4285714328289032,
      "step": 9
    },
    {
      "epoch": 0.02386634844868735,
      "grad_norm": 0.35280633916428544,
      "learning_rate": 7.692307692307693e-07,
      "loss": 0.0653,
      "step": 10
    },
    {
      "epoch": 0.02386634844868735,
      "eval_clip_ratio": 0.0,
      "eval_completion_length": 1023.7500305175781,
      "eval_kl": 3.4868717193603516e-05,
      "eval_loss": 0.198669895529747,
      "eval_reward": 0.9118510186672211,
      "eval_reward_std": 0.8892157077789307,
      "eval_rewards/": 3.9640169739723206,
      "eval_rewards/math_compute_score": 0.1488095261156559,
      "eval_runtime": 82.3895,
      "eval_samples_per_second": 0.255,
      "eval_steps_per_second": 0.012,
      "step": 10
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 938.1309509277344,
      "epoch": 0.026252983293556086,
      "grad_norm": 0.30608376776798635,
      "kl": 4.845857620239258e-05,
      "learning_rate": 8.461538461538461e-07,
      "loss": 0.1056,
      "reward": 0.5514880195260048,
      "reward_std": 0.9235334098339081,
      "rewards/": 2.6145827770233154,
      "rewards/math_compute_score": 0.0357142835855484,
      "step": 11
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 1085.9761962890625,
      "epoch": 0.028639618138424822,
      "grad_norm": 0.2661507893474065,
      "kl": 4.2438507080078125e-05,
      "learning_rate": 9.230769230769231e-07,
      "loss": 0.2348,
      "reward": 0.9119373559951782,
      "reward_std": 0.7280297875404358,
      "rewards/": 3.988258123397827,
      "rewards/math_compute_score": 0.1428571492433548,
      "step": 12
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 1281.1190185546875,
      "epoch": 0.031026252983293555,
      "grad_norm": 0.26920466593905773,
      "kl": 3.2901763916015625e-05,
      "learning_rate": 1e-06,
      "loss": 0.1775,
      "reward": 0.6415899395942688,
      "reward_std": 0.9059087038040161,
      "rewards/": 3.493664026260376,
      "rewards/math_compute_score": -0.0714285746216774,
      "step": 13
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 1072.7857666015625,
      "epoch": 0.03341288782816229,
      "grad_norm": 0.31831495486107775,
      "kl": 4.267692565917969e-05,
      "learning_rate": 9.99985031250522e-07,
      "loss": 0.3527,
      "reward": 0.7675060033798218,
      "reward_std": 1.03658926486969,
      "rewards/": 3.170863628387451,
      "rewards/math_compute_score": 0.1666666716337204,
      "step": 14
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 1117.40478515625,
      "epoch": 0.03579952267303103,
      "grad_norm": 0.2987798632353295,
      "kl": 4.220008850097656e-05,
      "learning_rate": 9.999401258983425e-07,
      "loss": 0.1567,
      "reward": 0.8110119104385376,
      "reward_std": 0.8140324950218201,
      "rewards/": 3.293154716491699,
      "rewards/math_compute_score": 0.190476194024086,
      "step": 15
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 1116.9285888671875,
      "epoch": 0.03818615751789976,
      "grad_norm": 0.2575689090925313,
      "kl": 4.506111145019531e-05,
      "learning_rate": 9.998652866321687e-07,
      "loss": 0.1441,
      "reward": 0.39009490609169006,
      "reward_std": 0.8612415194511414,
      "rewards/": 2.5219030380249023,
      "rewards/math_compute_score": -0.1428571492433548,
      "step": 16
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 905.047607421875,
      "epoch": 0.0405727923627685,
      "grad_norm": 0.3749791786039388,
      "kl": 6.341934204101562e-05,
      "learning_rate": 9.997605179330017e-07,
      "loss": 0.2651,
      "reward": 0.7404214143753052,
      "reward_std": 0.6791912317276001,
      "rewards/": 3.035440444946289,
      "rewards/math_compute_score": 0.1666666716337204,
      "step": 17
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 824.1666870117188,
      "epoch": 0.04295942720763723,
      "grad_norm": 0.32853762246024193,
      "kl": 2.7060508728027344e-05,
      "learning_rate": 9.996258260738674e-07,
      "loss": 0.3643,
      "reward": 0.9851795434951782,
      "reward_std": 0.7530649304389954,
      "rewards/": 3.592564344406128,
      "rewards/math_compute_score": 0.3333333432674408,
      "step": 18
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 1304.6190185546875,
      "epoch": 0.045346062052505964,
      "grad_norm": 0.21651829402479825,
      "kl": 5.269050598144531e-05,
      "learning_rate": 9.994612191194405e-07,
      "loss": 0.0839,
      "reward": 0.5019060373306274,
      "reward_std": 1.0315256118774414,
      "rewards/": 2.7952444553375244,
      "rewards/math_compute_score": -0.0714285746216774,
      "step": 19
    },
    {
      "epoch": 0.0477326968973747,
      "grad_norm": 0.26887161838116597,
      "learning_rate": 9.992667069255618e-07,
      "loss": 0.1969,
      "step": 20
    },
    {
      "epoch": 0.0477326968973747,
      "eval_clip_ratio": 0.0,
      "eval_completion_length": 1136.1309814453125,
      "eval_kl": 5.7637691497802734e-05,
      "eval_loss": 0.25490644574165344,
      "eval_reward": 0.8720096871256828,
      "eval_reward_std": 0.9841141104698181,
      "eval_rewards/": 3.83623868227005,
      "eval_rewards/math_compute_score": 0.13095238618552685,
      "eval_runtime": 82.545,
      "eval_samples_per_second": 0.254,
      "eval_steps_per_second": 0.012,
      "step": 20
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 981.9642639160156,
      "epoch": 0.050119331742243436,
      "grad_norm": 0.34449148369838256,
      "kl": 0.0001131296157836914,
      "learning_rate": 9.990423011386488e-07,
      "loss": 0.2118,
      "reward": 0.9287342429161072,
      "reward_std": 0.7616195380687714,
      "rewards/": 3.977004289627075,
      "rewards/math_compute_score": 0.16666667070239782,
      "step": 21
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 877.3095703125,
      "epoch": 0.05250596658711217,
      "grad_norm": 0.40398884988921724,
      "kl": 0.000102996826171875,
      "learning_rate": 9.987880151949975e-07,
      "loss": 0.1459,
      "reward": 1.012574553489685,
      "reward_std": 0.7148696780204773,
      "rewards/": 4.300967216491699,
      "rewards/math_compute_score": 0.190476194024086,
      "step": 22
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 1151.3333740234375,
      "epoch": 0.05489260143198091,
      "grad_norm": 0.3204894614801331,
      "kl": 0.00017547607421875,
      "learning_rate": 9.985038643199778e-07,
      "loss": 0.2659,
      "reward": 0.4764881134033203,
      "reward_std": 0.9507336616516113,
      "rewards/": 2.763392925262451,
      "rewards/math_compute_score": -0.095238097012043,
      "step": 23
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 1214.59521484375,
      "epoch": 0.057279236276849645,
      "grad_norm": 0.2915535040877492,
      "kl": 9.870529174804688e-05,
      "learning_rate": 9.981898655271234e-07,
      "loss": 0.2398,
      "reward": 0.5920386910438538,
      "reward_std": 0.9118747115135193,
      "rewards/": 3.5316221714019775,
      "rewards/math_compute_score": -0.1428571492433548,
      "step": 24
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 1111.761962890625,
      "epoch": 0.059665871121718374,
      "grad_norm": 0.3966341374575089,
      "kl": 0.00014209747314453125,
      "learning_rate": 9.978460376171112e-07,
      "loss": 0.074,
      "reward": 0.5987061262130737,
      "reward_std": 0.8957276344299316,
      "rewards/": 2.6125779151916504,
      "rewards/math_compute_score": 0.095238097012043,
      "step": 25
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 937.5238037109375,
      "epoch": 0.06205250596658711,
      "grad_norm": 0.24086304315962492,
      "kl": 9.107589721679688e-05,
      "learning_rate": 9.974724011766361e-07,
      "loss": 0.1505,
      "reward": 1.1114305257797241,
      "reward_std": 0.7139089107513428,
      "rewards/": 3.8428664207458496,
      "rewards/math_compute_score": 0.4285714328289032,
      "step": 26
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 806.90478515625,
      "epoch": 0.06443914081145585,
      "grad_norm": 0.36587422053686375,
      "kl": 0.00021839141845703125,
      "learning_rate": 9.970689785771798e-07,
      "loss": 0.335,
      "reward": 1.220070719718933,
      "reward_std": 0.8377094268798828,
      "rewards/": 4.862258434295654,
      "rewards/math_compute_score": 0.3095238208770752,
      "step": 27
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 840.7857055664062,
      "epoch": 0.06682577565632458,
      "grad_norm": 1.1836785865916923,
      "kl": 0.00023555755615234375,
      "learning_rate": 9.96635793973669e-07,
      "loss": 0.3326,
      "reward": 1.1378720998764038,
      "reward_std": 0.8822279572486877,
      "rewards/": 4.356026649475098,
      "rewards/math_compute_score": 0.3333333432674408,
      "step": 28
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 894.5952758789062,
      "epoch": 0.06921241050119331,
      "grad_norm": 0.2976095645322702,
      "kl": 0.0002040863037109375,
      "learning_rate": 9.961728733030316e-07,
      "loss": 0.1709,
      "reward": 0.7952183485031128,
      "reward_std": 0.8282038569450378,
      "rewards/": 3.0237107276916504,
      "rewards/math_compute_score": 0.2380952388048172,
      "step": 29
    },
    {
      "epoch": 0.07159904534606205,
      "grad_norm": 0.31922105827783287,
      "learning_rate": 9.956802442826415e-07,
      "loss": 0.1756,
      "step": 30
    },
    {
      "epoch": 0.07159904534606205,
      "eval_clip_ratio": 0.0,
      "eval_completion_length": 1086.5595245361328,
      "eval_kl": 0.00028777122497558594,
      "eval_loss": 0.11829498410224915,
      "eval_reward": 0.9524402394890785,
      "eval_reward_std": 0.8144673109054565,
      "eval_rewards/": 3.9764870405197144,
      "eval_rewards/math_compute_score": 0.1964285746216774,
      "eval_runtime": 83.0591,
      "eval_samples_per_second": 0.253,
      "eval_steps_per_second": 0.012,
      "step": 30
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 918.5238037109375,
      "epoch": 0.07398568019093078,
      "grad_norm": 0.3688063540106728,
      "kl": 0.00035762786865234375,
      "learning_rate": 9.951579364086603e-07,
      "loss": 0.3767,
      "reward": 0.8680548071861267,
      "reward_std": 0.8301426470279694,
      "rewards/": 3.2926549911499023,
      "rewards/math_compute_score": 0.2619047649204731,
      "step": 31
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 1212.3809814453125,
      "epoch": 0.07637231503579953,
      "grad_norm": 0.2490900765535506,
      "kl": 0.0002994537353515625,
      "learning_rate": 9.946059809542706e-07,
      "loss": 0.1623,
      "reward": 0.7525901794433594,
      "reward_std": 0.8568804860115051,
      "rewards/": 3.477236747741699,
      "rewards/math_compute_score": 0.0714285746216774,
      "step": 32
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 725.40478515625,
      "epoch": 0.07875894988066826,
      "grad_norm": 0.39900214613105317,
      "kl": 0.000522613525390625,
      "learning_rate": 9.940244109678041e-07,
      "loss": 0.0071,
      "reward": 1.2588740587234497,
      "reward_std": 0.6066949367523193,
      "rewards/": 5.0562744140625,
      "rewards/math_compute_score": 0.3095238208770752,
      "step": 33
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 698.9761962890625,
      "epoch": 0.081145584725537,
      "grad_norm": 0.3908939527833547,
      "kl": 0.000396728515625,
      "learning_rate": 9.93413261270763e-07,
      "loss": 0.268,
      "reward": 1.276125431060791,
      "reward_std": 0.6631749868392944,
      "rewards/": 3.8091983795166016,
      "rewards/math_compute_score": 0.6428571343421936,
      "step": 34
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 908.0238037109375,
      "epoch": 0.08353221957040573,
      "grad_norm": 0.3377115570250511,
      "kl": 0.000377655029296875,
      "learning_rate": 9.927725684557339e-07,
      "loss": 0.1234,
      "reward": 1.2422432899475098,
      "reward_std": 0.6443389654159546,
      "rewards/": 5.163597583770752,
      "rewards/math_compute_score": 0.261904776096344,
      "step": 35
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 1112.8095703125,
      "epoch": 0.08591885441527446,
      "grad_norm": 0.23706974723352947,
      "kl": 0.000385284423828125,
      "learning_rate": 9.921023708841973e-07,
      "loss": -0.1353,
      "reward": 0.601748526096344,
      "reward_std": 0.5709776282310486,
      "rewards/": 3.294456958770752,
      "rewards/math_compute_score": -0.0714285746216774,
      "step": 36
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 867.7619018554688,
      "epoch": 0.0883054892601432,
      "grad_norm": 0.3434544369292518,
      "kl": 0.00067138671875,
      "learning_rate": 9.914027086842322e-07,
      "loss": 0.0705,
      "reward": 1.0254464149475098,
      "reward_std": 1.1037424802780151,
      "rewards/": 3.5081846714019775,
      "rewards/math_compute_score": 0.4047619104385376,
      "step": 37
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 792.3095092773438,
      "epoch": 0.09069212410501193,
      "grad_norm": 0.3791096432652416,
      "kl": 0.000576019287109375,
      "learning_rate": 9.906736237481108e-07,
      "loss": 0.1116,
      "reward": 1.2115607261657715,
      "reward_std": 0.8592520356178284,
      "rewards/": 4.153041362762451,
      "rewards/math_compute_score": 0.4761904776096344,
      "step": 38
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 772.5714721679688,
      "epoch": 0.09307875894988067,
      "grad_norm": 0.3544290113443038,
      "kl": 0.000850677490234375,
      "learning_rate": 9.899151597297922e-07,
      "loss": 0.3191,
      "reward": 1.139400839805603,
      "reward_std": 0.6882633566856384,
      "rewards/": 3.9827184677124023,
      "rewards/math_compute_score": 0.4285714328289032,
      "step": 39
    },
    {
      "epoch": 0.0954653937947494,
      "grad_norm": 0.42043440113349795,
      "learning_rate": 9.891273620423082e-07,
      "loss": 0.1347,
      "step": 40
    },
    {
      "epoch": 0.0954653937947494,
      "eval_clip_ratio": 0.0,
      "eval_completion_length": 1038.15478515625,
      "eval_kl": 0.0018901824951171875,
      "eval_loss": 0.12366585433483124,
      "eval_reward": 1.030681535601616,
      "eval_reward_std": 0.8943771868944168,
      "eval_rewards/": 4.439121901988983,
      "eval_rewards/math_compute_score": 0.17857143329456449,
      "eval_runtime": 82.5285,
      "eval_samples_per_second": 0.254,
      "eval_steps_per_second": 0.012,
      "step": 40
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 908.3333435058594,
      "epoch": 0.09785202863961814,
      "grad_norm": 0.25194007930707885,
      "kl": 0.0010833740234375,
      "learning_rate": 9.883102778550434e-07,
      "loss": 0.2062,
      "reward": 1.3001837134361267,
      "reward_std": 0.7053222358226776,
      "rewards/": 4.977108955383301,
      "rewards/math_compute_score": 0.380952388048172,
      "step": 41
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 900.2619018554688,
      "epoch": 0.10023866348448687,
      "grad_norm": 0.32787405490701843,
      "kl": 0.0010986328125,
      "learning_rate": 9.874639560909118e-07,
      "loss": 0.0606,
      "reward": 1.039421796798706,
      "reward_std": 0.8191918730735779,
      "rewards/": 3.197108745574951,
      "rewards/math_compute_score": 0.5,
      "step": 42
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 917.90478515625,
      "epoch": 0.1026252983293556,
      "grad_norm": 0.5163718395183062,
      "kl": 0.00128936767578125,
      "learning_rate": 9.865884474234275e-07,
      "loss": 0.2818,
      "reward": 1.222414493560791,
      "reward_std": 0.7349604368209839,
      "rewards/": 4.493024826049805,
      "rewards/math_compute_score": 0.4047619104385376,
      "step": 43
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 690.2857055664062,
      "epoch": 0.10501193317422435,
      "grad_norm": 0.3207693941377101,
      "kl": 0.0008087158203125,
      "learning_rate": 9.856838042736696e-07,
      "loss": 0.1025,
      "reward": 1.2743303775787354,
      "reward_std": 0.7797837257385254,
      "rewards/": 4.46688985824585,
      "rewards/math_compute_score": 0.4761904776096344,
      "step": 44
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 676.0714111328125,
      "epoch": 0.10739856801909307,
      "grad_norm": 0.28894735083744694,
      "kl": 0.0009918212890625,
      "learning_rate": 9.847500808071456e-07,
      "loss": 0.1353,
      "reward": 1.2259044647216797,
      "reward_std": 0.7126508355140686,
      "rewards/": 3.558094024658203,
      "rewards/math_compute_score": 0.6428571343421936,
      "step": 45
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 775.40478515625,
      "epoch": 0.10978520286396182,
      "grad_norm": 0.27186613080235555,
      "kl": 0.000629425048828125,
      "learning_rate": 9.837873329305457e-07,
      "loss": 0.1791,
      "reward": 1.423451542854309,
      "reward_std": 0.6115496754646301,
      "rewards/": 4.545828819274902,
      "rewards/math_compute_score": 0.6428571343421936,
      "step": 46
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 1084.90478515625,
      "epoch": 0.11217183770883055,
      "grad_norm": 0.2663709639869927,
      "kl": 0.001007080078125,
      "learning_rate": 9.82795618288397e-07,
      "loss": 0.1393,
      "reward": 0.9681082367897034,
      "reward_std": 0.7282014489173889,
      "rewards/": 4.173874855041504,
      "rewards/math_compute_score": 0.1666666716337204,
      "step": 47
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 844.6428833007812,
      "epoch": 0.11455847255369929,
      "grad_norm": 0.30223981125620725,
      "kl": 0.00147247314453125,
      "learning_rate": 9.817749962596114e-07,
      "loss": 0.18,
      "reward": 1.0321242809295654,
      "reward_std": 0.8400474190711975,
      "rewards/": 4.208240509033203,
      "rewards/math_compute_score": 0.2380952388048172,
      "step": 48
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 560.5238037109375,
      "epoch": 0.11694510739856802,
      "grad_norm": 0.4242899512098847,
      "kl": 0.00177764892578125,
      "learning_rate": 9.807255279539312e-07,
      "loss": 0.1896,
      "reward": 1.313750982284546,
      "reward_std": 0.46607664227485657,
      "rewards/": 4.283040523529053,
      "rewards/math_compute_score": 0.5714285969734192,
      "step": 49
    },
    {
      "epoch": 0.11933174224343675,
      "grad_norm": 0.348729238348003,
      "learning_rate": 9.796472762082685e-07,
      "loss": 0.2283,
      "step": 50
    },
    {
      "epoch": 0.11933174224343675,
      "eval_clip_ratio": 0.0,
      "eval_completion_length": 933.5654907226562,
      "eval_kl": 0.0038604736328125,
      "eval_loss": 0.1660795509815216,
      "eval_reward": 1.0914957970380783,
      "eval_reward_std": 0.7910807579755783,
      "eval_rewards/": 4.457478940486908,
      "eval_rewards/math_compute_score": 0.25000000558793545,
      "eval_runtime": 81.0284,
      "eval_samples_per_second": 0.259,
      "eval_steps_per_second": 0.012,
      "step": 50
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 785.5595397949219,
      "epoch": 0.12171837708830549,
      "grad_norm": 0.27596087991963414,
      "kl": 0.001678466796875,
      "learning_rate": 9.785403055829448e-07,
      "loss": -0.001,
      "reward": 1.2578682899475098,
      "reward_std": 0.7686284184455872,
      "rewards/": 4.384579658508301,
      "rewards/math_compute_score": 0.4761904776096344,
      "step": 51
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 916.0714721679688,
      "epoch": 0.12410501193317422,
      "grad_norm": 0.2741628390919074,
      "kl": 0.00164794921875,
      "learning_rate": 9.77404682357824e-07,
      "loss": 0.1083,
      "reward": 1.1715024709701538,
      "reward_std": 0.6183907985687256,
      "rewards/": 4.333702564239502,
      "rewards/math_compute_score": 0.380952388048172,
      "step": 52
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 819.5714721679688,
      "epoch": 0.12649164677804295,
      "grad_norm": 0.35526991630751986,
      "kl": 0.0020904541015625,
      "learning_rate": 9.762404745283437e-07,
      "loss": 0.2932,
      "reward": 1.2613282203674316,
      "reward_std": 0.5622918605804443,
      "rewards/": 4.497117042541504,
      "rewards/math_compute_score": 0.4523809552192688,
      "step": 53
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 878.7857055664062,
      "epoch": 0.1288782816229117,
      "grad_norm": 0.5981296635875077,
      "kl": 0.00194549560546875,
      "learning_rate": 9.75047751801446e-07,
      "loss": 0.2455,
      "reward": 1.058624029159546,
      "reward_std": 0.8494831323623657,
      "rewards/": 4.2455010414123535,
      "rewards/math_compute_score": 0.261904776096344,
      "step": 54
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 753.7380981445312,
      "epoch": 0.13126491646778043,
      "grad_norm": 0.2280494532860685,
      "kl": 0.000865936279296875,
      "learning_rate": 9.738265855914012e-07,
      "loss": -0.0038,
      "reward": 1.4897369146347046,
      "reward_std": 0.6451947689056396,
      "rewards/": 4.972493648529053,
      "rewards/math_compute_score": 0.6190476417541504,
      "step": 55
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 732.0,
      "epoch": 0.13365155131264916,
      "grad_norm": 0.3162020489120827,
      "kl": 0.0020599365234375,
      "learning_rate": 9.725770490155338e-07,
      "loss": 0.2325,
      "reward": 1.3167736530303955,
      "reward_std": 0.5749982595443726,
      "rewards/": 4.393392086029053,
      "rewards/math_compute_score": 0.5476190447807312,
      "step": 56
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 780.0238037109375,
      "epoch": 0.1360381861575179,
      "grad_norm": 0.5756772020485215,
      "kl": 0.001495361328125,
      "learning_rate": 9.712992168898435e-07,
      "loss": 0.1519,
      "reward": 1.3120629787445068,
      "reward_std": 0.6641415953636169,
      "rewards/": 5.226981163024902,
      "rewards/math_compute_score": 0.3333333432674408,
      "step": 57
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 556.5,
      "epoch": 0.13842482100238662,
      "grad_norm": 0.4844983583796039,
      "kl": 0.0034942626953125,
      "learning_rate": 9.699931657245263e-07,
      "loss": -0.0877,
      "reward": 1.2563197612762451,
      "reward_std": 0.745265543460846,
      "rewards/": 4.376836776733398,
      "rewards/math_compute_score": 0.4761904776096344,
      "step": 58
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 635.2380981445312,
      "epoch": 0.14081145584725538,
      "grad_norm": 0.4758034742756555,
      "kl": 0.0026397705078125,
      "learning_rate": 9.686589737193928e-07,
      "loss": 0.053,
      "reward": 1.0557314157485962,
      "reward_std": 1.0078890323638916,
      "rewards/": 3.754848003387451,
      "rewards/math_compute_score": 0.380952388048172,
      "step": 59
    },
    {
      "epoch": 0.1431980906921241,
      "grad_norm": 0.3072937144155207,
      "learning_rate": 9.67296720759187e-07,
      "loss": 0.0474,
      "step": 60
    },
    {
      "epoch": 0.1431980906921241,
      "eval_clip_ratio": 0.0,
      "eval_completion_length": 837.0000152587891,
      "eval_kl": 0.013916015625,
      "eval_loss": 0.11870573461055756,
      "eval_reward": 1.0395183265209198,
      "eval_reward_std": 0.7884587794542313,
      "eval_rewards/": 4.197591245174408,
      "eval_rewards/math_compute_score": 0.2500000111758709,
      "eval_runtime": 79.2917,
      "eval_samples_per_second": 0.265,
      "eval_steps_per_second": 0.013,
      "step": 60
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 674.9404907226562,
      "epoch": 0.14558472553699284,
      "grad_norm": 0.4086529116673979,
      "kl": 0.00350189208984375,
      "learning_rate": 9.659064884088016e-07,
      "loss": 0.1354,
      "reward": 1.1227608919143677,
      "reward_std": 0.6351146399974823,
      "rewards/": 4.042375802993774,
      "rewards/math_compute_score": 0.3928571492433548,
      "step": 61
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 730.952392578125,
      "epoch": 0.14797136038186157,
      "grad_norm": 0.33492026311818307,
      "kl": 0.0020599365234375,
      "learning_rate": 9.644883599083957e-07,
      "loss": -0.0413,
      "reward": 1.2523438930511475,
      "reward_std": 0.5973415374755859,
      "rewards/": 5.023623466491699,
      "rewards/math_compute_score": 0.3095238208770752,
      "step": 62
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 591.1190795898438,
      "epoch": 0.15035799522673032,
      "grad_norm": 0.30746445342498774,
      "kl": 0.00110626220703125,
      "learning_rate": 9.630424201684103e-07,
      "loss": -0.024,
      "reward": 1.3001348972320557,
      "reward_std": 0.5841225385665894,
      "rewards/": 4.4054365158081055,
      "rewards/math_compute_score": 0.523809552192688,
      "step": 63
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 739.0714721679688,
      "epoch": 0.15274463007159905,
      "grad_norm": 0.33486655671684584,
      "kl": 0.0015411376953125,
      "learning_rate": 9.615687557644848e-07,
      "loss": 0.1172,
      "reward": 1.4695312976837158,
      "reward_std": 0.6374803185462952,
      "rewards/": 4.871465682983398,
      "rewards/math_compute_score": 0.6190476417541504,
      "step": 64
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 820.4285888671875,
      "epoch": 0.15513126491646778,
      "grad_norm": 0.7636558012245415,
      "kl": 0.0037994384765625,
      "learning_rate": 9.600674549322716e-07,
      "loss": 0.2518,
      "reward": 1.0687873363494873,
      "reward_std": 0.8015285730361938,
      "rewards/": 3.534412384033203,
      "rewards/math_compute_score": 0.4523809552192688,
      "step": 65
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 991.5238037109375,
      "epoch": 0.1575178997613365,
      "grad_norm": 0.24283471181996055,
      "kl": 0.00176239013671875,
      "learning_rate": 9.585386075621552e-07,
      "loss": 0.1276,
      "reward": 1.4505395889282227,
      "reward_std": 0.3715428113937378,
      "rewards/": 5.157459259033203,
      "rewards/math_compute_score": 0.523809552192688,
      "step": 66
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 666.2857055664062,
      "epoch": 0.15990453460620524,
      "grad_norm": 0.32163982732892316,
      "kl": 0.003021240234375,
      "learning_rate": 9.569823051938689e-07,
      "loss": 0.3253,
      "reward": 1.4773437976837158,
      "reward_std": 0.6230893731117249,
      "rewards/": 4.339099884033203,
      "rewards/math_compute_score": 0.761904776096344,
      "step": 67
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 603.547607421875,
      "epoch": 0.162291169451074,
      "grad_norm": 0.5401807083101557,
      "kl": 0.00982666015625,
      "learning_rate": 9.553986410110134e-07,
      "loss": 0.4025,
      "reward": 0.9949312210083008,
      "reward_std": 0.4883081316947937,
      "rewards/": 3.641322612762451,
      "rewards/math_compute_score": 0.3333333432674408,
      "step": 68
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 857.6190795898438,
      "epoch": 0.16467780429594273,
      "grad_norm": 0.2558130935281063,
      "kl": 0.00057220458984375,
      "learning_rate": 9.537877098354784e-07,
      "loss": -0.0058,
      "reward": 1.5268882513046265,
      "reward_std": 0.5736187696456909,
      "rewards/": 5.34872579574585,
      "rewards/math_compute_score": 0.5714285969734192,
      "step": 69
    },
    {
      "epoch": 0.16706443914081145,
      "grad_norm": 0.27521723291274197,
      "learning_rate": 9.52149608121765e-07,
      "loss": 0.0567,
      "step": 70
    },
    {
      "epoch": 0.16706443914081145,
      "eval_clip_ratio": 0.0,
      "eval_completion_length": 882.9702453613281,
      "eval_kl": 0.03376007080078125,
      "eval_loss": 0.26883435249328613,
      "eval_reward": 1.151841551065445,
      "eval_reward_std": 0.8478920459747314,
      "eval_rewards/": 4.473493456840515,
      "eval_rewards/math_compute_score": 0.3214285783469677,
      "eval_runtime": 78.4604,
      "eval_samples_per_second": 0.268,
      "eval_steps_per_second": 0.013,
      "step": 70
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 712.1309509277344,
      "epoch": 0.16945107398568018,
      "grad_norm": 0.286488510896718,
      "kl": 0.00177001953125,
      "learning_rate": 9.504844339512094e-07,
      "loss": 0.2365,
      "reward": 1.5305490493774414,
      "reward_std": 0.5794989466667175,
      "rewards/": 5.12893533706665,
      "rewards/math_compute_score": 0.6309524029493332,
      "step": 71
    },
| { | |
| "clip_ratio": 0.0, | |
| "completion_length": 807.7857055664062, | |
| "epoch": 0.1718377088305489, | |
| "grad_norm": 0.3110498028785089, | |
| "kl": 0.0013275146484375, | |
| "learning_rate": 9.487922870261121e-07, | |
| "loss": -0.0117, | |
| "reward": 1.6091006994247437, | |
| "reward_std": 0.5738370418548584, | |
| "rewards/": 5.188360214233398, | |
| "rewards/math_compute_score": 0.7142857313156128, | |
| "step": 72 | |
| }, | |
| { | |
| "clip_ratio": 0.0, | |
| "completion_length": 771.6666870117188, | |
| "epoch": 0.17422434367541767, | |
| "grad_norm": 0.28968592215813294, | |
| "kl": 0.0019989013671875, | |
| "learning_rate": 9.470732686637664e-07, | |
| "loss": 0.0653, | |
| "reward": 1.406017541885376, | |
| "reward_std": 0.7840931415557861, | |
| "rewards/": 5.220563888549805, | |
| "rewards/math_compute_score": 0.4523809552192688, | |
| "step": 73 | |
| }, | |
| { | |
| "clip_ratio": 0.0, | |
| "completion_length": 776.0238037109375, | |
| "epoch": 0.1766109785202864, | |
| "grad_norm": 0.31133583857342856, | |
| "kl": 0.0069580078125, | |
| "learning_rate": 9.45327481790393e-07, | |
| "loss": -0.0325, | |
| "reward": 1.483282208442688, | |
| "reward_std": 0.7019442915916443, | |
| "rewards/": 5.321172714233398, | |
| "rewards/math_compute_score": 0.523809552192688, | |
| "step": 74 | |
| }, | |
| { | |
| "clip_ratio": 0.0, | |
| "completion_length": 764.5, | |
| "epoch": 0.17899761336515513, | |
| "grad_norm": 0.36739420438573467, | |
| "kl": 0.004913330078125, | |
| "learning_rate": 9.435550309349776e-07, | |
| "loss": -0.025, | |
| "reward": 1.2999628782272339, | |
| "reward_std": 0.7012795805931091, | |
| "rewards/": 5.26171875, | |
| "rewards/math_compute_score": 0.3095238208770752, | |
| "step": 75 | |
| }, | |
| { | |
| "clip_ratio": 0.0, | |
| "completion_length": 666.9761962890625, | |
| "epoch": 0.18138424821002386, | |
| "grad_norm": 0.33428186789934067, | |
| "kl": 0.0022125244140625, | |
| "learning_rate": 9.417560222230114e-07, | |
| "loss": 0.0816, | |
| "reward": 1.252938985824585, | |
| "reward_std": 0.6224194765090942, | |
| "rewards/": 4.07421875, | |
| "rewards/math_compute_score": 0.5476190447807312, | |
| "step": 76 | |
| }, | |
| { | |
| "clip_ratio": 0.0, | |
| "completion_length": 703.4285888671875, | |
| "epoch": 0.18377088305489261, | |
| "grad_norm": 0.3021119658752094, | |
| "kl": 0.0036773681640625, | |
| "learning_rate": 9.399305633701372e-07, | |
| "loss": 0.0133, | |
| "reward": 1.5353423357009888, | |
| "reward_std": 0.5898163318634033, | |
| "rewards/": 5.390996932983398, | |
| "rewards/math_compute_score": 0.5714285969734192, | |
| "step": 77 | |
| }, | |
| { | |
| "clip_ratio": 0.0, | |
| "completion_length": 743.4761962890625, | |
| "epoch": 0.18615751789976134, | |
| "grad_norm": 0.4512916551501506, | |
| "kl": 0.003326416015625, | |
| "learning_rate": 9.380787636757e-07, | |
| "loss": 0.3175, | |
| "reward": 1.4912109375, | |
| "reward_std": 0.6637881994247437, | |
| "rewards/": 5.360816478729248, | |
| "rewards/math_compute_score": 0.523809552192688, | |
| "step": 78 | |
| }, | |
| { | |
| "clip_ratio": 0.0, | |
| "completion_length": 760.452392578125, | |
| "epoch": 0.18854415274463007, | |
| "grad_norm": 0.3498194905087342, | |
| "kl": 0.00142669677734375, | |
| "learning_rate": 9.362007340162028e-07, | |
| "loss": 0.0411, | |
| "reward": 1.5506510734558105, | |
| "reward_std": 0.3708002269268036, | |
| "rewards/": 5.277064800262451, | |
| "rewards/math_compute_score": 0.6190476417541504, | |
| "step": 79 | |
| }, | |
| { | |
| "epoch": 0.1909307875894988, | |
| "grad_norm": 0.23488028047184029, | |
| "learning_rate": 9.342965868386673e-07, | |
| "loss": -0.0472, | |
| "step": 80 | |
| }, | |
| { | |
| "epoch": 0.1909307875894988, | |
| "eval_clip_ratio": 0.0, | |
| "eval_completion_length": 915.2738342285156, | |
| "eval_kl": 0.02166748046875, | |
| "eval_loss": 0.17802654206752777, | |
| "eval_reward": 1.2084915190935135, | |
| "eval_reward_std": 0.7365763932466507, | |
| "eval_rewards/": 4.542457342147827, | |
| "eval_rewards/math_compute_score": 0.3750000111758709, | |
| "eval_runtime": 78.6127, | |
| "eval_samples_per_second": 0.267, | |
| "eval_steps_per_second": 0.013, | |
| "step": 80 | |
| } | |
| ], | |
| "logging_steps": 1.0, | |
| "max_steps": 419, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 1, | |
| "save_steps": 40, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": false | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 0.0, | |
| "train_batch_size": 14, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |