{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.9666011787819253,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "clip_ratio": 0.0,
      "completions/clipped_ratio": 0.0,
      "completions/max_length": 579.5,
      "completions/max_terminated_length": 579.5,
      "completions/mean_length": 445.5,
      "completions/mean_terminated_length": 445.5,
      "completions/min_length": 358.5,
      "completions/min_terminated_length": 358.5,
      "epoch": 0.003929273084479371,
      "grad_norm": 0.46015690099076406,
      "kl": 0.0,
      "learning_rate": 0.0,
      "loss": -0.0157,
      "num_tokens": 18992.0,
      "reward": 9.166666984558105,
      "reward_std": 1.178511381149292,
      "rewards/accuracy_reward/mean": 1.8333333134651184,
      "rewards/accuracy_reward/std": 0.40824830532073975,
      "rewards/format_reward/mean": 0.0,
      "rewards/format_reward/std": 0.0,
      "step": 1
    },
    {
      "clip_ratio": 0.0,
      "completions/clipped_ratio": 0.0,
      "completions/max_length": 554.4736842105264,
      "completions/max_terminated_length": 554.4736842105264,
      "completions/mean_length": 427.37282040244656,
      "completions/mean_terminated_length": 427.37282040244656,
      "completions/min_length": 322.5,
      "completions/min_terminated_length": 322.5,
      "epoch": 0.07858546168958742,
      "grad_norm": 0.37367734361954813,
      "kl": 0.0002414552788985403,
      "learning_rate": 1.14e-06,
      "loss": 0.0021,
      "num_tokens": 366755.0,
      "reward": 9.62719312467073,
      "reward_std": 0.5272287757773149,
      "rewards/accuracy_reward/mean": 1.9254385891713595,
      "rewards/accuracy_reward/std": 0.18263739974875198,
      "rewards/format_reward/mean": 0.0,
      "rewards/format_reward/std": 0.0,
      "step": 20
    },
    {
      "clip_ratio": 0.0,
      "completions/clipped_ratio": 0.0,
      "completions/max_length": 543.875,
      "completions/max_terminated_length": 543.875,
      "completions/mean_length": 423.0708427429199,
      "completions/mean_terminated_length": 423.0708427429199,
      "completions/min_length": 319.9,
      "completions/min_terminated_length": 319.9,
      "epoch": 0.15717092337917485,
      "grad_norm": 0.5322056075322863,
      "kl": 0.0003676414489746094,
      "learning_rate": 2.34e-06,
      "loss": 0.0042,
      "num_tokens": 745300.0,
      "reward": 9.479166793823243,
      "reward_std": 0.5008673369884491,
      "rewards/accuracy_reward/mean": 1.8958333283662796,
      "rewards/accuracy_reward/std": 0.20629913210868836,
      "rewards/format_reward/mean": 0.0,
      "rewards/format_reward/std": 0.0,
      "step": 40
    },
    {
      "clip_ratio": 0.0,
      "completions/clipped_ratio": 0.0,
      "completions/max_length": 571.275,
      "completions/max_terminated_length": 571.275,
      "completions/mean_length": 416.7875122070312,
      "completions/mean_terminated_length": 416.7875122070312,
      "completions/min_length": 318.6,
      "completions/min_terminated_length": 318.6,
      "epoch": 0.2357563850687623,
      "grad_norm": 0.46502934681984226,
      "kl": 0.0017244338989257813,
      "learning_rate": 2.9970400926424076e-06,
      "loss": 0.0059,
      "num_tokens": 1102433.0,
      "reward": 9.479166793823243,
      "reward_std": 0.5597929060459137,
      "rewards/accuracy_reward/mean": 1.8958333283662796,
      "rewards/accuracy_reward/std": 0.2154431849718094,
      "rewards/format_reward/mean": 0.0,
      "rewards/format_reward/std": 0.0,
      "step": 60
    },
    {
      "clip_ratio": 0.0,
      "completions/clipped_ratio": 0.0,
      "completions/max_length": 529.75,
      "completions/max_terminated_length": 529.75,
      "completions/mean_length": 416.1666778564453,
      "completions/mean_terminated_length": 416.1666778564453,
      "completions/min_length": 324.725,
      "completions/min_terminated_length": 324.725,
      "epoch": 0.3143418467583497,
      "grad_norm": 0.03393033103037874,
      "kl": 0.005292510986328125,
      "learning_rate": 2.969362874399016e-06,
      "loss": -0.0009,
      "num_tokens": 1476975.0,
      "reward": 9.583333444595336,
      "reward_std": 0.3535534143447876,
      "rewards/accuracy_reward/mean": 1.9166666626930238,
      "rewards/accuracy_reward/std": 0.17411426901817323,
      "rewards/format_reward/mean": 0.0,
      "rewards/format_reward/std": 0.0,
      "step": 80
    },
    {
      "clip_ratio": 0.0,
      "completions/clipped_ratio": 0.0,
      "completions/max_length": 527.25,
      "completions/max_terminated_length": 527.25,
      "completions/mean_length": 416.6166793823242,
      "completions/mean_terminated_length": 416.6166793823242,
      "completions/min_length": 323.575,
      "completions/min_terminated_length": 323.575,
      "epoch": 0.3929273084479371,
      "grad_norm": 0.030903746658924047,
      "kl": 0.008585357666015625,
      "learning_rate": 2.913086179180945e-06,
      "loss": 0.0066,
      "num_tokens": 1840061.0,
      "reward": 9.604166769981385,
      "reward_std": 0.5597929060459137,
      "rewards/accuracy_reward/mean": 1.920833334326744,
      "rewards/accuracy_reward/std": 0.16390806138515474,
      "rewards/format_reward/mean": 0.0,
      "rewards/format_reward/std": 0.0,
      "step": 100
    },
    {
      "clip_ratio": 0.0,
      "completions/clipped_ratio": 0.0,
      "completions/max_length": 541.625,
      "completions/max_terminated_length": 541.625,
      "completions/mean_length": 409.9083450317383,
      "completions/mean_terminated_length": 409.9083450317383,
      "completions/min_length": 306.25,
      "completions/min_terminated_length": 306.25,
      "epoch": 0.4715127701375246,
      "grad_norm": 0.45989535358245415,
      "kl": 0.012615966796875,
      "learning_rate": 2.829305368846822e-06,
      "loss": -0.0081,
      "num_tokens": 2205137.0,
      "reward": 9.583333444595336,
      "reward_std": 0.5303301155567169,
      "rewards/accuracy_reward/mean": 1.9166666597127915,
      "rewards/accuracy_reward/std": 0.18471990823745726,
      "rewards/format_reward/mean": 0.0,
      "rewards/format_reward/std": 0.0,
      "step": 120
    },
    {
      "clip_ratio": 0.0,
      "completions/clipped_ratio": 0.0,
      "completions/max_length": 566.95,
      "completions/max_terminated_length": 566.95,
      "completions/mean_length": 433.34167709350584,
      "completions/mean_terminated_length": 433.34167709350584,
      "completions/min_length": 330.4,
      "completions/min_terminated_length": 330.4,
      "epoch": 0.550098231827112,
      "grad_norm": 0.037923463367969736,
      "kl": 0.0144561767578125,
      "learning_rate": 2.7196511415705416e-06,
      "loss": 0.0048,
      "num_tokens": 2588775.0,
      "reward": 9.416666793823243,
      "reward_std": 0.4714045524597168,
      "rewards/accuracy_reward/mean": 1.883333331346512,
      "rewards/accuracy_reward/std": 0.2257540464401245,
      "rewards/format_reward/mean": 0.0,
      "rewards/format_reward/std": 0.0,
      "step": 140
    },
    {
      "clip_ratio": 0.0,
      "completions/clipped_ratio": 0.0,
      "completions/max_length": 567.8,
      "completions/max_terminated_length": 567.8,
      "completions/mean_length": 438.3958457946777,
      "completions/mean_terminated_length": 438.3958457946777,
      "completions/min_length": 330.85,
      "completions/min_terminated_length": 330.85,
      "epoch": 0.6286836935166994,
      "grad_norm": 0.03890017799267318,
      "kl": 0.0144439697265625,
      "learning_rate": 2.5862577921562017e-06,
      "loss": 0.0022,
      "num_tokens": 2969880.0,
      "reward": 9.70833342075348,
      "reward_std": 0.294627845287323,
      "rewards/accuracy_reward/mean": 1.941666665673256,
      "rewards/accuracy_reward/std": 0.1278819650411606,
      "rewards/format_reward/mean": 0.0,
      "rewards/format_reward/std": 0.0,
      "step": 160
    },
    {
      "clip_ratio": 0.0,
      "completions/clipped_ratio": 0.0,
      "completions/max_length": 542.925,
      "completions/max_terminated_length": 542.925,
      "completions/mean_length": 432.9458457946777,
      "completions/mean_terminated_length": 432.9458457946777,
      "completions/min_length": 343.425,
      "completions/min_terminated_length": 343.425,
      "epoch": 0.7072691552062869,
      "grad_norm": 0.029836981989722188,
      "kl": 0.01332855224609375,
      "learning_rate": 2.4317216704174657e-06,
      "loss": 0.0032,
      "num_tokens": 3344559.0,
      "reward": 9.666666722297668,
      "reward_std": 0.3535534143447876,
      "rewards/accuracy_reward/mean": 1.933333334326744,
      "rewards/accuracy_reward/std": 0.11828449666500092,
      "rewards/format_reward/mean": 0.0,
      "rewards/format_reward/std": 0.0,
      "step": 180
    },
    {
      "clip_ratio": 0.0,
      "completions/clipped_ratio": 0.0,
      "completions/max_length": 553.825,
      "completions/max_terminated_length": 553.825,
      "completions/mean_length": 432.8000114440918,
      "completions/mean_terminated_length": 432.8000114440918,
      "completions/min_length": 329.975,
      "completions/min_terminated_length": 329.975,
      "epoch": 0.7858546168958742,
      "grad_norm": 0.028665697797427125,
      "kl": 0.01359100341796875,
      "learning_rate": 2.2590506461817453e-06,
      "loss": 0.0011,
      "num_tokens": 3723883.0,
      "reward": 9.666666769981385,
      "reward_std": 0.4714045524597168,
      "rewards/accuracy_reward/mean": 1.9333333283662797,
      "rewards/accuracy_reward/std": 0.14829438030719758,
      "rewards/format_reward/mean": 0.0,
      "rewards/format_reward/std": 0.0,
      "step": 200
    },
    {
      "clip_ratio": 0.0,
      "completions/clipped_ratio": 0.0,
      "completions/max_length": 565.9,
      "completions/max_terminated_length": 565.9,
      "completions/mean_length": 442.46251068115237,
      "completions/mean_terminated_length": 442.46251068115237,
      "completions/min_length": 349.45,
      "completions/min_terminated_length": 349.45,
      "epoch": 0.8644400785854617,
      "grad_norm": 0.03081820386262942,
      "kl": 0.01476898193359375,
      "learning_rate": 2.0716055645254116e-06,
      "loss": -0.0006,
      "num_tokens": 4106760.0,
      "reward": 9.812500023841858,
      "reward_std": 0.2062394917011261,
      "rewards/accuracy_reward/mean": 1.9625000029802322,
      "rewards/accuracy_reward/std": 0.06714880466461182,
      "rewards/format_reward/mean": 0.0,
      "rewards/format_reward/std": 0.0,
      "step": 220
    },
    {
      "clip_ratio": 0.0,
      "completions/clipped_ratio": 0.0,
      "completions/max_length": 570.925,
      "completions/max_terminated_length": 570.925,
      "completions/mean_length": 430.3500122070312,
      "completions/mean_terminated_length": 430.3500122070312,
      "completions/min_length": 335.975,
      "completions/min_terminated_length": 335.975,
      "epoch": 0.9430255402750491,
      "grad_norm": 0.0325454508362931,
      "kl": 0.01363067626953125,
      "learning_rate": 1.8730348307472826e-06,
      "loss": -0.0006,
      "num_tokens": 4498062.0,
      "reward": 9.70833342075348,
      "reward_std": 0.4124789834022522,
      "rewards/accuracy_reward/mean": 1.9416666626930237,
      "rewards/accuracy_reward/std": 0.1278819650411606,
      "rewards/format_reward/mean": 0.0,
      "rewards/format_reward/std": 0.0,
      "step": 240
    },
    {
      "clip_ratio": 0.0,
      "completions/clipped_ratio": 0.0,
      "completions/max_length": 546.625,
      "completions/max_terminated_length": 546.625,
      "completions/mean_length": 429.0875114440918,
      "completions/mean_terminated_length": 429.0875114440918,
      "completions/min_length": 333.85,
      "completions/min_terminated_length": 333.85,
      "epoch": 1.0235756385068762,
      "grad_norm": 0.0281329807884298,
      "kl": 0.01484832763671875,
      "learning_rate": 1.667203398309488e-06,
      "loss": -0.0006,
      "num_tokens": 4844839.0,
      "reward": 9.666666746139526,
      "reward_std": 0.47140454649925234,
      "rewards/accuracy_reward/mean": 1.9333333313465118,
      "rewards/accuracy_reward/std": 0.13859225809574127,
      "rewards/format_reward/mean": 0.0,
      "rewards/format_reward/std": 0.0,
      "step": 260
    },
    {
      "clip_ratio": 0.0,
      "completions/clipped_ratio": 0.0,
      "completions/max_length": 530.05,
      "completions/max_terminated_length": 530.05,
      "completions/mean_length": 412.9875099182129,
      "completions/mean_terminated_length": 412.9875099182129,
      "completions/min_length": 319.675,
      "completions/min_terminated_length": 319.675,
      "epoch": 1.1021611001964637,
      "grad_norm": 0.03324322790874101,
      "kl": 0.0139068603515625,
      "learning_rate": 1.458117541914647e-06,
      "loss": 0.0015,
      "num_tokens": 5222808.0,
      "reward": 9.791666746139526,
      "reward_std": 0.294627845287323,
      "rewards/accuracy_reward/mean": 1.9583333313465119,
      "rewards/accuracy_reward/std": 0.10206207633018494,
      "rewards/format_reward/mean": 0.0,
      "rewards/format_reward/std": 0.0,
      "step": 280
    },
    {
      "clip_ratio": 0.0,
      "completions/clipped_ratio": 0.0,
      "completions/max_length": 524.825,
      "completions/max_terminated_length": 524.825,
      "completions/mean_length": 404.3958435058594,
      "completions/mean_terminated_length": 404.3958435058594,
      "completions/min_length": 308.875,
      "completions/min_terminated_length": 308.875,
      "epoch": 1.180746561886051,
      "grad_norm": 0.03412086623724304,
      "kl": 0.0137176513671875,
      "learning_rate": 1.2498468799258468e-06,
      "loss": 0.0031,
      "num_tokens": 5583225.0,
      "reward": 9.791666746139526,
      "reward_std": 0.294627845287323,
      "rewards/accuracy_reward/mean": 1.9583333283662796,
      "rewards/accuracy_reward/std": 0.10206207633018494,
      "rewards/format_reward/mean": 0.0,
      "rewards/format_reward/std": 0.0,
      "step": 300
    },
    {
      "clip_ratio": 0.0,
      "completions/clipped_ratio": 0.0,
      "completions/max_length": 516.05,
      "completions/max_terminated_length": 516.05,
      "completions/mean_length": 395.33334197998045,
      "completions/mean_terminated_length": 395.33334197998045,
      "completions/min_length": 304.5,
      "completions/min_terminated_length": 304.5,
      "epoch": 1.2593320235756384,
      "grad_norm": 0.03048435461570321,
      "kl": 0.01298065185546875,
      "learning_rate": 1.0464451638743333e-06,
      "loss": 0.0031,
      "num_tokens": 5944449.0,
      "reward": 9.770833396911621,
      "reward_std": 0.2062394917011261,
      "rewards/accuracy_reward/mean": 1.9541666686534882,
      "rewards/accuracy_reward/std": 0.09726334214210511,
      "rewards/format_reward/mean": 0.0,
      "rewards/format_reward/std": 0.0,
      "step": 320
    },
    {
      "clip_ratio": 0.0,
      "completions/clipped_ratio": 0.0,
      "completions/max_length": 553.4,
      "completions/max_terminated_length": 553.4,
      "completions/mean_length": 425.20000915527345,
      "completions/mean_terminated_length": 425.20000915527345,
      "completions/min_length": 323.3,
      "completions/min_terminated_length": 323.3,
      "epoch": 1.3379174852652258,
      "grad_norm": 0.7144758682156833,
      "kl": 0.01336822509765625,
      "learning_rate": 8.518713767970263e-07,
      "loss": 0.0009,
      "num_tokens": 6321679.0,
      "reward": 9.604166793823243,
      "reward_std": 0.4419417679309845,
      "rewards/accuracy_reward/mean": 1.9208333313465118,
      "rewards/accuracy_reward/std": 0.17891300320625306,
      "rewards/format_reward/mean": 0.0,
      "rewards/format_reward/std": 0.0,
      "step": 340
    },
    {
      "clip_ratio": 0.0,
      "completions/clipped_ratio": 0.0,
      "completions/max_length": 574.225,
      "completions/max_terminated_length": 574.225,
      "completions/mean_length": 443.48334350585935,
      "completions/mean_terminated_length": 443.48334350585935,
      "completions/min_length": 340.075,
      "completions/min_terminated_length": 340.075,
      "epoch": 1.4165029469548134,
      "grad_norm": 0.03770084296737852,
      "kl": 0.01368255615234375,
      "learning_rate": 6.69912676134984e-07,
      "loss": 0.0042,
      "num_tokens": 6707325.0,
      "reward": 9.666666746139526,
      "reward_std": 0.35355340838432314,
      "rewards/accuracy_reward/mean": 1.9333333313465118,
      "rewards/accuracy_reward/std": 0.13859225809574127,
      "rewards/format_reward/mean": 0.0,
      "rewards/format_reward/std": 0.0,
      "step": 360
    },
    {
      "clip_ratio": 0.0,
      "completions/clipped_ratio": 0.0,
      "completions/max_length": 562.675,
      "completions/max_terminated_length": 562.675,
      "completions/mean_length": 438.5416793823242,
      "completions/mean_terminated_length": 438.5416793823242,
      "completions/min_length": 346.0,
      "completions/min_terminated_length": 346.0,
      "epoch": 1.4950884086444007,
      "grad_norm": 0.02727792542475105,
      "kl": 0.0121246337890625,
      "learning_rate": 5.041106810216376e-07,
      "loss": 0.0017,
      "num_tokens": 7081575.0,
      "reward": 9.604166769981385,
      "reward_std": 0.4419417679309845,
      "rewards/accuracy_reward/mean": 1.9208333283662795,
      "rewards/accuracy_reward/std": 0.16006682813167572,
      "rewards/format_reward/mean": 0.0,
      "rewards/format_reward/std": 0.0,
      "step": 380
    },
    {
      "clip_ratio": 0.0,
      "completions/clipped_ratio": 0.0,
      "completions/max_length": 549.7,
      "completions/max_terminated_length": 549.7,
      "completions/mean_length": 434.0916748046875,
      "completions/mean_terminated_length": 434.0916748046875,
      "completions/min_length": 341.4,
      "completions/min_terminated_length": 341.4,
      "epoch": 1.5736738703339883,
      "grad_norm": 0.030573782966379155,
      "kl": 0.012384033203125,
      "learning_rate": 3.5769253869489515e-07,
      "loss": -0.0017,
      "num_tokens": 7465665.0,
      "reward": 9.77083342075348,
      "reward_std": 0.3240906298160553,
      "rewards/accuracy_reward/mean": 1.9541666626930236,
      "rewards/accuracy_reward/std": 0.11226828396320343,
      "rewards/format_reward/mean": 0.0,
      "rewards/format_reward/std": 0.0,
      "step": 400
    },
    {
      "clip_ratio": 0.0,
      "completions/clipped_ratio": 0.0,
      "completions/max_length": 529.95,
      "completions/max_terminated_length": 529.95,
      "completions/mean_length": 423.4125099182129,
      "completions/mean_terminated_length": 423.4125099182129,
      "completions/min_length": 337.4,
      "completions/min_terminated_length": 337.4,
      "epoch": 1.6522593320235757,
      "grad_norm": 0.026892810354609234,
      "kl": 0.011712646484375,
      "learning_rate": 2.3350811174697772e-07,
      "loss": 0.0009,
      "num_tokens": 7851292.0,
      "reward": 9.895833349227905,
      "reward_std": 0.1473139226436615,
      "rewards/accuracy_reward/mean": 1.9791666686534881,
      "rewards/accuracy_reward/std": 0.03602609634399414,
      "rewards/format_reward/mean": 0.0,
      "rewards/format_reward/std": 0.0,
      "step": 420
    },
    {
      "clip_ratio": 0.0,
      "completions/clipped_ratio": 0.0,
      "completions/max_length": 548.65,
      "completions/max_terminated_length": 548.65,
      "completions/mean_length": 438.77084121704104,
      "completions/mean_terminated_length": 438.77084121704104,
      "completions/min_length": 344.9,
      "completions/min_terminated_length": 344.9,
      "epoch": 1.730844793713163,
      "grad_norm": 0.7530350302626055,
      "kl": 0.012438201904296875,
      "learning_rate": 1.3397450879073447e-07,
      "loss": 0.0001,
      "num_tokens": 8224807.0,
      "reward": 9.68750011920929,
      "reward_std": 0.4419417679309845,
      "rewards/accuracy_reward/mean": 1.9374999940395354,
      "rewards/accuracy_reward/std": 0.1530931144952774,
      "rewards/format_reward/mean": 0.0,
      "rewards/format_reward/std": 0.0,
      "step": 440
    },
    {
      "clip_ratio": 0.0,
      "completions/clipped_ratio": 0.0,
      "completions/max_length": 531.9,
      "completions/max_terminated_length": 531.9,
      "completions/mean_length": 419.85418167114256,
      "completions/mean_terminated_length": 419.85418167114256,
      "completions/min_length": 322.85,
      "completions/min_terminated_length": 322.85,
      "epoch": 1.8094302554027504,
      "grad_norm": 0.3868391159083008,
      "kl": 0.011206817626953126,
      "learning_rate": 6.102903818991374e-08,
      "loss": 0.0016,
      "num_tokens": 8589626.0,
      "reward": 9.55208342075348,
      "reward_std": 0.5155987203121185,
      "rewards/accuracy_reward/mean": 1.910416665673256,
      "rewards/accuracy_reward/std": 0.1655041679739952,
      "rewards/format_reward/mean": 0.0,
      "rewards/format_reward/std": 0.0,
      "step": 460
    },
    {
      "clip_ratio": 0.0,
      "completions/clipped_ratio": 0.0,
      "completions/max_length": 554.8,
      "completions/max_terminated_length": 554.8,
      "completions/mean_length": 438.46667556762696,
      "completions/mean_terminated_length": 438.46667556762696,
      "completions/min_length": 341.875,
      "completions/min_terminated_length": 341.875,
      "epoch": 1.888015717092338,
      "grad_norm": 0.05259089172915785,
      "kl": 0.011602783203125,
      "learning_rate": 1.60915005555175e-08,
      "loss": -0.0007,
      "num_tokens": 8961090.0,
      "reward": 9.562500095367431,
      "reward_std": 0.3830161988735199,
      "rewards/accuracy_reward/mean": 1.9125,
      "rewards/accuracy_reward/std": 0.16931553483009337,
      "rewards/format_reward/mean": 0.0,
      "rewards/format_reward/std": 0.0,
      "step": 480
    },
    {
      "clip_ratio": 0.0,
      "completions/clipped_ratio": 0.0,
      "completions/max_length": 544.9,
      "completions/max_terminated_length": 544.9,
      "completions/mean_length": 435.1583450317383,
      "completions/mean_terminated_length": 435.1583450317383,
      "completions/min_length": 346.2,
      "completions/min_terminated_length": 346.2,
      "epoch": 1.9666011787819253,
      "grad_norm": 0.02646975626285412,
      "kl": 0.01181793212890625,
      "learning_rate": 3.655394190787975e-11,
      "loss": 0.0015,
      "num_tokens": 9338954.0,
      "reward": 9.750000047683717,
      "reward_std": 0.2357022762298584,
      "rewards/accuracy_reward/mean": 1.9499999970197677,
      "rewards/accuracy_reward/std": 0.08862337470054626,
      "rewards/format_reward/mean": 0.0,
      "rewards/format_reward/std": 0.0,
      "step": 500
    },
    {
      "epoch": 1.9666011787819253,
      "step": 500,
      "total_flos": 0.0,
      "train_loss": 0.0013743292051367462,
      "train_runtime": 7960.7406,
      "train_samples_per_second": 0.754,
      "train_steps_per_second": 0.063
    }
  ],
  "logging_steps": 20,
  "max_steps": 500,
  "num_input_tokens_seen": 9338954,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}