{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9952153110047847,
  "eval_steps": 500,
  "global_step": 52,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "clip_ratio": 0.0,
      "completion_length": 885.7360992431641,
      "epoch": 0.019138755980861243,
      "grad_norm": 0.05717110271611869,
      "kl": 0.0,
      "learning_rate": 1.6666666666666665e-07,
      "loss": 0.0224,
      "reward": 0.6623263955116272,
      "reward_std": 0.20746709778904915,
      "rewards/accuracy_reward": 0.2500000037252903,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4123263880610466,
      "step": 1
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 855.6076354980469,
      "epoch": 0.03827751196172249,
      "grad_norm": 0.06704605474432411,
      "kl": 0.0,
      "learning_rate": 3.333333333333333e-07,
      "loss": 0.0199,
      "reward": 0.6901041716337204,
      "reward_std": 0.22956417128443718,
      "rewards/accuracy_reward": 0.2465277761220932,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4435763880610466,
      "step": 2
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 917.3541717529297,
      "epoch": 0.05741626794258373,
      "grad_norm": 0.05091560707629358,
      "kl": 2.9027462005615234e-05,
      "learning_rate": 5e-07,
      "loss": 0.0137,
      "reward": 0.5538194477558136,
      "reward_std": 0.19887378066778183,
      "rewards/accuracy_reward": 0.1770833358168602,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.3767361119389534,
      "step": 3
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 869.5937652587891,
      "epoch": 0.07655502392344497,
      "grad_norm": 0.06917728317111242,
      "kl": 3.2007694244384766e-05,
      "learning_rate": 6.666666666666666e-07,
      "loss": 0.0258,
      "reward": 0.634548619389534,
      "reward_std": 0.2516612634062767,
      "rewards/accuracy_reward": 0.2256944440305233,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4088541716337204,
      "step": 4
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 860.3229217529297,
      "epoch": 0.09569377990430622,
      "grad_norm": 0.06655951788671848,
      "kl": 2.7626752853393555e-05,
      "learning_rate": 8.333333333333333e-07,
      "loss": 0.0175,
      "reward": 0.636284738779068,
      "reward_std": 0.21483279019594193,
      "rewards/accuracy_reward": 0.2222222238779068,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4140625074505806,
      "step": 5
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 872.6909790039062,
      "epoch": 0.11483253588516747,
      "grad_norm": 0.06205063415295795,
      "kl": 2.2977590560913086e-05,
      "learning_rate": 1e-06,
      "loss": 0.0139,
      "reward": 0.6840277761220932,
      "reward_std": 0.2086946927011013,
      "rewards/accuracy_reward": 0.27083334140479565,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4131944477558136,
      "step": 6
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 881.4861297607422,
      "epoch": 0.1339712918660287,
      "grad_norm": 0.06390474824070583,
      "kl": 2.709031105041504e-05,
      "learning_rate": 9.989509461357426e-07,
      "loss": 0.0214,
      "reward": 0.618923619389534,
      "reward_std": 0.20255662873387337,
      "rewards/accuracy_reward": 0.2256944477558136,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.3932291641831398,
      "step": 7
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 856.1423645019531,
      "epoch": 0.15311004784688995,
      "grad_norm": 0.06611386863145982,
      "kl": 2.002716064453125e-05,
      "learning_rate": 9.958086757163488e-07,
      "loss": 0.0205,
      "reward": 0.659722238779068,
      "reward_std": 0.24306795373558998,
      "rewards/accuracy_reward": 0.2291666716337204,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.430555559694767,
      "step": 8
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 875.7395935058594,
      "epoch": 0.1722488038277512,
      "grad_norm": 0.06272616959518927,
      "kl": 1.7598271369934082e-05,
      "learning_rate": 9.905878394570453e-07,
      "loss": 0.0155,
      "reward": 0.6875,
      "reward_std": 0.22097085043787956,
      "rewards/accuracy_reward": 0.2708333432674408,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4166666716337204,
      "step": 9
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 885.6041717529297,
      "epoch": 0.19138755980861244,
      "grad_norm": 0.0566584544534715,
      "kl": 1.3478100299835205e-05,
      "learning_rate": 9.833127793065097e-07,
      "loss": 0.0147,
      "reward": 0.6840277910232544,
      "reward_std": 0.21606039628386497,
      "rewards/accuracy_reward": 0.2881944477558136,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.3958333358168602,
      "step": 10
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 849.3645782470703,
      "epoch": 0.21052631578947367,
      "grad_norm": 0.07330325365220204,
      "kl": 2.1457672119140625e-05,
      "learning_rate": 9.740174149534692e-07,
      "loss": 0.0204,
      "reward": 0.7526041716337204,
      "reward_std": 0.2590269520878792,
      "rewards/accuracy_reward": 0.3020833320915699,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4505208432674408,
      "step": 11
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 876.4757080078125,
      "epoch": 0.22966507177033493,
      "grad_norm": 0.05630416735365695,
      "kl": 1.9222497940063477e-05,
      "learning_rate": 9.627450856774539e-07,
      "loss": 0.0098,
      "reward": 0.661458358168602,
      "reward_std": 0.18905285000801086,
      "rewards/accuracy_reward": 0.2291666716337204,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4322916716337204,
      "step": 12
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 863.5590362548828,
      "epoch": 0.24880382775119617,
      "grad_norm": 0.060825286332779054,
      "kl": 1.8551945686340332e-05,
      "learning_rate": 9.495483482810687e-07,
      "loss": 0.0179,
      "reward": 0.6163194477558136,
      "reward_std": 0.22097087278962135,
      "rewards/accuracy_reward": 0.2048611119389534,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4114583358168602,
      "step": 13
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 859.7778015136719,
      "epoch": 0.2679425837320574,
      "grad_norm": 0.06028261705382758,
      "kl": 4.965066909790039e-05,
      "learning_rate": 9.344887320459198e-07,
      "loss": 0.0167,
      "reward": 0.6354166641831398,
      "reward_std": 0.2086947001516819,
      "rewards/accuracy_reward": 0.21527778171002865,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4201388955116272,
      "step": 14
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 889.1319427490234,
      "epoch": 0.28708133971291866,
      "grad_norm": 0.06077195852297118,
      "kl": 7.545948028564453e-05,
      "learning_rate": 9.176364518546988e-07,
      "loss": 0.0159,
      "reward": 0.6475694477558136,
      "reward_std": 0.19641854241490364,
      "rewards/accuracy_reward": 0.2465277723968029,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4010416716337204,
      "step": 15
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 875.8854217529297,
      "epoch": 0.3062200956937799,
      "grad_norm": 0.06912072388582151,
      "kl": 9.357929229736328e-05,
      "learning_rate": 8.990700808169889e-07,
      "loss": 0.0254,
      "reward": 0.709201380610466,
      "reward_std": 0.21483278647065163,
      "rewards/accuracy_reward": 0.2708333320915699,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.438368059694767,
      "step": 16
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 865.875,
      "epoch": 0.3253588516746411,
      "grad_norm": 0.06267659137438868,
      "kl": 0.00013184547424316406,
      "learning_rate": 8.788761839251558e-07,
      "loss": 0.0206,
      "reward": 0.7309027910232544,
      "reward_std": 0.262709803879261,
      "rewards/accuracy_reward": 0.3090277835726738,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4218750074505806,
      "step": 17
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 864.7777862548828,
      "epoch": 0.3444976076555024,
      "grad_norm": 0.06921820728455384,
      "kl": 0.0001842975616455078,
      "learning_rate": 8.571489144483944e-07,
      "loss": 0.023,
      "reward": 0.6979166716337204,
      "reward_std": 0.23079178109765053,
      "rewards/accuracy_reward": 0.2777777798473835,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4201388880610466,
      "step": 18
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 864.5208435058594,
      "epoch": 0.36363636363636365,
      "grad_norm": 0.06828931603666485,
      "kl": 0.0002467632293701172,
      "learning_rate": 8.339895749467237e-07,
      "loss": 0.021,
      "reward": 0.7291666865348816,
      "reward_std": 0.2357022613286972,
      "rewards/accuracy_reward": 0.2986111082136631,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.430555559694767,
      "step": 19
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 857.4965362548828,
      "epoch": 0.3827751196172249,
      "grad_norm": 0.057114973470776605,
      "kl": 0.00024008750915527344,
      "learning_rate": 8.095061449516902e-07,
      "loss": 0.0101,
      "reward": 0.7317708283662796,
      "reward_std": 0.23692986369132996,
      "rewards/accuracy_reward": 0.3090277835726738,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4227430671453476,
      "step": 20
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 855.9201354980469,
      "epoch": 0.4019138755980861,
      "grad_norm": 0.05706380401976011,
      "kl": 0.0002942085266113281,
      "learning_rate": 7.838127775159451e-07,
      "loss": 0.0221,
      "reward": 0.7118055671453476,
      "reward_std": 0.25534411519765854,
      "rewards/accuracy_reward": 0.2847222313284874,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4270833358168602,
      "step": 21
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 860.5173645019531,
      "epoch": 0.42105263157894735,
      "grad_norm": 0.05882812256894408,
      "kl": 0.0003452301025390625,
      "learning_rate": 7.570292669790184e-07,
      "loss": 0.0031,
      "reward": 0.6935763955116272,
      "reward_std": 0.20501185953617096,
      "rewards/accuracy_reward": 0.2708333395421505,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.422743059694767,
      "step": 22
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 859.2639007568359,
      "epoch": 0.44019138755980863,
      "grad_norm": 0.06173500079510746,
      "kl": 0.000537872314453125,
      "learning_rate": 7.292804904308086e-07,
      "loss": 0.0154,
      "reward": 0.7161458283662796,
      "reward_std": 0.21483277902007103,
      "rewards/accuracy_reward": 0.2951388917863369,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4210069552063942,
      "step": 23
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 858.9097290039062,
      "epoch": 0.45933014354066987,
      "grad_norm": 0.06893952606573064,
      "kl": 0.0005884170532226562,
      "learning_rate": 7.006958254769437e-07,
      "loss": 0.0264,
      "reward": 0.701388880610466,
      "reward_std": 0.2602545768022537,
      "rewards/accuracy_reward": 0.2881944440305233,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4131944477558136,
      "step": 24
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 860.0416717529297,
      "epoch": 0.4784688995215311,
      "grad_norm": 0.0687100634503083,
      "kl": 0.0006256103515625,
      "learning_rate": 6.714085470206609e-07,
      "loss": 0.0255,
      "reward": 0.7230902910232544,
      "reward_std": 0.24184033274650574,
      "rewards/accuracy_reward": 0.2881944552063942,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4348958283662796,
      "step": 25
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 864.3715209960938,
      "epoch": 0.49760765550239233,
      "grad_norm": 0.061459175011899785,
      "kl": 0.0009174346923828125,
      "learning_rate": 6.415552058736853e-07,
      "loss": 0.0191,
      "reward": 0.7335069477558136,
      "reward_std": 0.2786688134074211,
      "rewards/accuracy_reward": 0.3229166641831398,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4105902835726738,
      "step": 26
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 896.4722290039062,
      "epoch": 0.5167464114832536,
      "grad_norm": 0.06334722551499229,
      "kl": 0.000942230224609375,
      "learning_rate": 6.11274992093311e-07,
      "loss": 0.0245,
      "reward": 0.6371527761220932,
      "reward_std": 0.24306796491146088,
      "rewards/accuracy_reward": 0.2430555559694767,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.3940972313284874,
      "step": 27
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 853.0451354980469,
      "epoch": 0.5358851674641149,
      "grad_norm": 0.05677441196904184,
      "kl": 0.0012645721435546875,
      "learning_rate": 5.80709086014102e-07,
      "loss": 0.0077,
      "reward": 0.740451380610466,
      "reward_std": 0.16818338260054588,
      "rewards/accuracy_reward": 0.3229166641831398,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4175347238779068,
      "step": 28
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 859.9132080078125,
      "epoch": 0.5550239234449761,
      "grad_norm": 0.08121565074078244,
      "kl": 0.001453399658203125,
      "learning_rate": 5.5e-07,
      "loss": 0.0325,
      "reward": 0.743923619389534,
      "reward_std": 0.3105868399143219,
      "rewards/accuracy_reward": 0.3090277835726738,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4348958358168602,
      "step": 29
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 843.4513854980469,
      "epoch": 0.5741626794258373,
      "grad_norm": 0.06778365437453367,
      "kl": 0.001445770263671875,
      "learning_rate": 5.192909139858981e-07,
      "loss": 0.028,
      "reward": 0.7717013955116272,
      "reward_std": 0.27375834435224533,
      "rewards/accuracy_reward": 0.3368055522441864,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4348958432674408,
      "step": 30
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 887.6354370117188,
      "epoch": 0.5933014354066986,
      "grad_norm": 0.0590498329750237,
      "kl": 0.0012683868408203125,
      "learning_rate": 4.887250079066891e-07,
      "loss": 0.0255,
      "reward": 0.7057291865348816,
      "reward_std": 0.21237755194306374,
      "rewards/accuracy_reward": 0.2916666716337204,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4140625074505806,
      "step": 31
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 832.625,
      "epoch": 0.6124401913875598,
      "grad_norm": 0.07056325724098401,
      "kl": 0.0017757415771484375,
      "learning_rate": 4.584447941263149e-07,
      "loss": 0.0178,
      "reward": 0.7578125149011612,
      "reward_std": 0.26393740251660347,
      "rewards/accuracy_reward": 0.3159722238779068,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4418402761220932,
      "step": 32
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 856.9514007568359,
      "epoch": 0.631578947368421,
      "grad_norm": 0.06693116072370171,
      "kl": 0.001903533935546875,
      "learning_rate": 4.285914529793391e-07,
      "loss": 0.0283,
      "reward": 0.747395858168602,
      "reward_std": 0.23447464406490326,
      "rewards/accuracy_reward": 0.3229166716337204,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4244791716337204,
      "step": 33
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 837.0069427490234,
      "epoch": 0.6507177033492823,
      "grad_norm": 0.06430130826482054,
      "kl": 0.0016841888427734375,
      "learning_rate": 3.993041745230562e-07,
      "loss": 0.0254,
      "reward": 0.8125000149011612,
      "reward_std": 0.22833656147122383,
      "rewards/accuracy_reward": 0.3888888955116272,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.423611119389534,
      "step": 34
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 833.3854217529297,
      "epoch": 0.6698564593301436,
      "grad_norm": 0.06623663931865324,
      "kl": 0.0020465850830078125,
      "learning_rate": 3.707195095691913e-07,
      "loss": 0.0112,
      "reward": 0.8081597238779068,
      "reward_std": 0.254116490483284,
      "rewards/accuracy_reward": 0.3576388955116272,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4505208358168602,
      "step": 35
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 888.3264007568359,
      "epoch": 0.6889952153110048,
      "grad_norm": 0.05215266241295597,
      "kl": 0.001735687255859375,
      "learning_rate": 3.4297073302098155e-07,
      "loss": 0.012,
      "reward": 0.7022569477558136,
      "reward_std": 0.22956418246030807,
      "rewards/accuracy_reward": 0.2986111156642437,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4036458432674408,
      "step": 36
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 855.5243225097656,
      "epoch": 0.7081339712918661,
      "grad_norm": 0.06846168763574559,
      "kl": 0.0021762847900390625,
      "learning_rate": 3.16187222484055e-07,
      "loss": 0.0195,
      "reward": 0.7786458283662796,
      "reward_std": 0.29831067472696304,
      "rewards/accuracy_reward": 0.3402777761220932,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4383680522441864,
      "step": 37
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 867.7257080078125,
      "epoch": 0.7272727272727273,
      "grad_norm": 0.06807879530009997,
      "kl": 0.0018215179443359375,
      "learning_rate": 2.904938550483098e-07,
      "loss": 0.0285,
      "reward": 0.6675347238779068,
      "reward_std": 0.21728801727294922,
      "rewards/accuracy_reward": 0.2638888917863369,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4036458358168602,
      "step": 38
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 877.5381927490234,
      "epoch": 0.7464114832535885,
      "grad_norm": 0.06659774200582641,
      "kl": 0.001922607421875,
      "learning_rate": 2.6601042505327635e-07,
      "loss": 0.0224,
      "reward": 0.732638880610466,
      "reward_std": 0.2946278154850006,
      "rewards/accuracy_reward": 0.3055555559694767,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4270833358168602,
      "step": 39
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 848.8159790039062,
      "epoch": 0.7655502392344498,
      "grad_norm": 0.06715335856665118,
      "kl": 0.0018100738525390625,
      "learning_rate": 2.4285108555160575e-07,
      "loss": 0.0216,
      "reward": 0.8125000149011612,
      "reward_std": 0.2357022687792778,
      "rewards/accuracy_reward": 0.3854166716337204,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4270833358168602,
      "step": 40
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 862.9201507568359,
      "epoch": 0.784688995215311,
      "grad_norm": 0.06259931940680961,
      "kl": 0.00200653076171875,
      "learning_rate": 2.2112381607484416e-07,
      "loss": 0.0198,
      "reward": 0.714409738779068,
      "reward_std": 0.24429557099938393,
      "rewards/accuracy_reward": 0.2916666716337204,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.422743059694767,
      "step": 41
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 863.4201354980469,
      "epoch": 0.8038277511961722,
      "grad_norm": 0.07414881526494982,
      "kl": 0.002193450927734375,
      "learning_rate": 2.0092991918301106e-07,
      "loss": 0.022,
      "reward": 0.7604166865348816,
      "reward_std": 0.22833655402064323,
      "rewards/accuracy_reward": 0.329861119389534,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4305555522441864,
      "step": 42
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 843.4826507568359,
      "epoch": 0.8229665071770335,
      "grad_norm": 0.06426826229363446,
      "kl": 0.0018749237060546875,
      "learning_rate": 1.8236354814530112e-07,
      "loss": 0.0145,
      "reward": 0.796875,
      "reward_std": 0.2111499235033989,
      "rewards/accuracy_reward": 0.357638880610466,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4392361119389534,
      "step": 43
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 878.6527709960938,
      "epoch": 0.8421052631578947,
      "grad_norm": 0.058633524755595696,
      "kl": 0.0018100738525390625,
      "learning_rate": 1.6551126795408015e-07,
      "loss": 0.0143,
      "reward": 0.6909722238779068,
      "reward_std": 0.18659762665629387,
      "rewards/accuracy_reward": 0.2951388880610466,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.3958333358168602,
      "step": 44
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 840.09375,
      "epoch": 0.861244019138756,
      "grad_norm": 0.07730488608092184,
      "kl": 0.0020885467529296875,
      "learning_rate": 1.5045165171893116e-07,
      "loss": 0.0289,
      "reward": 0.8420138955116272,
      "reward_std": 0.2676202580332756,
      "rewards/accuracy_reward": 0.3888888955116272,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.453125,
      "step": 45
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 847.3611297607422,
      "epoch": 0.8803827751196173,
      "grad_norm": 0.06479288468708434,
      "kl": 0.002285003662109375,
      "learning_rate": 1.3725491432254623e-07,
      "loss": 0.0195,
      "reward": 0.7777777910232544,
      "reward_std": 0.27989643067121506,
      "rewards/accuracy_reward": 0.3541666716337204,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4236111119389534,
      "step": 46
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 851.6944427490234,
      "epoch": 0.8995215311004785,
      "grad_norm": 0.05948308775990511,
      "kl": 0.0021038055419921875,
      "learning_rate": 1.259825850465308e-07,
      "loss": 0.0056,
      "reward": 0.7213541716337204,
      "reward_std": 0.19764616712927818,
      "rewards/accuracy_reward": 0.3159722276031971,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.405381940305233,
      "step": 47
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 831.1041717529297,
      "epoch": 0.9186602870813397,
      "grad_norm": 0.06976945326641983,
      "kl": 0.0020198822021484375,
      "learning_rate": 1.166872206934904e-07,
      "loss": 0.0315,
      "reward": 0.7795138955116272,
      "reward_std": 0.25043364614248276,
      "rewards/accuracy_reward": 0.3402777835726738,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4392361119389534,
      "step": 48
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 871.7847137451172,
      "epoch": 0.937799043062201,
      "grad_norm": 0.06380948814810407,
      "kl": 0.002155303955078125,
      "learning_rate": 1.0941216054295468e-07,
      "loss": 0.0197,
      "reward": 0.7526041716337204,
      "reward_std": 0.23447464033961296,
      "rewards/accuracy_reward": 0.3298611119389534,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4227430671453476,
      "step": 49
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 847.6632080078125,
      "epoch": 0.9569377990430622,
      "grad_norm": 0.07507185787901022,
      "kl": 0.0022182464599609375,
      "learning_rate": 1.0419132428365116e-07,
      "loss": 0.0201,
      "reward": 0.7786458283662796,
      "reward_std": 0.2590269520878792,
      "rewards/accuracy_reward": 0.329861119389534,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4487847238779068,
      "step": 50
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 848.7222290039062,
      "epoch": 0.9760765550239234,
      "grad_norm": 0.05934554686543501,
      "kl": 0.0019321441650390625,
      "learning_rate": 1.0104905386425732e-07,
      "loss": 0.0147,
      "reward": 0.7803819626569748,
      "reward_std": 0.22219848446547985,
      "rewards/accuracy_reward": 0.3402777835726738,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4401041641831398,
      "step": 51
    },
    {
      "clip_ratio": 0.0,
      "completion_length": 920.5416870117188,
      "epoch": 0.9952153110047847,
      "grad_norm": 0.05965196947908554,
      "kl": 0.002071380615234375,
      "learning_rate": 1e-07,
      "loss": 0.0123,
      "reward": 0.7968750149011612,
      "reward_std": 0.2651650495827198,
      "rewards/accuracy_reward": 0.3715277761220932,
      "rewards/format_reward": 0.0,
      "rewards/tag_count_reward": 0.4253472313284874,
      "step": 52
    },
    {
      "epoch": 0.9952153110047847,
      "step": 52,
      "total_flos": 0.0,
      "train_loss": 0.01933252687404792,
      "train_runtime": 2498.5955,
      "train_samples_per_second": 3.002,
      "train_steps_per_second": 0.021
    }
  ],
  "logging_steps": 1,
  "max_steps": 52,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 10,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 24,
  "trial_name": null,
  "trial_params": null
}