{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 10.0418410041841,
  "eval_steps": 500,
  "global_step": 300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.33472803347280333,
      "grad_norm": 0.4676998555660248,
      "learning_rate": 8.620689655172415e-07,
      "logits/chosen": 1.7077701091766357,
      "logits/rejected": 1.8646482229232788,
      "logps/chosen": -85.7728271484375,
      "logps/rejected": -88.1952896118164,
      "loss": 0.6938,
      "rewards/accuracies": 0.32499998807907104,
      "rewards/chosen": -0.004770822823047638,
      "rewards/margins": -0.007881464436650276,
      "rewards/rejected": 0.0031106427777558565,
      "step": 10
    },
    {
      "epoch": 0.6694560669456067,
      "grad_norm": 0.43230223655700684,
      "learning_rate": 1.724137931034483e-06,
      "logits/chosen": 1.8023220300674438,
      "logits/rejected": 1.8210970163345337,
      "logps/chosen": -78.37618255615234,
      "logps/rejected": -75.44720458984375,
      "loss": 0.6931,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.0056939031928777695,
      "rewards/margins": -0.003848772030323744,
      "rewards/rejected": -0.001845130929723382,
      "step": 20
    },
    {
      "epoch": 1.00418410041841,
      "grad_norm": 2.431753396987915,
      "learning_rate": 2.5862068965517246e-06,
      "logits/chosen": 1.8643144369125366,
      "logits/rejected": 1.8500900268554688,
      "logps/chosen": -86.84412384033203,
      "logps/rejected": -90.78925323486328,
      "loss": 0.6946,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.0008674233104102314,
      "rewards/margins": -0.008064134046435356,
      "rewards/rejected": 0.0071967123076319695,
      "step": 30
    },
    {
      "epoch": 1.3389121338912133,
      "grad_norm": 0.5327289700508118,
      "learning_rate": 3.448275862068966e-06,
      "logits/chosen": 1.7701479196548462,
      "logits/rejected": 1.7749736309051514,
      "logps/chosen": -83.61552429199219,
      "logps/rejected": -72.89176177978516,
      "loss": 0.6939,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.0019066383829340339,
      "rewards/margins": -0.0035008196718990803,
      "rewards/rejected": 0.0015941811725497246,
      "step": 40
    },
    {
      "epoch": 1.6736401673640167,
      "grad_norm": 0.5017096996307373,
      "learning_rate": 4.310344827586207e-06,
      "logits/chosen": 1.7465788125991821,
      "logits/rejected": 1.7925498485565186,
      "logps/chosen": -84.85249328613281,
      "logps/rejected": -88.79469299316406,
      "loss": 0.6938,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.0003656863118521869,
      "rewards/margins": 0.00012080222222721204,
      "rewards/rejected": 0.00024488387862220407,
      "step": 50
    },
    {
      "epoch": 2.00836820083682,
      "grad_norm": 0.4994066059589386,
      "learning_rate": 4.999818897894192e-06,
      "logits/chosen": 1.8359416723251343,
      "logits/rejected": 1.814234733581543,
      "logps/chosen": -79.98396301269531,
      "logps/rejected": -66.29866027832031,
      "loss": 0.6952,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": 0.00456579215824604,
      "rewards/margins": -0.002460189163684845,
      "rewards/rejected": 0.007025980856269598,
      "step": 60
    },
    {
      "epoch": 2.3430962343096233,
      "grad_norm": 0.6425362825393677,
      "learning_rate": 4.9934830787948756e-06,
      "logits/chosen": 1.7550818920135498,
      "logits/rejected": 1.7802051305770874,
      "logps/chosen": -83.60951232910156,
      "logps/rejected": -70.81080627441406,
      "loss": 0.6937,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.004059882368892431,
      "rewards/margins": 0.00932090263813734,
      "rewards/rejected": -0.005261021666228771,
      "step": 70
    },
    {
      "epoch": 2.6778242677824267,
      "grad_norm": 0.5591297149658203,
      "learning_rate": 4.978118375700895e-06,
      "logits/chosen": 1.7742525339126587,
      "logits/rejected": 1.8790937662124634,
      "logps/chosen": -86.7696533203125,
      "logps/rejected": -84.90727233886719,
      "loss": 0.6927,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": 0.0033963967580348253,
      "rewards/margins": 0.005500240251421928,
      "rewards/rejected": -0.0021038432605564594,
      "step": 80
    },
    {
      "epoch": 3.01255230125523,
      "grad_norm": 0.6774541735649109,
      "learning_rate": 4.953780424089803e-06,
      "logits/chosen": 1.8815631866455078,
      "logits/rejected": 1.911620855331421,
      "logps/chosen": -92.14398193359375,
      "logps/rejected": -82.83556365966797,
      "loss": 0.6923,
      "rewards/accuracies": 0.5,
      "rewards/chosen": 0.007645039353519678,
      "rewards/margins": 0.005134005565196276,
      "rewards/rejected": 0.0025110342539846897,
      "step": 90
    },
    {
      "epoch": 3.3472803347280333,
      "grad_norm": 0.6908814907073975,
      "learning_rate": 4.920557351506409e-06,
      "logits/chosen": 1.7718560695648193,
      "logits/rejected": 1.8823477029800415,
      "logps/chosen": -91.2622299194336,
      "logps/rejected": -96.467529296875,
      "loss": 0.6894,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": 0.004941435065120459,
      "rewards/margins": 0.01582186669111252,
      "rewards/rejected": -0.010880433022975922,
      "step": 100
    },
    {
      "epoch": 3.6820083682008367,
      "grad_norm": 0.6954344511032104,
      "learning_rate": 4.878569458453592e-06,
      "logits/chosen": 1.6916942596435547,
      "logits/rejected": 1.7603965997695923,
      "logps/chosen": -84.66748046875,
      "logps/rejected": -103.5328369140625,
      "loss": 0.6889,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.003184904810041189,
      "rewards/margins": 0.007628746330738068,
      "rewards/rejected": -0.01081365067511797,
      "step": 110
    },
    {
      "epoch": 4.01673640167364,
      "grad_norm": 0.6846771240234375,
      "learning_rate": 4.827968782785062e-06,
      "logits/chosen": 1.8839175701141357,
      "logits/rejected": 1.956974744796753,
      "logps/chosen": -76.89198303222656,
      "logps/rejected": -95.03202056884766,
      "loss": 0.6886,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": 0.009448022581636906,
      "rewards/margins": 0.018458742648363113,
      "rewards/rejected": -0.009010720066726208,
      "step": 120
    },
    {
      "epoch": 4.351464435146443,
      "grad_norm": 0.762366771697998,
      "learning_rate": 4.7689385491773934e-06,
      "logits/chosen": 1.8294252157211304,
      "logits/rejected": 1.8144447803497314,
      "logps/chosen": -87.03471374511719,
      "logps/rejected": -73.86766052246094,
      "loss": 0.6847,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.01010747067630291,
      "rewards/margins": 0.01468165498226881,
      "rewards/rejected": -0.024789121001958847,
      "step": 130
    },
    {
      "epoch": 4.686192468619247,
      "grad_norm": 0.7172746658325195,
      "learning_rate": 4.70169250567482e-06,
      "logits/chosen": 1.7118749618530273,
      "logits/rejected": 1.8259985446929932,
      "logps/chosen": -68.12081146240234,
      "logps/rejected": -75.31085205078125,
      "loss": 0.6809,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.014076923951506615,
      "rewards/margins": 0.014298361726105213,
      "rewards/rejected": -0.028375286608934402,
      "step": 140
    },
    {
      "epoch": 5.02092050209205,
      "grad_norm": 0.7188512682914734,
      "learning_rate": 4.626474149709127e-06,
      "logits/chosen": 1.8864549398422241,
      "logits/rejected": 1.8488889932632446,
      "logps/chosen": -101.3166275024414,
      "logps/rejected": -76.73490905761719,
      "loss": 0.6773,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.021979983896017075,
      "rewards/margins": 0.023767748847603798,
      "rewards/rejected": -0.04574773460626602,
      "step": 150
    },
    {
      "epoch": 5.355648535564853,
      "grad_norm": 0.8493714928627014,
      "learning_rate": 4.54355584639723e-06,
      "logits/chosen": 1.7861169576644897,
      "logits/rejected": 1.7927452325820923,
      "logps/chosen": -83.28593444824219,
      "logps/rejected": -76.04238891601562,
      "loss": 0.673,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.0320693664252758,
      "rewards/margins": 0.04263792932033539,
      "rewards/rejected": -0.07470729202032089,
      "step": 160
    },
    {
      "epoch": 5.690376569037657,
      "grad_norm": 1.1378726959228516,
      "learning_rate": 4.45323784230908e-06,
      "logits/chosen": 1.8622329235076904,
      "logits/rejected": 1.875741958618164,
      "logps/chosen": -72.01219177246094,
      "logps/rejected": -77.62818908691406,
      "loss": 0.6679,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.05134737491607666,
      "rewards/margins": 0.049249742180109024,
      "rewards/rejected": -0.10059712082147598,
      "step": 170
    },
    {
      "epoch": 6.02510460251046,
      "grad_norm": 0.847227156162262,
      "learning_rate": 4.355847178277025e-06,
      "logits/chosen": 1.9445765018463135,
      "logits/rejected": 2.0065901279449463,
      "logps/chosen": -76.14964294433594,
      "logps/rejected": -83.71626281738281,
      "loss": 0.6621,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": -0.05519520118832588,
      "rewards/margins": 0.06803703308105469,
      "rewards/rejected": -0.12323222309350967,
      "step": 180
    },
    {
      "epoch": 6.359832635983263,
      "grad_norm": 0.8368040919303894,
      "learning_rate": 4.2517365051833564e-06,
      "logits/chosen": 1.8116003274917603,
      "logits/rejected": 1.8839362859725952,
      "logps/chosen": -82.7087631225586,
      "logps/rejected": -67.3785629272461,
      "loss": 0.6538,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.03855596110224724,
      "rewards/margins": 0.11562293767929077,
      "rewards/rejected": -0.15417888760566711,
      "step": 190
    },
    {
      "epoch": 6.694560669456067,
      "grad_norm": 0.8636891841888428,
      "learning_rate": 4.141282807014034e-06,
      "logits/chosen": 1.7250430583953857,
      "logits/rejected": 1.775418996810913,
      "logps/chosen": -68.51094055175781,
      "logps/rejected": -79.88996124267578,
      "loss": 0.657,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": -0.11151214689016342,
      "rewards/margins": 0.07863004505634308,
      "rewards/rejected": -0.1901421844959259,
      "step": 200
    },
    {
      "epoch": 7.02928870292887,
      "grad_norm": 0.945520281791687,
      "learning_rate": 4.024886035802432e-06,
      "logits/chosen": 1.7329381704330444,
      "logits/rejected": 1.799551010131836,
      "logps/chosen": -81.49295043945312,
      "logps/rejected": -87.97386169433594,
      "loss": 0.6462,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.09862186014652252,
      "rewards/margins": 0.10664049535989761,
      "rewards/rejected": -0.20526234805583954,
      "step": 210
    },
    {
      "epoch": 7.364016736401673,
      "grad_norm": 0.9736716151237488,
      "learning_rate": 3.9029676634059565e-06,
      "logits/chosen": 1.8787791728973389,
      "logits/rejected": 1.8832632303237915,
      "logps/chosen": -83.21014404296875,
      "logps/rejected": -76.4449691772461,
      "loss": 0.6413,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.07923021167516708,
      "rewards/margins": 0.11194779723882675,
      "rewards/rejected": -0.19117799401283264,
      "step": 220
    },
    {
      "epoch": 7.698744769874477,
      "grad_norm": 0.9238549470901489,
      "learning_rate": 3.7759691553595214e-06,
      "logits/chosen": 1.9044986963272095,
      "logits/rejected": 1.8805242776870728,
      "logps/chosen": -90.17919158935547,
      "logps/rejected": -86.05476379394531,
      "loss": 0.6346,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.0993318110704422,
      "rewards/margins": 0.12289313971996307,
      "rewards/rejected": -0.22222498059272766,
      "step": 230
    },
    {
      "epoch": 8.03347280334728,
      "grad_norm": 1.0052326917648315,
      "learning_rate": 3.6443503723320837e-06,
      "logits/chosen": 1.873815894126892,
      "logits/rejected": 1.8809674978256226,
      "logps/chosen": -74.86824035644531,
      "logps/rejected": -81.79768371582031,
      "loss": 0.6377,
      "rewards/accuracies": 0.612500011920929,
      "rewards/chosen": -0.17602138221263885,
      "rewards/margins": 0.09737586975097656,
      "rewards/rejected": -0.2733972668647766,
      "step": 240
    },
    {
      "epoch": 8.368200836820083,
      "grad_norm": 1.0167930126190186,
      "learning_rate": 3.508587904974522e-06,
      "logits/chosen": 1.868101716041565,
      "logits/rejected": 1.8685039281845093,
      "logps/chosen": -94.48133850097656,
      "logps/rejected": -94.00556182861328,
      "loss": 0.6216,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.1260211318731308,
      "rewards/margins": 0.21002164483070374,
      "rewards/rejected": -0.3360427916049957,
      "step": 250
    },
    {
      "epoch": 8.702928870292887,
      "grad_norm": 0.9955905079841614,
      "learning_rate": 3.3691733481883693e-06,
      "logits/chosen": 1.7905946969985962,
      "logits/rejected": 1.7784850597381592,
      "logps/chosen": -94.9794921875,
      "logps/rejected": -84.99113464355469,
      "loss": 0.6193,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.18853916227817535,
      "rewards/margins": 0.19185101985931396,
      "rewards/rejected": -0.3803902268409729,
      "step": 260
    },
    {
      "epoch": 9.03765690376569,
      "grad_norm": 0.943289577960968,
      "learning_rate": 3.226611521064278e-06,
      "logits/chosen": 1.8119779825210571,
      "logits/rejected": 1.817567229270935,
      "logps/chosen": -80.78002166748047,
      "logps/rejected": -85.42848205566406,
      "loss": 0.6201,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.16673071682453156,
      "rewards/margins": 0.19530437886714935,
      "rewards/rejected": -0.3620350658893585,
      "step": 270
    },
    {
      "epoch": 9.372384937238493,
      "grad_norm": 0.9868927597999573,
      "learning_rate": 3.0814186389357765e-06,
      "logits/chosen": 1.816300392150879,
      "logits/rejected": 1.7763961553573608,
      "logps/chosen": -77.19395446777344,
      "logps/rejected": -68.16056060791016,
      "loss": 0.6157,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.21062004566192627,
      "rewards/margins": 0.19489461183547974,
      "rewards/rejected": -0.4055147171020508,
      "step": 280
    },
    {
      "epoch": 9.707112970711297,
      "grad_norm": 1.0557409524917603,
      "learning_rate": 2.9341204441673267e-06,
      "logits/chosen": 1.8154022693634033,
      "logits/rejected": 1.842792272567749,
      "logps/chosen": -84.95299530029297,
      "logps/rejected": -82.22926330566406,
      "loss": 0.5972,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.25716403126716614,
      "rewards/margins": 0.2636396288871765,
      "rewards/rejected": -0.5208036303520203,
      "step": 290
    },
    {
      "epoch": 10.0418410041841,
      "grad_norm": 1.0787400007247925,
      "learning_rate": 2.785250302445062e-06,
      "logits/chosen": 1.8255424499511719,
      "logits/rejected": 1.8123111724853516,
      "logps/chosen": -94.89508056640625,
      "logps/rejected": -99.48709869384766,
      "loss": 0.6038,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.25526899099349976,
      "rewards/margins": 0.24981728196144104,
      "rewards/rejected": -0.5050862431526184,
      "step": 300
    }
  ],
  "logging_steps": 10,
  "max_steps": 580,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.0213454370444411e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}