{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 19.547511312217196,
  "eval_steps": 500,
  "global_step": 540,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.36199095022624433,
      "grad_norm": 4.351782321929932,
      "learning_rate": 9.259259259259259e-07,
      "logits/chosen": -2.3496108055114746,
      "logits/rejected": -2.354576349258423,
      "logps/chosen": -86.63139343261719,
      "logps/rejected": -76.90333557128906,
      "loss": 0.6923,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": -0.00019822348258458078,
      "rewards/margins": -0.002337233629077673,
      "rewards/rejected": 0.00213901000097394,
      "step": 10
    },
    {
      "epoch": 0.7239819004524887,
      "grad_norm": 3.8085336685180664,
      "learning_rate": 1.8518518518518519e-06,
      "logits/chosen": -2.3482470512390137,
      "logits/rejected": -2.3420329093933105,
      "logps/chosen": -73.18113708496094,
      "logps/rejected": -72.52565002441406,
      "loss": 0.6943,
      "rewards/accuracies": 0.4000000059604645,
      "rewards/chosen": 0.00011640912998700514,
      "rewards/margins": -0.002153881825506687,
      "rewards/rejected": 0.002270291093736887,
      "step": 20
    },
    {
      "epoch": 1.085972850678733,
      "grad_norm": 5.329021453857422,
      "learning_rate": 2.7777777777777783e-06,
      "logits/chosen": -2.3689351081848145,
      "logits/rejected": -2.3885178565979004,
      "logps/chosen": -72.15180969238281,
      "logps/rejected": -74.70518493652344,
      "loss": 0.6924,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.0020837264601141214,
      "rewards/margins": 0.00603193324059248,
      "rewards/rejected": -0.0039482079446315765,
      "step": 30
    },
    {
      "epoch": 1.4479638009049773,
      "grad_norm": 3.671722888946533,
      "learning_rate": 3.7037037037037037e-06,
      "logits/chosen": -2.3091399669647217,
      "logits/rejected": -2.3273236751556396,
      "logps/chosen": -69.45216369628906,
      "logps/rejected": -260.0699157714844,
      "loss": 0.6899,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.0094170942902565,
      "rewards/margins": 0.14604035019874573,
      "rewards/rejected": -0.15545745193958282,
      "step": 40
    },
    {
      "epoch": 1.8099547511312217,
      "grad_norm": 4.172375679016113,
      "learning_rate": 4.62962962962963e-06,
      "logits/chosen": -2.31831693649292,
      "logits/rejected": -2.335618495941162,
      "logps/chosen": -72.82856750488281,
      "logps/rejected": -90.61243438720703,
      "loss": 0.6865,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.01846747286617756,
      "rewards/margins": 0.015672579407691956,
      "rewards/rejected": -0.034140050411224365,
      "step": 50
    },
    {
      "epoch": 2.171945701357466,
      "grad_norm": 4.386168479919434,
      "learning_rate": 4.998119881260576e-06,
      "logits/chosen": -2.3203041553497314,
      "logits/rejected": -2.3315658569335938,
      "logps/chosen": -67.68244934082031,
      "logps/rejected": -86.2356185913086,
      "loss": 0.6775,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.03993881493806839,
      "rewards/margins": 0.027381544932723045,
      "rewards/rejected": -0.06732036173343658,
      "step": 60
    },
    {
      "epoch": 2.5339366515837103,
      "grad_norm": 4.38440465927124,
      "learning_rate": 4.9866405060165044e-06,
      "logits/chosen": -2.3607311248779297,
      "logits/rejected": -2.3799984455108643,
      "logps/chosen": -72.8117904663086,
      "logps/rejected": -79.02535247802734,
      "loss": 0.6636,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.06544635444879532,
      "rewards/margins": 0.069185271859169,
      "rewards/rejected": -0.13463163375854492,
      "step": 70
    },
    {
      "epoch": 2.8959276018099547,
      "grad_norm": 4.333106517791748,
      "learning_rate": 4.964774158361991e-06,
      "logits/chosen": -2.392198324203491,
      "logits/rejected": -2.3854451179504395,
      "logps/chosen": -64.03775787353516,
      "logps/rejected": -71.01383972167969,
      "loss": 0.6473,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.09924488514661789,
      "rewards/margins": 0.1070803552865982,
      "rewards/rejected": -0.2063252478837967,
      "step": 80
    },
    {
      "epoch": 3.257918552036199,
      "grad_norm": 4.575820446014404,
      "learning_rate": 4.93261217644956e-06,
      "logits/chosen": -2.3316967487335205,
      "logits/rejected": -2.3399548530578613,
      "logps/chosen": -66.9199447631836,
      "logps/rejected": -67.97264099121094,
      "loss": 0.6292,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.1342274248600006,
      "rewards/margins": 0.1109505444765091,
      "rewards/rejected": -0.2451779842376709,
      "step": 90
    },
    {
      "epoch": 3.6199095022624435,
      "grad_norm": 4.666302680969238,
      "learning_rate": 4.8902889044347e-06,
      "logits/chosen": -2.3398795127868652,
      "logits/rejected": -2.3582355976104736,
      "logps/chosen": -76.05484771728516,
      "logps/rejected": -78.4997787475586,
      "loss": 0.6015,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.15528908371925354,
      "rewards/margins": 0.17627069354057312,
      "rewards/rejected": -0.33155977725982666,
      "step": 100
    },
    {
      "epoch": 3.981900452488688,
      "grad_norm": 4.472248554229736,
      "learning_rate": 4.837981131305475e-06,
      "logits/chosen": -2.3747470378875732,
      "logits/rejected": -2.3589282035827637,
      "logps/chosen": -75.08245086669922,
      "logps/rejected": -88.42532348632812,
      "loss": 0.582,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.22706560790538788,
      "rewards/margins": 0.2718750834465027,
      "rewards/rejected": -0.49894070625305176,
      "step": 110
    },
    {
      "epoch": 4.343891402714932,
      "grad_norm": 4.420746326446533,
      "learning_rate": 4.775907352415367e-06,
      "logits/chosen": -2.3004438877105713,
      "logits/rejected": -2.33561635017395,
      "logps/chosen": -92.03379821777344,
      "logps/rejected": -84.3415298461914,
      "loss": 0.5537,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.2706041932106018,
      "rewards/margins": 0.2943420112133026,
      "rewards/rejected": -0.564946174621582,
      "step": 120
    },
    {
      "epoch": 4.705882352941177,
      "grad_norm": 4.54348611831665,
      "learning_rate": 4.70432685680402e-06,
      "logits/chosen": -2.4000728130340576,
      "logits/rejected": -2.3912758827209473,
      "logps/chosen": -70.9849624633789,
      "logps/rejected": -79.27720642089844,
      "loss": 0.5306,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.2949373126029968,
      "rewards/margins": 0.4063098430633545,
      "rewards/rejected": -0.7012470960617065,
      "step": 130
    },
    {
      "epoch": 5.067873303167421,
      "grad_norm": 4.43679141998291,
      "learning_rate": 4.623538644118244e-06,
      "logits/chosen": -2.3637630939483643,
      "logits/rejected": -2.3869171142578125,
      "logps/chosen": -63.164039611816406,
      "logps/rejected": -78.0440673828125,
      "loss": 0.5028,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.2740551233291626,
      "rewards/margins": 0.5061118006706238,
      "rewards/rejected": -0.7801669836044312,
      "step": 140
    },
    {
      "epoch": 5.429864253393665,
      "grad_norm": 4.934199810028076,
      "learning_rate": 4.533880175657419e-06,
      "logits/chosen": -2.342858076095581,
      "logits/rejected": -2.3417344093322754,
      "logps/chosen": -80.54903411865234,
      "logps/rejected": -79.4267807006836,
      "loss": 0.4663,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.3702576756477356,
      "rewards/margins": 0.5886551141738892,
      "rewards/rejected": -0.95891273021698,
      "step": 150
    },
    {
      "epoch": 5.791855203619909,
      "grad_norm": 5.551158905029297,
      "learning_rate": 4.435725964760331e-06,
      "logits/chosen": -2.36419415473938,
      "logits/rejected": -2.388049364089966,
      "logps/chosen": -69.40000915527344,
      "logps/rejected": -91.36970520019531,
      "loss": 0.4636,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.42509594559669495,
      "rewards/margins": 0.7187294363975525,
      "rewards/rejected": -1.1438252925872803,
      "step": 160
    },
    {
      "epoch": 6.153846153846154,
      "grad_norm": 4.6863555908203125,
      "learning_rate": 4.329486012421531e-06,
      "logits/chosen": -2.3798611164093018,
      "logits/rejected": -2.388327121734619,
      "logps/chosen": -84.27762603759766,
      "logps/rejected": -95.46641540527344,
      "loss": 0.4245,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -0.4049956202507019,
      "rewards/margins": 0.965179443359375,
      "rewards/rejected": -1.3701750040054321,
      "step": 170
    },
    {
      "epoch": 6.515837104072398,
      "grad_norm": 5.001020431518555,
      "learning_rate": 4.215604094671835e-06,
      "logits/chosen": -2.376713991165161,
      "logits/rejected": -2.413689374923706,
      "logps/chosen": -75.96709442138672,
      "logps/rejected": -88.12495422363281,
      "loss": 0.449,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -0.3547631800174713,
      "rewards/margins": 0.8548161387443542,
      "rewards/rejected": -1.2095792293548584,
      "step": 180
    },
    {
      "epoch": 6.877828054298643,
      "grad_norm": 5.340312957763672,
      "learning_rate": 4.094555908876765e-06,
      "logits/chosen": -2.385936737060547,
      "logits/rejected": -2.4080803394317627,
      "logps/chosen": -66.79461669921875,
      "logps/rejected": -85.04891204833984,
      "loss": 0.3764,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -0.4008978307247162,
      "rewards/margins": 0.8593969345092773,
      "rewards/rejected": -1.2602946758270264,
      "step": 190
    },
    {
      "epoch": 7.239819004524887,
      "grad_norm": 4.7585883140563965,
      "learning_rate": 3.966847086696045e-06,
      "logits/chosen": -2.4034409523010254,
      "logits/rejected": -2.425673246383667,
      "logps/chosen": -78.95824432373047,
      "logps/rejected": -92.42652893066406,
      "loss": 0.3362,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -0.5016047358512878,
      "rewards/margins": 1.3585138320922852,
      "rewards/rejected": -1.8601186275482178,
      "step": 200
    },
    {
      "epoch": 7.601809954751131,
      "grad_norm": 5.299131870269775,
      "learning_rate": 3.833011082004229e-06,
      "logits/chosen": -2.405515193939209,
      "logits/rejected": -2.4070794582366943,
      "logps/chosen": -70.76835632324219,
      "logps/rejected": -94.3203353881836,
      "loss": 0.317,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -0.7332154512405396,
      "rewards/margins": 1.352203130722046,
      "rewards/rejected": -2.085418701171875,
      "step": 210
    },
    {
      "epoch": 7.963800904977376,
      "grad_norm": 6.385330677032471,
      "learning_rate": 3.693606942594873e-06,
      "logits/chosen": -2.3782029151916504,
      "logits/rejected": -2.3890140056610107,
      "logps/chosen": -74.07151794433594,
      "logps/rejected": -97.96099853515625,
      "loss": 0.3338,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.8589820861816406,
      "rewards/margins": 1.498375415802002,
      "rewards/rejected": -2.3573575019836426,
      "step": 220
    },
    {
      "epoch": 8.32579185520362,
      "grad_norm": 5.3784027099609375,
      "learning_rate": 3.549216974976073e-06,
      "logits/chosen": -2.411106586456299,
      "logits/rejected": -2.4102063179016113,
      "logps/chosen": -78.97357940673828,
      "logps/rejected": -104.22176361083984,
      "loss": 0.2894,
      "rewards/accuracies": 0.9375,
      "rewards/chosen": -0.850872814655304,
      "rewards/margins": 1.5754810571670532,
      "rewards/rejected": -2.426353931427002,
      "step": 230
    },
    {
      "epoch": 8.687782805429864,
      "grad_norm": 5.306555271148682,
      "learning_rate": 3.400444312011776e-06,
      "logits/chosen": -2.3758366107940674,
      "logits/rejected": -2.393923282623291,
      "logps/chosen": -75.22766876220703,
      "logps/rejected": -105.24629211425781,
      "loss": 0.2666,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -0.6813958287239075,
      "rewards/margins": 1.697392225265503,
      "rewards/rejected": -2.3787879943847656,
      "step": 240
    },
    {
      "epoch": 9.049773755656108,
      "grad_norm": 5.835656642913818,
      "learning_rate": 3.2479103935691047e-06,
      "logits/chosen": -2.345181465148926,
      "logits/rejected": -2.3525192737579346,
      "logps/chosen": -99.5466537475586,
      "logps/rejected": -115.3326644897461,
      "loss": 0.2556,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -1.1117511987686157,
      "rewards/margins": 1.774484634399414,
      "rewards/rejected": -2.8862357139587402,
      "step": 250
    },
    {
      "epoch": 9.411764705882353,
      "grad_norm": 5.279304504394531,
      "learning_rate": 3.092252370695298e-06,
      "logits/chosen": -2.3707776069641113,
      "logits/rejected": -2.3579325675964355,
      "logps/chosen": -107.48176574707031,
      "logps/rejected": -119.63212585449219,
      "loss": 0.223,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.34574556350708,
      "rewards/margins": 2.105149745941162,
      "rewards/rejected": -3.450894832611084,
      "step": 260
    },
    {
      "epoch": 9.773755656108598,
      "grad_norm": 6.106879711151123,
      "learning_rate": 2.9341204441673267e-06,
      "logits/chosen": -2.367492198944092,
      "logits/rejected": -2.3800530433654785,
      "logps/chosen": -94.10942077636719,
      "logps/rejected": -108.04286193847656,
      "loss": 0.2108,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -1.319504976272583,
      "rewards/margins": 2.0621278285980225,
      "rewards/rejected": -3.3816330432891846,
      "step": 270
    },
    {
      "epoch": 10.135746606334841,
      "grad_norm": 4.824020862579346,
      "learning_rate": 2.7741751485313295e-06,
      "logits/chosen": -2.29042649269104,
      "logits/rejected": -2.2942726612091064,
      "logps/chosen": -88.15667724609375,
      "logps/rejected": -116.263671875,
      "loss": 0.2067,
      "rewards/accuracies": 0.9624999761581421,
      "rewards/chosen": -1.352482795715332,
      "rewards/margins": 2.1809113025665283,
      "rewards/rejected": -3.5333938598632812,
      "step": 280
    },
    {
      "epoch": 10.497737556561086,
      "grad_norm": 5.073860168457031,
      "learning_rate": 2.6130845929767662e-06,
      "logits/chosen": -2.319164276123047,
      "logits/rejected": -2.327695846557617,
      "logps/chosen": -89.06367492675781,
      "logps/rejected": -113.19393157958984,
      "loss": 0.1791,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -1.4301341772079468,
      "rewards/margins": 2.2849090099334717,
      "rewards/rejected": -3.715043306350708,
      "step": 290
    },
    {
      "epoch": 10.85972850678733,
      "grad_norm": 6.990862846374512,
      "learning_rate": 2.4515216705704396e-06,
      "logits/chosen": -2.322035551071167,
      "logits/rejected": -2.343055248260498,
      "logps/chosen": -79.6242904663086,
      "logps/rejected": -117.89810943603516,
      "loss": 0.173,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -1.5006747245788574,
      "rewards/margins": 2.6087098121643066,
      "rewards/rejected": -4.109384059906006,
      "step": 300
    },
    {
      "epoch": 11.221719457013574,
      "grad_norm": 4.418057918548584,
      "learning_rate": 2.290161247507733e-06,
      "logits/chosen": -2.313563585281372,
      "logits/rejected": -2.3379547595977783,
      "logps/chosen": -96.51193237304688,
      "logps/rejected": -128.72109985351562,
      "loss": 0.1525,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -1.7700340747833252,
      "rewards/margins": 2.5669429302215576,
      "rewards/rejected": -4.336977005004883,
      "step": 310
    },
    {
      "epoch": 11.583710407239819,
      "grad_norm": 5.5379638671875,
      "learning_rate": 2.129677344121879e-06,
      "logits/chosen": -2.3077690601348877,
      "logits/rejected": -2.3094325065612793,
      "logps/chosen": -97.48485565185547,
      "logps/rejected": -120.9418716430664,
      "loss": 0.1367,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -1.6908330917358398,
      "rewards/margins": 2.76543927192688,
      "rewards/rejected": -4.456272602081299,
      "step": 320
    },
    {
      "epoch": 11.945701357466064,
      "grad_norm": 8.315099716186523,
      "learning_rate": 1.970740319426474e-06,
      "logits/chosen": -2.2925028800964355,
      "logits/rejected": -2.31669545173645,
      "logps/chosen": -88.35062408447266,
      "logps/rejected": -125.6523208618164,
      "loss": 0.1436,
      "rewards/accuracies": 0.9750000238418579,
      "rewards/chosen": -2.12321400642395,
      "rewards/margins": 2.9592180252075195,
      "rewards/rejected": -5.082431316375732,
      "step": 330
    },
    {
      "epoch": 12.307692307692308,
      "grad_norm": 5.883168697357178,
      "learning_rate": 1.8140140709517467e-06,
      "logits/chosen": -2.2664735317230225,
      "logits/rejected": -2.3047263622283936,
      "logps/chosen": -87.54987335205078,
      "logps/rejected": -120.87919616699219,
      "loss": 0.1185,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -1.9975258111953735,
      "rewards/margins": 3.0641627311706543,
      "rewards/rejected": -5.061688423156738,
      "step": 340
    },
    {
      "epoch": 12.669683257918551,
      "grad_norm": 4.8013458251953125,
      "learning_rate": 1.6601532615711452e-06,
      "logits/chosen": -2.2390546798706055,
      "logits/rejected": -2.233534097671509,
      "logps/chosen": -102.70186614990234,
      "logps/rejected": -136.1205291748047,
      "loss": 0.111,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.10892915725708,
      "rewards/margins": 3.2926182746887207,
      "rewards/rejected": -5.401547431945801,
      "step": 350
    },
    {
      "epoch": 13.031674208144796,
      "grad_norm": 4.820348739624023,
      "learning_rate": 1.509800584902108e-06,
      "logits/chosen": -2.2781853675842285,
      "logits/rejected": -2.3037526607513428,
      "logps/chosen": -94.27687072753906,
      "logps/rejected": -143.52162170410156,
      "loss": 0.1133,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.185966730117798,
      "rewards/margins": 3.149972438812256,
      "rewards/rejected": -5.335939884185791,
      "step": 360
    },
    {
      "epoch": 13.393665158371041,
      "grad_norm": 4.292584419250488,
      "learning_rate": 1.3635840807037487e-06,
      "logits/chosen": -2.260101079940796,
      "logits/rejected": -2.270853042602539,
      "logps/chosen": -112.0027847290039,
      "logps/rejected": -155.92738342285156,
      "loss": 0.0866,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.6447501182556152,
      "rewards/margins": 3.6627235412597656,
      "rewards/rejected": -6.307473659515381,
      "step": 370
    },
    {
      "epoch": 13.755656108597286,
      "grad_norm": 4.569724082946777,
      "learning_rate": 1.2221145114853172e-06,
      "logits/chosen": -2.2924914360046387,
      "logits/rejected": -2.3221309185028076,
      "logps/chosen": -104.69401550292969,
      "logps/rejected": -138.05264282226562,
      "loss": 0.0953,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.8196561336517334,
      "rewards/margins": 3.5180962085723877,
      "rewards/rejected": -6.337752342224121,
      "step": 380
    },
    {
      "epoch": 14.117647058823529,
      "grad_norm": 4.538058280944824,
      "learning_rate": 1.085982811283654e-06,
      "logits/chosen": -2.2493948936462402,
      "logits/rejected": -2.2872321605682373,
      "logps/chosen": -102.8060531616211,
      "logps/rejected": -135.58599853515625,
      "loss": 0.0879,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.9318606853485107,
      "rewards/margins": 3.4618797302246094,
      "rewards/rejected": -6.393740177154541,
      "step": 390
    },
    {
      "epoch": 14.479638009049774,
      "grad_norm": 4.173523426055908,
      "learning_rate": 9.557576172663577e-07,
      "logits/chosen": -2.246553659439087,
      "logits/rejected": -2.2457058429718018,
      "logps/chosen": -109.64227294921875,
      "logps/rejected": -147.6108856201172,
      "loss": 0.0783,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.916919708251953,
      "rewards/margins": 3.4423089027404785,
      "rewards/rejected": -6.359227657318115,
      "step": 400
    },
    {
      "epoch": 14.841628959276019,
      "grad_norm": 4.715793132781982,
      "learning_rate": 8.319828944714508e-07,
      "logits/chosen": -2.241994857788086,
      "logits/rejected": -2.2265496253967285,
      "logps/chosen": -106.87748718261719,
      "logps/rejected": -134.00021362304688,
      "loss": 0.0818,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.9697346687316895,
      "rewards/margins": 3.2734649181365967,
      "rewards/rejected": -6.243198871612549,
      "step": 410
    },
    {
      "epoch": 15.203619909502262,
      "grad_norm": 3.8070931434631348,
      "learning_rate": 7.151756636052529e-07,
      "logits/chosen": -2.2427737712860107,
      "logits/rejected": -2.287010908126831,
      "logps/chosen": -93.37985229492188,
      "logps/rejected": -145.51707458496094,
      "loss": 0.08,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.7174670696258545,
      "rewards/margins": 3.7303779125213623,
      "rewards/rejected": -6.447844505310059,
      "step": 420
    },
    {
      "epoch": 15.565610859728507,
      "grad_norm": 3.25356125831604,
      "learning_rate": 6.058238413897052e-07,
      "logits/chosen": -2.2657885551452637,
      "logits/rejected": -2.259291887283325,
      "logps/chosen": -101.24921417236328,
      "logps/rejected": -136.51953125,
      "loss": 0.0697,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.116041421890259,
      "rewards/margins": 3.617115020751953,
      "rewards/rejected": -6.733156681060791,
      "step": 430
    },
    {
      "epoch": 15.927601809954751,
      "grad_norm": 4.29131555557251,
      "learning_rate": 5.043842024802675e-07,
      "logits/chosen": -2.2412171363830566,
      "logits/rejected": -2.2466728687286377,
      "logps/chosen": -98.49857330322266,
      "logps/rejected": -143.11204528808594,
      "loss": 0.0643,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.198906421661377,
      "rewards/margins": 3.8427040576934814,
      "rewards/rejected": -7.041609764099121,
      "step": 440
    },
    {
      "epoch": 16.289592760180994,
      "grad_norm": 4.5554375648498535,
      "learning_rate": 4.1128047146765936e-07,
      "logits/chosen": -2.2196035385131836,
      "logits/rejected": -2.243433952331543,
      "logps/chosen": -121.4827651977539,
      "logps/rejected": -141.68087768554688,
      "loss": 0.0693,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.3067448139190674,
      "rewards/margins": 4.059030532836914,
      "rewards/rejected": -7.365775108337402,
      "step": 450
    },
    {
      "epoch": 16.65158371040724,
      "grad_norm": 4.047634124755859,
      "learning_rate": 3.269015529333805e-07,
      "logits/chosen": -2.2159605026245117,
      "logits/rejected": -2.2447285652160645,
      "logps/chosen": -102.39476013183594,
      "logps/rejected": -162.81443786621094,
      "loss": 0.0608,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.4078800678253174,
      "rewards/margins": 4.151500701904297,
      "rewards/rejected": -7.55938196182251,
      "step": 460
    },
    {
      "epoch": 17.013574660633484,
      "grad_norm": 3.6768975257873535,
      "learning_rate": 2.515999069522676e-07,
      "logits/chosen": -2.222931385040283,
      "logits/rejected": -2.2669026851654053,
      "logps/chosen": -99.30271911621094,
      "logps/rejected": -134.9786376953125,
      "loss": 0.0617,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.145139217376709,
      "rewards/margins": 3.7978076934814453,
      "rewards/rejected": -6.9429473876953125,
      "step": 470
    },
    {
      "epoch": 17.375565610859727,
      "grad_norm": 6.302717208862305,
      "learning_rate": 1.8569007682777417e-07,
      "logits/chosen": -2.2295079231262207,
      "logits/rejected": -2.2286057472229004,
      "logps/chosen": -107.6258544921875,
      "logps/rejected": -135.12925720214844,
      "loss": 0.0602,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -3.210822343826294,
      "rewards/margins": 3.5960655212402344,
      "rewards/rejected": -6.806887626647949,
      "step": 480
    },
    {
      "epoch": 17.737556561085974,
      "grad_norm": 3.749708414077759,
      "learning_rate": 1.2944737520980883e-07,
      "logits/chosen": -2.199395179748535,
      "logits/rejected": -2.2533044815063477,
      "logps/chosen": -103.281005859375,
      "logps/rejected": -147.35745239257812,
      "loss": 0.0585,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.2957870960235596,
      "rewards/margins": 4.124021053314209,
      "rewards/rejected": -7.419807434082031,
      "step": 490
    },
    {
      "epoch": 18.099547511312217,
      "grad_norm": 3.7229220867156982,
      "learning_rate": 8.310673408334496e-08,
      "logits/chosen": -2.174075126647949,
      "logits/rejected": -2.1925175189971924,
      "logps/chosen": -110.58296203613281,
      "logps/rejected": -148.13821411132812,
      "loss": 0.0617,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.2307941913604736,
      "rewards/margins": 3.874736785888672,
      "rewards/rejected": -7.10552978515625,
      "step": 500
    },
    {
      "epoch": 18.46153846153846,
      "grad_norm": 3.482407331466675,
      "learning_rate": 4.6861723431538273e-08,
      "logits/chosen": -2.2031123638153076,
      "logits/rejected": -2.2612881660461426,
      "logps/chosen": -108.82826232910156,
      "logps/rejected": -164.102294921875,
      "loss": 0.0639,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.6643009185791016,
      "rewards/margins": 4.219679832458496,
      "rewards/rejected": -7.883980751037598,
      "step": 510
    },
    {
      "epoch": 18.823529411764707,
      "grad_norm": 3.6187524795532227,
      "learning_rate": 2.0863742672497244e-08,
      "logits/chosen": -2.228766918182373,
      "logits/rejected": -2.217647075653076,
      "logps/chosen": -107.79327392578125,
      "logps/rejected": -139.94277954101562,
      "loss": 0.0534,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -3.2466843128204346,
      "rewards/margins": 3.7326819896698,
      "rewards/rejected": -6.979366302490234,
      "step": 520
    },
    {
      "epoch": 19.18552036199095,
      "grad_norm": 4.491217613220215,
      "learning_rate": 5.221388247169945e-09,
      "logits/chosen": -2.194672107696533,
      "logits/rejected": -2.229614734649658,
      "logps/chosen": -97.34280395507812,
      "logps/rejected": -144.13217163085938,
      "loss": 0.0605,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.943859815597534,
      "rewards/margins": 3.916800022125244,
      "rewards/rejected": -6.860659599304199,
      "step": 530
    },
    {
      "epoch": 19.547511312217196,
      "grad_norm": 5.3823065757751465,
      "learning_rate": 0.0,
      "logits/chosen": -2.2234930992126465,
      "logits/rejected": -2.2182583808898926,
      "logps/chosen": -111.52687072753906,
      "logps/rejected": -151.66384887695312,
      "loss": 0.0601,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -3.98834228515625,
      "rewards/margins": 3.9456241130828857,
      "rewards/rejected": -7.93396520614624,
      "step": 540
    },
    {
      "epoch": 19.547511312217196,
      "step": 540,
      "total_flos": 1.993357666398765e+18,
      "train_loss": 0.29341091513633727,
      "train_runtime": 4173.8169,
      "train_samples_per_second": 8.467,
      "train_steps_per_second": 0.129
    }
  ],
  "logging_steps": 10,
  "max_steps": 540,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.993357666398765e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}