{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.938271604938271,
  "eval_steps": 500,
  "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.3292181069958848,
      "grad_norm": 0.5561477541923523,
      "learning_rate": 8.333333333333333e-07,
      "logits/chosen": 1.7485754489898682,
      "logits/rejected": 1.8832639455795288,
      "logps/chosen": -70.18267059326172,
      "logps/rejected": -77.9986343383789,
      "loss": 0.6938,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": 0.005453853867948055,
      "rewards/margins": 0.013218576088547707,
      "rewards/rejected": -0.007764720823615789,
      "step": 10
    },
    {
      "epoch": 0.6584362139917695,
      "grad_norm": 0.48141908645629883,
      "learning_rate": 1.6666666666666667e-06,
      "logits/chosen": 1.9016907215118408,
      "logits/rejected": 1.9251121282577515,
      "logps/chosen": -96.5027847290039,
      "logps/rejected": -87.00735473632812,
      "loss": 0.6937,
      "rewards/accuracies": 0.5249999761581421,
      "rewards/chosen": -0.005797500256448984,
      "rewards/margins": -0.0004409264656715095,
      "rewards/rejected": -0.005356573965400457,
      "step": 20
    },
    {
      "epoch": 0.9876543209876543,
      "grad_norm": 0.4639015197753906,
      "learning_rate": 2.5e-06,
      "logits/chosen": 1.7938541173934937,
      "logits/rejected": 1.6962993144989014,
      "logps/chosen": -71.47590637207031,
      "logps/rejected": -66.45989227294922,
      "loss": 0.6942,
      "rewards/accuracies": 0.4124999940395355,
      "rewards/chosen": -0.004830303601920605,
      "rewards/margins": -0.00935445912182331,
      "rewards/rejected": 0.004524155054241419,
      "step": 30
    },
    {
      "epoch": 1.316872427983539,
      "grad_norm": 0.44931092858314514,
      "learning_rate": 3.3333333333333333e-06,
      "logits/chosen": 1.8256213665008545,
      "logits/rejected": 1.8677200078964233,
      "logps/chosen": -75.90711975097656,
      "logps/rejected": -76.26548767089844,
      "loss": 0.6935,
      "rewards/accuracies": 0.5375000238418579,
      "rewards/chosen": -0.004859285429120064,
      "rewards/margins": -0.007447429001331329,
      "rewards/rejected": 0.0025881435722112656,
      "step": 40
    },
    {
      "epoch": 1.646090534979424,
      "grad_norm": 0.512350857257843,
      "learning_rate": 4.166666666666667e-06,
      "logits/chosen": 1.7572576999664307,
      "logits/rejected": 1.7408854961395264,
      "logps/chosen": -80.90664672851562,
      "logps/rejected": -85.82096862792969,
      "loss": 0.6937,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.00120059447363019,
      "rewards/margins": -0.0018891148502007127,
      "rewards/rejected": 0.0006885197362862527,
      "step": 50
    },
    {
      "epoch": 1.9753086419753085,
      "grad_norm": 0.5260242819786072,
      "learning_rate": 5e-06,
      "logits/chosen": 1.827275037765503,
      "logits/rejected": 1.8168131113052368,
      "logps/chosen": -86.74467468261719,
      "logps/rejected": -79.20576477050781,
      "loss": 0.6937,
      "rewards/accuracies": 0.4124999940395355,
      "rewards/chosen": 0.0014693590346723795,
      "rewards/margins": -0.0025326632894575596,
      "rewards/rejected": 0.00400202302262187,
      "step": 60
    },
    {
      "epoch": 2.3045267489711936,
      "grad_norm": 0.5231289267539978,
      "learning_rate": 4.995770395678171e-06,
      "logits/chosen": 1.7851394414901733,
      "logits/rejected": 1.8952877521514893,
      "logps/chosen": -81.03253173828125,
      "logps/rejected": -88.5263442993164,
      "loss": 0.6932,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.003017458599060774,
      "rewards/margins": -0.002731734188273549,
      "rewards/rejected": -0.0002857256622519344,
      "step": 70
    },
    {
      "epoch": 2.633744855967078,
      "grad_norm": 0.5552361011505127,
      "learning_rate": 4.983095894354858e-06,
      "logits/chosen": 1.8227930068969727,
      "logits/rejected": 1.7752052545547485,
      "logps/chosen": -89.98479461669922,
      "logps/rejected": -72.01054382324219,
      "loss": 0.6928,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": 0.0013810636010020971,
      "rewards/margins": -0.0008344938978552818,
      "rewards/rejected": 0.0022155570331960917,
      "step": 80
    },
    {
      "epoch": 2.962962962962963,
      "grad_norm": 0.603434681892395,
      "learning_rate": 4.962019382530521e-06,
      "logits/chosen": 1.829049825668335,
      "logits/rejected": 1.78665030002594,
      "logps/chosen": -81.95549011230469,
      "logps/rejected": -76.07003021240234,
      "loss": 0.6909,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.0062006814405322075,
      "rewards/margins": 0.003528360743075609,
      "rewards/rejected": 0.00267231953330338,
      "step": 90
    },
    {
      "epoch": 3.292181069958848,
      "grad_norm": 0.6200206279754639,
      "learning_rate": 4.93261217644956e-06,
      "logits/chosen": 1.7920262813568115,
      "logits/rejected": 1.7817165851593018,
      "logps/chosen": -84.39167022705078,
      "logps/rejected": -85.04205322265625,
      "loss": 0.6907,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": 0.0063859038054943085,
      "rewards/margins": 0.007152262143790722,
      "rewards/rejected": -0.0007663581636734307,
      "step": 100
    },
    {
      "epoch": 3.6213991769547325,
      "grad_norm": 0.5937617421150208,
      "learning_rate": 4.894973780788722e-06,
      "logits/chosen": 1.8367125988006592,
      "logits/rejected": 1.8536920547485352,
      "logps/chosen": -67.41716003417969,
      "logps/rejected": -72.06455993652344,
      "loss": 0.6891,
      "rewards/accuracies": 0.574999988079071,
      "rewards/chosen": -0.00015740413800813258,
      "rewards/margins": 0.013882984407246113,
      "rewards/rejected": -0.01404038816690445,
      "step": 110
    },
    {
      "epoch": 3.950617283950617,
      "grad_norm": 0.8695696592330933,
      "learning_rate": 4.849231551964771e-06,
      "logits/chosen": 1.8238048553466797,
      "logits/rejected": 1.7972408533096313,
      "logps/chosen": -104.34183502197266,
      "logps/rejected": -81.703369140625,
      "loss": 0.6864,
      "rewards/accuracies": 0.6000000238418579,
      "rewards/chosen": 0.00020323302305769175,
      "rewards/margins": 0.011230283416807652,
      "rewards/rejected": -0.011027050204575062,
      "step": 120
    },
    {
      "epoch": 4.279835390946502,
      "grad_norm": 0.7833828926086426,
      "learning_rate": 4.7955402672006855e-06,
      "logits/chosen": 1.7440074682235718,
      "logits/rejected": 1.7824723720550537,
      "logps/chosen": -92.3200912475586,
      "logps/rejected": -85.88248443603516,
      "loss": 0.6861,
      "rewards/accuracies": 0.5874999761581421,
      "rewards/chosen": 0.0005388978170230985,
      "rewards/margins": 0.011814715340733528,
      "rewards/rejected": -0.011275815777480602,
      "step": 130
    },
    {
      "epoch": 4.609053497942387,
      "grad_norm": 0.7381640672683716,
      "learning_rate": 4.734081600808531e-06,
      "logits/chosen": 1.6734364032745361,
      "logits/rejected": 1.7311270236968994,
      "logps/chosen": -88.11524963378906,
      "logps/rejected": -97.87281799316406,
      "loss": 0.682,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.006451706402003765,
      "rewards/margins": 0.021939774975180626,
      "rewards/rejected": -0.028391480445861816,
      "step": 140
    },
    {
      "epoch": 4.938271604938271,
      "grad_norm": 0.7793841361999512,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": 1.8139768838882446,
      "logits/rejected": 1.766761064529419,
      "logps/chosen": -77.28334045410156,
      "logps/rejected": -78.78504943847656,
      "loss": 0.6777,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.011237703263759613,
      "rewards/margins": 0.03930521756410599,
      "rewards/rejected": -0.050542913377285004,
      "step": 150
    }
  ],
  "logging_steps": 10,
  "max_steps": 600,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.0929507662351565e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}