{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 7.239819004524887,
  "eval_steps": 500,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.36199095022624433,
      "grad_norm": 4.351782321929932,
      "learning_rate": 9.259259259259259e-07,
      "logits/chosen": -2.3496108055114746,
      "logits/rejected": -2.354576349258423,
      "logps/chosen": -86.63139343261719,
      "logps/rejected": -76.90333557128906,
      "loss": 0.6923,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": -0.00019822348258458078,
      "rewards/margins": -0.002337233629077673,
      "rewards/rejected": 0.00213901000097394,
      "step": 10
    },
    {
      "epoch": 0.7239819004524887,
      "grad_norm": 3.8085336685180664,
      "learning_rate": 1.8518518518518519e-06,
      "logits/chosen": -2.3482470512390137,
      "logits/rejected": -2.3420329093933105,
      "logps/chosen": -73.18113708496094,
      "logps/rejected": -72.52565002441406,
      "loss": 0.6943,
      "rewards/accuracies": 0.4000000059604645,
      "rewards/chosen": 0.00011640912998700514,
      "rewards/margins": -0.002153881825506687,
      "rewards/rejected": 0.002270291093736887,
      "step": 20
    },
    {
      "epoch": 1.085972850678733,
      "grad_norm": 5.329021453857422,
      "learning_rate": 2.7777777777777783e-06,
      "logits/chosen": -2.3689351081848145,
      "logits/rejected": -2.3885178565979004,
      "logps/chosen": -72.15180969238281,
      "logps/rejected": -74.70518493652344,
      "loss": 0.6924,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.0020837264601141214,
      "rewards/margins": 0.00603193324059248,
      "rewards/rejected": -0.0039482079446315765,
      "step": 30
    },
    {
      "epoch": 1.4479638009049773,
      "grad_norm": 3.671722888946533,
      "learning_rate": 3.7037037037037037e-06,
      "logits/chosen": -2.3091399669647217,
      "logits/rejected": -2.3273236751556396,
      "logps/chosen": -69.45216369628906,
      "logps/rejected": -260.0699157714844,
      "loss": 0.6899,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": -0.0094170942902565,
      "rewards/margins": 0.14604035019874573,
      "rewards/rejected": -0.15545745193958282,
      "step": 40
    },
    {
      "epoch": 1.8099547511312217,
      "grad_norm": 4.172375679016113,
      "learning_rate": 4.62962962962963e-06,
      "logits/chosen": -2.31831693649292,
      "logits/rejected": -2.335618495941162,
      "logps/chosen": -72.82856750488281,
      "logps/rejected": -90.61243438720703,
      "loss": 0.6865,
      "rewards/accuracies": 0.625,
      "rewards/chosen": -0.01846747286617756,
      "rewards/margins": 0.015672579407691956,
      "rewards/rejected": -0.034140050411224365,
      "step": 50
    },
    {
      "epoch": 2.171945701357466,
      "grad_norm": 4.386168479919434,
      "learning_rate": 4.998119881260576e-06,
      "logits/chosen": -2.3203041553497314,
      "logits/rejected": -2.3315658569335938,
      "logps/chosen": -67.68244934082031,
      "logps/rejected": -86.2356185913086,
      "loss": 0.6775,
      "rewards/accuracies": 0.637499988079071,
      "rewards/chosen": -0.03993881493806839,
      "rewards/margins": 0.027381544932723045,
      "rewards/rejected": -0.06732036173343658,
      "step": 60
    },
    {
      "epoch": 2.5339366515837103,
      "grad_norm": 4.38440465927124,
      "learning_rate": 4.9866405060165044e-06,
      "logits/chosen": -2.3607311248779297,
      "logits/rejected": -2.3799984455108643,
      "logps/chosen": -72.8117904663086,
      "logps/rejected": -79.02535247802734,
      "loss": 0.6636,
      "rewards/accuracies": 0.6875,
      "rewards/chosen": -0.06544635444879532,
      "rewards/margins": 0.069185271859169,
      "rewards/rejected": -0.13463163375854492,
      "step": 70
    },
    {
      "epoch": 2.8959276018099547,
      "grad_norm": 4.333106517791748,
      "learning_rate": 4.964774158361991e-06,
      "logits/chosen": -2.392198324203491,
      "logits/rejected": -2.3854451179504395,
      "logps/chosen": -64.03775787353516,
      "logps/rejected": -71.01383972167969,
      "loss": 0.6473,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.09924488514661789,
      "rewards/margins": 0.1070803552865982,
      "rewards/rejected": -0.2063252478837967,
      "step": 80
    },
    {
      "epoch": 3.257918552036199,
      "grad_norm": 4.575820446014404,
      "learning_rate": 4.93261217644956e-06,
      "logits/chosen": -2.3316967487335205,
      "logits/rejected": -2.3399548530578613,
      "logps/chosen": -66.9199447631836,
      "logps/rejected": -67.97264099121094,
      "loss": 0.6292,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -0.1342274248600006,
      "rewards/margins": 0.1109505444765091,
      "rewards/rejected": -0.2451779842376709,
      "step": 90
    },
    {
      "epoch": 3.6199095022624435,
      "grad_norm": 4.666302680969238,
      "learning_rate": 4.8902889044347e-06,
      "logits/chosen": -2.3398795127868652,
      "logits/rejected": -2.3582355976104736,
      "logps/chosen": -76.05484771728516,
      "logps/rejected": -78.4997787475586,
      "loss": 0.6015,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.15528908371925354,
      "rewards/margins": 0.17627069354057312,
      "rewards/rejected": -0.33155977725982666,
      "step": 100
    },
    {
      "epoch": 3.981900452488688,
      "grad_norm": 4.472248554229736,
      "learning_rate": 4.837981131305475e-06,
      "logits/chosen": -2.3747470378875732,
      "logits/rejected": -2.3589282035827637,
      "logps/chosen": -75.08245086669922,
      "logps/rejected": -88.42532348632812,
      "loss": 0.582,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.22706560790538788,
      "rewards/margins": 0.2718750834465027,
      "rewards/rejected": -0.49894070625305176,
      "step": 110
    },
    {
      "epoch": 4.343891402714932,
      "grad_norm": 4.420746326446533,
      "learning_rate": 4.775907352415367e-06,
      "logits/chosen": -2.3004438877105713,
      "logits/rejected": -2.33561635017395,
      "logps/chosen": -92.03379821777344,
      "logps/rejected": -84.3415298461914,
      "loss": 0.5537,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.2706041932106018,
      "rewards/margins": 0.2943420112133026,
      "rewards/rejected": -0.564946174621582,
      "step": 120
    },
    {
      "epoch": 4.705882352941177,
      "grad_norm": 4.54348611831665,
      "learning_rate": 4.70432685680402e-06,
      "logits/chosen": -2.4000728130340576,
      "logits/rejected": -2.3912758827209473,
      "logps/chosen": -70.9849624633789,
      "logps/rejected": -79.27720642089844,
      "loss": 0.5306,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.2949373126029968,
      "rewards/margins": 0.4063098430633545,
      "rewards/rejected": -0.7012470960617065,
      "step": 130
    },
    {
      "epoch": 5.067873303167421,
      "grad_norm": 4.43679141998291,
      "learning_rate": 4.623538644118244e-06,
      "logits/chosen": -2.3637630939483643,
      "logits/rejected": -2.3869171142578125,
      "logps/chosen": -63.164039611816406,
      "logps/rejected": -78.0440673828125,
      "loss": 0.5028,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.2740551233291626,
      "rewards/margins": 0.5061118006706238,
      "rewards/rejected": -0.7801669836044312,
      "step": 140
    },
    {
      "epoch": 5.429864253393665,
      "grad_norm": 4.934199810028076,
      "learning_rate": 4.533880175657419e-06,
      "logits/chosen": -2.342858076095581,
      "logits/rejected": -2.3417344093322754,
      "logps/chosen": -80.54903411865234,
      "logps/rejected": -79.4267807006836,
      "loss": 0.4663,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.3702576756477356,
      "rewards/margins": 0.5886551141738892,
      "rewards/rejected": -0.95891273021698,
      "step": 150
    },
    {
      "epoch": 5.791855203619909,
      "grad_norm": 5.551158905029297,
      "learning_rate": 4.435725964760331e-06,
      "logits/chosen": -2.36419415473938,
      "logits/rejected": -2.388049364089966,
      "logps/chosen": -69.40000915527344,
      "logps/rejected": -91.36970520019531,
      "loss": 0.4636,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.42509594559669495,
      "rewards/margins": 0.7187294363975525,
      "rewards/rejected": -1.1438252925872803,
      "step": 160
    },
    {
      "epoch": 6.153846153846154,
      "grad_norm": 4.6863555908203125,
      "learning_rate": 4.329486012421531e-06,
      "logits/chosen": -2.3798611164093018,
      "logits/rejected": -2.388327121734619,
      "logps/chosen": -84.27762603759766,
      "logps/rejected": -95.46641540527344,
      "loss": 0.4245,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -0.4049956202507019,
      "rewards/margins": 0.965179443359375,
      "rewards/rejected": -1.3701750040054321,
      "step": 170
    },
    {
      "epoch": 6.515837104072398,
      "grad_norm": 5.001020431518555,
      "learning_rate": 4.215604094671835e-06,
      "logits/chosen": -2.376713991165161,
      "logits/rejected": -2.413689374923706,
      "logps/chosen": -75.96709442138672,
      "logps/rejected": -88.12495422363281,
      "loss": 0.449,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -0.3547631800174713,
      "rewards/margins": 0.8548161387443542,
      "rewards/rejected": -1.2095792293548584,
      "step": 180
    },
    {
      "epoch": 6.877828054298643,
      "grad_norm": 5.340312957763672,
      "learning_rate": 4.094555908876765e-06,
      "logits/chosen": -2.385936737060547,
      "logits/rejected": -2.4080803394317627,
      "logps/chosen": -66.79461669921875,
      "logps/rejected": -85.04891204833984,
      "loss": 0.3764,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -0.4008978307247162,
      "rewards/margins": 0.8593969345092773,
      "rewards/rejected": -1.2602946758270264,
      "step": 190
    },
    {
      "epoch": 7.239819004524887,
      "grad_norm": 4.7585883140563965,
      "learning_rate": 3.966847086696045e-06,
      "logits/chosen": -2.4034409523010254,
      "logits/rejected": -2.425673246383667,
      "logps/chosen": -78.95824432373047,
      "logps/rejected": -92.42652893066406,
      "loss": 0.3362,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -0.5016047358512878,
      "rewards/margins": 1.3585138320922852,
      "rewards/rejected": -1.8601186275482178,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 540,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7.389677741722829e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}