{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 6.584362139917696,
  "eval_steps": 500,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.3292181069958848,
      "grad_norm": 4.28085994720459,
      "learning_rate": 8.333333333333333e-07,
      "logits/chosen": -2.348794937133789,
      "logits/rejected": -2.3731939792633057,
      "logps/chosen": -65.27528381347656,
      "logps/rejected": -66.73173522949219,
      "loss": 0.6933,
      "rewards/accuracies": 0.38749998807907104,
      "rewards/chosen": -0.000993417459540069,
      "rewards/margins": -0.0003861843142658472,
      "rewards/rejected": -0.0006072330288589001,
      "step": 10
    },
    {
      "epoch": 0.6584362139917695,
      "grad_norm": 3.9006142616271973,
      "learning_rate": 1.6666666666666667e-06,
      "logits/chosen": -2.381702184677124,
      "logits/rejected": -2.387089252471924,
      "logps/chosen": -76.84241485595703,
      "logps/rejected": -76.77680969238281,
      "loss": 0.6941,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": 0.002538852859288454,
      "rewards/margins": 0.0011897915974259377,
      "rewards/rejected": 0.0013490616111084819,
      "step": 20
    },
    {
      "epoch": 0.9876543209876543,
      "grad_norm": 4.658288478851318,
      "learning_rate": 2.5e-06,
      "logits/chosen": -2.3804943561553955,
      "logits/rejected": -2.3802895545959473,
      "logps/chosen": -65.05754089355469,
      "logps/rejected": -60.8338737487793,
      "loss": 0.6938,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.004748785402625799,
      "rewards/margins": 0.0050577023066580296,
      "rewards/rejected": -0.00030891643837094307,
      "step": 30
    },
    {
      "epoch": 1.316872427983539,
      "grad_norm": 10.031479835510254,
      "learning_rate": 3.3333333333333333e-06,
      "logits/chosen": -2.343177556991577,
      "logits/rejected": -2.356895923614502,
      "logps/chosen": -72.90717315673828,
      "logps/rejected": -64.87867736816406,
      "loss": 0.6884,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.02475106716156006,
      "rewards/margins": 0.029533248394727707,
      "rewards/rejected": -0.0047821830958127975,
      "step": 40
    },
    {
      "epoch": 1.646090534979424,
      "grad_norm": 5.148515224456787,
      "learning_rate": 4.166666666666667e-06,
      "logits/chosen": -2.314805507659912,
      "logits/rejected": -2.332075357437134,
      "logps/chosen": -74.19972229003906,
      "logps/rejected": -84.31201934814453,
      "loss": 0.6888,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": 0.008543441072106361,
      "rewards/margins": 0.011772889643907547,
      "rewards/rejected": -0.0032294485718011856,
      "step": 50
    },
    {
      "epoch": 1.9753086419753085,
      "grad_norm": 4.609860420227051,
      "learning_rate": 5e-06,
      "logits/chosen": -2.350900173187256,
      "logits/rejected": -2.330029010772705,
      "logps/chosen": -72.87406921386719,
      "logps/rejected": -69.63726043701172,
      "loss": 0.6869,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.009138436987996101,
      "rewards/margins": 0.013146847486495972,
      "rewards/rejected": -0.0040084123611450195,
      "step": 60
    },
    {
      "epoch": 2.3045267489711936,
      "grad_norm": 4.261270523071289,
      "learning_rate": 4.995770395678171e-06,
      "logits/chosen": -2.3685832023620605,
      "logits/rejected": -2.3904385566711426,
      "logps/chosen": -75.97269439697266,
      "logps/rejected": -78.17133331298828,
      "loss": 0.6707,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 0.03298325464129448,
      "rewards/margins": 0.05551639944314957,
      "rewards/rejected": -0.02253313548862934,
      "step": 70
    },
    {
      "epoch": 2.633744855967078,
      "grad_norm": 4.327195644378662,
      "learning_rate": 4.983095894354858e-06,
      "logits/chosen": -2.3301124572753906,
      "logits/rejected": -2.331705093383789,
      "logps/chosen": -72.1042251586914,
      "logps/rejected": -62.44911575317383,
      "loss": 0.6575,
      "rewards/accuracies": 0.699999988079071,
      "rewards/chosen": 0.02669849991798401,
      "rewards/margins": 0.051876507699489594,
      "rewards/rejected": -0.025178011506795883,
      "step": 80
    },
    {
      "epoch": 2.962962962962963,
      "grad_norm": 4.614945888519287,
      "learning_rate": 4.962019382530521e-06,
      "logits/chosen": -2.3660030364990234,
      "logits/rejected": -2.379166603088379,
      "logps/chosen": -67.3130874633789,
      "logps/rejected": -65.10738372802734,
      "loss": 0.6454,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": 0.019906962290406227,
      "rewards/margins": 0.07426988333463669,
      "rewards/rejected": -0.05436293035745621,
      "step": 90
    },
    {
      "epoch": 3.292181069958848,
      "grad_norm": 4.318708896636963,
      "learning_rate": 4.93261217644956e-06,
      "logits/chosen": -2.3698017597198486,
      "logits/rejected": -2.378725051879883,
      "logps/chosen": -75.62451171875,
      "logps/rejected": -80.79871368408203,
      "loss": 0.6156,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": 0.09396305680274963,
      "rewards/margins": 0.2190607488155365,
      "rewards/rejected": -0.12509770691394806,
      "step": 100
    },
    {
      "epoch": 3.6213991769547325,
      "grad_norm": 4.499190807342529,
      "learning_rate": 4.894973780788722e-06,
      "logits/chosen": -2.4184162616729736,
      "logits/rejected": -2.400146007537842,
      "logps/chosen": -62.17826461791992,
      "logps/rejected": -65.74874877929688,
      "loss": 0.5929,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 0.051440201699733734,
      "rewards/margins": 0.19785621762275696,
      "rewards/rejected": -0.14641599357128143,
      "step": 110
    },
    {
      "epoch": 3.950617283950617,
      "grad_norm": 4.767049312591553,
      "learning_rate": 4.849231551964771e-06,
      "logits/chosen": -2.389042615890503,
      "logits/rejected": -2.3723196983337402,
      "logps/chosen": -81.3266830444336,
      "logps/rejected": -79.3870849609375,
      "loss": 0.576,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": 0.006978911813348532,
      "rewards/margins": 0.3074144721031189,
      "rewards/rejected": -0.30043551325798035,
      "step": 120
    },
    {
      "epoch": 4.279835390946502,
      "grad_norm": 4.824108123779297,
      "learning_rate": 4.7955402672006855e-06,
      "logits/chosen": -2.393279552459717,
      "logits/rejected": -2.4019925594329834,
      "logps/chosen": -76.16815185546875,
      "logps/rejected": -76.16459655761719,
      "loss": 0.5395,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.004183758515864611,
      "rewards/margins": 0.3808066248893738,
      "rewards/rejected": -0.37662285566329956,
      "step": 130
    },
    {
      "epoch": 4.609053497942387,
      "grad_norm": 5.023725986480713,
      "learning_rate": 4.734081600808531e-06,
      "logits/chosen": -2.3473973274230957,
      "logits/rejected": -2.361241579055786,
      "logps/chosen": -81.27131652832031,
      "logps/rejected": -94.87051391601562,
      "loss": 0.5009,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.09364889562129974,
      "rewards/margins": 0.5326429605484009,
      "rewards/rejected": -0.6262918710708618,
      "step": 140
    },
    {
      "epoch": 4.938271604938271,
      "grad_norm": 5.259047031402588,
      "learning_rate": 4.665063509461098e-06,
      "logits/chosen": -2.4455697536468506,
      "logits/rejected": -2.4381449222564697,
      "logps/chosen": -69.75814819335938,
      "logps/rejected": -77.3443832397461,
      "loss": 0.5111,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.15613806247711182,
      "rewards/margins": 0.5841559767723083,
      "rewards/rejected": -0.7402940392494202,
      "step": 150
    },
    {
      "epoch": 5.267489711934156,
      "grad_norm": 4.720038890838623,
      "learning_rate": 4.588719528532342e-06,
      "logits/chosen": -2.4044508934020996,
      "logits/rejected": -2.386610269546509,
      "logps/chosen": -89.14405059814453,
      "logps/rejected": -86.31761932373047,
      "loss": 0.4646,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -0.24362032115459442,
      "rewards/margins": 0.6143159866333008,
      "rewards/rejected": -0.8579362630844116,
      "step": 160
    },
    {
      "epoch": 5.596707818930041,
      "grad_norm": 4.4304094314575195,
      "learning_rate": 4.50530798188761e-06,
      "logits/chosen": -2.4376425743103027,
      "logits/rejected": -2.416869878768921,
      "logps/chosen": -83.4544906616211,
      "logps/rejected": -87.96864318847656,
      "loss": 0.4327,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": -0.25005388259887695,
      "rewards/margins": 0.7776281833648682,
      "rewards/rejected": -1.0276820659637451,
      "step": 170
    },
    {
      "epoch": 5.925925925925926,
      "grad_norm": 5.450507164001465,
      "learning_rate": 4.415111107797445e-06,
      "logits/chosen": -2.429447650909424,
      "logits/rejected": -2.422396421432495,
      "logps/chosen": -84.02339172363281,
      "logps/rejected": -81.29154968261719,
      "loss": 0.4313,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": -0.4232204556465149,
      "rewards/margins": 0.8838070034980774,
      "rewards/rejected": -1.3070275783538818,
      "step": 180
    },
    {
      "epoch": 6.255144032921811,
      "grad_norm": 5.132652282714844,
      "learning_rate": 4.318434103932622e-06,
      "logits/chosen": -2.433706760406494,
      "logits/rejected": -2.460986614227295,
      "logps/chosen": -82.15243530273438,
      "logps/rejected": -95.60574340820312,
      "loss": 0.4021,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": -0.47676190733909607,
      "rewards/margins": 1.0315628051757812,
      "rewards/rejected": -1.5083246231079102,
      "step": 190
    },
    {
      "epoch": 6.584362139917696,
      "grad_norm": 5.9813690185546875,
      "learning_rate": 4.215604094671835e-06,
      "logits/chosen": -2.4577317237854004,
      "logits/rejected": -2.474640130996704,
      "logps/chosen": -70.86317443847656,
      "logps/rejected": -90.49053192138672,
      "loss": 0.3643,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -0.5595805048942566,
      "rewards/margins": 1.0751607418060303,
      "rewards/rejected": -1.634741187095642,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 600,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7.313333796093297e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}