{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9984,
  "eval_steps": 500,
  "global_step": 156,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.064,
      "grad_norm": 46.88199996948242,
      "learning_rate": 6.249999999999999e-07,
      "logits/chosen": -1.8514013290405273,
      "logits/rejected": -0.29395362734794617,
      "logps/chosen": -214.1394500732422,
      "logps/rejected": -737.39794921875,
      "loss": 0.722,
      "rewards/accuracies": 0.4906249940395355,
      "rewards/chosen": 0.0037395646795630455,
      "rewards/margins": 0.025518950074911118,
      "rewards/rejected": -0.021779386326670647,
      "step": 10
    },
    {
      "epoch": 0.128,
      "grad_norm": 13.165843963623047,
      "learning_rate": 9.979871469976195e-07,
      "logits/chosen": -1.8478971719741821,
      "logits/rejected": -0.2753606140613556,
      "logps/chosen": -240.26084899902344,
      "logps/rejected": -844.6398315429688,
      "loss": 0.5035,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 0.035897765308618546,
      "rewards/margins": 0.6878108978271484,
      "rewards/rejected": -0.6519131660461426,
      "step": 20
    },
    {
      "epoch": 0.192,
      "grad_norm": 0.7543407082557678,
      "learning_rate": 9.755282581475767e-07,
      "logits/chosen": -2.1524031162261963,
      "logits/rejected": -0.8206841945648193,
      "logps/chosen": -241.90380859375,
      "logps/rejected": -830.2996215820312,
      "loss": 0.1368,
      "rewards/accuracies": 0.996874988079071,
      "rewards/chosen": -0.3361475467681885,
      "rewards/margins": 4.685339450836182,
      "rewards/rejected": -5.021487236022949,
      "step": 30
    },
    {
      "epoch": 0.256,
      "grad_norm": 0.9449922442436218,
      "learning_rate": 9.29224396800933e-07,
      "logits/chosen": -2.6018669605255127,
      "logits/rejected": -1.616600513458252,
      "logps/chosen": -260.47198486328125,
      "logps/rejected": -947.6165161132812,
      "loss": 0.0662,
      "rewards/accuracies": 0.9937499761581421,
      "rewards/chosen": -2.462709665298462,
      "rewards/margins": 14.078239440917969,
      "rewards/rejected": -16.54094886779785,
      "step": 40
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.21557988226413727,
      "learning_rate": 8.613974319136957e-07,
      "logits/chosen": -2.7778477668762207,
      "logits/rejected": -1.9211829900741577,
      "logps/chosen": -257.9867248535156,
      "logps/rejected": -1050.994384765625,
      "loss": 0.0562,
      "rewards/accuracies": 0.9906250238418579,
      "rewards/chosen": -3.9251632690429688,
      "rewards/margins": 22.336076736450195,
      "rewards/rejected": -26.26123809814453,
      "step": 50
    },
    {
      "epoch": 0.384,
      "grad_norm": 0.16911636292934418,
      "learning_rate": 7.754484907260512e-07,
      "logits/chosen": -2.7103047370910645,
      "logits/rejected": -1.904673457145691,
      "logps/chosen": -272.1762390136719,
      "logps/rejected": -1050.43212890625,
      "loss": 0.051,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -4.340147018432617,
      "rewards/margins": 22.80539321899414,
      "rewards/rejected": -27.14554214477539,
      "step": 60
    },
    {
      "epoch": 0.448,
      "grad_norm": 0.3055303394794464,
      "learning_rate": 6.756874120406714e-07,
      "logits/chosen": -2.6818180084228516,
      "logits/rejected": -1.7974838018417358,
      "logps/chosen": -252.34637451171875,
      "logps/rejected": -1027.539306640625,
      "loss": 0.0596,
      "rewards/accuracies": 0.984375,
      "rewards/chosen": -3.2451460361480713,
      "rewards/margins": 21.46558952331543,
      "rewards/rejected": -24.710735321044922,
      "step": 70
    },
    {
      "epoch": 0.512,
      "grad_norm": 0.15121270716190338,
      "learning_rate": 5.671166329088277e-07,
      "logits/chosen": -2.543553590774536,
      "logits/rejected": -1.6689860820770264,
      "logps/chosen": -260.95526123046875,
      "logps/rejected": -1009.7645874023438,
      "loss": 0.0476,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -2.7273285388946533,
      "rewards/margins": 19.519502639770508,
      "rewards/rejected": -22.246829986572266,
      "step": 80
    },
    {
      "epoch": 0.576,
      "grad_norm": 0.17506758868694305,
      "learning_rate": 4.5518034554828327e-07,
      "logits/chosen": -2.5249202251434326,
      "logits/rejected": -1.6314860582351685,
      "logps/chosen": -258.52435302734375,
      "logps/rejected": -976.0228271484375,
      "loss": 0.0575,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -2.350813150405884,
      "rewards/margins": 18.23154067993164,
      "rewards/rejected": -20.582355499267578,
      "step": 90
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.20912963151931763,
      "learning_rate": 3.454915028125263e-07,
      "logits/chosen": -2.5144877433776855,
      "logits/rejected": -1.5639787912368774,
      "logps/chosen": -247.0535430908203,
      "logps/rejected": -963.437744140625,
      "loss": 0.0454,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.8558976650238037,
      "rewards/margins": 17.28997230529785,
      "rewards/rejected": -19.145870208740234,
      "step": 100
    },
    {
      "epoch": 0.704,
      "grad_norm": 0.2550797462463379,
      "learning_rate": 2.4355036129704696e-07,
      "logits/chosen": -2.4958443641662598,
      "logits/rejected": -1.5513837337493896,
      "logps/chosen": -241.15231323242188,
      "logps/rejected": -962.02587890625,
      "loss": 0.0481,
      "rewards/accuracies": 1.0,
      "rewards/chosen": -1.829073190689087,
      "rewards/margins": 16.586135864257812,
      "rewards/rejected": -18.41520881652832,
      "step": 110
    },
    {
      "epoch": 0.768,
      "grad_norm": 0.15323293209075928,
      "learning_rate": 1.5446867550656767e-07,
      "logits/chosen": -2.4677257537841797,
      "logits/rejected": -1.5029109716415405,
      "logps/chosen": -249.34861755371094,
      "logps/rejected": -962.841552734375,
      "loss": 0.0838,
      "rewards/accuracies": 0.984375,
      "rewards/chosen": -1.7131061553955078,
      "rewards/margins": 16.56055450439453,
      "rewards/rejected": -18.27366065979004,
      "step": 120
    },
    {
      "epoch": 0.832,
      "grad_norm": 0.14431753754615784,
      "learning_rate": 8.271337313934867e-08,
      "logits/chosen": -2.411092519760132,
      "logits/rejected": -1.4570066928863525,
      "logps/chosen": -262.32379150390625,
      "logps/rejected": -950.74560546875,
      "loss": 0.0652,
      "rewards/accuracies": 0.984375,
      "rewards/chosen": -1.68479585647583,
      "rewards/margins": 15.87053394317627,
      "rewards/rejected": -17.555328369140625,
      "step": 130
    },
    {
      "epoch": 0.896,
      "grad_norm": 0.17921504378318787,
      "learning_rate": 3.188256468013139e-08,
      "logits/chosen": -2.4623465538024902,
      "logits/rejected": -1.4546464681625366,
      "logps/chosen": -243.91656494140625,
      "logps/rejected": -970.7305297851562,
      "loss": 0.0472,
      "rewards/accuracies": 0.996874988079071,
      "rewards/chosen": -1.3906798362731934,
      "rewards/margins": 16.57068634033203,
      "rewards/rejected": -17.961366653442383,
      "step": 140
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.1529853641986847,
      "learning_rate": 4.5251191160326495e-09,
      "logits/chosen": -2.4238433837890625,
      "logits/rejected": -1.397405982017517,
      "logps/chosen": -264.49542236328125,
      "logps/rejected": -1022.7637329101562,
      "loss": 0.0801,
      "rewards/accuracies": 0.987500011920929,
      "rewards/chosen": -1.6072216033935547,
      "rewards/margins": 16.616043090820312,
      "rewards/rejected": -18.2232666015625,
      "step": 150
    },
    {
      "epoch": 0.9984,
      "step": 156,
      "total_flos": 1.1115841451898962e+18,
      "train_loss": 0.1350558667610853,
      "train_runtime": 5886.4808,
      "train_samples_per_second": 0.849,
      "train_steps_per_second": 0.027
    }
  ],
  "logging_steps": 10,
  "max_steps": 156,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.1115841451898962e+18,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}