{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9914529914529915,
  "eval_steps": 100,
  "global_step": 58,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "grad_norm": 4.071954585641894,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": -0.2491038590669632,
      "logits/rejected": -0.3451940417289734,
      "logps/chosen": -294.6504211425781,
      "logps/rejected": -404.2626647949219,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.17,
      "grad_norm": 3.905690256665182,
      "learning_rate": 4.92735454356513e-07,
      "logits/chosen": -0.2290731519460678,
      "logits/rejected": -0.42057597637176514,
      "logps/chosen": -285.1579895019531,
      "logps/rejected": -440.47698974609375,
      "loss": 0.6926,
      "rewards/accuracies": 0.4375,
      "rewards/chosen": 0.0010758544085547328,
      "rewards/margins": 3.7795203411405964e-07,
      "rewards/rejected": 0.0010754765244200826,
      "step": 10
    },
    {
      "epoch": 0.34,
      "grad_norm": 4.061739908914029,
      "learning_rate": 4.157806645601988e-07,
      "logits/chosen": -0.24342350661754608,
      "logits/rejected": -0.45833396911621094,
      "logps/chosen": -230.465576171875,
      "logps/rejected": -421.91131591796875,
      "loss": 0.6873,
      "rewards/accuracies": 0.7562500238418579,
      "rewards/chosen": 0.014977594837546349,
      "rewards/margins": 0.012003140524029732,
      "rewards/rejected": 0.002974454313516617,
      "step": 20
    },
    {
      "epoch": 0.51,
      "grad_norm": 3.7362242077940304,
      "learning_rate": 2.801341700638307e-07,
      "logits/chosen": -0.22939440608024597,
      "logits/rejected": -0.3433351516723633,
      "logps/chosen": -282.84619140625,
      "logps/rejected": -379.5437927246094,
      "loss": 0.6789,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.03496216982603073,
      "rewards/margins": 0.027732372283935547,
      "rewards/rejected": 0.007229797542095184,
      "step": 30
    },
    {
      "epoch": 0.68,
      "grad_norm": 3.7818612304795747,
      "learning_rate": 1.3381920698905784e-07,
      "logits/chosen": -0.22702646255493164,
      "logits/rejected": -0.45289507508277893,
      "logps/chosen": -244.3043212890625,
      "logps/rejected": -453.29522705078125,
      "loss": 0.6714,
      "rewards/accuracies": 0.875,
      "rewards/chosen": 0.04880012944340706,
      "rewards/margins": 0.04337088391184807,
      "rewards/rejected": 0.005429237149655819,
      "step": 40
    },
    {
      "epoch": 0.85,
      "grad_norm": 3.8140472657170306,
      "learning_rate": 2.863599358669755e-08,
      "logits/chosen": -0.32328224182128906,
      "logits/rejected": -0.45025634765625,
      "logps/chosen": -264.197021484375,
      "logps/rejected": -470.7127380371094,
      "loss": 0.6676,
      "rewards/accuracies": 0.856249988079071,
      "rewards/chosen": 0.05549341440200806,
      "rewards/margins": 0.05046632140874863,
      "rewards/rejected": 0.005027088802307844,
      "step": 50
    },
    {
      "epoch": 0.99,
      "step": 58,
      "total_flos": 0.0,
      "train_loss": 0.6775914965004757,
      "train_runtime": 852.993,
      "train_samples_per_second": 4.361,
      "train_steps_per_second": 0.068
    }
  ],
  "logging_steps": 10,
  "max_steps": 58,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}