{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.23880597014925373,
  "eval_steps": 500,
  "global_step": 600,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03980099502487562,
      "grad_norm": 2.466599941253662,
      "learning_rate": 4.980474401576887e-07,
      "logits/chosen": -1.0382839441299438,
      "logits/rejected": -1.054585576057434,
      "logps/chosen": -180.16372680664062,
      "logps/rejected": -179.63958740234375,
      "loss": 0.7169,
      "rewards/accuracies": 0.508593738079071,
      "rewards/chosen": -0.278283029794693,
      "rewards/margins": 0.017729664221405983,
      "rewards/rejected": -0.2960126996040344,
      "step": 100
    },
    {
      "epoch": 0.07960199004975124,
      "grad_norm": 1.6285502910614014,
      "learning_rate": 4.922202605502572e-07,
      "logits/chosen": -1.0206574201583862,
      "logits/rejected": -1.0379180908203125,
      "logps/chosen": -180.13119506835938,
      "logps/rejected": -180.29315185546875,
      "loss": 0.7103,
      "rewards/accuracies": 0.5193750262260437,
      "rewards/chosen": -0.2392772138118744,
      "rewards/margins": 0.0195913203060627,
      "rewards/rejected": -0.2588685154914856,
      "step": 200
    },
    {
      "epoch": 0.11940298507462686,
      "grad_norm": 2.7344954013824463,
      "learning_rate": 4.82609484512869e-07,
      "logits/chosen": -1.0241189002990723,
      "logits/rejected": -1.0348433256149292,
      "logps/chosen": -178.86837768554688,
      "logps/rejected": -179.53598022460938,
      "loss": 0.7077,
      "rewards/accuracies": 0.5171874761581421,
      "rewards/chosen": -0.21391521394252777,
      "rewards/margins": 0.015260601416230202,
      "rewards/rejected": -0.22917583584785461,
      "step": 300
    },
    {
      "epoch": 0.15920398009950248,
      "grad_norm": 1.7553608417510986,
      "learning_rate": 4.6936523696827614e-07,
      "logits/chosen": -1.0034430027008057,
      "logits/rejected": -1.0131057500839233,
      "logps/chosen": -180.70912170410156,
      "logps/rejected": -181.44435119628906,
      "loss": 0.7064,
      "rewards/accuracies": 0.5106250047683716,
      "rewards/chosen": -0.18936260044574738,
      "rewards/margins": 0.013838082551956177,
      "rewards/rejected": -0.20320068299770355,
      "step": 400
    },
    {
      "epoch": 0.19900497512437812,
      "grad_norm": 1.4984805583953857,
      "learning_rate": 4.5269439940365644e-07,
      "logits/chosen": -1.0015250444412231,
      "logits/rejected": -1.0229138135910034,
      "logps/chosen": -178.01638793945312,
      "logps/rejected": -177.6285400390625,
      "loss": 0.7093,
      "rewards/accuracies": 0.4985937476158142,
      "rewards/chosen": -0.1724223494529724,
      "rewards/margins": 0.005522388964891434,
      "rewards/rejected": -0.17794474959373474,
      "step": 500
    },
    {
      "epoch": 0.23880597014925373,
      "grad_norm": 1.539502501487732,
      "learning_rate": 4.328573782827409e-07,
      "logits/chosen": -0.9833173155784607,
      "logits/rejected": -1.0065593719482422,
      "logps/chosen": -176.92718505859375,
      "logps/rejected": -174.7305145263672,
      "loss": 0.7017,
      "rewards/accuracies": 0.507031261920929,
      "rewards/chosen": -0.14655204117298126,
      "rewards/margins": 0.01735287345945835,
      "rewards/rejected": -0.16390492022037506,
      "step": 600
    }
  ],
  "logging_steps": 100,
  "max_steps": 2512,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}