{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.8944543828264758,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.22361359570661896,
      "grad_norm": 55.74723434448242,
      "learning_rate": 3.899821109123435e-05,
      "logits/chosen": -0.6040188074111938,
      "logits/rejected": -0.7450945973396301,
      "logps/chosen": -420.3378601074219,
      "logps/rejected": -462.8780517578125,
      "loss": 1.797,
      "rewards/accuracies": 0.6399999856948853,
      "rewards/chosen": -8.708248138427734,
      "rewards/margins": 2.4897494316101074,
      "rewards/rejected": -11.197998046875,
      "step": 500
    },
    {
      "epoch": 0.4472271914132379,
      "grad_norm": 53.70317459106445,
      "learning_rate": 2.78175313059034e-05,
      "logits/chosen": -0.5758157968521118,
      "logits/rejected": -0.6371915936470032,
      "logps/chosen": -458.7496032714844,
      "logps/rejected": -508.36602783203125,
      "loss": 2.0075,
      "rewards/accuracies": 0.6168749928474426,
      "rewards/chosen": -12.504631042480469,
      "rewards/margins": 3.4706857204437256,
      "rewards/rejected": -15.975316047668457,
      "step": 1000
    },
    {
      "epoch": 0.6708407871198568,
      "grad_norm": 24.010499954223633,
      "learning_rate": 1.6636851520572452e-05,
      "logits/chosen": -0.9147199392318726,
      "logits/rejected": -0.9995958805084229,
      "logps/chosen": -451.2334899902344,
      "logps/rejected": -513.6981811523438,
      "loss": 1.7831,
      "rewards/accuracies": 0.6547499895095825,
      "rewards/chosen": -11.545605659484863,
      "rewards/margins": 4.830613613128662,
      "rewards/rejected": -16.376218795776367,
      "step": 1500
    },
    {
      "epoch": 0.8944543828264758,
      "grad_norm": 38.50263977050781,
      "learning_rate": 5.456171735241503e-06,
      "logits/chosen": -0.8216423392295837,
      "logits/rejected": -0.8953205347061157,
      "logps/chosen": -446.2303771972656,
      "logps/rejected": -521.4434814453125,
      "loss": 1.5334,
      "rewards/accuracies": 0.6894999742507935,
      "rewards/chosen": -11.270264625549316,
      "rewards/margins": 6.051136016845703,
      "rewards/rejected": -17.321399688720703,
      "step": 2000
    }
  ],
  "logging_steps": 500,
  "max_steps": 2236,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 250,
  "total_flos": 0.0,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}