{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.6877637130801688,
  "eval_steps": 500,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.33755274261603374,
      "grad_norm": 0.4396141469478607,
      "learning_rate": 8.620689655172415e-07,
      "logits/chosen": 1.6453087329864502,
      "logits/rejected": 1.694819450378418,
      "logps/chosen": -74.5937728881836,
      "logps/rejected": -83.19783782958984,
      "loss": 0.6928,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": 0.003385844174772501,
      "rewards/margins": 0.004794469103217125,
      "rewards/rejected": -0.0014086246956139803,
      "step": 10
    },
    {
      "epoch": 0.6751054852320675,
      "grad_norm": 3.18404483795166,
      "learning_rate": 1.724137931034483e-06,
      "logits/chosen": 1.7945035696029663,
      "logits/rejected": 1.8347476720809937,
      "logps/chosen": -95.46636199951172,
      "logps/rejected": -101.22709655761719,
      "loss": 0.6933,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.0005593777750618756,
      "rewards/margins": -0.002013001125305891,
      "rewards/rejected": 0.0014536241069436073,
      "step": 20
    },
    {
      "epoch": 1.0126582278481013,
      "grad_norm": 0.48596081137657166,
      "learning_rate": 2.5862068965517246e-06,
      "logits/chosen": 1.774155616760254,
      "logits/rejected": 1.8374344110488892,
      "logps/chosen": -82.01265716552734,
      "logps/rejected": -84.23635864257812,
      "loss": 0.6948,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.0031326995231211185,
      "rewards/margins": -0.00463916826993227,
      "rewards/rejected": 0.0015064675826579332,
      "step": 30
    },
    {
      "epoch": 1.350210970464135,
      "grad_norm": 0.5144609808921814,
      "learning_rate": 3.448275862068966e-06,
      "logits/chosen": 1.838865876197815,
      "logits/rejected": 1.9505430459976196,
      "logps/chosen": -73.50071716308594,
      "logps/rejected": -88.8648452758789,
      "loss": 0.6928,
      "rewards/accuracies": 0.42500001192092896,
      "rewards/chosen": -0.0035957477521151304,
      "rewards/margins": -0.008830643258988857,
      "rewards/rejected": 0.005234894808381796,
      "step": 40
    },
    {
      "epoch": 1.6877637130801688,
      "grad_norm": 0.48295095562934875,
      "learning_rate": 4.310344827586207e-06,
      "logits/chosen": 1.8197529315948486,
      "logits/rejected": 1.8459796905517578,
      "logps/chosen": -83.36590576171875,
      "logps/rejected": -70.26930236816406,
      "loss": 0.6929,
      "rewards/accuracies": 0.4749999940395355,
      "rewards/chosen": -0.0030026868917047977,
      "rewards/margins": -0.006082554347813129,
      "rewards/rejected": 0.003079867223277688,
      "step": 50
    }
  ],
  "logging_steps": 10,
  "max_steps": 580,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.7011436887526605e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}