{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9874476987447699,
  "eval_steps": 500,
  "global_step": 59,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.016736401673640166,
      "grad_norm": 0.15945951840902217,
      "learning_rate": 8.333333333333333e-08,
      "logits/chosen": -1.923944354057312,
      "logits/rejected": -1.923944354057312,
      "logps/chosen": -152.47433471679688,
      "logps/pi_response": -152.48638916015625,
      "logps/ref_response": -152.48638916015625,
      "logps/rejected": -152.47433471679688,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.16736401673640167,
      "grad_norm": 0.3170532004182247,
      "learning_rate": 4.930057285201027e-07,
      "logits/chosen": -1.9577691555023193,
      "logits/rejected": -1.9577691555023193,
      "logps/chosen": -169.63162231445312,
      "logps/pi_response": -169.6338653564453,
      "logps/ref_response": -151.2162322998047,
      "logps/rejected": -169.63162231445312,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": -0.18470749258995056,
      "rewards/margins": 0.0,
      "rewards/rejected": -0.18470749258995056,
      "step": 10
    },
    {
      "epoch": 0.33472803347280333,
      "grad_norm": 0.28020898661548793,
      "learning_rate": 4.187457503795526e-07,
      "logits/chosen": -1.840193510055542,
      "logits/rejected": -1.840193510055542,
      "logps/chosen": -359.313720703125,
      "logps/pi_response": -359.30316162109375,
      "logps/ref_response": -151.60671997070312,
      "logps/rejected": -359.313720703125,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": -2.077146053314209,
      "rewards/margins": 0.0,
      "rewards/rejected": -2.077146053314209,
      "step": 20
    },
    {
      "epoch": 0.502092050209205,
      "grad_norm": 0.3219085642544482,
      "learning_rate": 2.8691164100062034e-07,
      "logits/chosen": -1.2987468242645264,
      "logits/rejected": -1.2987468242645264,
      "logps/chosen": -588.2769165039062,
      "logps/pi_response": -588.268310546875,
      "logps/ref_response": -151.4405517578125,
      "logps/rejected": -588.2769165039062,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": -4.368352890014648,
      "rewards/margins": 0.0,
      "rewards/rejected": -4.368352890014648,
      "step": 30
    },
    {
      "epoch": 0.6694560669456067,
      "grad_norm": 0.6799069863533791,
      "learning_rate": 1.4248369943086995e-07,
      "logits/chosen": 1.524221420288086,
      "logits/rejected": 1.524221420288086,
      "logps/chosen": -909.3970947265625,
      "logps/pi_response": -909.3892822265625,
      "logps/ref_response": -150.93089294433594,
      "logps/rejected": -909.3970947265625,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": -7.585102081298828,
      "rewards/margins": 0.0,
      "rewards/rejected": -7.585102081298828,
      "step": 40
    },
    {
      "epoch": 0.8368200836820083,
      "grad_norm": 0.7614228828029587,
      "learning_rate": 3.473909705816111e-08,
      "logits/chosen": 3.145961284637451,
      "logits/rejected": 3.145961284637451,
      "logps/chosen": -1317.4720458984375,
      "logps/pi_response": -1317.4351806640625,
      "logps/ref_response": -151.639404296875,
      "logps/rejected": -1317.4720458984375,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": -11.658597946166992,
      "rewards/margins": 0.0,
      "rewards/rejected": -11.658597946166992,
      "step": 50
    },
    {
      "epoch": 0.9874476987447699,
      "step": 59,
      "total_flos": 0.0,
      "train_loss": 0.69314208980334,
      "train_runtime": 1040.491,
      "train_samples_per_second": 14.688,
      "train_steps_per_second": 0.057
    }
  ],
  "logging_steps": 10,
  "max_steps": 59,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 100,
  "total_flos": 0.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}