{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.994535519125683,
  "eval_steps": 500,
  "global_step": 91,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "grad_norm": 19.759845167026548,
      "learning_rate": 5e-08,
      "logits/chosen": -2.483218193054199,
      "logits/rejected": -2.51887583732605,
      "logps/chosen": -173.969482421875,
      "logps/rejected": -466.1477966308594,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.11,
      "grad_norm": 20.030240595618945,
      "learning_rate": 5e-07,
      "logits/chosen": -2.575467586517334,
      "logits/rejected": -2.533165454864502,
      "logps/chosen": -198.4555206298828,
      "logps/rejected": -388.2120361328125,
      "loss": 0.6923,
      "rewards/accuracies": 0.5416666865348816,
      "rewards/chosen": 0.0029368316754698753,
      "rewards/margins": 0.0030300142243504524,
      "rewards/rejected": -9.318282536696643e-05,
      "step": 10
    },
    {
      "epoch": 0.22,
      "grad_norm": 18.975065752388222,
      "learning_rate": 4.814309923172227e-07,
      "logits/chosen": -2.5513834953308105,
      "logits/rejected": -2.62607479095459,
      "logps/chosen": -228.58560180664062,
      "logps/rejected": -401.50775146484375,
      "loss": 0.6717,
      "rewards/accuracies": 0.768750011920929,
      "rewards/chosen": 0.040665917098522186,
      "rewards/margins": 0.04159024730324745,
      "rewards/rejected": -0.0009243293898180127,
      "step": 20
    },
    {
      "epoch": 0.33,
      "grad_norm": 20.69855762605237,
      "learning_rate": 4.284824336394748e-07,
      "logits/chosen": -2.7989377975463867,
      "logits/rejected": -2.578190326690674,
      "logps/chosen": -222.75927734375,
      "logps/rejected": -369.4903869628906,
      "loss": 0.6245,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": 0.1455826759338379,
      "rewards/margins": 0.1740579754114151,
      "rewards/rejected": -0.028475318104028702,
      "step": 30
    },
    {
      "epoch": 0.44,
      "grad_norm": 22.28073899245071,
      "learning_rate": 3.490199415097892e-07,
      "logits/chosen": -3.0074660778045654,
      "logits/rejected": -3.020289421081543,
      "logps/chosen": -209.62252807617188,
      "logps/rejected": -435.1111755371094,
      "loss": 0.5559,
      "rewards/accuracies": 0.90625,
      "rewards/chosen": 0.13761594891548157,
      "rewards/margins": 0.311123788356781,
      "rewards/rejected": -0.17350785434246063,
      "step": 40
    },
    {
      "epoch": 0.55,
      "grad_norm": 22.260338017683775,
      "learning_rate": 2.548478329429561e-07,
      "logits/chosen": -3.5367908477783203,
      "logits/rejected": -3.447880983352661,
      "logps/chosen": -207.8536834716797,
      "logps/rejected": -427.22650146484375,
      "loss": 0.4648,
      "rewards/accuracies": 0.875,
      "rewards/chosen": 0.021256055682897568,
      "rewards/margins": 0.612757682800293,
      "rewards/rejected": -0.5915015935897827,
      "step": 50
    },
    {
      "epoch": 0.66,
      "grad_norm": 26.04547260085489,
      "learning_rate": 1.5995556879882243e-07,
      "logits/chosen": -3.8727428913116455,
      "logits/rejected": -3.773128032684326,
      "logps/chosen": -249.71963500976562,
      "logps/rejected": -478.6151428222656,
      "loss": 0.411,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.19216546416282654,
      "rewards/margins": 0.928192138671875,
      "rewards/rejected": -1.1203575134277344,
      "step": 60
    },
    {
      "epoch": 0.77,
      "grad_norm": 18.92789031237145,
      "learning_rate": 7.843959053281663e-08,
      "logits/chosen": -3.900287628173828,
      "logits/rejected": -3.9814674854278564,
      "logps/chosen": -228.2233123779297,
      "logps/rejected": -514.8861694335938,
      "loss": 0.3719,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": -0.07359044253826141,
      "rewards/margins": 1.2602514028549194,
      "rewards/rejected": -1.3338419198989868,
      "step": 70
    },
    {
      "epoch": 0.87,
      "grad_norm": 19.01382974055132,
      "learning_rate": 2.2409264758463358e-08,
      "logits/chosen": -3.850555419921875,
      "logits/rejected": -3.8778367042541504,
      "logps/chosen": -222.34033203125,
      "logps/rejected": -520.5603637695312,
      "loss": 0.3401,
      "rewards/accuracies": 0.9312499761581421,
      "rewards/chosen": -0.008037117309868336,
      "rewards/margins": 1.370126724243164,
      "rewards/rejected": -1.3781636953353882,
      "step": 80
    },
    {
      "epoch": 0.98,
      "grad_norm": 21.287217551030523,
      "learning_rate": 1.8801187394248964e-10,
      "logits/chosen": -3.9702095985412598,
      "logits/rejected": -3.810866117477417,
      "logps/chosen": -205.1525421142578,
      "logps/rejected": -497.5603942871094,
      "loss": 0.3408,
      "rewards/accuracies": 0.925000011920929,
      "rewards/chosen": 0.03864166885614395,
      "rewards/margins": 1.3442068099975586,
      "rewards/rejected": -1.3055652379989624,
      "step": 90
    },
    {
      "epoch": 0.99,
      "step": 91,
      "total_flos": 0.0,
      "train_loss": 0.49557680382833375,
      "train_runtime": 1348.5444,
      "train_samples_per_second": 4.321,
      "train_steps_per_second": 0.067
    }
  ],
  "logging_steps": 10,
  "max_steps": 91,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}