{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.23529411764705882,
  "eval_steps": 3,
  "global_step": 9,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0,
      "eval_logits/chosen": -2.3223893642425537,
      "eval_logits/rejected": -1.7441076040267944,
      "eval_logps/chosen": -205.81431579589844,
      "eval_logps/rejected": -160.84881591796875,
      "eval_loss": 0.6931471824645996,
      "eval_rewards/accuracies": 0.0,
      "eval_rewards/chosen": 0.0,
      "eval_rewards/margins": 0.0,
      "eval_rewards/rejected": 0.0,
      "eval_runtime": 13.1039,
      "eval_samples_per_second": 1.297,
      "eval_steps_per_second": 0.687,
      "step": 0
    },
    {
      "epoch": 0.026143790849673203,
      "grad_norm": 11.304482460021973,
      "learning_rate": 0.0,
      "logits/chosen": -2.2234325408935547,
      "logits/rejected": -2.2330026626586914,
      "logps/chosen": -251.4762420654297,
      "logps/rejected": -287.5872497558594,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.05228758169934641,
      "grad_norm": 11.91282844543457,
      "learning_rate": 2e-05,
      "logits/chosen": -2.2173714637756348,
      "logits/rejected": -2.2363157272338867,
      "logps/chosen": -227.2211151123047,
      "logps/rejected": -262.26251220703125,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 2
    },
    {
      "epoch": 0.0784313725490196,
      "grad_norm": 9.456698417663574,
      "learning_rate": 4e-05,
      "logits/chosen": -1.7777986526489258,
      "logits/rejected": -1.5732976198196411,
      "logps/chosen": -166.3558807373047,
      "logps/rejected": -206.31166076660156,
      "loss": 0.6976,
      "rewards/accuracies": 0.375,
      "rewards/chosen": 0.005008268635720015,
      "rewards/margins": -0.007055856287479401,
      "rewards/rejected": 0.012064123526215553,
      "step": 3
    },
    {
      "epoch": 0.0784313725490196,
      "eval_logits/chosen": -2.3259360790252686,
      "eval_logits/rejected": -1.7466574907302856,
      "eval_logps/chosen": -205.6438751220703,
      "eval_logps/rejected": -160.8700408935547,
      "eval_loss": 0.6828210353851318,
      "eval_rewards/accuracies": 0.6666666865348816,
      "eval_rewards/chosen": 0.017044195905327797,
      "eval_rewards/margins": 0.019166946411132812,
      "eval_rewards/rejected": -0.0021227519027888775,
      "eval_runtime": 12.9552,
      "eval_samples_per_second": 1.312,
      "eval_steps_per_second": 0.695,
      "step": 3
    },
    {
      "epoch": 0.10457516339869281,
      "grad_norm": 11.230616569519043,
      "learning_rate": 6e-05,
      "logits/chosen": -2.7505903244018555,
      "logits/rejected": -1.348630428314209,
      "logps/chosen": -209.1761474609375,
      "logps/rejected": -190.92935180664062,
      "loss": 0.6807,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.005297848954796791,
      "rewards/margins": 0.027892589569091797,
      "rewards/rejected": -0.033190444111824036,
      "step": 4
    },
    {
      "epoch": 0.13071895424836602,
      "grad_norm": 9.93552017211914,
      "learning_rate": 8e-05,
      "logits/chosen": -1.8584319353103638,
      "logits/rejected": -1.2940995693206787,
      "logps/chosen": -245.94874572753906,
      "logps/rejected": -135.0790252685547,
      "loss": 0.6947,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.0011208057403564453,
      "rewards/margins": 0.0006671445444226265,
      "rewards/rejected": -0.0017879479564726353,
      "step": 5
    },
    {
      "epoch": 0.1568627450980392,
      "grad_norm": 11.156682014465332,
      "learning_rate": 0.0001,
      "logits/chosen": -2.436879873275757,
      "logits/rejected": -2.5771214962005615,
      "logps/chosen": -204.76026916503906,
      "logps/rejected": -262.71990966796875,
      "loss": 0.6572,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.015309764072299004,
      "rewards/margins": 0.07809243351221085,
      "rewards/rejected": -0.093402199447155,
      "step": 6
    },
    {
      "epoch": 0.1568627450980392,
      "eval_logits/chosen": -2.3328909873962402,
      "eval_logits/rejected": -1.7542129755020142,
      "eval_logps/chosen": -205.6370391845703,
      "eval_logps/rejected": -161.59091186523438,
      "eval_loss": 0.6492722034454346,
      "eval_rewards/accuracies": 0.6666666865348816,
      "eval_rewards/chosen": 0.017727533355355263,
      "eval_rewards/margins": 0.09193826466798782,
      "eval_rewards/rejected": -0.07421071827411652,
      "eval_runtime": 11.3826,
      "eval_samples_per_second": 1.494,
      "eval_steps_per_second": 0.791,
      "step": 6
    },
    {
      "epoch": 0.1830065359477124,
      "grad_norm": 9.182978630065918,
      "learning_rate": 0.00012,
      "logits/chosen": -3.110889434814453,
      "logits/rejected": -2.425046443939209,
      "logps/chosen": -183.1647186279297,
      "logps/rejected": -238.2554168701172,
      "loss": 0.6602,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.02616577222943306,
      "rewards/margins": 0.07209749519824982,
      "rewards/rejected": -0.04593172296881676,
      "step": 7
    },
    {
      "epoch": 0.20915032679738563,
      "grad_norm": 10.583196640014648,
      "learning_rate": 0.00014,
      "logits/chosen": -2.9872114658355713,
      "logits/rejected": -2.2816989421844482,
      "logps/chosen": -195.46536254882812,
      "logps/rejected": -200.18411254882812,
      "loss": 0.6859,
      "rewards/accuracies": 0.5,
      "rewards/chosen": -0.03734993934631348,
      "rewards/margins": 0.020749665796756744,
      "rewards/rejected": -0.05809960514307022,
      "step": 8
    },
    {
      "epoch": 0.23529411764705882,
      "grad_norm": 8.478883743286133,
      "learning_rate": 0.00016,
      "logits/chosen": -2.448885440826416,
      "logits/rejected": -1.7898311614990234,
      "logps/chosen": -121.42498779296875,
      "logps/rejected": -155.28976440429688,
      "loss": 0.6212,
      "rewards/accuracies": 0.625,
      "rewards/chosen": 0.11882638931274414,
      "rewards/margins": 0.1818961203098297,
      "rewards/rejected": -0.06306972354650497,
      "step": 9
    },
    {
      "epoch": 0.23529411764705882,
      "eval_logits/chosen": -2.3386030197143555,
      "eval_logits/rejected": -1.7669346332550049,
      "eval_logps/chosen": -205.10494995117188,
      "eval_logps/rejected": -162.58966064453125,
      "eval_loss": 0.5985996127128601,
      "eval_rewards/accuracies": 0.5555555820465088,
      "eval_rewards/chosen": 0.07093840092420578,
      "eval_rewards/margins": 0.245022714138031,
      "eval_rewards/rejected": -0.17408432066440582,
      "eval_runtime": 10.3489,
      "eval_samples_per_second": 1.643,
      "eval_steps_per_second": 0.87,
      "step": 9
    }
  ],
  "logging_steps": 1,
  "max_steps": 10,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 3,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}