{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.6736401673640167,
  "eval_steps": 500,
  "global_step": 50,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.33472803347280333,
      "grad_norm": 0.4676998555660248,
      "learning_rate": 8.620689655172415e-07,
      "logits/chosen": 1.7077701091766357,
      "logits/rejected": 1.8646482229232788,
      "logps/chosen": -85.7728271484375,
      "logps/rejected": -88.1952896118164,
      "loss": 0.6938,
      "rewards/accuracies": 0.32499998807907104,
      "rewards/chosen": -0.004770822823047638,
      "rewards/margins": -0.007881464436650276,
      "rewards/rejected": 0.0031106427777558565,
      "step": 10
    },
    {
      "epoch": 0.6694560669456067,
      "grad_norm": 0.43230223655700684,
      "learning_rate": 1.724137931034483e-06,
      "logits/chosen": 1.8023220300674438,
      "logits/rejected": 1.8210970163345337,
      "logps/chosen": -78.37618255615234,
      "logps/rejected": -75.44720458984375,
      "loss": 0.6931,
      "rewards/accuracies": 0.48750001192092896,
      "rewards/chosen": -0.0056939031928777695,
      "rewards/margins": -0.003848772030323744,
      "rewards/rejected": -0.001845130929723382,
      "step": 20
    },
    {
      "epoch": 1.00418410041841,
      "grad_norm": 2.431753396987915,
      "learning_rate": 2.5862068965517246e-06,
      "logits/chosen": 1.8643144369125366,
      "logits/rejected": 1.8500900268554688,
      "logps/chosen": -86.84412384033203,
      "logps/rejected": -90.78925323486328,
      "loss": 0.6946,
      "rewards/accuracies": 0.44999998807907104,
      "rewards/chosen": -0.0008674233104102314,
      "rewards/margins": -0.008064134046435356,
      "rewards/rejected": 0.0071967123076319695,
      "step": 30
    },
    {
      "epoch": 1.3389121338912133,
      "grad_norm": 0.5327289700508118,
      "learning_rate": 3.448275862068966e-06,
      "logits/chosen": 1.7701479196548462,
      "logits/rejected": 1.7749736309051514,
      "logps/chosen": -83.61552429199219,
      "logps/rejected": -72.89176177978516,
      "loss": 0.6939,
      "rewards/accuracies": 0.4625000059604645,
      "rewards/chosen": -0.0019066383829340339,
      "rewards/margins": -0.0035008196718990803,
      "rewards/rejected": 0.0015941811725497246,
      "step": 40
    },
    {
      "epoch": 1.6736401673640167,
      "grad_norm": 0.5017096996307373,
      "learning_rate": 4.310344827586207e-06,
      "logits/chosen": 1.7465788125991821,
      "logits/rejected": 1.7925498485565186,
      "logps/chosen": -84.85249328613281,
      "logps/rejected": -88.79469299316406,
      "loss": 0.6938,
      "rewards/accuracies": 0.550000011920929,
      "rewards/chosen": 0.0003656863118521869,
      "rewards/margins": 0.00012080222222721204,
      "rewards/rejected": 0.00024488387862220407,
      "step": 50
    }
  ],
  "logging_steps": 10,
  "max_steps": 580,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 20,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.714296510700585e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}