{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.002535004852157725,
"eval_steps": 500,
"global_step": 8,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.0003168756065197156,
"grad_norm": 6.832624876551563e-06,
"learning_rate": 2.9996830427892237e-06,
"loss": 0.0,
"loss/policy_avg": 2.0139850676059723e-08,
"objective/entropy": 56.029441833496094,
"objective/kl": 0.0,
"objective/rlhf_reward": -0.5591201782226562,
"objective/scores": -0.559173583984375,
"policy/approxkl_avg": 0.0,
"policy/clipfrac_avg": 0.0,
"policy/entropy_avg": 0.46982458233833313,
"step": 1,
"timer/training_step": 4.71628999710083,
"val/num_eos_tokens": 0.75,
"val/ratio": 1.0
},
{
"epoch": 0.0006337512130394313,
"grad_norm": 7.318521966226399e-06,
"learning_rate": 2.999366085578447e-06,
"loss": 0.0,
"loss/policy_avg": 1.0943040251731873e-08,
"objective/entropy": 49.566184997558594,
"objective/kl": 0.0,
"objective/rlhf_reward": -0.7360076904296875,
"objective/scores": -0.73590087890625,
"policy/approxkl_avg": 0.0,
"policy/clipfrac_avg": 0.0,
"policy/entropy_avg": 0.47101089358329773,
"step": 2,
"timer/training_step": 5.188745975494385,
"val/num_eos_tokens": 1.21875,
"val/ratio": 1.0
},
{
"epoch": 0.0009506268195591468,
"grad_norm": 6.249741545616416e-06,
"learning_rate": 2.9990491283676707e-06,
"loss": 0.0,
"loss/policy_avg": 8.149072527885437e-09,
"objective/entropy": 57.22004699707031,
"objective/kl": 0.0,
"objective/rlhf_reward": -0.70709228515625,
"objective/scores": -0.70703125,
"policy/approxkl_avg": 0.0,
"policy/clipfrac_avg": 0.0,
"policy/entropy_avg": 0.4886399507522583,
"step": 3,
"timer/training_step": 4.847959518432617,
"val/num_eos_tokens": 0.53125,
"val/ratio": 1.0
},
{
"epoch": 0.0012675024260788625,
"grad_norm": 7.052934051898774e-06,
"learning_rate": 2.998732171156894e-06,
"loss": 0.0,
"loss/policy_avg": 1.0710209608078003e-08,
"objective/entropy": 59.56321716308594,
"objective/kl": 0.0,
"objective/rlhf_reward": -0.716552734375,
"objective/scores": -0.71685791015625,
"policy/approxkl_avg": 0.0,
"policy/clipfrac_avg": 0.0,
"policy/entropy_avg": 0.4839285612106323,
"step": 4,
"timer/training_step": 5.577988624572754,
"val/num_eos_tokens": 0.4375,
"val/ratio": 1.0
},
{
"epoch": 0.001584378032598578,
"grad_norm": 8.835036169330124e-06,
"learning_rate": 2.9984152139461176e-06,
"loss": 0.0,
"loss/policy_avg": 2.3748725652694702e-08,
"objective/entropy": 45.10405731201172,
"objective/kl": 0.0,
"objective/rlhf_reward": -0.444000244140625,
"objective/scores": -0.44439697265625,
"policy/approxkl_avg": 0.0,
"policy/clipfrac_avg": 0.0,
"policy/entropy_avg": 0.4351224899291992,
"step": 5,
"timer/training_step": 5.305084705352783,
"val/num_eos_tokens": 1.0,
"val/ratio": 1.0
},
{
"epoch": 0.0019012536391182935,
"grad_norm": 6.025600214343285e-06,
"learning_rate": 2.998098256735341e-06,
"loss": 0.0,
"loss/policy_avg": 7.450580596923828e-09,
"objective/entropy": 60.55895233154297,
"objective/kl": 0.0,
"objective/rlhf_reward": -0.746917724609375,
"objective/scores": -0.7470703125,
"policy/approxkl_avg": 0.0,
"policy/clipfrac_avg": 0.0,
"policy/entropy_avg": 0.4965381920337677,
"step": 6,
"timer/training_step": 4.756265640258789,
"val/num_eos_tokens": 0.53125,
"val/ratio": 1.0
},
{
"epoch": 0.002218129245638009,
"grad_norm": 9.883336133498233e-06,
"learning_rate": 2.997781299524564e-06,
"loss": 0.0,
"loss/policy_avg": 1.6298145055770874e-08,
"objective/entropy": 56.12989807128906,
"objective/kl": 0.0,
"objective/rlhf_reward": -0.58819580078125,
"objective/scores": -0.58782958984375,
"policy/approxkl_avg": 0.0,
"policy/clipfrac_avg": 0.0,
"policy/entropy_avg": 0.4676559567451477,
"step": 7,
"timer/training_step": 5.001364707946777,
"val/num_eos_tokens": 0.65625,
"val/ratio": 1.0
},
{
"epoch": 0.002535004852157725,
"grad_norm": 3.0753049031773116e-06,
"learning_rate": 2.9974643423137877e-06,
"loss": 0.0,
"loss/policy_avg": 3.259629011154175e-09,
"objective/entropy": 52.772064208984375,
"objective/kl": 0.0,
"objective/rlhf_reward": -0.8326416015625,
"objective/scores": -0.83258056640625,
"policy/approxkl_avg": 0.0,
"policy/clipfrac_avg": 0.0,
"policy/entropy_avg": 0.4242881238460541,
"step": 8,
"timer/training_step": 5.542971134185791,
"val/num_eos_tokens": 0.40625,
"val/ratio": 1.0
}
],
"logging_steps": 1,
"max_steps": 9465,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 8,
"total_flos": 5677993863069696.0,
"train_batch_size": 2,
"trial_name": null,
"trial_params": null
}