{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.39800995024875624,
"eval_steps": 500,
"global_step": 1000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03980099502487562,
"grad_norm": 9.524229049682617,
"learning_rate": 4.980474401576887e-07,
"logits/chosen": -0.1925463080406189,
"logits/rejected": -0.15873177349567413,
"logps/chosen": -239.6322021484375,
"logps/rejected": -242.57594299316406,
"loss": 0.8819,
"rewards/accuracies": 0.49281251430511475,
"rewards/chosen": -1.3072433471679688,
"rewards/margins": 0.008615786209702492,
"rewards/rejected": -1.315859079360962,
"step": 100
},
{
"epoch": 0.07960199004975124,
"grad_norm": 7.333236217498779,
"learning_rate": 4.922202605502572e-07,
"logits/chosen": 0.046361636370420456,
"logits/rejected": 0.08236894011497498,
"logps/chosen": -231.10765075683594,
"logps/rejected": -233.24984741210938,
"loss": 0.8361,
"rewards/accuracies": 0.48875001072883606,
"rewards/chosen": -0.9621695876121521,
"rewards/margins": -0.021255964413285255,
"rewards/rejected": -0.9409136772155762,
"step": 200
},
{
"epoch": 0.11940298507462686,
"grad_norm": 5.164106369018555,
"learning_rate": 4.82609484512869e-07,
"logits/chosen": 0.0250965878367424,
"logits/rejected": 0.054141998291015625,
"logps/chosen": -229.79965209960938,
"logps/rejected": -231.60562133789062,
"loss": 0.8114,
"rewards/accuracies": 0.48750001192092896,
"rewards/chosen": -0.8564431667327881,
"rewards/margins": -0.01034404058009386,
"rewards/rejected": -0.8460990786552429,
"step": 300
},
{
"epoch": 0.15920398009950248,
"grad_norm": 5.328795909881592,
"learning_rate": 4.6936523696827614e-07,
"logits/chosen": 0.039505548775196075,
"logits/rejected": 0.05482972040772438,
"logps/chosen": -233.69537353515625,
"logps/rejected": -236.25210571289062,
"loss": 0.7907,
"rewards/accuracies": 0.49406251311302185,
"rewards/chosen": -0.8005841970443726,
"rewards/margins": 0.00799934659153223,
"rewards/rejected": -0.8085834980010986,
"step": 400
},
{
"epoch": 0.19900497512437812,
"grad_norm": 8.521681785583496,
"learning_rate": 4.5269439940365644e-07,
"logits/chosen": 0.11151163280010223,
"logits/rejected": 0.1460810899734497,
"logps/chosen": -227.23020935058594,
"logps/rejected": -230.19451904296875,
"loss": 0.7849,
"rewards/accuracies": 0.4946874976158142,
"rewards/chosen": -0.7644023299217224,
"rewards/margins": -0.004693666007369757,
"rewards/rejected": -0.7597086429595947,
"step": 500
},
{
"epoch": 0.23880597014925373,
"grad_norm": 7.143113136291504,
"learning_rate": 4.328573782827409e-07,
"logits/chosen": 0.09673156589269638,
"logits/rejected": 0.14390873908996582,
"logps/chosen": -225.45919799804688,
"logps/rejected": -225.44052124023438,
"loss": 0.77,
"rewards/accuracies": 0.5045312643051147,
"rewards/chosen": -0.7256423830986023,
"rewards/margins": 0.004257932770997286,
"rewards/rejected": -0.7299003005027771,
"step": 600
},
{
"epoch": 0.27860696517412936,
"grad_norm": 6.525391101837158,
"learning_rate": 4.1016403737218373e-07,
"logits/chosen": 0.05486857891082764,
"logits/rejected": 0.09620587527751923,
"logps/chosen": -228.3573760986328,
"logps/rejected": -223.32395935058594,
"loss": 0.7696,
"rewards/accuracies": 0.5034375190734863,
"rewards/chosen": -0.7243590354919434,
"rewards/margins": 0.0008349200943484902,
"rewards/rejected": -0.7251940369606018,
"step": 700
},
{
"epoch": 0.31840796019900497,
"grad_norm": 8.071100234985352,
"learning_rate": 3.849688575211836e-07,
"logits/chosen": 0.004134657327085733,
"logits/rejected": 0.02180260606110096,
"logps/chosen": -234.44007873535156,
"logps/rejected": -232.92913818359375,
"loss": 0.7682,
"rewards/accuracies": 0.5073437690734863,
"rewards/chosen": -0.7022367119789124,
"rewards/margins": 0.0076367598958313465,
"rewards/rejected": -0.7098734974861145,
"step": 800
},
{
"epoch": 0.3582089552238806,
"grad_norm": 9.765380859375,
"learning_rate": 3.576653995009154e-07,
"logits/chosen": 0.018308693543076515,
"logits/rejected": 0.04190211370587349,
"logps/chosen": -224.4905242919922,
"logps/rejected": -228.26222229003906,
"loss": 0.7467,
"rewards/accuracies": 0.5228124856948853,
"rewards/chosen": -0.7309367656707764,
"rewards/margins": 0.04116936773061752,
"rewards/rejected": -0.7721061110496521,
"step": 900
},
{
"epoch": 0.39800995024875624,
"grad_norm": 6.442622184753418,
"learning_rate": 3.286801563968721e-07,
"logits/chosen": -0.018274417147040367,
"logits/rejected": -0.015018883161246777,
"logps/chosen": -230.45724487304688,
"logps/rejected": -229.37107849121094,
"loss": 0.7517,
"rewards/accuracies": 0.5123437643051147,
"rewards/chosen": -0.738405704498291,
"rewards/margins": 0.026958582922816277,
"rewards/rejected": -0.765364408493042,
"step": 1000
}
],
"logging_steps": 100,
"max_steps": 2512,
"num_input_tokens_seen": 0,
"num_train_epochs": 1,
"save_steps": 100,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}