{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.172413793103448,
"eval_steps": 500,
"global_step": 150,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.3448275862068966,
"grad_norm": 0.5030671954154968,
"learning_rate": 8.620689655172415e-07,
"logits/chosen": 1.8564815521240234,
"logits/rejected": 1.8255866765975952,
"logps/chosen": -95.54290008544922,
"logps/rejected": -79.79582214355469,
"loss": 0.693,
"rewards/accuracies": 0.4000000059604645,
"rewards/chosen": -0.0028573228046298027,
"rewards/margins": 0.0005112116923555732,
"rewards/rejected": -0.0033685355447232723,
"step": 10
},
{
"epoch": 0.6896551724137931,
"grad_norm": 0.45081979036331177,
"learning_rate": 1.724137931034483e-06,
"logits/chosen": 1.781947374343872,
"logits/rejected": 1.6911979913711548,
"logps/chosen": -104.20463562011719,
"logps/rejected": -78.91150665283203,
"loss": 0.6929,
"rewards/accuracies": 0.512499988079071,
"rewards/chosen": 0.00018558502779342234,
"rewards/margins": -0.002152198925614357,
"rewards/rejected": 0.0023377849720418453,
"step": 20
},
{
"epoch": 1.0344827586206897,
"grad_norm": 0.41456305980682373,
"learning_rate": 2.5862068965517246e-06,
"logits/chosen": 1.7937275171279907,
"logits/rejected": 1.729128122329712,
"logps/chosen": -90.21867370605469,
"logps/rejected": -71.69265747070312,
"loss": 0.6935,
"rewards/accuracies": 0.574999988079071,
"rewards/chosen": 0.003012509550899267,
"rewards/margins": 0.009945740923285484,
"rewards/rejected": -0.006933231838047504,
"step": 30
},
{
"epoch": 1.3793103448275863,
"grad_norm": 0.45128729939460754,
"learning_rate": 3.448275862068966e-06,
"logits/chosen": 1.864363670349121,
"logits/rejected": 1.9002879858016968,
"logps/chosen": -87.1126480102539,
"logps/rejected": -77.18392181396484,
"loss": 0.6923,
"rewards/accuracies": 0.5625,
"rewards/chosen": 0.0013037443859502673,
"rewards/margins": 0.006342612206935883,
"rewards/rejected": -0.005038867238909006,
"step": 40
},
{
"epoch": 1.7241379310344827,
"grad_norm": 0.4960842728614807,
"learning_rate": 4.310344827586207e-06,
"logits/chosen": 1.7825076580047607,
"logits/rejected": 1.829602837562561,
"logps/chosen": -78.63069915771484,
"logps/rejected": -91.36685180664062,
"loss": 0.6954,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.009792634285986423,
"rewards/margins": 0.014146638102829456,
"rewards/rejected": -0.0043540047481656075,
"step": 50
},
{
"epoch": 2.0689655172413794,
"grad_norm": 0.5404626727104187,
"learning_rate": 4.999818897894192e-06,
"logits/chosen": 1.8061244487762451,
"logits/rejected": 1.7855304479599,
"logps/chosen": -90.69072723388672,
"logps/rejected": -72.0459213256836,
"loss": 0.6938,
"rewards/accuracies": 0.5874999761581421,
"rewards/chosen": 0.002143201883882284,
"rewards/margins": 0.005239076912403107,
"rewards/rejected": -0.0030958750285208225,
"step": 60
},
{
"epoch": 2.413793103448276,
"grad_norm": 0.5091063380241394,
"learning_rate": 4.9934830787948756e-06,
"logits/chosen": 1.6158870458602905,
"logits/rejected": 1.696754813194275,
"logps/chosen": -76.28319549560547,
"logps/rejected": -77.81846618652344,
"loss": 0.6905,
"rewards/accuracies": 0.637499988079071,
"rewards/chosen": 1.5049334251671098e-05,
"rewards/margins": 0.007967600598931313,
"rewards/rejected": -0.00795255322009325,
"step": 70
},
{
"epoch": 2.7586206896551726,
"grad_norm": 0.6066665053367615,
"learning_rate": 4.978118375700895e-06,
"logits/chosen": 1.6109062433242798,
"logits/rejected": 1.7185981273651123,
"logps/chosen": -84.615966796875,
"logps/rejected": -96.0793228149414,
"loss": 0.6942,
"rewards/accuracies": 0.4749999940395355,
"rewards/chosen": -0.000627221364993602,
"rewards/margins": 0.0009560534963384271,
"rewards/rejected": -0.0015832759672775865,
"step": 80
},
{
"epoch": 3.103448275862069,
"grad_norm": 0.5341666340827942,
"learning_rate": 4.953780424089803e-06,
"logits/chosen": 1.8657306432724,
"logits/rejected": 1.8894052505493164,
"logps/chosen": -87.67496490478516,
"logps/rejected": -77.41777038574219,
"loss": 0.6919,
"rewards/accuracies": 0.550000011920929,
"rewards/chosen": -0.004135184455662966,
"rewards/margins": 0.011894735507667065,
"rewards/rejected": -0.016029920428991318,
"step": 90
},
{
"epoch": 3.4482758620689653,
"grad_norm": 0.5891664028167725,
"learning_rate": 4.920557351506409e-06,
"logits/chosen": 1.8143476247787476,
"logits/rejected": 1.8618648052215576,
"logps/chosen": -82.7096176147461,
"logps/rejected": -80.64532470703125,
"loss": 0.6905,
"rewards/accuracies": 0.4749999940395355,
"rewards/chosen": -0.014110831543803215,
"rewards/margins": -0.007867741398513317,
"rewards/rejected": -0.0062430910766124725,
"step": 100
},
{
"epoch": 3.793103448275862,
"grad_norm": 0.5773605704307556,
"learning_rate": 4.878569458453592e-06,
"logits/chosen": 1.7850589752197266,
"logits/rejected": 1.76922607421875,
"logps/chosen": -87.4909896850586,
"logps/rejected": -90.67459869384766,
"loss": 0.6899,
"rewards/accuracies": 0.5874999761581421,
"rewards/chosen": 0.0026252653915435076,
"rewards/margins": 0.014214910566806793,
"rewards/rejected": -0.011589646339416504,
"step": 110
},
{
"epoch": 4.137931034482759,
"grad_norm": 0.6582341194152832,
"learning_rate": 4.827968782785062e-06,
"logits/chosen": 1.7467533349990845,
"logits/rejected": 1.8858740329742432,
"logps/chosen": -69.12274169921875,
"logps/rejected": -94.86824035644531,
"loss": 0.6878,
"rewards/accuracies": 0.4749999940395355,
"rewards/chosen": -0.009279675781726837,
"rewards/margins": 0.0035783485509455204,
"rewards/rejected": -0.012858022935688496,
"step": 120
},
{
"epoch": 4.482758620689655,
"grad_norm": 0.7790700793266296,
"learning_rate": 4.7689385491773934e-06,
"logits/chosen": 1.8058643341064453,
"logits/rejected": 1.866713523864746,
"logps/chosen": -79.86953735351562,
"logps/rejected": -76.4103012084961,
"loss": 0.6857,
"rewards/accuracies": 0.625,
"rewards/chosen": 0.00016659722314216197,
"rewards/margins": 0.023889193311333656,
"rewards/rejected": -0.02372259460389614,
"step": 130
},
{
"epoch": 4.827586206896552,
"grad_norm": 0.7933465242385864,
"learning_rate": 4.70169250567482e-06,
"logits/chosen": 1.7705074548721313,
"logits/rejected": 1.7728191614151,
"logps/chosen": -86.46583557128906,
"logps/rejected": -75.83828735351562,
"loss": 0.6804,
"rewards/accuracies": 0.6625000238418579,
"rewards/chosen": -0.0001745700865285471,
"rewards/margins": 0.029326915740966797,
"rewards/rejected": -0.02950148656964302,
"step": 140
},
{
"epoch": 5.172413793103448,
"grad_norm": 0.7435988187789917,
"learning_rate": 4.626474149709127e-06,
"logits/chosen": 1.740312933921814,
"logits/rejected": 1.7526963949203491,
"logps/chosen": -100.93019104003906,
"logps/rejected": -90.39695739746094,
"loss": 0.6803,
"rewards/accuracies": 0.637499988079071,
"rewards/chosen": -0.013870477676391602,
"rewards/margins": 0.03614342585206032,
"rewards/rejected": -0.05001390725374222,
"step": 150
}
],
"logging_steps": 10,
"max_steps": 580,
"num_input_tokens_seen": 0,
"num_train_epochs": 20,
"save_steps": 50,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 5.0774627061438874e+17,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}