{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9997519225998511,
  "eval_steps": 500,
  "global_step": 2015,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05011163483006698,
      "grad_norm": 4.580502986907959,
      "learning_rate": 5e-07,
      "logits/chosen": -0.5429307222366333,
      "logits/rejected": -0.4496941864490509,
      "logps/chosen": -68.28372192382812,
      "logps/rejected": -9.223593711853027,
      "loss": 0.6923,
      "rewards/accuracies": 0.48514851927757263,
      "rewards/chosen": 0.0015708295395597816,
      "rewards/margins": 0.0019196495413780212,
      "rewards/rejected": -0.00034882003092207015,
      "step": 101
    },
    {
      "epoch": 0.10022326966013397,
      "grad_norm": 5.995315074920654,
      "learning_rate": 1e-06,
      "logits/chosen": -0.5516948103904724,
      "logits/rejected": -0.4607711434364319,
      "logps/chosen": -68.89019012451172,
      "logps/rejected": -9.164521217346191,
      "loss": 0.6541,
      "rewards/accuracies": 0.8923267126083374,
      "rewards/chosen": 0.07136224955320358,
      "rewards/margins": 0.0827057808637619,
      "rewards/rejected": -0.011343526653945446,
      "step": 202
    },
    {
      "epoch": 0.15033490449020095,
      "grad_norm": 2.77810001373291,
      "learning_rate": 9.442912300055157e-07,
      "logits/chosen": -0.5540570616722107,
      "logits/rejected": -0.4689684510231018,
      "logps/chosen": -65.95732879638672,
      "logps/rejected": -12.217219352722168,
      "loss": 0.4367,
      "rewards/accuracies": 0.978960394859314,
      "rewards/chosen": 0.5759641528129578,
      "rewards/margins": 0.7263422012329102,
      "rewards/rejected": -0.15037815272808075,
      "step": 303
    },
    {
      "epoch": 0.20044653932026793,
      "grad_norm": 2.343784809112549,
      "learning_rate": 8.885824600110314e-07,
      "logits/chosen": -0.552952766418457,
      "logits/rejected": -0.49141836166381836,
      "logps/chosen": -57.347042083740234,
      "logps/rejected": -16.417877197265625,
      "loss": 0.2188,
      "rewards/accuracies": 0.978960394859314,
      "rewards/chosen": 1.19060218334198,
      "rewards/margins": 1.9093645811080933,
      "rewards/rejected": -0.7187623977661133,
      "step": 404
    },
    {
      "epoch": 0.2505581741503349,
      "grad_norm": 1.4373774528503418,
      "learning_rate": 8.328736900165472e-07,
      "logits/chosen": -0.5194080471992493,
      "logits/rejected": -0.4702938497066498,
      "logps/chosen": -52.69013977050781,
      "logps/rejected": -23.52432632446289,
      "loss": 0.1223,
      "rewards/accuracies": 0.9764851331710815,
      "rewards/chosen": 1.3888177871704102,
      "rewards/margins": 2.7484614849090576,
      "rewards/rejected": -1.3596433401107788,
      "step": 505
    },
    {
      "epoch": 0.3006698089804019,
      "grad_norm": 0.54764324426651,
      "learning_rate": 7.771649200220628e-07,
      "logits/chosen": -0.527655303478241,
      "logits/rejected": -0.47077974677085876,
      "logps/chosen": -53.11764907836914,
      "logps/rejected": -30.12430763244629,
      "loss": 0.0623,
      "rewards/accuracies": 0.9826732277870178,
      "rewards/chosen": 1.5675214529037476,
      "rewards/margins": 3.6883296966552734,
      "rewards/rejected": -2.1208081245422363,
      "step": 606
    },
    {
      "epoch": 0.3507814438104689,
      "grad_norm": 0.21296469867229462,
      "learning_rate": 7.214561500275785e-07,
      "logits/chosen": -0.5120936632156372,
      "logits/rejected": -0.44169196486473083,
      "logps/chosen": -54.65872573852539,
      "logps/rejected": -38.288719177246094,
      "loss": 0.0399,
      "rewards/accuracies": 0.983910858631134,
      "rewards/chosen": 1.726381778717041,
      "rewards/margins": 4.44348669052124,
      "rewards/rejected": -2.71710467338562,
      "step": 707
    },
    {
      "epoch": 0.40089307864053586,
      "grad_norm": 0.3419667184352875,
      "learning_rate": 6.657473800330943e-07,
      "logits/chosen": -0.48799896240234375,
      "logits/rejected": -0.3921656906604767,
      "logps/chosen": -52.6276969909668,
      "logps/rejected": -42.38192367553711,
      "loss": 0.0344,
      "rewards/accuracies": 0.9863861203193665,
      "rewards/chosen": 1.681420922279358,
      "rewards/margins": 4.920510768890381,
      "rewards/rejected": -3.2390897274017334,
      "step": 808
    },
    {
      "epoch": 0.45100471347060284,
      "grad_norm": 5.888083457946777,
      "learning_rate": 6.100386100386101e-07,
      "logits/chosen": -0.4662970006465912,
      "logits/rejected": -0.3673667013645172,
      "logps/chosen": -52.87725067138672,
      "logps/rejected": -45.869571685791016,
      "loss": 0.037,
      "rewards/accuracies": 0.9826732277870178,
      "rewards/chosen": 1.6799063682556152,
      "rewards/margins": 5.169793128967285,
      "rewards/rejected": -3.48988676071167,
      "step": 909
    },
    {
      "epoch": 0.5011163483006698,
      "grad_norm": 4.230249881744385,
      "learning_rate": 5.543298400441257e-07,
      "logits/chosen": -0.47577834129333496,
      "logits/rejected": -0.35569173097610474,
      "logps/chosen": -50.923885345458984,
      "logps/rejected": -44.373191833496094,
      "loss": 0.0331,
      "rewards/accuracies": 0.9876237511634827,
      "rewards/chosen": 1.6794066429138184,
      "rewards/margins": 5.304332256317139,
      "rewards/rejected": -3.6249256134033203,
      "step": 1010
    },
    {
      "epoch": 0.5512279831307368,
      "grad_norm": 0.3410126268863678,
      "learning_rate": 4.986210700496414e-07,
      "logits/chosen": -0.45471659302711487,
      "logits/rejected": -0.3273502290248871,
      "logps/chosen": -55.552947998046875,
      "logps/rejected": -49.69540786743164,
      "loss": 0.025,
      "rewards/accuracies": 0.9876237511634827,
      "rewards/chosen": 1.725969910621643,
      "rewards/margins": 5.597514629364014,
      "rewards/rejected": -3.8715438842773438,
      "step": 1111
    },
    {
      "epoch": 0.6013396179608038,
      "grad_norm": 3.1412646770477295,
      "learning_rate": 4.4291230005515716e-07,
      "logits/chosen": -0.46453720331192017,
      "logits/rejected": -0.3384020924568176,
      "logps/chosen": -50.724090576171875,
      "logps/rejected": -50.1302490234375,
      "loss": 0.0212,
      "rewards/accuracies": 0.9863861203193665,
      "rewards/chosen": 1.6714025735855103,
      "rewards/margins": 5.69786262512207,
      "rewards/rejected": -4.026459217071533,
      "step": 1212
    },
    {
      "epoch": 0.6514512527908708,
      "grad_norm": 0.11911242455244064,
      "learning_rate": 3.8720353006067294e-07,
      "logits/chosen": -0.45919135212898254,
      "logits/rejected": -0.32431861758232117,
      "logps/chosen": -50.14983367919922,
      "logps/rejected": -50.361228942871094,
      "loss": 0.024,
      "rewards/accuracies": 0.9888613820075989,
      "rewards/chosen": 1.6939829587936401,
      "rewards/margins": 5.85902738571167,
      "rewards/rejected": -4.165044784545898,
      "step": 1313
    },
    {
      "epoch": 0.7015628876209378,
      "grad_norm": 0.08934925496578217,
      "learning_rate": 3.3149476006618866e-07,
      "logits/chosen": -0.45346295833587646,
      "logits/rejected": -0.30696311593055725,
      "logps/chosen": -53.92500305175781,
      "logps/rejected": -52.11055374145508,
      "loss": 0.025,
      "rewards/accuracies": 0.983910858631134,
      "rewards/chosen": 1.760947823524475,
      "rewards/margins": 5.941510200500488,
      "rewards/rejected": -4.180562496185303,
      "step": 1414
    },
    {
      "epoch": 0.7516745224510047,
      "grad_norm": 0.08924766629934311,
      "learning_rate": 2.757859900717043e-07,
      "logits/chosen": -0.4205220341682434,
      "logits/rejected": -0.27568432688713074,
      "logps/chosen": -53.17680358886719,
      "logps/rejected": -51.90163040161133,
      "loss": 0.024,
      "rewards/accuracies": 0.9900990128517151,
      "rewards/chosen": 1.7735264301300049,
      "rewards/margins": 5.996668815612793,
      "rewards/rejected": -4.223142147064209,
      "step": 1515
    },
    {
      "epoch": 0.8017861572810717,
      "grad_norm": 4.9424543380737305,
      "learning_rate": 2.2007722007722007e-07,
      "logits/chosen": -0.4162166714668274,
      "logits/rejected": -0.2717900276184082,
      "logps/chosen": -50.12358856201172,
      "logps/rejected": -52.617244720458984,
      "loss": 0.0325,
      "rewards/accuracies": 0.9851484894752502,
      "rewards/chosen": 1.7092137336730957,
      "rewards/margins": 6.046271800994873,
      "rewards/rejected": -4.337057590484619,
      "step": 1616
    },
    {
      "epoch": 0.8518977921111387,
      "grad_norm": 0.1587940901517868,
      "learning_rate": 1.643684500827358e-07,
      "logits/chosen": -0.40968555212020874,
      "logits/rejected": -0.2546483874320984,
      "logps/chosen": -53.707828521728516,
      "logps/rejected": -52.081024169921875,
      "loss": 0.0266,
      "rewards/accuracies": 0.9888613820075989,
      "rewards/chosen": 1.7978192567825317,
      "rewards/margins": 6.121737003326416,
      "rewards/rejected": -4.323917865753174,
      "step": 1717
    },
    {
      "epoch": 0.9020094269412057,
      "grad_norm": 0.13928450644016266,
      "learning_rate": 1.086596800882515e-07,
      "logits/chosen": -0.4125988185405731,
      "logits/rejected": -0.2660834491252899,
      "logps/chosen": -49.55975341796875,
      "logps/rejected": -53.512306213378906,
      "loss": 0.0283,
      "rewards/accuracies": 0.9876237511634827,
      "rewards/chosen": 1.7114804983139038,
      "rewards/margins": 6.104257583618164,
      "rewards/rejected": -4.392776966094971,
      "step": 1818
    },
    {
      "epoch": 0.9521210617712726,
      "grad_norm": 0.08584673702716827,
      "learning_rate": 5.295091009376723e-08,
      "logits/chosen": -0.44209304451942444,
      "logits/rejected": -0.28407391905784607,
      "logps/chosen": -53.01245880126953,
      "logps/rejected": -55.516937255859375,
      "loss": 0.0178,
      "rewards/accuracies": 0.9925742149353027,
      "rewards/chosen": 1.8011056184768677,
      "rewards/margins": 6.30369758605957,
      "rewards/rejected": -4.502592086791992,
      "step": 1919
    },
    {
      "epoch": 0.9997519225998511,
      "step": 2015,
      "total_flos": 1.1431058439017595e+18,
      "train_loss": 0.12960598971648488,
      "train_runtime": 28770.7393,
      "train_samples_per_second": 0.56,
      "train_steps_per_second": 0.07
    }
  ],
  "logging_steps": 101,
  "max_steps": 2015,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.1431058439017595e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}