{
  "best_metric": 0.2021823674440384,
  "best_model_checkpoint": "./checkpoints/ultrafeedback_binarized/phi-2-ultrafeedback_binarized-lambda0.25-ORPO-17-4-15/checkpoint-2994",
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 2994,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.016700066800267203,
      "grad_norm": 36.0,
      "learning_rate": 2.0000000000000002e-07,
      "loss": 0.5754,
      "step": 50
    },
    {
      "epoch": 0.033400133600534405,
      "grad_norm": 9.0,
      "learning_rate": 4.0000000000000003e-07,
      "loss": 0.3356,
      "step": 100
    },
    {
      "epoch": 0.050100200400801605,
      "grad_norm": 8.1875,
      "learning_rate": 6.000000000000001e-07,
      "loss": 0.2953,
      "step": 150
    },
    {
      "epoch": 0.06680026720106881,
      "grad_norm": 9.8125,
      "learning_rate": 8.000000000000001e-07,
      "loss": 0.2954,
      "step": 200
    },
    {
      "epoch": 0.08350033400133601,
      "grad_norm": 66.5,
      "learning_rate": 1.0000000000000002e-06,
      "loss": 0.2946,
      "step": 250
    },
    {
      "epoch": 0.10020040080160321,
      "grad_norm": 15.5625,
      "learning_rate": 1.2000000000000002e-06,
      "loss": 0.2982,
      "step": 300
    },
    {
      "epoch": 0.11690046760187041,
      "grad_norm": 9.0,
      "learning_rate": 1.4000000000000001e-06,
      "loss": 0.2912,
      "step": 350
    },
    {
      "epoch": 0.13360053440213762,
      "grad_norm": 18.125,
      "learning_rate": 1.6000000000000001e-06,
      "loss": 0.2994,
      "step": 400
    },
    {
      "epoch": 0.15030060120240482,
      "grad_norm": 14.75,
      "learning_rate": 1.8000000000000001e-06,
      "loss": 0.2869,
      "step": 450
    },
    {
      "epoch": 0.16700066800267202,
      "grad_norm": 7.4375,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 0.2779,
      "step": 500
    },
    {
      "epoch": 0.18370073480293922,
      "grad_norm": 6.3125,
      "learning_rate": 2.2e-06,
      "loss": 0.2781,
      "step": 550
    },
    {
      "epoch": 0.20040080160320642,
      "grad_norm": 10.625,
      "learning_rate": 2.4000000000000003e-06,
      "loss": 0.2836,
      "step": 600
    },
    {
      "epoch": 0.21710086840347362,
      "grad_norm": 18.25,
      "learning_rate": 2.6e-06,
      "loss": 0.2825,
      "step": 650
    },
    {
      "epoch": 0.23380093520374082,
      "grad_norm": 9.875,
      "learning_rate": 2.8000000000000003e-06,
      "loss": 0.2932,
      "step": 700
    },
    {
      "epoch": 0.250501002004008,
      "grad_norm": 20.125,
      "learning_rate": 3e-06,
      "loss": 0.2681,
      "step": 750
    },
    {
      "epoch": 0.26720106880427524,
      "grad_norm": 9.8125,
      "learning_rate": 3.2000000000000003e-06,
      "loss": 0.2639,
      "step": 800
    },
    {
      "epoch": 0.28390113560454244,
      "grad_norm": 6.65625,
      "learning_rate": 3.4000000000000005e-06,
      "loss": 0.2706,
      "step": 850
    },
    {
      "epoch": 0.30060120240480964,
      "grad_norm": 15.875,
      "learning_rate": 3.6000000000000003e-06,
      "loss": 0.2742,
      "step": 900
    },
    {
      "epoch": 0.31730126920507684,
      "grad_norm": 8.375,
      "learning_rate": 3.8000000000000005e-06,
      "loss": 0.2729,
      "step": 950
    },
    {
      "epoch": 0.33400133600534404,
      "grad_norm": 6.65625,
      "learning_rate": 4.000000000000001e-06,
      "loss": 0.2595,
      "step": 1000
    },
    {
      "epoch": 0.35070140280561124,
      "grad_norm": 7.3125,
      "learning_rate": 4.2000000000000004e-06,
      "loss": 0.2596,
      "step": 1050
    },
    {
      "epoch": 0.36740146960587844,
      "grad_norm": 7.6875,
      "learning_rate": 4.4e-06,
      "loss": 0.2615,
      "step": 1100
    },
    {
      "epoch": 0.38410153640614564,
      "grad_norm": 12.125,
      "learning_rate": 4.600000000000001e-06,
      "loss": 0.2614,
      "step": 1150
    },
    {
      "epoch": 0.40080160320641284,
      "grad_norm": 20.375,
      "learning_rate": 4.800000000000001e-06,
      "loss": 0.2537,
      "step": 1200
    },
    {
      "epoch": 0.41750167000668004,
      "grad_norm": 8.75,
      "learning_rate": 5e-06,
      "loss": 0.2646,
      "step": 1250
    },
    {
      "epoch": 0.43420173680694724,
      "grad_norm": 14.9375,
      "learning_rate": 5.2e-06,
      "loss": 0.2609,
      "step": 1300
    },
    {
      "epoch": 0.45090180360721444,
      "grad_norm": 24.375,
      "learning_rate": 5.400000000000001e-06,
      "loss": 0.2541,
      "step": 1350
    },
    {
      "epoch": 0.46760187040748163,
      "grad_norm": 30.25,
      "learning_rate": 5.600000000000001e-06,
      "loss": 0.2584,
      "step": 1400
    },
    {
      "epoch": 0.48430193720774883,
      "grad_norm": 12.1875,
      "learning_rate": 5.8e-06,
      "loss": 0.2572,
      "step": 1450
    },
    {
      "epoch": 0.501002004008016,
      "grad_norm": 9.0625,
      "learning_rate": 6e-06,
      "loss": 0.2511,
      "step": 1500
    },
    {
      "epoch": 0.5177020708082832,
      "grad_norm": 26.25,
      "learning_rate": 6.200000000000001e-06,
      "loss": 0.2595,
      "step": 1550
    },
    {
      "epoch": 0.5344021376085505,
      "grad_norm": 12.1875,
      "learning_rate": 6.4000000000000006e-06,
      "loss": 0.2555,
      "step": 1600
    },
    {
      "epoch": 0.5511022044088176,
      "grad_norm": 14.375,
      "learning_rate": 6.600000000000001e-06,
      "loss": 0.2283,
      "step": 1650
    },
    {
      "epoch": 0.5678022712090849,
      "grad_norm": 15.6875,
      "learning_rate": 6.800000000000001e-06,
      "loss": 0.2375,
      "step": 1700
    },
    {
      "epoch": 0.584502338009352,
      "grad_norm": 7.3125,
      "learning_rate": 7e-06,
      "loss": 0.2378,
      "step": 1750
    },
    {
      "epoch": 0.6012024048096193,
      "grad_norm": 8.25,
      "learning_rate": 7.2000000000000005e-06,
      "loss": 0.231,
      "step": 1800
    },
    {
      "epoch": 0.6179024716098864,
      "grad_norm": 9.4375,
      "learning_rate": 7.4e-06,
      "loss": 0.2211,
      "step": 1850
    },
    {
      "epoch": 0.6346025384101537,
      "grad_norm": 10.1875,
      "learning_rate": 7.600000000000001e-06,
      "loss": 0.2427,
      "step": 1900
    },
    {
      "epoch": 0.6513026052104208,
      "grad_norm": 21.375,
      "learning_rate": 7.800000000000002e-06,
      "loss": 0.23,
      "step": 1950
    },
    {
      "epoch": 0.6680026720106881,
      "grad_norm": 6.0,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.2312,
      "step": 2000
    },
    {
      "epoch": 0.6847027388109552,
      "grad_norm": 12.1875,
      "learning_rate": 8.2e-06,
      "loss": 0.2334,
      "step": 2050
    },
    {
      "epoch": 0.7014028056112225,
      "grad_norm": 13.875,
      "learning_rate": 8.400000000000001e-06,
      "loss": 0.2316,
      "step": 2100
    },
    {
      "epoch": 0.7181028724114896,
      "grad_norm": 15.1875,
      "learning_rate": 8.6e-06,
      "loss": 0.2337,
      "step": 2150
    },
    {
      "epoch": 0.7348029392117569,
      "grad_norm": 9.0,
      "learning_rate": 8.8e-06,
      "loss": 0.2315,
      "step": 2200
    },
    {
      "epoch": 0.751503006012024,
      "grad_norm": 10.9375,
      "learning_rate": 9e-06,
      "loss": 0.2182,
      "step": 2250
    },
    {
      "epoch": 0.7682030728122913,
      "grad_norm": 10.4375,
      "learning_rate": 9.200000000000002e-06,
      "loss": 0.2326,
      "step": 2300
    },
    {
      "epoch": 0.7849031396125584,
      "grad_norm": 37.0,
      "learning_rate": 9.4e-06,
      "loss": 0.2186,
      "step": 2350
    },
    {
      "epoch": 0.8016032064128257,
      "grad_norm": 8.5,
      "learning_rate": 9.600000000000001e-06,
      "loss": 0.2195,
      "step": 2400
    },
    {
      "epoch": 0.8183032732130928,
      "grad_norm": 33.0,
      "learning_rate": 9.800000000000001e-06,
      "loss": 0.2154,
      "step": 2450
    },
    {
      "epoch": 0.8350033400133601,
      "grad_norm": 8.375,
      "learning_rate": 1e-05,
      "loss": 0.2301,
      "step": 2500
    },
    {
      "epoch": 0.8517034068136272,
      "grad_norm": 11.625,
      "learning_rate": 1.02e-05,
      "loss": 0.235,
      "step": 2550
    },
    {
      "epoch": 0.8684034736138945,
      "grad_norm": 5.25,
      "learning_rate": 1.04e-05,
      "loss": 0.2086,
      "step": 2600
    },
    {
      "epoch": 0.8851035404141616,
      "grad_norm": 10.0,
      "learning_rate": 1.0600000000000002e-05,
      "loss": 0.2183,
      "step": 2650
    },
    {
      "epoch": 0.9018036072144289,
      "grad_norm": 5.25,
      "learning_rate": 1.0800000000000002e-05,
      "loss": 0.2047,
      "step": 2700
    },
    {
      "epoch": 0.918503674014696,
      "grad_norm": 44.5,
      "learning_rate": 1.1000000000000001e-05,
      "loss": 0.206,
      "step": 2750
    },
    {
      "epoch": 0.9352037408149633,
      "grad_norm": 19.5,
      "learning_rate": 1.1200000000000001e-05,
      "loss": 0.2095,
      "step": 2800
    },
    {
      "epoch": 0.9519038076152304,
      "grad_norm": 9.5625,
      "learning_rate": 1.14e-05,
      "loss": 0.2039,
      "step": 2850
    },
    {
      "epoch": 0.9686038744154977,
      "grad_norm": 11.375,
      "learning_rate": 1.16e-05,
      "loss": 0.2096,
      "step": 2900
    },
    {
      "epoch": 0.9853039412157648,
      "grad_norm": 5.09375,
      "learning_rate": 1.18e-05,
      "loss": 0.2087,
      "step": 2950
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.2021823674440384,
      "eval_runtime": 181.6869,
      "eval_samples_per_second": 8.872,
      "eval_steps_per_second": 0.556,
      "step": 2994
    }
  ],
  "logging_steps": 50,
  "max_steps": 5988,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 7.795126539858739e+17,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}