{
  "best_global_step": 229,
  "best_metric": 0.22333618998527527,
  "best_model_checkpoint": "./lora_qwen7b_python_abdiff_v1/checkpoint-229",
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 687,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.043835616438356165,
      "grad_norm": 0.210234135389328,
      "learning_rate": 3.913043478260869e-06,
      "loss": 0.6169,
      "step": 10
    },
    {
      "epoch": 0.08767123287671233,
      "grad_norm": 0.25168153643608093,
      "learning_rate": 8.260869565217392e-06,
      "loss": 0.6133,
      "step": 20
    },
    {
      "epoch": 0.13150684931506848,
      "grad_norm": 0.19567625224590302,
      "learning_rate": 1.2608695652173912e-05,
      "loss": 0.5688,
      "step": 30
    },
    {
      "epoch": 0.17534246575342466,
      "grad_norm": 0.30335545539855957,
      "learning_rate": 1.6956521739130433e-05,
      "loss": 0.5225,
      "step": 40
    },
    {
      "epoch": 0.2191780821917808,
      "grad_norm": 0.24670398235321045,
      "learning_rate": 2.1304347826086958e-05,
      "loss": 0.3971,
      "step": 50
    },
    {
      "epoch": 0.26301369863013696,
      "grad_norm": 0.12593719363212585,
      "learning_rate": 2.565217391304348e-05,
      "loss": 0.2952,
      "step": 60
    },
    {
      "epoch": 0.30684931506849317,
      "grad_norm": 0.11282460391521454,
      "learning_rate": 3e-05,
      "loss": 0.2592,
      "step": 70
    },
    {
      "epoch": 0.3506849315068493,
      "grad_norm": 0.10163664072751999,
      "learning_rate": 2.9514563106796115e-05,
      "loss": 0.2402,
      "step": 80
    },
    {
      "epoch": 0.39452054794520547,
      "grad_norm": 0.09871397167444229,
      "learning_rate": 2.9029126213592237e-05,
      "loss": 0.2251,
      "step": 90
    },
    {
      "epoch": 0.4383561643835616,
      "grad_norm": 0.09532774984836578,
      "learning_rate": 2.854368932038835e-05,
      "loss": 0.2132,
      "step": 100
    },
    {
      "epoch": 0.4821917808219178,
      "grad_norm": 0.08755984157323837,
      "learning_rate": 2.8058252427184466e-05,
      "loss": 0.2086,
      "step": 110
    },
    {
      "epoch": 0.5260273972602739,
      "grad_norm": 0.09590521454811096,
      "learning_rate": 2.757281553398058e-05,
      "loss": 0.198,
      "step": 120
    },
    {
      "epoch": 0.5698630136986301,
      "grad_norm": 0.09363297373056412,
      "learning_rate": 2.7087378640776702e-05,
      "loss": 0.1954,
      "step": 130
    },
    {
      "epoch": 0.6136986301369863,
      "grad_norm": 0.09872984886169434,
      "learning_rate": 2.6601941747572816e-05,
      "loss": 0.1902,
      "step": 140
    },
    {
      "epoch": 0.6575342465753424,
      "grad_norm": 0.12371695041656494,
      "learning_rate": 2.611650485436893e-05,
      "loss": 0.1892,
      "step": 150
    },
    {
      "epoch": 0.7013698630136986,
      "grad_norm": 0.10814935714006424,
      "learning_rate": 2.563106796116505e-05,
      "loss": 0.1808,
      "step": 160
    },
    {
      "epoch": 0.7452054794520548,
      "grad_norm": 0.12739220261573792,
      "learning_rate": 2.5145631067961167e-05,
      "loss": 0.1815,
      "step": 170
    },
    {
      "epoch": 0.7890410958904109,
      "grad_norm": 0.1482655555009842,
      "learning_rate": 2.466019417475728e-05,
      "loss": 0.1652,
      "step": 180
    },
    {
      "epoch": 0.8328767123287671,
      "grad_norm": 0.13323327898979187,
      "learning_rate": 2.41747572815534e-05,
      "loss": 0.1663,
      "step": 190
    },
    {
      "epoch": 0.8767123287671232,
      "grad_norm": 0.1497439742088318,
      "learning_rate": 2.3689320388349514e-05,
      "loss": 0.1693,
      "step": 200
    },
    {
      "epoch": 0.9205479452054794,
      "grad_norm": 0.16727504134178162,
      "learning_rate": 2.3203883495145632e-05,
      "loss": 0.1588,
      "step": 210
    },
    {
      "epoch": 0.9643835616438357,
      "grad_norm": 0.159692645072937,
      "learning_rate": 2.2718446601941746e-05,
      "loss": 0.1489,
      "step": 220
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.22333618998527527,
      "eval_runtime": 65.6094,
      "eval_samples_per_second": 18.808,
      "eval_steps_per_second": 9.404,
      "step": 229
    },
    {
      "epoch": 1.0043835616438357,
      "grad_norm": 0.18387436866760254,
      "learning_rate": 2.2233009708737864e-05,
      "loss": 0.1542,
      "step": 230
    },
    {
      "epoch": 1.0482191780821917,
      "grad_norm": 0.17115731537342072,
      "learning_rate": 2.1747572815533982e-05,
      "loss": 0.1419,
      "step": 240
    },
    {
      "epoch": 1.0920547945205479,
      "grad_norm": 0.20462283492088318,
      "learning_rate": 2.1262135922330097e-05,
      "loss": 0.1291,
      "step": 250
    },
    {
      "epoch": 1.135890410958904,
      "grad_norm": 0.21275432407855988,
      "learning_rate": 2.0776699029126215e-05,
      "loss": 0.1345,
      "step": 260
    },
    {
      "epoch": 1.1797260273972603,
      "grad_norm": 0.19069133698940277,
      "learning_rate": 2.029126213592233e-05,
      "loss": 0.1304,
      "step": 270
    },
    {
      "epoch": 1.2235616438356165,
      "grad_norm": 0.23898746073246002,
      "learning_rate": 1.9805825242718447e-05,
      "loss": 0.1286,
      "step": 280
    },
    {
      "epoch": 1.2673972602739725,
      "grad_norm": 0.22382740676403046,
      "learning_rate": 1.9320388349514565e-05,
      "loss": 0.1257,
      "step": 290
    },
    {
      "epoch": 1.3112328767123287,
      "grad_norm": 0.21581463515758514,
      "learning_rate": 1.883495145631068e-05,
      "loss": 0.1075,
      "step": 300
    },
    {
      "epoch": 1.355068493150685,
      "grad_norm": 0.20519264042377472,
      "learning_rate": 1.8349514563106795e-05,
      "loss": 0.114,
      "step": 310
    },
    {
      "epoch": 1.398904109589041,
      "grad_norm": 0.24191145598888397,
      "learning_rate": 1.7864077669902913e-05,
      "loss": 0.1084,
      "step": 320
    },
    {
      "epoch": 1.4427397260273973,
      "grad_norm": 0.2942422032356262,
      "learning_rate": 1.737864077669903e-05,
      "loss": 0.1169,
      "step": 330
    },
    {
      "epoch": 1.4865753424657533,
      "grad_norm": 0.29472362995147705,
      "learning_rate": 1.6893203883495145e-05,
      "loss": 0.1048,
      "step": 340
    },
    {
      "epoch": 1.5304109589041097,
      "grad_norm": 0.27294427156448364,
      "learning_rate": 1.6407766990291263e-05,
      "loss": 0.104,
      "step": 350
    },
    {
      "epoch": 1.5742465753424657,
      "grad_norm": 0.3031008243560791,
      "learning_rate": 1.592233009708738e-05,
      "loss": 0.1064,
      "step": 360
    },
    {
      "epoch": 1.618082191780822,
      "grad_norm": 0.3000255227088928,
      "learning_rate": 1.5436893203883496e-05,
      "loss": 0.0965,
      "step": 370
    },
    {
      "epoch": 1.6619178082191781,
      "grad_norm": 0.3417374789714813,
      "learning_rate": 1.4951456310679612e-05,
      "loss": 0.103,
      "step": 380
    },
    {
      "epoch": 1.705753424657534,
      "grad_norm": 0.260896235704422,
      "learning_rate": 1.4466019417475728e-05,
      "loss": 0.0949,
      "step": 390
    },
    {
      "epoch": 1.7495890410958905,
      "grad_norm": 0.3573839068412781,
      "learning_rate": 1.3980582524271846e-05,
      "loss": 0.1005,
      "step": 400
    },
    {
      "epoch": 1.7934246575342465,
      "grad_norm": 0.32356780767440796,
      "learning_rate": 1.349514563106796e-05,
      "loss": 0.0924,
      "step": 410
    },
    {
      "epoch": 1.8372602739726027,
      "grad_norm": 0.3374144434928894,
      "learning_rate": 1.3009708737864079e-05,
      "loss": 0.0962,
      "step": 420
    },
    {
      "epoch": 1.881095890410959,
      "grad_norm": 0.30275803804397583,
      "learning_rate": 1.2524271844660193e-05,
      "loss": 0.098,
      "step": 430
    },
    {
      "epoch": 1.924931506849315,
      "grad_norm": 0.2706526815891266,
      "learning_rate": 1.2038834951456311e-05,
      "loss": 0.093,
      "step": 440
    },
    {
      "epoch": 1.9687671232876713,
      "grad_norm": 0.30319514870643616,
      "learning_rate": 1.1553398058252427e-05,
      "loss": 0.0932,
      "step": 450
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.23950284719467163,
      "eval_runtime": 65.404,
      "eval_samples_per_second": 18.867,
      "eval_steps_per_second": 9.434,
      "step": 458
    },
    {
      "epoch": 2.0087671232876714,
      "grad_norm": 0.3190622627735138,
      "learning_rate": 1.1067961165048544e-05,
      "loss": 0.0796,
      "step": 460
    },
    {
      "epoch": 2.0526027397260274,
      "grad_norm": 0.20361511409282684,
      "learning_rate": 1.058252427184466e-05,
      "loss": 0.0852,
      "step": 470
    },
    {
      "epoch": 2.0964383561643833,
      "grad_norm": 0.29404351115226746,
      "learning_rate": 1.0097087378640776e-05,
      "loss": 0.0745,
      "step": 480
    },
    {
      "epoch": 2.1402739726027398,
      "grad_norm": 0.2433903068304062,
      "learning_rate": 9.611650485436894e-06,
      "loss": 0.0793,
      "step": 490
    },
    {
      "epoch": 2.1841095890410958,
      "grad_norm": 0.31765592098236084,
      "learning_rate": 9.12621359223301e-06,
      "loss": 0.086,
      "step": 500
    },
    {
      "epoch": 2.227945205479452,
      "grad_norm": 0.28951069712638855,
      "learning_rate": 8.640776699029127e-06,
      "loss": 0.0814,
      "step": 510
    },
    {
      "epoch": 2.271780821917808,
      "grad_norm": 0.3745494782924652,
      "learning_rate": 8.155339805825243e-06,
      "loss": 0.0856,
      "step": 520
    },
    {
      "epoch": 2.315616438356164,
      "grad_norm": 0.2525760531425476,
      "learning_rate": 7.66990291262136e-06,
      "loss": 0.078,
      "step": 530
    },
    {
      "epoch": 2.3594520547945206,
      "grad_norm": 0.2950601279735565,
      "learning_rate": 7.1844660194174755e-06,
      "loss": 0.0758,
      "step": 540
    },
    {
      "epoch": 2.4032876712328766,
      "grad_norm": 0.23691079020500183,
      "learning_rate": 6.699029126213593e-06,
      "loss": 0.0706,
      "step": 550
    },
    {
      "epoch": 2.447123287671233,
      "grad_norm": 0.2603223919868469,
      "learning_rate": 6.213592233009709e-06,
      "loss": 0.0711,
      "step": 560
    },
    {
      "epoch": 2.490958904109589,
      "grad_norm": 0.2159614861011505,
      "learning_rate": 5.728155339805826e-06,
      "loss": 0.075,
      "step": 570
    },
    {
      "epoch": 2.534794520547945,
      "grad_norm": 0.23896284401416779,
      "learning_rate": 5.242718446601942e-06,
      "loss": 0.0824,
      "step": 580
    },
    {
      "epoch": 2.5786301369863014,
      "grad_norm": 0.2901216149330139,
      "learning_rate": 4.7572815533980585e-06,
      "loss": 0.0903,
      "step": 590
    },
    {
      "epoch": 2.6224657534246574,
      "grad_norm": 0.29583826661109924,
      "learning_rate": 4.271844660194175e-06,
      "loss": 0.0796,
      "step": 600
    },
    {
      "epoch": 2.666301369863014,
      "grad_norm": 0.3026266098022461,
      "learning_rate": 3.7864077669902915e-06,
      "loss": 0.0733,
      "step": 610
    },
    {
      "epoch": 2.71013698630137,
      "grad_norm": 0.2562008202075958,
      "learning_rate": 3.3009708737864078e-06,
      "loss": 0.0797,
      "step": 620
    },
    {
      "epoch": 2.7539726027397258,
      "grad_norm": 0.30706337094306946,
      "learning_rate": 2.815533980582524e-06,
      "loss": 0.0755,
      "step": 630
    },
    {
      "epoch": 2.797808219178082,
      "grad_norm": 0.32250040769577026,
      "learning_rate": 2.3300970873786407e-06,
      "loss": 0.0717,
      "step": 640
    },
    {
      "epoch": 2.8416438356164386,
      "grad_norm": 0.301578164100647,
      "learning_rate": 1.8446601941747572e-06,
      "loss": 0.0663,
      "step": 650
    },
    {
      "epoch": 2.8854794520547946,
      "grad_norm": 0.26296401023864746,
      "learning_rate": 1.359223300970874e-06,
      "loss": 0.0817,
      "step": 660
    },
    {
      "epoch": 2.9293150684931506,
      "grad_norm": 0.32804426550865173,
      "learning_rate": 8.737864077669904e-07,
      "loss": 0.0778,
      "step": 670
    },
    {
      "epoch": 2.9731506849315066,
      "grad_norm": 0.3096996247768402,
      "learning_rate": 3.883495145631068e-07,
      "loss": 0.0776,
      "step": 680
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.2448684424161911,
      "eval_runtime": 65.3239,
      "eval_samples_per_second": 18.89,
      "eval_steps_per_second": 9.445,
      "step": 687
    }
  ],
  "logging_steps": 10,
  "max_steps": 687,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7.379988366727496e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}