{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 5.0,
  "eval_steps": 500,
  "global_step": 565,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 0.3933267489075661,
      "epoch": 0.08888888888888889,
      "grad_norm": 19.091772079467773,
      "learning_rate": 2e-05,
      "loss": 4.2829,
      "mean_token_accuracy": 0.5681655570864678,
      "num_tokens": 29409.0,
      "step": 10
    },
    {
      "entropy": 0.7650148034095764,
      "epoch": 0.17777777777777778,
      "grad_norm": 3.8214948177337646,
      "learning_rate": 2e-05,
      "loss": 1.9461,
      "mean_token_accuracy": 0.6879387736320496,
      "num_tokens": 58827.0,
      "step": 20
    },
    {
      "entropy": 1.1828202903270721,
      "epoch": 0.26666666666666666,
      "grad_norm": 2.6364810466766357,
      "learning_rate": 2e-05,
      "loss": 1.4227,
      "mean_token_accuracy": 0.7338245347142219,
      "num_tokens": 88299.0,
      "step": 30
    },
    {
      "entropy": 1.134617891907692,
      "epoch": 0.35555555555555557,
      "grad_norm": 1.9795665740966797,
      "learning_rate": 2e-05,
      "loss": 1.1307,
      "mean_token_accuracy": 0.7887017637491226,
      "num_tokens": 117759.0,
      "step": 40
    },
    {
      "entropy": 0.877768449485302,
      "epoch": 0.4444444444444444,
      "grad_norm": 1.8397494554519653,
      "learning_rate": 2e-05,
      "loss": 0.8535,
      "mean_token_accuracy": 0.8351033940911293,
      "num_tokens": 147140.0,
      "step": 50
    },
    {
      "entropy": 0.587233804166317,
      "epoch": 0.5333333333333333,
      "grad_norm": 1.7626832723617554,
      "learning_rate": 2e-05,
      "loss": 0.5781,
      "mean_token_accuracy": 0.8860435307025909,
      "num_tokens": 176659.0,
      "step": 60
    },
    {
      "entropy": 0.3405880033969879,
      "epoch": 0.6222222222222222,
      "grad_norm": 1.520534634590149,
      "learning_rate": 2e-05,
      "loss": 0.3419,
      "mean_token_accuracy": 0.9315642505884171,
      "num_tokens": 206147.0,
      "step": 70
    },
    {
      "entropy": 0.19235755391418935,
      "epoch": 0.7111111111111111,
      "grad_norm": 1.268977403640747,
      "learning_rate": 2e-05,
      "loss": 0.1858,
      "mean_token_accuracy": 0.9728681713342666,
      "num_tokens": 235603.0,
      "step": 80
    },
    {
      "entropy": 0.11804858762770891,
      "epoch": 0.8,
      "grad_norm": 0.781975269317627,
      "learning_rate": 2e-05,
      "loss": 0.1064,
      "mean_token_accuracy": 0.9887924045324326,
      "num_tokens": 265047.0,
      "step": 90
    },
    {
      "entropy": 0.09983876422047615,
      "epoch": 0.8888888888888888,
      "grad_norm": 0.4874080419540405,
      "learning_rate": 2e-05,
      "loss": 0.0815,
      "mean_token_accuracy": 0.9895498856902123,
      "num_tokens": 294524.0,
      "step": 100
    },
    {
      "entropy": 0.08479245882481337,
      "epoch": 0.9777777777777777,
      "grad_norm": 0.3734425902366638,
      "learning_rate": 2e-05,
      "loss": 0.0768,
      "mean_token_accuracy": 0.9904760375618935,
      "num_tokens": 324032.0,
      "step": 110
    },
    {
      "entropy": 0.07308715611304108,
      "epoch": 1.0622222222222222,
      "grad_norm": 0.37292563915252686,
      "learning_rate": 2e-05,
      "loss": 0.0646,
      "mean_token_accuracy": 0.9918417679636102,
      "num_tokens": 351993.0,
      "step": 120
    },
    {
      "entropy": 0.06726833172142506,
      "epoch": 1.1511111111111112,
      "grad_norm": 0.5837728977203369,
      "learning_rate": 2e-05,
      "loss": 0.0625,
      "mean_token_accuracy": 0.9917700842022896,
      "num_tokens": 381506.0,
      "step": 130
    },
    {
      "entropy": 0.06390567375347019,
      "epoch": 1.24,
      "grad_norm": 0.3549947738647461,
      "learning_rate": 2e-05,
      "loss": 0.0606,
      "mean_token_accuracy": 0.9916005581617355,
      "num_tokens": 411017.0,
      "step": 140
    },
    {
      "entropy": 0.06265801694244147,
      "epoch": 1.3288888888888888,
      "grad_norm": 0.2998465597629547,
      "learning_rate": 2e-05,
      "loss": 0.0564,
      "mean_token_accuracy": 0.9918477952480316,
      "num_tokens": 440460.0,
      "step": 150
    },
    {
      "entropy": 0.060036468971520665,
      "epoch": 1.4177777777777778,
      "grad_norm": 0.5888973474502563,
      "learning_rate": 2e-05,
      "loss": 0.0557,
      "mean_token_accuracy": 0.9919296875596046,
      "num_tokens": 469970.0,
      "step": 160
    },
    {
      "entropy": 0.05825678938999772,
      "epoch": 1.5066666666666668,
      "grad_norm": 0.27591630816459656,
      "learning_rate": 2e-05,
      "loss": 0.0557,
      "mean_token_accuracy": 0.991797935962677,
      "num_tokens": 499471.0,
      "step": 170
    },
    {
      "entropy": 0.058804288040846586,
      "epoch": 1.5955555555555554,
      "grad_norm": 0.3655984401702881,
      "learning_rate": 2e-05,
      "loss": 0.0537,
      "mean_token_accuracy": 0.9921618834137916,
      "num_tokens": 528959.0,
      "step": 180
    },
    {
      "entropy": 0.05707454737275839,
      "epoch": 1.6844444444444444,
      "grad_norm": 0.3361314535140991,
      "learning_rate": 2e-05,
      "loss": 0.0514,
      "mean_token_accuracy": 0.9922538578510285,
      "num_tokens": 558384.0,
      "step": 190
    },
    {
      "entropy": 0.05885109649971128,
      "epoch": 1.7733333333333334,
      "grad_norm": 0.2926768958568573,
      "learning_rate": 2e-05,
      "loss": 0.0565,
      "mean_token_accuracy": 0.9916261032223701,
      "num_tokens": 587873.0,
      "step": 200
    },
    {
      "entropy": 0.05613070921972394,
      "epoch": 1.8622222222222222,
      "grad_norm": 0.2952570617198944,
      "learning_rate": 2e-05,
      "loss": 0.0495,
      "mean_token_accuracy": 0.9927162423729896,
      "num_tokens": 617263.0,
      "step": 210
    },
    {
      "entropy": 0.05452471813187003,
      "epoch": 1.951111111111111,
      "grad_norm": 0.2781422734260559,
      "learning_rate": 2e-05,
      "loss": 0.0503,
      "mean_token_accuracy": 0.9923128932714462,
      "num_tokens": 646673.0,
      "step": 220
    },
    {
      "entropy": 0.05329967241146063,
      "epoch": 2.0355555555555553,
      "grad_norm": 0.3413642942905426,
      "learning_rate": 2e-05,
      "loss": 0.0495,
      "mean_token_accuracy": 0.9925352836910047,
      "num_tokens": 674550.0,
      "step": 230
    },
    {
      "entropy": 0.055740222427994014,
      "epoch": 2.1244444444444444,
      "grad_norm": 0.3916967213153839,
      "learning_rate": 2e-05,
      "loss": 0.0494,
      "mean_token_accuracy": 0.992403993010521,
      "num_tokens": 704049.0,
      "step": 240
    },
    {
      "entropy": 0.05344773568212986,
      "epoch": 2.2133333333333334,
      "grad_norm": 0.41811126470565796,
      "learning_rate": 2e-05,
      "loss": 0.0458,
      "mean_token_accuracy": 0.9928064867854118,
      "num_tokens": 733527.0,
      "step": 250
    },
    {
      "entropy": 0.05095162307843566,
      "epoch": 2.3022222222222224,
      "grad_norm": 0.5080037117004395,
      "learning_rate": 2e-05,
      "loss": 0.0462,
      "mean_token_accuracy": 0.9926309958100319,
      "num_tokens": 762980.0,
      "step": 260
    },
    {
      "entropy": 0.05230198642238974,
      "epoch": 2.391111111111111,
      "grad_norm": 0.34135064482688904,
      "learning_rate": 2e-05,
      "loss": 0.047,
      "mean_token_accuracy": 0.9925719112157821,
      "num_tokens": 792457.0,
      "step": 270
    },
    {
      "entropy": 0.053009994141757485,
      "epoch": 2.48,
      "grad_norm": 0.261165976524353,
      "learning_rate": 2e-05,
      "loss": 0.044,
      "mean_token_accuracy": 0.9927724987268448,
      "num_tokens": 821928.0,
      "step": 280
    },
    {
      "entropy": 0.052336765173822644,
      "epoch": 2.568888888888889,
      "grad_norm": 0.2925412356853485,
      "learning_rate": 2e-05,
      "loss": 0.0459,
      "mean_token_accuracy": 0.9917180150747299,
      "num_tokens": 851523.0,
      "step": 290
    },
    {
      "entropy": 0.04876706637442112,
      "epoch": 2.6577777777777776,
      "grad_norm": 0.37404191493988037,
      "learning_rate": 2e-05,
      "loss": 0.0406,
      "mean_token_accuracy": 0.9927994713187218,
      "num_tokens": 880960.0,
      "step": 300
    },
    {
      "entropy": 0.051569243893027306,
      "epoch": 2.7466666666666666,
      "grad_norm": 0.33673104643821716,
      "learning_rate": 2e-05,
      "loss": 0.0408,
      "mean_token_accuracy": 0.9931992888450623,
      "num_tokens": 910363.0,
      "step": 310
    },
    {
      "entropy": 0.049042111821472646,
      "epoch": 2.8355555555555556,
      "grad_norm": 0.3380878269672394,
      "learning_rate": 2e-05,
      "loss": 0.0405,
      "mean_token_accuracy": 0.9932568341493606,
      "num_tokens": 939743.0,
      "step": 320
    },
    {
      "entropy": 0.04809595588594675,
      "epoch": 2.924444444444444,
      "grad_norm": 0.27717074751853943,
      "learning_rate": 2e-05,
      "loss": 0.0401,
      "mean_token_accuracy": 0.9928347066044807,
      "num_tokens": 969188.0,
      "step": 330
    },
    {
      "entropy": 0.047154336873638,
      "epoch": 3.008888888888889,
      "grad_norm": 0.26735636591911316,
      "learning_rate": 2e-05,
      "loss": 0.0388,
      "mean_token_accuracy": 0.9933515975349828,
      "num_tokens": 997173.0,
      "step": 340
    },
    {
      "entropy": 0.0476642238907516,
      "epoch": 3.097777777777778,
      "grad_norm": 0.3359168767929077,
      "learning_rate": 2e-05,
      "loss": 0.0405,
      "mean_token_accuracy": 0.9925565898418427,
      "num_tokens": 1026726.0,
      "step": 350
    },
    {
      "entropy": 0.04855702333152294,
      "epoch": 3.1866666666666665,
      "grad_norm": 0.29510918259620667,
      "learning_rate": 2e-05,
      "loss": 0.0364,
      "mean_token_accuracy": 0.993024954199791,
      "num_tokens": 1056268.0,
      "step": 360
    },
    {
      "entropy": 0.044791849609464404,
      "epoch": 3.2755555555555556,
      "grad_norm": 0.27017977833747864,
      "learning_rate": 2e-05,
      "loss": 0.0359,
      "mean_token_accuracy": 0.9939059600234031,
      "num_tokens": 1085652.0,
      "step": 370
    },
    {
      "entropy": 0.045806001592427495,
      "epoch": 3.3644444444444446,
      "grad_norm": 0.2858346700668335,
      "learning_rate": 2e-05,
      "loss": 0.036,
      "mean_token_accuracy": 0.9928093075752258,
      "num_tokens": 1115142.0,
      "step": 380
    },
    {
      "entropy": 0.04463256490416825,
      "epoch": 3.453333333333333,
      "grad_norm": 0.3520112931728363,
      "learning_rate": 2e-05,
      "loss": 0.0362,
      "mean_token_accuracy": 0.9932136535644531,
      "num_tokens": 1144607.0,
      "step": 390
    },
    {
      "entropy": 0.04314003074541688,
      "epoch": 3.542222222222222,
      "grad_norm": 0.3692231774330139,
      "learning_rate": 2e-05,
      "loss": 0.0353,
      "mean_token_accuracy": 0.9933312207460403,
      "num_tokens": 1173996.0,
      "step": 400
    },
    {
      "entropy": 0.0454170742072165,
      "epoch": 3.631111111111111,
      "grad_norm": 0.23268748819828033,
      "learning_rate": 2e-05,
      "loss": 0.0342,
      "mean_token_accuracy": 0.9934337288141251,
      "num_tokens": 1203400.0,
      "step": 410
    },
    {
      "entropy": 0.043088483065366744,
      "epoch": 3.7199999999999998,
      "grad_norm": 0.40086743235588074,
      "learning_rate": 2e-05,
      "loss": 0.0363,
      "mean_token_accuracy": 0.9924963280558586,
      "num_tokens": 1232863.0,
      "step": 420
    },
    {
      "entropy": 0.042456195782870056,
      "epoch": 3.8088888888888888,
      "grad_norm": 0.30352523922920227,
      "learning_rate": 2e-05,
      "loss": 0.035,
      "mean_token_accuracy": 0.9930051982402801,
      "num_tokens": 1262316.0,
      "step": 430
    },
    {
      "entropy": 0.04498578486964107,
      "epoch": 3.897777777777778,
      "grad_norm": 0.2815539836883545,
      "learning_rate": 2e-05,
      "loss": 0.0337,
      "mean_token_accuracy": 0.9933026045560837,
      "num_tokens": 1291738.0,
      "step": 440
    },
    {
      "entropy": 0.04180623982101679,
      "epoch": 3.986666666666667,
      "grad_norm": 0.4016154706478119,
      "learning_rate": 2e-05,
      "loss": 0.0335,
      "mean_token_accuracy": 0.9940232038497925,
      "num_tokens": 1321198.0,
      "step": 450
    },
    {
      "entropy": 0.040797956140809936,
      "epoch": 4.071111111111111,
      "grad_norm": 0.3967713415622711,
      "learning_rate": 2e-05,
      "loss": 0.0303,
      "mean_token_accuracy": 0.9949048961463728,
      "num_tokens": 1349259.0,
      "step": 460
    },
    {
      "entropy": 0.041362932836636904,
      "epoch": 4.16,
      "grad_norm": 0.3272438943386078,
      "learning_rate": 2e-05,
      "loss": 0.0313,
      "mean_token_accuracy": 0.9941667526960373,
      "num_tokens": 1378731.0,
      "step": 470
    },
    {
      "entropy": 0.042792328353971246,
      "epoch": 4.248888888888889,
      "grad_norm": 0.34763824939727783,
      "learning_rate": 2e-05,
      "loss": 0.0321,
      "mean_token_accuracy": 0.9943796172738075,
      "num_tokens": 1408249.0,
      "step": 480
    },
    {
      "entropy": 0.042336183181032536,
      "epoch": 4.337777777777778,
      "grad_norm": 0.2802582383155823,
      "learning_rate": 2e-05,
      "loss": 0.0317,
      "mean_token_accuracy": 0.9944384515285491,
      "num_tokens": 1437770.0,
      "step": 490
    },
    {
      "entropy": 0.04013953167013824,
      "epoch": 4.426666666666667,
      "grad_norm": 0.31569746136665344,
      "learning_rate": 2e-05,
      "loss": 0.028,
      "mean_token_accuracy": 0.9952725186944008,
      "num_tokens": 1467204.0,
      "step": 500
    },
    {
      "entropy": 0.039097797218710185,
      "epoch": 4.515555555555555,
      "grad_norm": 0.33069005608558655,
      "learning_rate": 2e-05,
      "loss": 0.0312,
      "mean_token_accuracy": 0.9946726128458977,
      "num_tokens": 1496671.0,
      "step": 510
    },
    {
      "entropy": 0.03892837380990386,
      "epoch": 4.604444444444445,
      "grad_norm": 0.24601057171821594,
      "learning_rate": 2e-05,
      "loss": 0.0288,
      "mean_token_accuracy": 0.9946699410676956,
      "num_tokens": 1526129.0,
      "step": 520
    },
    {
      "entropy": 0.039517662627622484,
      "epoch": 4.693333333333333,
      "grad_norm": 0.2797907292842865,
      "learning_rate": 2e-05,
      "loss": 0.0294,
      "mean_token_accuracy": 0.9946090206503868,
      "num_tokens": 1555591.0,
      "step": 530
    },
    {
      "entropy": 0.039668824058026075,
      "epoch": 4.782222222222222,
      "grad_norm": 0.2804340422153473,
      "learning_rate": 2e-05,
      "loss": 0.0286,
      "mean_token_accuracy": 0.9947286590933799,
      "num_tokens": 1584969.0,
      "step": 540
    },
    {
      "entropy": 0.038332745106890796,
      "epoch": 4.871111111111111,
      "grad_norm": 0.22508393228054047,
      "learning_rate": 2e-05,
      "loss": 0.0298,
      "mean_token_accuracy": 0.9944633066654205,
      "num_tokens": 1614402.0,
      "step": 550
    },
    {
      "entropy": 0.03850612454116344,
      "epoch": 4.96,
      "grad_norm": 0.29442259669303894,
      "learning_rate": 2e-05,
      "loss": 0.0263,
      "mean_token_accuracy": 0.9948647573590279,
      "num_tokens": 1643824.0,
      "step": 560
    }
  ],
  "logging_steps": 10,
  "max_steps": 904,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1035206563987200.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}