{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.993050193050193,
  "eval_steps": 500,
  "global_step": 969,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03088803088803089,
      "grad_norm": 1.9920737405210878,
      "learning_rate": 5e-06,
      "loss": 0.8015,
      "step": 10
    },
    {
      "epoch": 0.06177606177606178,
      "grad_norm": 2.6204521466523314,
      "learning_rate": 5e-06,
      "loss": 0.7199,
      "step": 20
    },
    {
      "epoch": 0.09266409266409266,
      "grad_norm": 1.3723889792780992,
      "learning_rate": 5e-06,
      "loss": 0.6903,
      "step": 30
    },
    {
      "epoch": 0.12355212355212356,
      "grad_norm": 0.8864238330359895,
      "learning_rate": 5e-06,
      "loss": 0.6793,
      "step": 40
    },
    {
      "epoch": 0.15444015444015444,
      "grad_norm": 13.397151069895909,
      "learning_rate": 5e-06,
      "loss": 0.6675,
      "step": 50
    },
    {
      "epoch": 0.18532818532818532,
      "grad_norm": 2.1374873857216876,
      "learning_rate": 5e-06,
      "loss": 0.6488,
      "step": 60
    },
    {
      "epoch": 0.21621621621621623,
      "grad_norm": 0.7775938162830551,
      "learning_rate": 5e-06,
      "loss": 0.6457,
      "step": 70
    },
    {
      "epoch": 0.2471042471042471,
      "grad_norm": 0.8956285245200575,
      "learning_rate": 5e-06,
      "loss": 0.6479,
      "step": 80
    },
    {
      "epoch": 0.277992277992278,
      "grad_norm": 0.7136145785303137,
      "learning_rate": 5e-06,
      "loss": 0.6365,
      "step": 90
    },
    {
      "epoch": 0.3088803088803089,
      "grad_norm": 0.5386743245333256,
      "learning_rate": 5e-06,
      "loss": 0.6366,
      "step": 100
    },
    {
      "epoch": 0.33976833976833976,
      "grad_norm": 0.5331290951707172,
      "learning_rate": 5e-06,
      "loss": 0.6305,
      "step": 110
    },
    {
      "epoch": 0.37065637065637064,
      "grad_norm": 0.5589168290698243,
      "learning_rate": 5e-06,
      "loss": 0.6305,
      "step": 120
    },
    {
      "epoch": 0.4015444015444015,
      "grad_norm": 0.6427317993478433,
      "learning_rate": 5e-06,
      "loss": 0.6221,
      "step": 130
    },
    {
      "epoch": 0.43243243243243246,
      "grad_norm": 0.5345254165110678,
      "learning_rate": 5e-06,
      "loss": 0.6251,
      "step": 140
    },
    {
      "epoch": 0.46332046332046334,
      "grad_norm": 0.5523122802530471,
      "learning_rate": 5e-06,
      "loss": 0.6183,
      "step": 150
    },
    {
      "epoch": 0.4942084942084942,
      "grad_norm": 0.6345828990779662,
      "learning_rate": 5e-06,
      "loss": 0.6211,
      "step": 160
    },
    {
      "epoch": 0.525096525096525,
      "grad_norm": 0.5635690118220865,
      "learning_rate": 5e-06,
      "loss": 0.6222,
      "step": 170
    },
    {
      "epoch": 0.555984555984556,
      "grad_norm": 0.5674431409857721,
      "learning_rate": 5e-06,
      "loss": 0.6121,
      "step": 180
    },
    {
      "epoch": 0.5868725868725869,
      "grad_norm": 0.9846408481503562,
      "learning_rate": 5e-06,
      "loss": 0.627,
      "step": 190
    },
    {
      "epoch": 0.6177606177606177,
      "grad_norm": 0.5946873633912808,
      "learning_rate": 5e-06,
      "loss": 0.6171,
      "step": 200
    },
    {
      "epoch": 0.6486486486486487,
      "grad_norm": 1.1118602099699486,
      "learning_rate": 5e-06,
      "loss": 0.6244,
      "step": 210
    },
    {
      "epoch": 0.6795366795366795,
      "grad_norm": 0.6560189254340667,
      "learning_rate": 5e-06,
      "loss": 0.615,
      "step": 220
    },
    {
      "epoch": 0.7104247104247104,
      "grad_norm": 1.108952229503608,
      "learning_rate": 5e-06,
      "loss": 0.6147,
      "step": 230
    },
    {
      "epoch": 0.7413127413127413,
      "grad_norm": 0.6239111214970273,
      "learning_rate": 5e-06,
      "loss": 0.6097,
      "step": 240
    },
    {
      "epoch": 0.7722007722007722,
      "grad_norm": 1.0310044782453138,
      "learning_rate": 5e-06,
      "loss": 0.6167,
      "step": 250
    },
    {
      "epoch": 0.803088803088803,
      "grad_norm": 0.5083008941812818,
      "learning_rate": 5e-06,
      "loss": 0.6088,
      "step": 260
    },
    {
      "epoch": 0.833976833976834,
      "grad_norm": 0.5393819554919369,
      "learning_rate": 5e-06,
      "loss": 0.6054,
      "step": 270
    },
    {
      "epoch": 0.8648648648648649,
      "grad_norm": 0.501525790082182,
      "learning_rate": 5e-06,
      "loss": 0.6095,
      "step": 280
    },
    {
      "epoch": 0.8957528957528957,
      "grad_norm": 0.5189867584989447,
      "learning_rate": 5e-06,
      "loss": 0.6199,
      "step": 290
    },
    {
      "epoch": 0.9266409266409267,
      "grad_norm": 0.4557994704814137,
      "learning_rate": 5e-06,
      "loss": 0.6008,
      "step": 300
    },
    {
      "epoch": 0.9575289575289575,
      "grad_norm": 0.5100750571195128,
      "learning_rate": 5e-06,
      "loss": 0.6118,
      "step": 310
    },
    {
      "epoch": 0.9884169884169884,
      "grad_norm": 0.5597550458388434,
      "learning_rate": 5e-06,
      "loss": 0.604,
      "step": 320
    },
    {
      "epoch": 0.9976833976833976,
      "eval_loss": 0.6096363663673401,
      "eval_runtime": 174.3952,
      "eval_samples_per_second": 50.001,
      "eval_steps_per_second": 0.396,
      "step": 323
    },
    {
      "epoch": 1.0193050193050193,
      "grad_norm": 1.0059049758489014,
      "learning_rate": 5e-06,
      "loss": 0.5788,
      "step": 330
    },
    {
      "epoch": 1.05019305019305,
      "grad_norm": 0.8266294447843203,
      "learning_rate": 5e-06,
      "loss": 0.5555,
      "step": 340
    },
    {
      "epoch": 1.0810810810810811,
      "grad_norm": 0.6817740146761346,
      "learning_rate": 5e-06,
      "loss": 0.5568,
      "step": 350
    },
    {
      "epoch": 1.111969111969112,
      "grad_norm": 0.5212640526638012,
      "learning_rate": 5e-06,
      "loss": 0.5711,
      "step": 360
    },
    {
      "epoch": 1.1428571428571428,
      "grad_norm": 0.6082293357332792,
      "learning_rate": 5e-06,
      "loss": 0.5631,
      "step": 370
    },
    {
      "epoch": 1.1737451737451738,
      "grad_norm": 0.4984378073822691,
      "learning_rate": 5e-06,
      "loss": 0.5597,
      "step": 380
    },
    {
      "epoch": 1.2046332046332047,
      "grad_norm": 0.5400232508156533,
      "learning_rate": 5e-06,
      "loss": 0.5563,
      "step": 390
    },
    {
      "epoch": 1.2355212355212355,
      "grad_norm": 0.4863797369607136,
      "learning_rate": 5e-06,
      "loss": 0.563,
      "step": 400
    },
    {
      "epoch": 1.2664092664092665,
      "grad_norm": 0.5275923409775821,
      "learning_rate": 5e-06,
      "loss": 0.5625,
      "step": 410
    },
    {
      "epoch": 1.2972972972972974,
      "grad_norm": 0.527704404092815,
      "learning_rate": 5e-06,
      "loss": 0.5577,
      "step": 420
    },
    {
      "epoch": 1.3281853281853282,
      "grad_norm": 0.6166415284725348,
      "learning_rate": 5e-06,
      "loss": 0.5658,
      "step": 430
    },
    {
      "epoch": 1.359073359073359,
      "grad_norm": 0.6544026128628749,
      "learning_rate": 5e-06,
      "loss": 0.5544,
      "step": 440
    },
    {
      "epoch": 1.3899613899613898,
      "grad_norm": 0.5151291141077943,
      "learning_rate": 5e-06,
      "loss": 0.5497,
      "step": 450
    },
    {
      "epoch": 1.420849420849421,
      "grad_norm": 0.5534465037275664,
      "learning_rate": 5e-06,
      "loss": 0.5614,
      "step": 460
    },
    {
      "epoch": 1.4517374517374517,
      "grad_norm": 0.5300403375821853,
      "learning_rate": 5e-06,
      "loss": 0.5583,
      "step": 470
    },
    {
      "epoch": 1.4826254826254825,
      "grad_norm": 0.7438189920291365,
      "learning_rate": 5e-06,
      "loss": 0.558,
      "step": 480
    },
    {
      "epoch": 1.5135135135135136,
      "grad_norm": 0.531280818371624,
      "learning_rate": 5e-06,
      "loss": 0.5629,
      "step": 490
    },
    {
      "epoch": 1.5444015444015444,
      "grad_norm": 0.5680517069700968,
      "learning_rate": 5e-06,
      "loss": 0.557,
      "step": 500
    },
    {
      "epoch": 1.5752895752895753,
      "grad_norm": 0.5612219075386209,
      "learning_rate": 5e-06,
      "loss": 0.5653,
      "step": 510
    },
    {
      "epoch": 1.606177606177606,
      "grad_norm": 0.649010796286653,
      "learning_rate": 5e-06,
      "loss": 0.5553,
      "step": 520
    },
    {
      "epoch": 1.637065637065637,
      "grad_norm": 0.5724021305893356,
      "learning_rate": 5e-06,
      "loss": 0.5554,
      "step": 530
    },
    {
      "epoch": 1.667953667953668,
      "grad_norm": 0.5266418342750984,
      "learning_rate": 5e-06,
      "loss": 0.556,
      "step": 540
    },
    {
      "epoch": 1.698841698841699,
      "grad_norm": 0.4764947467562162,
      "learning_rate": 5e-06,
      "loss": 0.5637,
      "step": 550
    },
    {
      "epoch": 1.7297297297297298,
      "grad_norm": 0.49367950454054643,
      "learning_rate": 5e-06,
      "loss": 0.5616,
      "step": 560
    },
    {
      "epoch": 1.7606177606177607,
      "grad_norm": 0.48282246019994013,
      "learning_rate": 5e-06,
      "loss": 0.559,
      "step": 570
    },
    {
      "epoch": 1.7915057915057915,
      "grad_norm": 0.47502284833211744,
      "learning_rate": 5e-06,
      "loss": 0.5575,
      "step": 580
    },
    {
      "epoch": 1.8223938223938223,
      "grad_norm": 0.45633738284447206,
      "learning_rate": 5e-06,
      "loss": 0.5532,
      "step": 590
    },
    {
      "epoch": 1.8532818532818531,
      "grad_norm": 0.5759433055884126,
      "learning_rate": 5e-06,
      "loss": 0.5615,
      "step": 600
    },
    {
      "epoch": 1.8841698841698842,
      "grad_norm": 0.5858108727658949,
      "learning_rate": 5e-06,
      "loss": 0.5593,
      "step": 610
    },
    {
      "epoch": 1.915057915057915,
      "grad_norm": 0.5395422279987274,
      "learning_rate": 5e-06,
      "loss": 0.566,
      "step": 620
    },
    {
      "epoch": 1.945945945945946,
      "grad_norm": 0.544007523438176,
      "learning_rate": 5e-06,
      "loss": 0.5546,
      "step": 630
    },
    {
      "epoch": 1.9768339768339769,
      "grad_norm": 0.4756136721421921,
      "learning_rate": 5e-06,
      "loss": 0.5577,
      "step": 640
    },
    {
      "epoch": 1.9984555984555985,
      "eval_loss": 0.6016931533813477,
      "eval_runtime": 175.4937,
      "eval_samples_per_second": 49.688,
      "eval_steps_per_second": 0.393,
      "step": 647
    },
    {
      "epoch": 2.0077220077220077,
      "grad_norm": 0.9647134139465964,
      "learning_rate": 5e-06,
      "loss": 0.5481,
      "step": 650
    },
    {
      "epoch": 2.0386100386100385,
      "grad_norm": 0.6126014654142371,
      "learning_rate": 5e-06,
      "loss": 0.5195,
      "step": 660
    },
    {
      "epoch": 2.0694980694980694,
      "grad_norm": 0.642576302083791,
      "learning_rate": 5e-06,
      "loss": 0.5154,
      "step": 670
    },
    {
      "epoch": 2.1003861003861,
      "grad_norm": 0.6053831547491819,
      "learning_rate": 5e-06,
      "loss": 0.5141,
      "step": 680
    },
    {
      "epoch": 2.1312741312741315,
      "grad_norm": 0.570602965823511,
      "learning_rate": 5e-06,
      "loss": 0.4963,
      "step": 690
    },
    {
      "epoch": 2.1621621621621623,
      "grad_norm": 0.5789932913521146,
      "learning_rate": 5e-06,
      "loss": 0.5023,
      "step": 700
    },
    {
      "epoch": 2.193050193050193,
      "grad_norm": 0.49902888045361504,
      "learning_rate": 5e-06,
      "loss": 0.5053,
      "step": 710
    },
    {
      "epoch": 2.223938223938224,
      "grad_norm": 0.5200818247457489,
      "learning_rate": 5e-06,
      "loss": 0.5086,
      "step": 720
    },
    {
      "epoch": 2.2548262548262548,
      "grad_norm": 0.5147752817966699,
      "learning_rate": 5e-06,
      "loss": 0.5144,
      "step": 730
    },
    {
      "epoch": 2.2857142857142856,
      "grad_norm": 0.5202904589332674,
      "learning_rate": 5e-06,
      "loss": 0.5075,
      "step": 740
    },
    {
      "epoch": 2.3166023166023164,
      "grad_norm": 0.6419271084211798,
      "learning_rate": 5e-06,
      "loss": 0.5095,
      "step": 750
    },
    {
      "epoch": 2.3474903474903477,
      "grad_norm": 0.48888244575595774,
      "learning_rate": 5e-06,
      "loss": 0.5074,
      "step": 760
    },
    {
      "epoch": 2.3783783783783785,
      "grad_norm": 0.4949992881722656,
      "learning_rate": 5e-06,
      "loss": 0.5058,
      "step": 770
    },
    {
      "epoch": 2.4092664092664093,
      "grad_norm": 0.6977527327479829,
      "learning_rate": 5e-06,
      "loss": 0.5055,
      "step": 780
    },
    {
      "epoch": 2.44015444015444,
      "grad_norm": 0.5281157241550238,
      "learning_rate": 5e-06,
      "loss": 0.5101,
      "step": 790
    },
    {
      "epoch": 2.471042471042471,
      "grad_norm": 0.5317168799948615,
      "learning_rate": 5e-06,
      "loss": 0.5092,
      "step": 800
    },
    {
      "epoch": 2.501930501930502,
      "grad_norm": 0.5665091196485048,
      "learning_rate": 5e-06,
      "loss": 0.5123,
      "step": 810
    },
    {
      "epoch": 2.532818532818533,
      "grad_norm": 0.5733443367920803,
      "learning_rate": 5e-06,
      "loss": 0.5045,
      "step": 820
    },
    {
      "epoch": 2.563706563706564,
      "grad_norm": 0.4895951656550531,
      "learning_rate": 5e-06,
      "loss": 0.5245,
      "step": 830
    },
    {
      "epoch": 2.5945945945945947,
      "grad_norm": 0.5509555644837258,
      "learning_rate": 5e-06,
      "loss": 0.5074,
      "step": 840
    },
    {
      "epoch": 2.6254826254826256,
      "grad_norm": 0.5242119378254794,
      "learning_rate": 5e-06,
      "loss": 0.5102,
      "step": 850
    },
    {
      "epoch": 2.6563706563706564,
      "grad_norm": 0.6137964577932209,
      "learning_rate": 5e-06,
      "loss": 0.5066,
      "step": 860
    },
    {
      "epoch": 2.687258687258687,
      "grad_norm": 0.517696061441138,
      "learning_rate": 5e-06,
      "loss": 0.5156,
      "step": 870
    },
    {
      "epoch": 2.718146718146718,
      "grad_norm": 0.5449458713675641,
      "learning_rate": 5e-06,
      "loss": 0.5153,
      "step": 880
    },
    {
      "epoch": 2.749034749034749,
      "grad_norm": 0.5127690582430875,
      "learning_rate": 5e-06,
      "loss": 0.5121,
      "step": 890
    },
    {
      "epoch": 2.7799227799227797,
      "grad_norm": 0.5989390375727383,
      "learning_rate": 5e-06,
      "loss": 0.5149,
      "step": 900
    },
    {
      "epoch": 2.810810810810811,
      "grad_norm": 0.5398603643342522,
      "learning_rate": 5e-06,
      "loss": 0.5116,
      "step": 910
    },
    {
      "epoch": 2.841698841698842,
      "grad_norm": 0.5257472179121192,
      "learning_rate": 5e-06,
      "loss": 0.5154,
      "step": 920
    },
    {
      "epoch": 2.8725868725868726,
      "grad_norm": 0.5919523831497919,
      "learning_rate": 5e-06,
      "loss": 0.516,
      "step": 930
    },
    {
      "epoch": 2.9034749034749034,
      "grad_norm": 0.5425769518104601,
      "learning_rate": 5e-06,
      "loss": 0.5114,
      "step": 940
    },
    {
      "epoch": 2.9343629343629343,
      "grad_norm": 0.49233460424864756,
      "learning_rate": 5e-06,
      "loss": 0.5126,
      "step": 950
    },
    {
      "epoch": 2.965250965250965,
      "grad_norm": 0.5105330314981293,
      "learning_rate": 5e-06,
      "loss": 0.5096,
      "step": 960
    },
    {
      "epoch": 2.993050193050193,
      "eval_loss": 0.6065478920936584,
      "eval_runtime": 175.4708,
      "eval_samples_per_second": 49.695,
      "eval_steps_per_second": 0.393,
      "step": 969
    },
    {
      "epoch": 2.993050193050193,
      "step": 969,
      "total_flos": 1622692331520000.0,
      "train_loss": 0.5687421354839061,
      "train_runtime": 29217.0698,
      "train_samples_per_second": 17.012,
      "train_steps_per_second": 0.033
    }
  ],
  "logging_steps": 10,
  "max_steps": 969,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1622692331520000.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}