{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9991537376586743,
  "eval_steps": 500,
  "global_step": 1329,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.022566995768688293,
      "grad_norm": 1.3541167390816127,
      "learning_rate": 1e-06,
      "loss": 0.7892,
      "step": 10
    },
    {
      "epoch": 0.045133991537376586,
      "grad_norm": 0.7264231850744329,
      "learning_rate": 1e-06,
      "loss": 0.7186,
      "step": 20
    },
    {
      "epoch": 0.06770098730606489,
      "grad_norm": 0.6066684028794114,
      "learning_rate": 1e-06,
      "loss": 0.6958,
      "step": 30
    },
    {
      "epoch": 0.09026798307475317,
      "grad_norm": 0.5922845047115146,
      "learning_rate": 1e-06,
      "loss": 0.6703,
      "step": 40
    },
    {
      "epoch": 0.11283497884344147,
      "grad_norm": 0.5655949106141192,
      "learning_rate": 1e-06,
      "loss": 0.6624,
      "step": 50
    },
    {
      "epoch": 0.13540197461212977,
      "grad_norm": 0.6001006048051749,
      "learning_rate": 1e-06,
      "loss": 0.6603,
      "step": 60
    },
    {
      "epoch": 0.15796897038081806,
      "grad_norm": 0.5943745260723494,
      "learning_rate": 1e-06,
      "loss": 0.655,
      "step": 70
    },
    {
      "epoch": 0.18053596614950634,
      "grad_norm": 0.627834773775133,
      "learning_rate": 1e-06,
      "loss": 0.6557,
      "step": 80
    },
    {
      "epoch": 0.20310296191819463,
      "grad_norm": 0.5550793132057606,
      "learning_rate": 1e-06,
      "loss": 0.6531,
      "step": 90
    },
    {
      "epoch": 0.22566995768688294,
      "grad_norm": 0.5689760442146818,
      "learning_rate": 1e-06,
      "loss": 0.6513,
      "step": 100
    },
    {
      "epoch": 0.24823695345557123,
      "grad_norm": 0.5623137575600806,
      "learning_rate": 1e-06,
      "loss": 0.6417,
      "step": 110
    },
    {
      "epoch": 0.27080394922425954,
      "grad_norm": 0.5486362710231304,
      "learning_rate": 1e-06,
      "loss": 0.6406,
      "step": 120
    },
    {
      "epoch": 0.2933709449929478,
      "grad_norm": 0.7239936549173934,
      "learning_rate": 1e-06,
      "loss": 0.6415,
      "step": 130
    },
    {
      "epoch": 0.3159379407616361,
      "grad_norm": 0.5461401863831158,
      "learning_rate": 1e-06,
      "loss": 0.636,
      "step": 140
    },
    {
      "epoch": 0.3385049365303244,
      "grad_norm": 0.5483972404016155,
      "learning_rate": 1e-06,
      "loss": 0.6361,
      "step": 150
    },
    {
      "epoch": 0.3610719322990127,
      "grad_norm": 0.798239248846604,
      "learning_rate": 1e-06,
      "loss": 0.638,
      "step": 160
    },
    {
      "epoch": 0.383638928067701,
      "grad_norm": 0.5775493583413234,
      "learning_rate": 1e-06,
      "loss": 0.6337,
      "step": 170
    },
    {
      "epoch": 0.40620592383638926,
      "grad_norm": 0.5471975796667967,
      "learning_rate": 1e-06,
      "loss": 0.6308,
      "step": 180
    },
    {
      "epoch": 0.4287729196050776,
      "grad_norm": 0.5770116378368964,
      "learning_rate": 1e-06,
      "loss": 0.625,
      "step": 190
    },
    {
      "epoch": 0.4513399153737659,
      "grad_norm": 0.6018358262436428,
      "learning_rate": 1e-06,
      "loss": 0.6311,
      "step": 200
    },
    {
      "epoch": 0.47390691114245415,
      "grad_norm": 0.6214575004609858,
      "learning_rate": 1e-06,
      "loss": 0.6277,
      "step": 210
    },
    {
      "epoch": 0.49647390691114246,
      "grad_norm": 0.5762569358894746,
      "learning_rate": 1e-06,
      "loss": 0.6189,
      "step": 220
    },
    {
      "epoch": 0.5190409026798307,
      "grad_norm": 0.572204338525046,
      "learning_rate": 1e-06,
      "loss": 0.628,
      "step": 230
    },
    {
      "epoch": 0.5416078984485191,
      "grad_norm": 0.5406800730091907,
      "learning_rate": 1e-06,
      "loss": 0.6264,
      "step": 240
    },
    {
      "epoch": 0.5641748942172073,
      "grad_norm": 0.7049926618754724,
      "learning_rate": 1e-06,
      "loss": 0.6202,
      "step": 250
    },
    {
      "epoch": 0.5867418899858956,
      "grad_norm": 0.5706039865907114,
      "learning_rate": 1e-06,
      "loss": 0.623,
      "step": 260
    },
    {
      "epoch": 0.609308885754584,
      "grad_norm": 0.6199169688295726,
      "learning_rate": 1e-06,
      "loss": 0.6183,
      "step": 270
    },
    {
      "epoch": 0.6318758815232722,
      "grad_norm": 0.5856905093166911,
      "learning_rate": 1e-06,
      "loss": 0.6251,
      "step": 280
    },
    {
      "epoch": 0.6544428772919605,
      "grad_norm": 0.6047672043963276,
      "learning_rate": 1e-06,
      "loss": 0.6184,
      "step": 290
    },
    {
      "epoch": 0.6770098730606487,
      "grad_norm": 0.5860307869500213,
      "learning_rate": 1e-06,
      "loss": 0.6243,
      "step": 300
    },
    {
      "epoch": 0.6995768688293371,
      "grad_norm": 0.5557486139623895,
      "learning_rate": 1e-06,
      "loss": 0.6172,
      "step": 310
    },
    {
      "epoch": 0.7221438645980254,
      "grad_norm": 0.5891234296869776,
      "learning_rate": 1e-06,
      "loss": 0.6207,
      "step": 320
    },
    {
      "epoch": 0.7447108603667136,
      "grad_norm": 0.547918374354159,
      "learning_rate": 1e-06,
      "loss": 0.6162,
      "step": 330
    },
    {
      "epoch": 0.767277856135402,
      "grad_norm": 0.5442402563667577,
      "learning_rate": 1e-06,
      "loss": 0.6151,
      "step": 340
    },
    {
      "epoch": 0.7898448519040903,
      "grad_norm": 0.5860646515907174,
      "learning_rate": 1e-06,
      "loss": 0.6165,
      "step": 350
    },
    {
      "epoch": 0.8124118476727785,
      "grad_norm": 0.5535463576219704,
      "learning_rate": 1e-06,
      "loss": 0.6156,
      "step": 360
    },
    {
      "epoch": 0.8349788434414669,
      "grad_norm": 0.5756281236150095,
      "learning_rate": 1e-06,
      "loss": 0.6226,
      "step": 370
    },
    {
      "epoch": 0.8575458392101551,
      "grad_norm": 0.5950482068214076,
      "learning_rate": 1e-06,
      "loss": 0.6191,
      "step": 380
    },
    {
      "epoch": 0.8801128349788434,
      "grad_norm": 0.6260982756368931,
      "learning_rate": 1e-06,
      "loss": 0.6131,
      "step": 390
    },
    {
      "epoch": 0.9026798307475318,
      "grad_norm": 0.5364333350591519,
      "learning_rate": 1e-06,
      "loss": 0.6154,
      "step": 400
    },
    {
      "epoch": 0.92524682651622,
      "grad_norm": 0.6098043968571839,
      "learning_rate": 1e-06,
      "loss": 0.6134,
      "step": 410
    },
    {
      "epoch": 0.9478138222849083,
      "grad_norm": 0.6449811728681593,
      "learning_rate": 1e-06,
      "loss": 0.6138,
      "step": 420
    },
    {
      "epoch": 0.9703808180535967,
      "grad_norm": 0.5568535469034295,
      "learning_rate": 1e-06,
      "loss": 0.6078,
      "step": 430
    },
    {
      "epoch": 0.9929478138222849,
      "grad_norm": 0.6431203491501234,
      "learning_rate": 1e-06,
      "loss": 0.6126,
      "step": 440
    },
    {
      "epoch": 0.9997179125528914,
      "eval_loss": 0.6105000972747803,
      "eval_runtime": 685.4377,
      "eval_samples_per_second": 17.421,
      "eval_steps_per_second": 0.546,
      "step": 443
    },
    {
      "epoch": 1.0155148095909732,
      "grad_norm": 0.7779306881983701,
      "learning_rate": 1e-06,
      "loss": 0.6478,
      "step": 450
    },
    {
      "epoch": 1.0380818053596614,
      "grad_norm": 0.6328828703701227,
      "learning_rate": 1e-06,
      "loss": 0.5849,
      "step": 460
    },
    {
      "epoch": 1.0606488011283497,
      "grad_norm": 0.6082430510445967,
      "learning_rate": 1e-06,
      "loss": 0.6009,
      "step": 470
    },
    {
      "epoch": 1.0832157968970382,
      "grad_norm": 0.602028057200349,
      "learning_rate": 1e-06,
      "loss": 0.5871,
      "step": 480
    },
    {
      "epoch": 1.1057827926657264,
      "grad_norm": 0.7408617800573869,
      "learning_rate": 1e-06,
      "loss": 0.5984,
      "step": 490
    },
    {
      "epoch": 1.1283497884344147,
      "grad_norm": 0.5611898278455757,
      "learning_rate": 1e-06,
      "loss": 0.5964,
      "step": 500
    },
    {
      "epoch": 1.150916784203103,
      "grad_norm": 0.5832472204993896,
      "learning_rate": 1e-06,
      "loss": 0.596,
      "step": 510
    },
    {
      "epoch": 1.1734837799717912,
      "grad_norm": 0.5695284469211872,
      "learning_rate": 1e-06,
      "loss": 0.5875,
      "step": 520
    },
    {
      "epoch": 1.1960507757404795,
      "grad_norm": 0.6568577698478514,
      "learning_rate": 1e-06,
      "loss": 0.5939,
      "step": 530
    },
    {
      "epoch": 1.2186177715091677,
      "grad_norm": 0.586435498560335,
      "learning_rate": 1e-06,
      "loss": 0.5906,
      "step": 540
    },
    {
      "epoch": 1.2411847672778562,
      "grad_norm": 0.5855700220689813,
      "learning_rate": 1e-06,
      "loss": 0.595,
      "step": 550
    },
    {
      "epoch": 1.2637517630465445,
      "grad_norm": 0.5614379605237669,
      "learning_rate": 1e-06,
      "loss": 0.5965,
      "step": 560
    },
    {
      "epoch": 1.2863187588152327,
      "grad_norm": 0.5641915546644509,
      "learning_rate": 1e-06,
      "loss": 0.5932,
      "step": 570
    },
    {
      "epoch": 1.308885754583921,
      "grad_norm": 0.5891648230252526,
      "learning_rate": 1e-06,
      "loss": 0.5925,
      "step": 580
    },
    {
      "epoch": 1.3314527503526092,
      "grad_norm": 0.6055365904668806,
      "learning_rate": 1e-06,
      "loss": 0.5917,
      "step": 590
    },
    {
      "epoch": 1.3540197461212977,
      "grad_norm": 0.5835116087715779,
      "learning_rate": 1e-06,
      "loss": 0.598,
      "step": 600
    },
    {
      "epoch": 1.376586741889986,
      "grad_norm": 0.5717115602804459,
      "learning_rate": 1e-06,
      "loss": 0.5894,
      "step": 610
    },
    {
      "epoch": 1.3991537376586742,
      "grad_norm": 0.6550994090581721,
      "learning_rate": 1e-06,
      "loss": 0.5932,
      "step": 620
    },
    {
      "epoch": 1.4217207334273625,
      "grad_norm": 0.5652647115951249,
      "learning_rate": 1e-06,
      "loss": 0.5909,
      "step": 630
    },
    {
      "epoch": 1.4442877291960508,
      "grad_norm": 0.603846199030464,
      "learning_rate": 1e-06,
      "loss": 0.5907,
      "step": 640
    },
    {
      "epoch": 1.466854724964739,
      "grad_norm": 0.576216687167576,
      "learning_rate": 1e-06,
      "loss": 0.5912,
      "step": 650
    },
    {
      "epoch": 1.4894217207334273,
      "grad_norm": 0.5854798505920723,
      "learning_rate": 1e-06,
      "loss": 0.5947,
      "step": 660
    },
    {
      "epoch": 1.5119887165021155,
      "grad_norm": 0.6758805948706326,
      "learning_rate": 1e-06,
      "loss": 0.5887,
      "step": 670
    },
    {
      "epoch": 1.5345557122708038,
      "grad_norm": 0.5597355473960971,
      "learning_rate": 1e-06,
      "loss": 0.5823,
      "step": 680
    },
    {
      "epoch": 1.5571227080394923,
      "grad_norm": 0.5956678886860574,
      "learning_rate": 1e-06,
      "loss": 0.585,
      "step": 690
    },
    {
      "epoch": 1.5796897038081805,
      "grad_norm": 0.5686506141405391,
      "learning_rate": 1e-06,
      "loss": 0.5886,
      "step": 700
    },
    {
      "epoch": 1.6022566995768688,
      "grad_norm": 0.5755832522278349,
      "learning_rate": 1e-06,
      "loss": 0.5912,
      "step": 710
    },
    {
      "epoch": 1.6248236953455573,
      "grad_norm": 0.6014689265229756,
      "learning_rate": 1e-06,
      "loss": 0.5996,
      "step": 720
    },
    {
      "epoch": 1.6473906911142455,
      "grad_norm": 0.5728757135668859,
      "learning_rate": 1e-06,
      "loss": 0.5815,
      "step": 730
    },
    {
      "epoch": 1.6699576868829338,
      "grad_norm": 0.5921114444919704,
      "learning_rate": 1e-06,
      "loss": 0.5985,
      "step": 740
    },
    {
      "epoch": 1.692524682651622,
      "grad_norm": 0.5806339365041464,
      "learning_rate": 1e-06,
      "loss": 0.5891,
      "step": 750
    },
    {
      "epoch": 1.7150916784203103,
      "grad_norm": 0.5685739123888505,
      "learning_rate": 1e-06,
      "loss": 0.5874,
      "step": 760
    },
    {
      "epoch": 1.7376586741889986,
      "grad_norm": 0.596569886181905,
      "learning_rate": 1e-06,
      "loss": 0.5876,
      "step": 770
    },
    {
      "epoch": 1.7602256699576868,
      "grad_norm": 0.5948382867064587,
      "learning_rate": 1e-06,
      "loss": 0.5834,
      "step": 780
    },
    {
      "epoch": 1.782792665726375,
      "grad_norm": 0.560419630409488,
      "learning_rate": 1e-06,
      "loss": 0.587,
      "step": 790
    },
    {
      "epoch": 1.8053596614950633,
      "grad_norm": 0.6073231967396352,
      "learning_rate": 1e-06,
      "loss": 0.5813,
      "step": 800
    },
    {
      "epoch": 1.8279266572637518,
      "grad_norm": 0.5595516250387841,
      "learning_rate": 1e-06,
      "loss": 0.5923,
      "step": 810
    },
    {
      "epoch": 1.85049365303244,
      "grad_norm": 0.5733241612129921,
      "learning_rate": 1e-06,
      "loss": 0.5789,
      "step": 820
    },
    {
      "epoch": 1.8730606488011283,
      "grad_norm": 0.574054154592995,
      "learning_rate": 1e-06,
      "loss": 0.5917,
      "step": 830
    },
    {
      "epoch": 1.8956276445698168,
      "grad_norm": 0.5814856685071563,
      "learning_rate": 1e-06,
      "loss": 0.5913,
      "step": 840
    },
    {
      "epoch": 1.918194640338505,
      "grad_norm": 0.5895975578808779,
      "learning_rate": 1e-06,
      "loss": 0.5781,
      "step": 850
    },
    {
      "epoch": 1.9407616361071933,
      "grad_norm": 0.5589949521607741,
      "learning_rate": 1e-06,
      "loss": 0.583,
      "step": 860
    },
    {
      "epoch": 1.9633286318758816,
      "grad_norm": 0.5843925173653625,
      "learning_rate": 1e-06,
      "loss": 0.5899,
      "step": 870
    },
    {
      "epoch": 1.9858956276445698,
      "grad_norm": 0.5728967924600331,
      "learning_rate": 1e-06,
      "loss": 0.5879,
      "step": 880
    },
    {
      "epoch": 1.9994358251057829,
      "eval_loss": 0.5974977016448975,
      "eval_runtime": 685.9643,
      "eval_samples_per_second": 17.408,
      "eval_steps_per_second": 0.545,
      "step": 886
    },
    {
      "epoch": 2.008462623413258,
      "grad_norm": 0.6457869644641364,
      "learning_rate": 1e-06,
      "loss": 0.6318,
      "step": 890
    },
    {
      "epoch": 2.0310296191819464,
      "grad_norm": 0.5930954348218526,
      "learning_rate": 1e-06,
      "loss": 0.5662,
      "step": 900
    },
    {
      "epoch": 2.0535966149506346,
      "grad_norm": 0.6055346690385593,
      "learning_rate": 1e-06,
      "loss": 0.5694,
      "step": 910
    },
    {
      "epoch": 2.076163610719323,
      "grad_norm": 0.562520339476423,
      "learning_rate": 1e-06,
      "loss": 0.5611,
      "step": 920
    },
    {
      "epoch": 2.098730606488011,
      "grad_norm": 0.5561710805119879,
      "learning_rate": 1e-06,
      "loss": 0.5685,
      "step": 930
    },
    {
      "epoch": 2.1212976022566994,
      "grad_norm": 0.5979304183331132,
      "learning_rate": 1e-06,
      "loss": 0.572,
      "step": 940
    },
    {
      "epoch": 2.143864598025388,
      "grad_norm": 0.6093159324869943,
      "learning_rate": 1e-06,
      "loss": 0.5734,
      "step": 950
    },
    {
      "epoch": 2.1664315937940763,
      "grad_norm": 0.6175829170597708,
      "learning_rate": 1e-06,
      "loss": 0.5691,
      "step": 960
    },
    {
      "epoch": 2.1889985895627646,
      "grad_norm": 0.6068960323831453,
      "learning_rate": 1e-06,
      "loss": 0.5652,
      "step": 970
    },
    {
      "epoch": 2.211565585331453,
      "grad_norm": 0.6177800363612478,
      "learning_rate": 1e-06,
      "loss": 0.5591,
      "step": 980
    },
    {
      "epoch": 2.234132581100141,
      "grad_norm": 0.6120518804072075,
      "learning_rate": 1e-06,
      "loss": 0.5581,
      "step": 990
    },
    {
      "epoch": 2.2566995768688294,
      "grad_norm": 0.6185740976783666,
      "learning_rate": 1e-06,
      "loss": 0.5689,
      "step": 1000
    },
    {
      "epoch": 2.2792665726375176,
      "grad_norm": 0.6044905148971333,
      "learning_rate": 1e-06,
      "loss": 0.5755,
      "step": 1010
    },
    {
      "epoch": 2.301833568406206,
      "grad_norm": 0.6787947041417691,
      "learning_rate": 1e-06,
      "loss": 0.5678,
      "step": 1020
    },
    {
      "epoch": 2.324400564174894,
      "grad_norm": 0.6244782221013093,
      "learning_rate": 1e-06,
      "loss": 0.5726,
      "step": 1030
    },
    {
      "epoch": 2.3469675599435824,
      "grad_norm": 0.5838463678550949,
      "learning_rate": 1e-06,
      "loss": 0.5688,
      "step": 1040
    },
    {
      "epoch": 2.3695345557122707,
      "grad_norm": 0.5986795934805791,
      "learning_rate": 1e-06,
      "loss": 0.5654,
      "step": 1050
    },
    {
      "epoch": 2.392101551480959,
      "grad_norm": 0.5491793607349916,
      "learning_rate": 1e-06,
      "loss": 0.565,
      "step": 1060
    },
    {
      "epoch": 2.414668547249647,
      "grad_norm": 0.6275876027256113,
      "learning_rate": 1e-06,
      "loss": 0.5652,
      "step": 1070
    },
    {
      "epoch": 2.4372355430183354,
      "grad_norm": 0.5952317032256541,
      "learning_rate": 1e-06,
      "loss": 0.569,
      "step": 1080
    },
    {
      "epoch": 2.459802538787024,
      "grad_norm": 0.5915109016237521,
      "learning_rate": 1e-06,
      "loss": 0.567,
      "step": 1090
    },
    {
      "epoch": 2.4823695345557124,
      "grad_norm": 0.5905585385598298,
      "learning_rate": 1e-06,
      "loss": 0.5693,
      "step": 1100
    },
    {
      "epoch": 2.5049365303244007,
      "grad_norm": 0.6031395826071679,
      "learning_rate": 1e-06,
      "loss": 0.573,
      "step": 1110
    },
    {
      "epoch": 2.527503526093089,
      "grad_norm": 0.5861509889782255,
      "learning_rate": 1e-06,
      "loss": 0.5616,
      "step": 1120
    },
    {
      "epoch": 2.550070521861777,
      "grad_norm": 0.6059625495599589,
      "learning_rate": 1e-06,
      "loss": 0.5672,
      "step": 1130
    },
    {
      "epoch": 2.5726375176304654,
      "grad_norm": 0.570836533993128,
      "learning_rate": 1e-06,
      "loss": 0.5651,
      "step": 1140
    },
    {
      "epoch": 2.5952045133991537,
      "grad_norm": 0.5561735145605855,
      "learning_rate": 1e-06,
      "loss": 0.5659,
      "step": 1150
    },
    {
      "epoch": 2.617771509167842,
      "grad_norm": 0.6334567898240941,
      "learning_rate": 1e-06,
      "loss": 0.5696,
      "step": 1160
    },
    {
      "epoch": 2.64033850493653,
      "grad_norm": 0.5610258684119728,
      "learning_rate": 1e-06,
      "loss": 0.5564,
      "step": 1170
    },
    {
      "epoch": 2.6629055007052185,
      "grad_norm": 0.6013204802258304,
      "learning_rate": 1e-06,
      "loss": 0.5686,
      "step": 1180
    },
    {
      "epoch": 2.685472496473907,
      "grad_norm": 0.5914030753618424,
      "learning_rate": 1e-06,
      "loss": 0.5691,
      "step": 1190
    },
    {
      "epoch": 2.7080394922425954,
      "grad_norm": 0.6080013945750986,
      "learning_rate": 1e-06,
      "loss": 0.5673,
      "step": 1200
    },
    {
      "epoch": 2.7306064880112837,
      "grad_norm": 0.6801858228679418,
      "learning_rate": 1e-06,
      "loss": 0.5684,
      "step": 1210
    },
    {
      "epoch": 2.753173483779972,
      "grad_norm": 0.6863898034527391,
      "learning_rate": 1e-06,
      "loss": 0.5638,
      "step": 1220
    },
    {
      "epoch": 2.77574047954866,
      "grad_norm": 0.5906541395902623,
      "learning_rate": 1e-06,
      "loss": 0.5713,
      "step": 1230
    },
    {
      "epoch": 2.7983074753173485,
      "grad_norm": 0.5644242194086181,
      "learning_rate": 1e-06,
      "loss": 0.5694,
      "step": 1240
    },
    {
      "epoch": 2.8208744710860367,
      "grad_norm": 0.5714345353820881,
      "learning_rate": 1e-06,
      "loss": 0.5689,
      "step": 1250
    },
    {
      "epoch": 2.843441466854725,
      "grad_norm": 0.5962882121230534,
      "learning_rate": 1e-06,
      "loss": 0.5704,
      "step": 1260
    },
    {
      "epoch": 2.8660084626234132,
      "grad_norm": 0.5799929393173268,
      "learning_rate": 1e-06,
      "loss": 0.5669,
      "step": 1270
    },
    {
      "epoch": 2.8885754583921015,
      "grad_norm": 0.5896163258532005,
      "learning_rate": 1e-06,
      "loss": 0.5641,
      "step": 1280
    },
    {
      "epoch": 2.9111424541607898,
      "grad_norm": 0.5855508417080076,
      "learning_rate": 1e-06,
      "loss": 0.5688,
      "step": 1290
    },
    {
      "epoch": 2.933709449929478,
      "grad_norm": 0.5566279321823554,
      "learning_rate": 1e-06,
      "loss": 0.563,
      "step": 1300
    },
    {
      "epoch": 2.9562764456981663,
      "grad_norm": 0.6400231422216401,
      "learning_rate": 1e-06,
      "loss": 0.5677,
      "step": 1310
    },
    {
      "epoch": 2.9788434414668545,
      "grad_norm": 0.6067593940730119,
      "learning_rate": 1e-06,
      "loss": 0.562,
      "step": 1320
    },
    {
      "epoch": 2.9991537376586743,
      "eval_loss": 0.5919322371482849,
      "eval_runtime": 686.7261,
      "eval_samples_per_second": 17.388,
      "eval_steps_per_second": 0.545,
      "step": 1329
    },
    {
      "epoch": 2.9991537376586743,
      "step": 1329,
      "total_flos": 5064195066298368.0,
      "train_loss": 0.5987174125252316,
      "train_runtime": 120974.831,
      "train_samples_per_second": 5.626,
      "train_steps_per_second": 0.011
    }
  ],
  "logging_steps": 10,
  "max_steps": 1329,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5064195066298368.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}