{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.9991537376586743,
  "eval_steps": 500,
  "global_step": 1329,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.022566995768688293,
      "grad_norm": 1.1215263297319142,
      "learning_rate": 5e-06,
      "loss": 0.7439,
      "step": 10
    },
    {
      "epoch": 0.045133991537376586,
      "grad_norm": 0.9589553037421321,
      "learning_rate": 5e-06,
      "loss": 0.6805,
      "step": 20
    },
    {
      "epoch": 0.06770098730606489,
      "grad_norm": 0.6978000561973827,
      "learning_rate": 5e-06,
      "loss": 0.6629,
      "step": 30
    },
    {
      "epoch": 0.09026798307475317,
      "grad_norm": 0.6895643857510211,
      "learning_rate": 5e-06,
      "loss": 0.6405,
      "step": 40
    },
    {
      "epoch": 0.11283497884344147,
      "grad_norm": 0.669717971503289,
      "learning_rate": 5e-06,
      "loss": 0.634,
      "step": 50
    },
    {
      "epoch": 0.13540197461212977,
      "grad_norm": 0.7332747657746563,
      "learning_rate": 5e-06,
      "loss": 0.6333,
      "step": 60
    },
    {
      "epoch": 0.15796897038081806,
      "grad_norm": 0.6254612146946901,
      "learning_rate": 5e-06,
      "loss": 0.629,
      "step": 70
    },
    {
      "epoch": 0.18053596614950634,
      "grad_norm": 0.6162920370865537,
      "learning_rate": 5e-06,
      "loss": 0.6305,
      "step": 80
    },
    {
      "epoch": 0.20310296191819463,
      "grad_norm": 0.7199925252344376,
      "learning_rate": 5e-06,
      "loss": 0.6283,
      "step": 90
    },
    {
      "epoch": 0.22566995768688294,
      "grad_norm": 0.606363673682039,
      "learning_rate": 5e-06,
      "loss": 0.6273,
      "step": 100
    },
    {
      "epoch": 0.24823695345557123,
      "grad_norm": 0.6169229815124295,
      "learning_rate": 5e-06,
      "loss": 0.6182,
      "step": 110
    },
    {
      "epoch": 0.27080394922425954,
      "grad_norm": 0.663355682543403,
      "learning_rate": 5e-06,
      "loss": 0.6173,
      "step": 120
    },
    {
      "epoch": 0.2933709449929478,
      "grad_norm": 0.9594220268680589,
      "learning_rate": 5e-06,
      "loss": 0.6186,
      "step": 130
    },
    {
      "epoch": 0.3159379407616361,
      "grad_norm": 0.5976872854174679,
      "learning_rate": 5e-06,
      "loss": 0.6136,
      "step": 140
    },
    {
      "epoch": 0.3385049365303244,
      "grad_norm": 0.5740168215963144,
      "learning_rate": 5e-06,
      "loss": 0.6141,
      "step": 150
    },
    {
      "epoch": 0.3610719322990127,
      "grad_norm": 0.5971637732871506,
      "learning_rate": 5e-06,
      "loss": 0.6159,
      "step": 160
    },
    {
      "epoch": 0.383638928067701,
      "grad_norm": 0.6411106529565855,
      "learning_rate": 5e-06,
      "loss": 0.6119,
      "step": 170
    },
    {
      "epoch": 0.40620592383638926,
      "grad_norm": 0.5958283513824145,
      "learning_rate": 5e-06,
      "loss": 0.6087,
      "step": 180
    },
    {
      "epoch": 0.4287729196050776,
      "grad_norm": 0.6255247024653819,
      "learning_rate": 5e-06,
      "loss": 0.6032,
      "step": 190
    },
    {
      "epoch": 0.4513399153737659,
      "grad_norm": 0.6470955847975602,
      "learning_rate": 5e-06,
      "loss": 0.6099,
      "step": 200
    },
    {
      "epoch": 0.47390691114245415,
      "grad_norm": 0.5529431973312159,
      "learning_rate": 5e-06,
      "loss": 0.6063,
      "step": 210
    },
    {
      "epoch": 0.49647390691114246,
      "grad_norm": 0.6004350981188518,
      "learning_rate": 5e-06,
      "loss": 0.598,
      "step": 220
    },
    {
      "epoch": 0.5190409026798307,
      "grad_norm": 0.6361892647480081,
      "learning_rate": 5e-06,
      "loss": 0.607,
      "step": 230
    },
    {
      "epoch": 0.5416078984485191,
      "grad_norm": 0.6773504924409298,
      "learning_rate": 5e-06,
      "loss": 0.6057,
      "step": 240
    },
    {
      "epoch": 0.5641748942172073,
      "grad_norm": 0.5876189539321539,
      "learning_rate": 5e-06,
      "loss": 0.5999,
      "step": 250
    },
    {
      "epoch": 0.5867418899858956,
      "grad_norm": 0.731635941799137,
      "learning_rate": 5e-06,
      "loss": 0.6025,
      "step": 260
    },
    {
      "epoch": 0.609308885754584,
      "grad_norm": 0.618293220922638,
      "learning_rate": 5e-06,
      "loss": 0.598,
      "step": 270
    },
    {
      "epoch": 0.6318758815232722,
      "grad_norm": 0.5701192779576828,
      "learning_rate": 5e-06,
      "loss": 0.6046,
      "step": 280
    },
    {
      "epoch": 0.6544428772919605,
      "grad_norm": 0.6190773600106566,
      "learning_rate": 5e-06,
      "loss": 0.598,
      "step": 290
    },
    {
      "epoch": 0.6770098730606487,
      "grad_norm": 0.6946749076903362,
      "learning_rate": 5e-06,
      "loss": 0.6041,
      "step": 300
    },
    {
      "epoch": 0.6995768688293371,
      "grad_norm": 0.5584517869895729,
      "learning_rate": 5e-06,
      "loss": 0.5974,
      "step": 310
    },
    {
      "epoch": 0.7221438645980254,
      "grad_norm": 0.534959469393443,
      "learning_rate": 5e-06,
      "loss": 0.6006,
      "step": 320
    },
    {
      "epoch": 0.7447108603667136,
      "grad_norm": 0.5613326275534066,
      "learning_rate": 5e-06,
      "loss": 0.5963,
      "step": 330
    },
    {
      "epoch": 0.767277856135402,
      "grad_norm": 0.6183949473058837,
      "learning_rate": 5e-06,
      "loss": 0.5957,
      "step": 340
    },
    {
      "epoch": 0.7898448519040903,
      "grad_norm": 0.8087764124934571,
      "learning_rate": 5e-06,
      "loss": 0.5973,
      "step": 350
    },
    {
      "epoch": 0.8124118476727785,
      "grad_norm": 0.6363358932507842,
      "learning_rate": 5e-06,
      "loss": 0.5962,
      "step": 360
    },
    {
      "epoch": 0.8349788434414669,
      "grad_norm": 0.5830534924641125,
      "learning_rate": 5e-06,
      "loss": 0.6032,
      "step": 370
    },
    {
      "epoch": 0.8575458392101551,
      "grad_norm": 0.5633036138599161,
      "learning_rate": 5e-06,
      "loss": 0.6,
      "step": 380
    },
    {
      "epoch": 0.8801128349788434,
      "grad_norm": 0.6662273097138371,
      "learning_rate": 5e-06,
      "loss": 0.594,
      "step": 390
    },
    {
      "epoch": 0.9026798307475318,
      "grad_norm": 0.6082363867875173,
      "learning_rate": 5e-06,
      "loss": 0.5962,
      "step": 400
    },
    {
      "epoch": 0.92524682651622,
      "grad_norm": 0.5951090822435101,
      "learning_rate": 5e-06,
      "loss": 0.5943,
      "step": 410
    },
    {
      "epoch": 0.9478138222849083,
      "grad_norm": 0.5997768555467595,
      "learning_rate": 5e-06,
      "loss": 0.5948,
      "step": 420
    },
    {
      "epoch": 0.9703808180535967,
      "grad_norm": 0.5805468408554871,
      "learning_rate": 5e-06,
      "loss": 0.5888,
      "step": 430
    },
    {
      "epoch": 0.9929478138222849,
      "grad_norm": 0.8068596224773672,
      "learning_rate": 5e-06,
      "loss": 0.5936,
      "step": 440
    },
    {
      "epoch": 0.9997179125528914,
      "eval_loss": 0.5915570259094238,
      "eval_runtime": 687.3731,
      "eval_samples_per_second": 17.372,
      "eval_steps_per_second": 0.544,
      "step": 443
    },
    {
      "epoch": 1.0155148095909732,
      "grad_norm": 0.7255717905102692,
      "learning_rate": 5e-06,
      "loss": 0.6013,
      "step": 450
    },
    {
      "epoch": 1.0380818053596614,
      "grad_norm": 0.607435229031035,
      "learning_rate": 5e-06,
      "loss": 0.5285,
      "step": 460
    },
    {
      "epoch": 1.0606488011283497,
      "grad_norm": 0.5833450634240624,
      "learning_rate": 5e-06,
      "loss": 0.5432,
      "step": 470
    },
    {
      "epoch": 1.0832157968970382,
      "grad_norm": 0.6392741593507001,
      "learning_rate": 5e-06,
      "loss": 0.5313,
      "step": 480
    },
    {
      "epoch": 1.1057827926657264,
      "grad_norm": 0.6194306283942889,
      "learning_rate": 5e-06,
      "loss": 0.5414,
      "step": 490
    },
    {
      "epoch": 1.1283497884344147,
      "grad_norm": 0.6034773846054932,
      "learning_rate": 5e-06,
      "loss": 0.5401,
      "step": 500
    },
    {
      "epoch": 1.150916784203103,
      "grad_norm": 0.6074474571234163,
      "learning_rate": 5e-06,
      "loss": 0.5411,
      "step": 510
    },
    {
      "epoch": 1.1734837799717912,
      "grad_norm": 0.5928369457134515,
      "learning_rate": 5e-06,
      "loss": 0.5338,
      "step": 520
    },
    {
      "epoch": 1.1960507757404795,
      "grad_norm": 0.6968967341510017,
      "learning_rate": 5e-06,
      "loss": 0.54,
      "step": 530
    },
    {
      "epoch": 1.2186177715091677,
      "grad_norm": 0.5950609187883755,
      "learning_rate": 5e-06,
      "loss": 0.5373,
      "step": 540
    },
    {
      "epoch": 1.2411847672778562,
      "grad_norm": 0.6590255759384651,
      "learning_rate": 5e-06,
      "loss": 0.5421,
      "step": 550
    },
    {
      "epoch": 1.2637517630465445,
      "grad_norm": 0.6033577729369476,
      "learning_rate": 5e-06,
      "loss": 0.544,
      "step": 560
    },
    {
      "epoch": 1.2863187588152327,
      "grad_norm": 0.5806991373772136,
      "learning_rate": 5e-06,
      "loss": 0.5411,
      "step": 570
    },
    {
      "epoch": 1.308885754583921,
      "grad_norm": 0.5541246966391852,
      "learning_rate": 5e-06,
      "loss": 0.541,
      "step": 580
    },
    {
      "epoch": 1.3314527503526092,
      "grad_norm": 0.5559629537616659,
      "learning_rate": 5e-06,
      "loss": 0.541,
      "step": 590
    },
    {
      "epoch": 1.3540197461212977,
      "grad_norm": 0.600346734227381,
      "learning_rate": 5e-06,
      "loss": 0.5464,
      "step": 600
    },
    {
      "epoch": 1.376586741889986,
      "grad_norm": 0.5483957306260462,
      "learning_rate": 5e-06,
      "loss": 0.5391,
      "step": 610
    },
    {
      "epoch": 1.3991537376586742,
      "grad_norm": 0.5891511592807355,
      "learning_rate": 5e-06,
      "loss": 0.5433,
      "step": 620
    },
    {
      "epoch": 1.4217207334273625,
      "grad_norm": 0.5757262290185178,
      "learning_rate": 5e-06,
      "loss": 0.5414,
      "step": 630
    },
    {
      "epoch": 1.4442877291960508,
      "grad_norm": 0.7178524493309457,
      "learning_rate": 5e-06,
      "loss": 0.5411,
      "step": 640
    },
    {
      "epoch": 1.466854724964739,
      "grad_norm": 0.5761738946478169,
      "learning_rate": 5e-06,
      "loss": 0.5419,
      "step": 650
    },
    {
      "epoch": 1.4894217207334273,
      "grad_norm": 0.6803276810931657,
      "learning_rate": 5e-06,
      "loss": 0.5457,
      "step": 660
    },
    {
      "epoch": 1.5119887165021155,
      "grad_norm": 0.5838483650093735,
      "learning_rate": 5e-06,
      "loss": 0.5404,
      "step": 670
    },
    {
      "epoch": 1.5345557122708038,
      "grad_norm": 0.5500438005976995,
      "learning_rate": 5e-06,
      "loss": 0.5345,
      "step": 680
    },
    {
      "epoch": 1.5571227080394923,
      "grad_norm": 0.5952196199343613,
      "learning_rate": 5e-06,
      "loss": 0.5367,
      "step": 690
    },
    {
      "epoch": 1.5796897038081805,
      "grad_norm": 0.5859776058368286,
      "learning_rate": 5e-06,
      "loss": 0.5408,
      "step": 700
    },
    {
      "epoch": 1.6022566995768688,
      "grad_norm": 0.569845009994629,
      "learning_rate": 5e-06,
      "loss": 0.5437,
      "step": 710
    },
    {
      "epoch": 1.6248236953455573,
      "grad_norm": 0.5920715786393336,
      "learning_rate": 5e-06,
      "loss": 0.5516,
      "step": 720
    },
    {
      "epoch": 1.6473906911142455,
      "grad_norm": 0.5818635487703717,
      "learning_rate": 5e-06,
      "loss": 0.5349,
      "step": 730
    },
    {
      "epoch": 1.6699576868829338,
      "grad_norm": 0.6762439048089558,
      "learning_rate": 5e-06,
      "loss": 0.5508,
      "step": 740
    },
    {
      "epoch": 1.692524682651622,
      "grad_norm": 0.6486808150070261,
      "learning_rate": 5e-06,
      "loss": 0.5422,
      "step": 750
    },
    {
      "epoch": 1.7150916784203103,
      "grad_norm": 0.6023029863507834,
      "learning_rate": 5e-06,
      "loss": 0.5411,
      "step": 760
    },
    {
      "epoch": 1.7376586741889986,
      "grad_norm": 0.6694029375421383,
      "learning_rate": 5e-06,
      "loss": 0.5418,
      "step": 770
    },
    {
      "epoch": 1.7602256699576868,
      "grad_norm": 0.5605024046242828,
      "learning_rate": 5e-06,
      "loss": 0.5376,
      "step": 780
    },
    {
      "epoch": 1.782792665726375,
      "grad_norm": 0.5835110842958704,
      "learning_rate": 5e-06,
      "loss": 0.5411,
      "step": 790
    },
    {
      "epoch": 1.8053596614950633,
      "grad_norm": 0.5630352960782238,
      "learning_rate": 5e-06,
      "loss": 0.5362,
      "step": 800
    },
    {
      "epoch": 1.8279266572637518,
      "grad_norm": 0.7125043555571241,
      "learning_rate": 5e-06,
      "loss": 0.5469,
      "step": 810
    },
    {
      "epoch": 1.85049365303244,
      "grad_norm": 0.6814670972816546,
      "learning_rate": 5e-06,
      "loss": 0.5341,
      "step": 820
    },
    {
      "epoch": 1.8730606488011283,
      "grad_norm": 0.5702489037510262,
      "learning_rate": 5e-06,
      "loss": 0.5462,
      "step": 830
    },
    {
      "epoch": 1.8956276445698168,
      "grad_norm": 0.6424410665396532,
      "learning_rate": 5e-06,
      "loss": 0.5462,
      "step": 840
    },
    {
      "epoch": 1.918194640338505,
      "grad_norm": 0.5606371489617065,
      "learning_rate": 5e-06,
      "loss": 0.5341,
      "step": 850
    },
    {
      "epoch": 1.9407616361071933,
      "grad_norm": 0.6181395493988907,
      "learning_rate": 5e-06,
      "loss": 0.5391,
      "step": 860
    },
    {
      "epoch": 1.9633286318758816,
      "grad_norm": 0.6456199732402441,
      "learning_rate": 5e-06,
      "loss": 0.5455,
      "step": 870
    },
    {
      "epoch": 1.9858956276445698,
      "grad_norm": 0.6608281337723696,
      "learning_rate": 5e-06,
      "loss": 0.5438,
      "step": 880
    },
    {
      "epoch": 1.9994358251057829,
      "eval_loss": 0.5871796011924744,
      "eval_runtime": 691.7834,
      "eval_samples_per_second": 17.261,
      "eval_steps_per_second": 0.541,
      "step": 886
    },
    {
      "epoch": 2.008462623413258,
      "grad_norm": 0.9883175725757251,
      "learning_rate": 5e-06,
      "loss": 0.5674,
      "step": 890
    },
    {
      "epoch": 2.0310296191819464,
      "grad_norm": 0.774753044507208,
      "learning_rate": 5e-06,
      "loss": 0.4789,
      "step": 900
    },
    {
      "epoch": 2.0535966149506346,
      "grad_norm": 0.6720101690425048,
      "learning_rate": 5e-06,
      "loss": 0.4806,
      "step": 910
    },
    {
      "epoch": 2.076163610719323,
      "grad_norm": 0.6294466623666397,
      "learning_rate": 5e-06,
      "loss": 0.4731,
      "step": 920
    },
    {
      "epoch": 2.098730606488011,
      "grad_norm": 0.5951394939916556,
      "learning_rate": 5e-06,
      "loss": 0.4804,
      "step": 930
    },
    {
      "epoch": 2.1212976022566994,
      "grad_norm": 0.6064099677709555,
      "learning_rate": 5e-06,
      "loss": 0.4845,
      "step": 940
    },
    {
      "epoch": 2.143864598025388,
      "grad_norm": 0.5986475188840071,
      "learning_rate": 5e-06,
      "loss": 0.4861,
      "step": 950
    },
    {
      "epoch": 2.1664315937940763,
      "grad_norm": 0.6108189065348997,
      "learning_rate": 5e-06,
      "loss": 0.4824,
      "step": 960
    },
    {
      "epoch": 2.1889985895627646,
      "grad_norm": 0.7217555959499887,
      "learning_rate": 5e-06,
      "loss": 0.4804,
      "step": 970
    },
    {
      "epoch": 2.211565585331453,
      "grad_norm": 0.6115487837899767,
      "learning_rate": 5e-06,
      "loss": 0.4754,
      "step": 980
    },
    {
      "epoch": 2.234132581100141,
      "grad_norm": 0.5705694723061099,
      "learning_rate": 5e-06,
      "loss": 0.4753,
      "step": 990
    },
    {
      "epoch": 2.2566995768688294,
      "grad_norm": 0.5892148914995478,
      "learning_rate": 5e-06,
      "loss": 0.4843,
      "step": 1000
    },
    {
      "epoch": 2.2792665726375176,
      "grad_norm": 0.6030322855368557,
      "learning_rate": 5e-06,
      "loss": 0.4905,
      "step": 1010
    },
    {
      "epoch": 2.301833568406206,
      "grad_norm": 0.7155760122540393,
      "learning_rate": 5e-06,
      "loss": 0.4849,
      "step": 1020
    },
    {
      "epoch": 2.324400564174894,
      "grad_norm": 0.6636802343164983,
      "learning_rate": 5e-06,
      "loss": 0.4892,
      "step": 1030
    },
    {
      "epoch": 2.3469675599435824,
      "grad_norm": 0.6329844822710624,
      "learning_rate": 5e-06,
      "loss": 0.4871,
      "step": 1040
    },
    {
      "epoch": 2.3695345557122707,
      "grad_norm": 0.6112942066957556,
      "learning_rate": 5e-06,
      "loss": 0.4848,
      "step": 1050
    },
    {
      "epoch": 2.392101551480959,
      "grad_norm": 0.6366777503256337,
      "learning_rate": 5e-06,
      "loss": 0.4841,
      "step": 1060
    },
    {
      "epoch": 2.414668547249647,
      "grad_norm": 0.7138652472801592,
      "learning_rate": 5e-06,
      "loss": 0.4851,
      "step": 1070
    },
    {
      "epoch": 2.4372355430183354,
      "grad_norm": 0.70715248859997,
      "learning_rate": 5e-06,
      "loss": 0.4884,
      "step": 1080
    },
    {
      "epoch": 2.459802538787024,
      "grad_norm": 0.623941050552,
      "learning_rate": 5e-06,
      "loss": 0.4866,
      "step": 1090
    },
    {
      "epoch": 2.4823695345557124,
      "grad_norm": 0.6000021140772213,
      "learning_rate": 5e-06,
      "loss": 0.4898,
      "step": 1100
    },
    {
      "epoch": 2.5049365303244007,
      "grad_norm": 0.591547311727817,
      "learning_rate": 5e-06,
      "loss": 0.4929,
      "step": 1110
    },
    {
      "epoch": 2.527503526093089,
      "grad_norm": 0.6278175151314173,
      "learning_rate": 5e-06,
      "loss": 0.4838,
      "step": 1120
    },
    {
      "epoch": 2.550070521861777,
      "grad_norm": 0.5894451341018947,
      "learning_rate": 5e-06,
      "loss": 0.4887,
      "step": 1130
    },
    {
      "epoch": 2.5726375176304654,
      "grad_norm": 0.5747688518861067,
      "learning_rate": 5e-06,
      "loss": 0.4867,
      "step": 1140
    },
    {
      "epoch": 2.5952045133991537,
      "grad_norm": 0.6773167295421868,
      "learning_rate": 5e-06,
      "loss": 0.4877,
      "step": 1150
    },
    {
      "epoch": 2.617771509167842,
      "grad_norm": 0.6121425734282618,
      "learning_rate": 5e-06,
      "loss": 0.4912,
      "step": 1160
    },
    {
      "epoch": 2.64033850493653,
      "grad_norm": 0.6202468464637775,
      "learning_rate": 5e-06,
      "loss": 0.4804,
      "step": 1170
    },
    {
      "epoch": 2.6629055007052185,
      "grad_norm": 0.6111934336793183,
      "learning_rate": 5e-06,
      "loss": 0.491,
      "step": 1180
    },
    {
      "epoch": 2.685472496473907,
      "grad_norm": 0.5657449063033765,
      "learning_rate": 5e-06,
      "loss": 0.4918,
      "step": 1190
    },
    {
      "epoch": 2.7080394922425954,
      "grad_norm": 0.633560090737014,
      "learning_rate": 5e-06,
      "loss": 0.4903,
      "step": 1200
    },
    {
      "epoch": 2.7306064880112837,
      "grad_norm": 0.5882995160849452,
      "learning_rate": 5e-06,
      "loss": 0.4915,
      "step": 1210
    },
    {
      "epoch": 2.753173483779972,
      "grad_norm": 0.6060363006113761,
      "learning_rate": 5e-06,
      "loss": 0.4882,
      "step": 1220
    },
    {
      "epoch": 2.77574047954866,
      "grad_norm": 0.5852375248151124,
      "learning_rate": 5e-06,
      "loss": 0.4947,
      "step": 1230
    },
    {
      "epoch": 2.7983074753173485,
      "grad_norm": 0.6479541249622195,
      "learning_rate": 5e-06,
      "loss": 0.4932,
      "step": 1240
    },
    {
      "epoch": 2.8208744710860367,
      "grad_norm": 0.5741821001673613,
      "learning_rate": 5e-06,
      "loss": 0.4933,
      "step": 1250
    },
    {
      "epoch": 2.843441466854725,
      "grad_norm": 0.5741576880641838,
      "learning_rate": 5e-06,
      "loss": 0.4951,
      "step": 1260
    },
    {
      "epoch": 2.8660084626234132,
      "grad_norm": 0.5896718404848079,
      "learning_rate": 5e-06,
      "loss": 0.492,
      "step": 1270
    },
    {
      "epoch": 2.8885754583921015,
      "grad_norm": 0.6753471647954528,
      "learning_rate": 5e-06,
      "loss": 0.4903,
      "step": 1280
    },
    {
      "epoch": 2.9111424541607898,
      "grad_norm": 0.6471438922038979,
      "learning_rate": 5e-06,
      "loss": 0.494,
      "step": 1290
    },
    {
      "epoch": 2.933709449929478,
      "grad_norm": 0.6276890796491956,
      "learning_rate": 5e-06,
      "loss": 0.4896,
      "step": 1300
    },
    {
      "epoch": 2.9562764456981663,
      "grad_norm": 0.6652837102432119,
      "learning_rate": 5e-06,
      "loss": 0.4933,
      "step": 1310
    },
    {
      "epoch": 2.9788434414668545,
      "grad_norm": 0.5814130665393497,
      "learning_rate": 5e-06,
      "loss": 0.4898,
      "step": 1320
    },
    {
      "epoch": 2.9991537376586743,
      "eval_loss": 0.6002799272537231,
      "eval_runtime": 687.3631,
      "eval_samples_per_second": 17.372,
      "eval_steps_per_second": 0.544,
      "step": 1329
    },
    {
      "epoch": 2.9991537376586743,
      "step": 1329,
      "total_flos": 5064195066298368.0,
      "train_loss": 0.547943446042396,
      "train_runtime": 121073.4288,
      "train_samples_per_second": 5.621,
      "train_steps_per_second": 0.011
    }
  ],
  "logging_steps": 10,
  "max_steps": 1329,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5064195066298368.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}