| { |
| "best_metric": null, |
| "best_model_checkpoint": null, |
| "epoch": 2.9991537376586743, |
| "eval_steps": 500, |
| "global_step": 1329, |
| "is_hyper_param_search": false, |
| "is_local_process_zero": true, |
| "is_world_process_zero": true, |
| "log_history": [ |
| { |
| "epoch": 0.022566995768688293, |
| "grad_norm": 1.0523082190358444, |
| "learning_rate": 5e-06, |
| "loss": 0.7445, |
| "step": 10 |
| }, |
| { |
| "epoch": 0.045133991537376586, |
| "grad_norm": 0.7204722174858804, |
| "learning_rate": 5e-06, |
| "loss": 0.6818, |
| "step": 20 |
| }, |
| { |
| "epoch": 0.06770098730606489, |
| "grad_norm": 0.6657659332077731, |
| "learning_rate": 5e-06, |
| "loss": 0.6643, |
| "step": 30 |
| }, |
| { |
| "epoch": 0.09026798307475317, |
| "grad_norm": 0.6362305691978117, |
| "learning_rate": 5e-06, |
| "loss": 0.6416, |
| "step": 40 |
| }, |
| { |
| "epoch": 0.11283497884344147, |
| "grad_norm": 0.6191338109521546, |
| "learning_rate": 5e-06, |
| "loss": 0.6346, |
| "step": 50 |
| }, |
| { |
| "epoch": 0.13540197461212977, |
| "grad_norm": 0.5949181413120924, |
| "learning_rate": 5e-06, |
| "loss": 0.6336, |
| "step": 60 |
| }, |
| { |
| "epoch": 0.15796897038081806, |
| "grad_norm": 0.7070668798593001, |
| "learning_rate": 5e-06, |
| "loss": 0.6292, |
| "step": 70 |
| }, |
| { |
| "epoch": 0.18053596614950634, |
| "grad_norm": 0.7837686482047163, |
| "learning_rate": 5e-06, |
| "loss": 0.6309, |
| "step": 80 |
| }, |
| { |
| "epoch": 0.20310296191819463, |
| "grad_norm": 0.6122731339113894, |
| "learning_rate": 5e-06, |
| "loss": 0.6282, |
| "step": 90 |
| }, |
| { |
| "epoch": 0.22566995768688294, |
| "grad_norm": 0.6099360959628112, |
| "learning_rate": 5e-06, |
| "loss": 0.6272, |
| "step": 100 |
| }, |
| { |
| "epoch": 0.24823695345557123, |
| "grad_norm": 0.5763419323558299, |
| "learning_rate": 5e-06, |
| "loss": 0.6181, |
| "step": 110 |
| }, |
| { |
| "epoch": 0.27080394922425954, |
| "grad_norm": 0.5766708352017299, |
| "learning_rate": 5e-06, |
| "loss": 0.6174, |
| "step": 120 |
| }, |
| { |
| "epoch": 0.2933709449929478, |
| "grad_norm": 0.5831982523299792, |
| "learning_rate": 5e-06, |
| "loss": 0.6183, |
| "step": 130 |
| }, |
| { |
| "epoch": 0.3159379407616361, |
| "grad_norm": 0.5932634819616686, |
| "learning_rate": 5e-06, |
| "loss": 0.6134, |
| "step": 140 |
| }, |
| { |
| "epoch": 0.3385049365303244, |
| "grad_norm": 0.6548735045583572, |
| "learning_rate": 5e-06, |
| "loss": 0.6139, |
| "step": 150 |
| }, |
| { |
| "epoch": 0.3610719322990127, |
| "grad_norm": 0.6104064319299549, |
| "learning_rate": 5e-06, |
| "loss": 0.6158, |
| "step": 160 |
| }, |
| { |
| "epoch": 0.383638928067701, |
| "grad_norm": 0.5773357723466442, |
| "learning_rate": 5e-06, |
| "loss": 0.6121, |
| "step": 170 |
| }, |
| { |
| "epoch": 0.40620592383638926, |
| "grad_norm": 0.5642088593144782, |
| "learning_rate": 5e-06, |
| "loss": 0.609, |
| "step": 180 |
| }, |
| { |
| "epoch": 0.4287729196050776, |
| "grad_norm": 0.7945916904161691, |
| "learning_rate": 5e-06, |
| "loss": 0.6033, |
| "step": 190 |
| }, |
| { |
| "epoch": 0.4513399153737659, |
| "grad_norm": 0.5700649241835255, |
| "learning_rate": 5e-06, |
| "loss": 0.6099, |
| "step": 200 |
| }, |
| { |
| "epoch": 0.47390691114245415, |
| "grad_norm": 0.5724023556811992, |
| "learning_rate": 5e-06, |
| "loss": 0.6064, |
| "step": 210 |
| }, |
| { |
| "epoch": 0.49647390691114246, |
| "grad_norm": 0.578579492001445, |
| "learning_rate": 5e-06, |
| "loss": 0.5981, |
| "step": 220 |
| }, |
| { |
| "epoch": 0.5190409026798307, |
| "grad_norm": 0.5735585228434464, |
| "learning_rate": 5e-06, |
| "loss": 0.6068, |
| "step": 230 |
| }, |
| { |
| "epoch": 0.5416078984485191, |
| "grad_norm": 0.5945538392399885, |
| "learning_rate": 5e-06, |
| "loss": 0.6057, |
| "step": 240 |
| }, |
| { |
| "epoch": 0.5641748942172073, |
| "grad_norm": 0.665968728641082, |
| "learning_rate": 5e-06, |
| "loss": 0.6001, |
| "step": 250 |
| }, |
| { |
| "epoch": 0.5867418899858956, |
| "grad_norm": 0.6068621629992512, |
| "learning_rate": 5e-06, |
| "loss": 0.6026, |
| "step": 260 |
| }, |
| { |
| "epoch": 0.609308885754584, |
| "grad_norm": 0.6016168018586718, |
| "learning_rate": 5e-06, |
| "loss": 0.5982, |
| "step": 270 |
| }, |
| { |
| "epoch": 0.6318758815232722, |
| "grad_norm": 0.5514878917702808, |
| "learning_rate": 5e-06, |
| "loss": 0.6047, |
| "step": 280 |
| }, |
| { |
| "epoch": 0.6544428772919605, |
| "grad_norm": 0.5888116750405652, |
| "learning_rate": 5e-06, |
| "loss": 0.598, |
| "step": 290 |
| }, |
| { |
| "epoch": 0.6770098730606487, |
| "grad_norm": 0.6670509011955545, |
| "learning_rate": 5e-06, |
| "loss": 0.604, |
| "step": 300 |
| }, |
| { |
| "epoch": 0.6995768688293371, |
| "grad_norm": 0.56760502095263, |
| "learning_rate": 5e-06, |
| "loss": 0.5974, |
| "step": 310 |
| }, |
| { |
| "epoch": 0.7221438645980254, |
| "grad_norm": 0.5396300622828406, |
| "learning_rate": 5e-06, |
| "loss": 0.6007, |
| "step": 320 |
| }, |
| { |
| "epoch": 0.7447108603667136, |
| "grad_norm": 0.5248048407857459, |
| "learning_rate": 5e-06, |
| "loss": 0.5962, |
| "step": 330 |
| }, |
| { |
| "epoch": 0.767277856135402, |
| "grad_norm": 0.6373736145180807, |
| "learning_rate": 5e-06, |
| "loss": 0.5959, |
| "step": 340 |
| }, |
| { |
| "epoch": 0.7898448519040903, |
| "grad_norm": 0.6182646651584798, |
| "learning_rate": 5e-06, |
| "loss": 0.5971, |
| "step": 350 |
| }, |
| { |
| "epoch": 0.8124118476727785, |
| "grad_norm": 0.6062420620133867, |
| "learning_rate": 5e-06, |
| "loss": 0.5962, |
| "step": 360 |
| }, |
| { |
| "epoch": 0.8349788434414669, |
| "grad_norm": 0.6674005803692369, |
| "learning_rate": 5e-06, |
| "loss": 0.6032, |
| "step": 370 |
| }, |
| { |
| "epoch": 0.8575458392101551, |
| "grad_norm": 0.5702342610780495, |
| "learning_rate": 5e-06, |
| "loss": 0.5997, |
| "step": 380 |
| }, |
| { |
| "epoch": 0.8801128349788434, |
| "grad_norm": 0.63735621272806, |
| "learning_rate": 5e-06, |
| "loss": 0.5938, |
| "step": 390 |
| }, |
| { |
| "epoch": 0.9026798307475318, |
| "grad_norm": 0.6271890460812696, |
| "learning_rate": 5e-06, |
| "loss": 0.596, |
| "step": 400 |
| }, |
| { |
| "epoch": 0.92524682651622, |
| "grad_norm": 0.5969964634186198, |
| "learning_rate": 5e-06, |
| "loss": 0.594, |
| "step": 410 |
| }, |
| { |
| "epoch": 0.9478138222849083, |
| "grad_norm": 0.5864165897979446, |
| "learning_rate": 5e-06, |
| "loss": 0.5946, |
| "step": 420 |
| }, |
| { |
| "epoch": 0.9703808180535967, |
| "grad_norm": 0.541523736922195, |
| "learning_rate": 5e-06, |
| "loss": 0.5887, |
| "step": 430 |
| }, |
| { |
| "epoch": 0.9929478138222849, |
| "grad_norm": 0.5600449396151266, |
| "learning_rate": 5e-06, |
| "loss": 0.5933, |
| "step": 440 |
| }, |
| { |
| "epoch": 0.9997179125528914, |
| "eval_loss": 0.5914410948753357, |
| "eval_runtime": 695.8463, |
| "eval_samples_per_second": 17.16, |
| "eval_steps_per_second": 0.537, |
| "step": 443 |
| }, |
| { |
| "epoch": 1.0155148095909732, |
| "grad_norm": 0.7144717608081881, |
| "learning_rate": 5e-06, |
| "loss": 0.6007, |
| "step": 450 |
| }, |
| { |
| "epoch": 1.0380818053596614, |
| "grad_norm": 0.6164499703389419, |
| "learning_rate": 5e-06, |
| "loss": 0.5282, |
| "step": 460 |
| }, |
| { |
| "epoch": 1.0606488011283497, |
| "grad_norm": 0.581579518620319, |
| "learning_rate": 5e-06, |
| "loss": 0.5429, |
| "step": 470 |
| }, |
| { |
| "epoch": 1.0832157968970382, |
| "grad_norm": 0.997078554443114, |
| "learning_rate": 5e-06, |
| "loss": 0.5308, |
| "step": 480 |
| }, |
| { |
| "epoch": 1.1057827926657264, |
| "grad_norm": 0.6099409439799195, |
| "learning_rate": 5e-06, |
| "loss": 0.5409, |
| "step": 490 |
| }, |
| { |
| "epoch": 1.1283497884344147, |
| "grad_norm": 0.5468035690652739, |
| "learning_rate": 5e-06, |
| "loss": 0.5397, |
| "step": 500 |
| }, |
| { |
| "epoch": 1.150916784203103, |
| "grad_norm": 0.57828606833009, |
| "learning_rate": 5e-06, |
| "loss": 0.5408, |
| "step": 510 |
| }, |
| { |
| "epoch": 1.1734837799717912, |
| "grad_norm": 0.5871544502398742, |
| "learning_rate": 5e-06, |
| "loss": 0.5333, |
| "step": 520 |
| }, |
| { |
| "epoch": 1.1960507757404795, |
| "grad_norm": 0.5946052427884659, |
| "learning_rate": 5e-06, |
| "loss": 0.5394, |
| "step": 530 |
| }, |
| { |
| "epoch": 1.2186177715091677, |
| "grad_norm": 0.7111134185557438, |
| "learning_rate": 5e-06, |
| "loss": 0.537, |
| "step": 540 |
| }, |
| { |
| "epoch": 1.2411847672778562, |
| "grad_norm": 0.559221387787138, |
| "learning_rate": 5e-06, |
| "loss": 0.5419, |
| "step": 550 |
| }, |
| { |
| "epoch": 1.2637517630465445, |
| "grad_norm": 0.6299235187853538, |
| "learning_rate": 5e-06, |
| "loss": 0.5437, |
| "step": 560 |
| }, |
| { |
| "epoch": 1.2863187588152327, |
| "grad_norm": 0.5558238143445393, |
| "learning_rate": 5e-06, |
| "loss": 0.5409, |
| "step": 570 |
| }, |
| { |
| "epoch": 1.308885754583921, |
| "grad_norm": 0.6497845036097089, |
| "learning_rate": 5e-06, |
| "loss": 0.5408, |
| "step": 580 |
| }, |
| { |
| "epoch": 1.3314527503526092, |
| "grad_norm": 0.5527177649135514, |
| "learning_rate": 5e-06, |
| "loss": 0.5407, |
| "step": 590 |
| }, |
| { |
| "epoch": 1.3540197461212977, |
| "grad_norm": 0.6268702226525366, |
| "learning_rate": 5e-06, |
| "loss": 0.5461, |
| "step": 600 |
| }, |
| { |
| "epoch": 1.376586741889986, |
| "grad_norm": 0.6440861274985064, |
| "learning_rate": 5e-06, |
| "loss": 0.5389, |
| "step": 610 |
| }, |
| { |
| "epoch": 1.3991537376586742, |
| "grad_norm": 0.6466867606638823, |
| "learning_rate": 5e-06, |
| "loss": 0.543, |
| "step": 620 |
| }, |
| { |
| "epoch": 1.4217207334273625, |
| "grad_norm": 0.5585689792621545, |
| "learning_rate": 5e-06, |
| "loss": 0.5412, |
| "step": 630 |
| }, |
| { |
| "epoch": 1.4442877291960508, |
| "grad_norm": 0.5933487231082422, |
| "learning_rate": 5e-06, |
| "loss": 0.5409, |
| "step": 640 |
| }, |
| { |
| "epoch": 1.466854724964739, |
| "grad_norm": 0.5844818065627359, |
| "learning_rate": 5e-06, |
| "loss": 0.5417, |
| "step": 650 |
| }, |
| { |
| "epoch": 1.4894217207334273, |
| "grad_norm": 0.6041075082778192, |
| "learning_rate": 5e-06, |
| "loss": 0.5455, |
| "step": 660 |
| }, |
| { |
| "epoch": 1.5119887165021155, |
| "grad_norm": 0.548206592373879, |
| "learning_rate": 5e-06, |
| "loss": 0.5401, |
| "step": 670 |
| }, |
| { |
| "epoch": 1.5345557122708038, |
| "grad_norm": 0.5984744333510044, |
| "learning_rate": 5e-06, |
| "loss": 0.5343, |
| "step": 680 |
| }, |
| { |
| "epoch": 1.5571227080394923, |
| "grad_norm": 0.5448638212746664, |
| "learning_rate": 5e-06, |
| "loss": 0.5365, |
| "step": 690 |
| }, |
| { |
| "epoch": 1.5796897038081805, |
| "grad_norm": 0.71811935736393, |
| "learning_rate": 5e-06, |
| "loss": 0.5406, |
| "step": 700 |
| }, |
| { |
| "epoch": 1.6022566995768688, |
| "grad_norm": 0.6147265361036524, |
| "learning_rate": 5e-06, |
| "loss": 0.5436, |
| "step": 710 |
| }, |
| { |
| "epoch": 1.6248236953455573, |
| "grad_norm": 0.6151172472267726, |
| "learning_rate": 5e-06, |
| "loss": 0.5517, |
| "step": 720 |
| }, |
| { |
| "epoch": 1.6473906911142455, |
| "grad_norm": 0.5351871271616251, |
| "learning_rate": 5e-06, |
| "loss": 0.5349, |
| "step": 730 |
| }, |
| { |
| "epoch": 1.6699576868829338, |
| "grad_norm": 0.5779949382077424, |
| "learning_rate": 5e-06, |
| "loss": 0.5505, |
| "step": 740 |
| }, |
| { |
| "epoch": 1.692524682651622, |
| "grad_norm": 0.5678527924034793, |
| "learning_rate": 5e-06, |
| "loss": 0.5418, |
| "step": 750 |
| }, |
| { |
| "epoch": 1.7150916784203103, |
| "grad_norm": 0.6133517063554221, |
| "learning_rate": 5e-06, |
| "loss": 0.5409, |
| "step": 760 |
| }, |
| { |
| "epoch": 1.7376586741889986, |
| "grad_norm": 0.5512887094553734, |
| "learning_rate": 5e-06, |
| "loss": 0.5414, |
| "step": 770 |
| }, |
| { |
| "epoch": 1.7602256699576868, |
| "grad_norm": 0.5756847722448575, |
| "learning_rate": 5e-06, |
| "loss": 0.5375, |
| "step": 780 |
| }, |
| { |
| "epoch": 1.782792665726375, |
| "grad_norm": 0.5772546358400333, |
| "learning_rate": 5e-06, |
| "loss": 0.5411, |
| "step": 790 |
| }, |
| { |
| "epoch": 1.8053596614950633, |
| "grad_norm": 0.5518626311991142, |
| "learning_rate": 5e-06, |
| "loss": 0.5362, |
| "step": 800 |
| }, |
| { |
| "epoch": 1.8279266572637518, |
| "grad_norm": 0.6448522569287158, |
| "learning_rate": 5e-06, |
| "loss": 0.5468, |
| "step": 810 |
| }, |
| { |
| "epoch": 1.85049365303244, |
| "grad_norm": 0.6045234522347122, |
| "learning_rate": 5e-06, |
| "loss": 0.5342, |
| "step": 820 |
| }, |
| { |
| "epoch": 1.8730606488011283, |
| "grad_norm": 0.5521879456989965, |
| "learning_rate": 5e-06, |
| "loss": 0.5462, |
| "step": 830 |
| }, |
| { |
| "epoch": 1.8956276445698168, |
| "grad_norm": 0.5219603437514223, |
| "learning_rate": 5e-06, |
| "loss": 0.5461, |
| "step": 840 |
| }, |
| { |
| "epoch": 1.918194640338505, |
| "grad_norm": 0.5709844377651583, |
| "learning_rate": 5e-06, |
| "loss": 0.5341, |
| "step": 850 |
| }, |
| { |
| "epoch": 1.9407616361071933, |
| "grad_norm": 0.5530643235167051, |
| "learning_rate": 5e-06, |
| "loss": 0.539, |
| "step": 860 |
| }, |
| { |
| "epoch": 1.9633286318758816, |
| "grad_norm": 0.7365535258176252, |
| "learning_rate": 5e-06, |
| "loss": 0.5455, |
| "step": 870 |
| }, |
| { |
| "epoch": 1.9858956276445698, |
| "grad_norm": 0.5314913402288777, |
| "learning_rate": 5e-06, |
| "loss": 0.5437, |
| "step": 880 |
| }, |
| { |
| "epoch": 1.9994358251057829, |
| "eval_loss": 0.5873079895973206, |
| "eval_runtime": 700.5561, |
| "eval_samples_per_second": 17.045, |
| "eval_steps_per_second": 0.534, |
| "step": 886 |
| }, |
| { |
| "epoch": 2.008462623413258, |
| "grad_norm": 1.0407094363310498, |
| "learning_rate": 5e-06, |
| "loss": 0.5672, |
| "step": 890 |
| }, |
| { |
| "epoch": 2.0310296191819464, |
| "grad_norm": 0.725533268344312, |
| "learning_rate": 5e-06, |
| "loss": 0.4783, |
| "step": 900 |
| }, |
| { |
| "epoch": 2.0535966149506346, |
| "grad_norm": 0.6697434624450914, |
| "learning_rate": 5e-06, |
| "loss": 0.4801, |
| "step": 910 |
| }, |
| { |
| "epoch": 2.076163610719323, |
| "grad_norm": 0.628962091559047, |
| "learning_rate": 5e-06, |
| "loss": 0.4723, |
| "step": 920 |
| }, |
| { |
| "epoch": 2.098730606488011, |
| "grad_norm": 0.6500710841406606, |
| "learning_rate": 5e-06, |
| "loss": 0.4797, |
| "step": 930 |
| }, |
| { |
| "epoch": 2.1212976022566994, |
| "grad_norm": 0.5817194274894282, |
| "learning_rate": 5e-06, |
| "loss": 0.4837, |
| "step": 940 |
| }, |
| { |
| "epoch": 2.143864598025388, |
| "grad_norm": 0.6376315428985472, |
| "learning_rate": 5e-06, |
| "loss": 0.4856, |
| "step": 950 |
| }, |
| { |
| "epoch": 2.1664315937940763, |
| "grad_norm": 0.6153596517344019, |
| "learning_rate": 5e-06, |
| "loss": 0.4819, |
| "step": 960 |
| }, |
| { |
| "epoch": 2.1889985895627646, |
| "grad_norm": 1.3075863878475336, |
| "learning_rate": 5e-06, |
| "loss": 0.4799, |
| "step": 970 |
| }, |
| { |
| "epoch": 2.211565585331453, |
| "grad_norm": 0.5743877651303808, |
| "learning_rate": 5e-06, |
| "loss": 0.475, |
| "step": 980 |
| }, |
| { |
| "epoch": 2.234132581100141, |
| "grad_norm": 0.6512056940046367, |
| "learning_rate": 5e-06, |
| "loss": 0.475, |
| "step": 990 |
| }, |
| { |
| "epoch": 2.2566995768688294, |
| "grad_norm": 0.6685281956122264, |
| "learning_rate": 5e-06, |
| "loss": 0.4838, |
| "step": 1000 |
| }, |
| { |
| "epoch": 2.2792665726375176, |
| "grad_norm": 0.5886386733020537, |
| "learning_rate": 5e-06, |
| "loss": 0.4901, |
| "step": 1010 |
| }, |
| { |
| "epoch": 2.301833568406206, |
| "grad_norm": 0.6393741572968281, |
| "learning_rate": 5e-06, |
| "loss": 0.4845, |
| "step": 1020 |
| }, |
| { |
| "epoch": 2.324400564174894, |
| "grad_norm": 0.6578541657737053, |
| "learning_rate": 5e-06, |
| "loss": 0.4887, |
| "step": 1030 |
| }, |
| { |
| "epoch": 2.3469675599435824, |
| "grad_norm": 0.6062775207381352, |
| "learning_rate": 5e-06, |
| "loss": 0.4868, |
| "step": 1040 |
| }, |
| { |
| "epoch": 2.3695345557122707, |
| "grad_norm": 0.5908593687028332, |
| "learning_rate": 5e-06, |
| "loss": 0.4845, |
| "step": 1050 |
| }, |
| { |
| "epoch": 2.392101551480959, |
| "grad_norm": 0.5576072857835034, |
| "learning_rate": 5e-06, |
| "loss": 0.4838, |
| "step": 1060 |
| }, |
| { |
| "epoch": 2.414668547249647, |
| "grad_norm": 0.6001411858096596, |
| "learning_rate": 5e-06, |
| "loss": 0.4851, |
| "step": 1070 |
| }, |
| { |
| "epoch": 2.4372355430183354, |
| "grad_norm": 0.5899721132030763, |
| "learning_rate": 5e-06, |
| "loss": 0.4882, |
| "step": 1080 |
| }, |
| { |
| "epoch": 2.459802538787024, |
| "grad_norm": 0.5662098736637078, |
| "learning_rate": 5e-06, |
| "loss": 0.4866, |
| "step": 1090 |
| }, |
| { |
| "epoch": 2.4823695345557124, |
| "grad_norm": 0.6263815123822596, |
| "learning_rate": 5e-06, |
| "loss": 0.4898, |
| "step": 1100 |
| }, |
| { |
| "epoch": 2.5049365303244007, |
| "grad_norm": 0.5650596862227139, |
| "learning_rate": 5e-06, |
| "loss": 0.4929, |
| "step": 1110 |
| }, |
| { |
| "epoch": 2.527503526093089, |
| "grad_norm": 0.6004709195173688, |
| "learning_rate": 5e-06, |
| "loss": 0.4836, |
| "step": 1120 |
| }, |
| { |
| "epoch": 2.550070521861777, |
| "grad_norm": 0.5797990944642266, |
| "learning_rate": 5e-06, |
| "loss": 0.4887, |
| "step": 1130 |
| }, |
| { |
| "epoch": 2.5726375176304654, |
| "grad_norm": 0.5662965238939124, |
| "learning_rate": 5e-06, |
| "loss": 0.4868, |
| "step": 1140 |
| }, |
| { |
| "epoch": 2.5952045133991537, |
| "grad_norm": 0.5701897631841572, |
| "learning_rate": 5e-06, |
| "loss": 0.4876, |
| "step": 1150 |
| }, |
| { |
| "epoch": 2.617771509167842, |
| "grad_norm": 0.5724201270234823, |
| "learning_rate": 5e-06, |
| "loss": 0.4911, |
| "step": 1160 |
| }, |
| { |
| "epoch": 2.64033850493653, |
| "grad_norm": 0.5806978587181322, |
| "learning_rate": 5e-06, |
| "loss": 0.4802, |
| "step": 1170 |
| }, |
| { |
| "epoch": 2.6629055007052185, |
| "grad_norm": 0.677014060449985, |
| "learning_rate": 5e-06, |
| "loss": 0.4911, |
| "step": 1180 |
| }, |
| { |
| "epoch": 2.685472496473907, |
| "grad_norm": 0.5885050384953632, |
| "learning_rate": 5e-06, |
| "loss": 0.4919, |
| "step": 1190 |
| }, |
| { |
| "epoch": 2.7080394922425954, |
| "grad_norm": 0.6288226465091906, |
| "learning_rate": 5e-06, |
| "loss": 0.4905, |
| "step": 1200 |
| }, |
| { |
| "epoch": 2.7306064880112837, |
| "grad_norm": 0.6132564778914088, |
| "learning_rate": 5e-06, |
| "loss": 0.4918, |
| "step": 1210 |
| }, |
| { |
| "epoch": 2.753173483779972, |
| "grad_norm": 0.5577590661098744, |
| "learning_rate": 5e-06, |
| "loss": 0.4883, |
| "step": 1220 |
| }, |
| { |
| "epoch": 2.77574047954866, |
| "grad_norm": 0.5928024830462557, |
| "learning_rate": 5e-06, |
| "loss": 0.4949, |
| "step": 1230 |
| }, |
| { |
| "epoch": 2.7983074753173485, |
| "grad_norm": 0.5891883868519987, |
| "learning_rate": 5e-06, |
| "loss": 0.4933, |
| "step": 1240 |
| }, |
| { |
| "epoch": 2.8208744710860367, |
| "grad_norm": 0.5514513862499895, |
| "learning_rate": 5e-06, |
| "loss": 0.4933, |
| "step": 1250 |
| }, |
| { |
| "epoch": 2.843441466854725, |
| "grad_norm": 0.644157488882428, |
| "learning_rate": 5e-06, |
| "loss": 0.4951, |
| "step": 1260 |
| }, |
| { |
| "epoch": 2.8660084626234132, |
| "grad_norm": 0.56585151560031, |
| "learning_rate": 5e-06, |
| "loss": 0.4922, |
| "step": 1270 |
| }, |
| { |
| "epoch": 2.8885754583921015, |
| "grad_norm": 0.5543334052359794, |
| "learning_rate": 5e-06, |
| "loss": 0.4903, |
| "step": 1280 |
| }, |
| { |
| "epoch": 2.9111424541607898, |
| "grad_norm": 0.5945436585103447, |
| "learning_rate": 5e-06, |
| "loss": 0.494, |
| "step": 1290 |
| }, |
| { |
| "epoch": 2.933709449929478, |
| "grad_norm": 0.5941134704698948, |
| "learning_rate": 5e-06, |
| "loss": 0.4895, |
| "step": 1300 |
| }, |
| { |
| "epoch": 2.9562764456981663, |
| "grad_norm": 0.614494712786801, |
| "learning_rate": 5e-06, |
| "loss": 0.4934, |
| "step": 1310 |
| }, |
| { |
| "epoch": 2.9788434414668545, |
| "grad_norm": 0.5645975729044889, |
| "learning_rate": 5e-06, |
| "loss": 0.4899, |
| "step": 1320 |
| }, |
| { |
| "epoch": 2.9991537376586743, |
| "eval_loss": 0.6007061004638672, |
| "eval_runtime": 701.8414, |
| "eval_samples_per_second": 17.014, |
| "eval_steps_per_second": 0.533, |
| "step": 1329 |
| }, |
| { |
| "epoch": 2.9991537376586743, |
| "step": 1329, |
| "total_flos": 5064195066298368.0, |
| "train_loss": 0.5478489666257791, |
| "train_runtime": 122314.7192, |
| "train_samples_per_second": 5.564, |
| "train_steps_per_second": 0.011 |
| } |
| ], |
| "logging_steps": 10, |
| "max_steps": 1329, |
| "num_input_tokens_seen": 0, |
| "num_train_epochs": 3, |
| "save_steps": 500, |
| "stateful_callbacks": { |
| "TrainerControl": { |
| "args": { |
| "should_epoch_stop": false, |
| "should_evaluate": false, |
| "should_log": false, |
| "should_save": true, |
| "should_training_stop": true |
| }, |
| "attributes": {} |
| } |
| }, |
| "total_flos": 5064195066298368.0, |
| "train_batch_size": 4, |
| "trial_name": null, |
| "trial_params": null |
| } |