{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 6.0,
  "eval_steps": 500,
  "global_step": 12498,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.04800768122899664,
      "grad_norm": 3.5230019092559814,
      "learning_rate": 4.995343254920787e-05,
      "loss": 3.834,
      "step": 100
    },
    {
      "epoch": 0.09601536245799328,
      "grad_norm": 2.2122299671173096,
      "learning_rate": 4.990542486797888e-05,
      "loss": 1.006,
      "step": 200
    },
    {
      "epoch": 0.14402304368698993,
      "grad_norm": 1.6999797821044922,
      "learning_rate": 4.985741718674988e-05,
      "loss": 0.795,
      "step": 300
    },
    {
      "epoch": 0.19203072491598655,
      "grad_norm": 1.8523478507995605,
      "learning_rate": 4.980940950552089e-05,
      "loss": 0.7485,
      "step": 400
    },
    {
      "epoch": 0.2400384061449832,
      "grad_norm": 1.6911518573760986,
      "learning_rate": 4.9761401824291885e-05,
      "loss": 0.7051,
      "step": 500
    },
    {
      "epoch": 0.28804608737397985,
      "grad_norm": 1.78285551071167,
      "learning_rate": 4.971339414306289e-05,
      "loss": 0.6518,
      "step": 600
    },
    {
      "epoch": 0.3360537686029765,
      "grad_norm": 1.8025232553482056,
      "learning_rate": 4.9665386461833895e-05,
      "loss": 0.6429,
      "step": 700
    },
    {
      "epoch": 0.3840614498319731,
      "grad_norm": 1.6609948873519897,
      "learning_rate": 4.96173787806049e-05,
      "loss": 0.6148,
      "step": 800
    },
    {
      "epoch": 0.4320691310609698,
      "grad_norm": 1.7044763565063477,
      "learning_rate": 4.9569371099375905e-05,
      "loss": 0.6052,
      "step": 900
    },
    {
      "epoch": 0.4800768122899664,
      "grad_norm": 1.742863655090332,
      "learning_rate": 4.9521363418146903e-05,
      "loss": 0.599,
      "step": 1000
    },
    {
      "epoch": 0.528084493518963,
      "grad_norm": 1.6556651592254639,
      "learning_rate": 4.947335573691791e-05,
      "loss": 0.5772,
      "step": 1100
    },
    {
      "epoch": 0.5760921747479597,
      "grad_norm": 1.578718662261963,
      "learning_rate": 4.9425348055688914e-05,
      "loss": 0.5617,
      "step": 1200
    },
    {
      "epoch": 0.6240998559769563,
      "grad_norm": 1.6328682899475098,
      "learning_rate": 4.937734037445992e-05,
      "loss": 0.5566,
      "step": 1300
    },
    {
      "epoch": 0.672107537205953,
      "grad_norm": 1.775172233581543,
      "learning_rate": 4.932981277004321e-05,
      "loss": 0.5474,
      "step": 1400
    },
    {
      "epoch": 0.7201152184349496,
      "grad_norm": 1.4435912370681763,
      "learning_rate": 4.928180508881422e-05,
      "loss": 0.5342,
      "step": 1500
    },
    {
      "epoch": 0.7681228996639462,
      "grad_norm": 1.2456103563308716,
      "learning_rate": 4.9233797407585215e-05,
      "loss": 0.5237,
      "step": 1600
    },
    {
      "epoch": 0.8161305808929429,
      "grad_norm": 1.5610588788986206,
      "learning_rate": 4.918578972635622e-05,
      "loss": 0.5335,
      "step": 1700
    },
    {
      "epoch": 0.8641382621219396,
      "grad_norm": 1.677467703819275,
      "learning_rate": 4.9137782045127225e-05,
      "loss": 0.5052,
      "step": 1800
    },
    {
      "epoch": 0.9121459433509361,
      "grad_norm": 1.3189855813980103,
      "learning_rate": 4.908977436389823e-05,
      "loss": 0.4929,
      "step": 1900
    },
    {
      "epoch": 0.9601536245799328,
      "grad_norm": 1.320538878440857,
      "learning_rate": 4.904176668266923e-05,
      "loss": 0.4973,
      "step": 2000
    },
    {
      "epoch": 1.0081613058089294,
      "grad_norm": 1.2392332553863525,
      "learning_rate": 4.8993759001440233e-05,
      "loss": 0.4879,
      "step": 2100
    },
    {
      "epoch": 1.056168987037926,
      "grad_norm": 1.2972139120101929,
      "learning_rate": 4.894575132021124e-05,
      "loss": 0.4339,
      "step": 2200
    },
    {
      "epoch": 1.1041766682669227,
      "grad_norm": 1.763129711151123,
      "learning_rate": 4.8897743638982243e-05,
      "loss": 0.4215,
      "step": 2300
    },
    {
      "epoch": 1.1521843494959194,
      "grad_norm": 1.8901598453521729,
      "learning_rate": 4.884973595775325e-05,
      "loss": 0.4386,
      "step": 2400
    },
    {
      "epoch": 1.2001920307249159,
      "grad_norm": 1.1455730199813843,
      "learning_rate": 4.880172827652425e-05,
      "loss": 0.4414,
      "step": 2500
    },
    {
      "epoch": 1.2481997119539125,
      "grad_norm": 1.3231408596038818,
      "learning_rate": 4.875372059529525e-05,
      "loss": 0.4232,
      "step": 2600
    },
    {
      "epoch": 1.2962073931829092,
      "grad_norm": 1.544893503189087,
      "learning_rate": 4.870571291406626e-05,
      "loss": 0.4338,
      "step": 2700
    },
    {
      "epoch": 1.344215074411906,
      "grad_norm": 1.4648122787475586,
      "learning_rate": 4.8657705232837255e-05,
      "loss": 0.4273,
      "step": 2800
    },
    {
      "epoch": 1.3922227556409026,
      "grad_norm": 1.405002474784851,
      "learning_rate": 4.860969755160826e-05,
      "loss": 0.4337,
      "step": 2900
    },
    {
      "epoch": 1.4402304368698993,
      "grad_norm": 1.2591880559921265,
      "learning_rate": 4.8561689870379265e-05,
      "loss": 0.4234,
      "step": 3000
    },
    {
      "epoch": 1.488238118098896,
      "grad_norm": 1.422620415687561,
      "learning_rate": 4.8513682189150264e-05,
      "loss": 0.4239,
      "step": 3100
    },
    {
      "epoch": 1.5362457993278924,
      "grad_norm": 1.585553526878357,
      "learning_rate": 4.846567450792127e-05,
      "loss": 0.4254,
      "step": 3200
    },
    {
      "epoch": 1.584253480556889,
      "grad_norm": 1.1293108463287354,
      "learning_rate": 4.8417666826692274e-05,
      "loss": 0.422,
      "step": 3300
    },
    {
      "epoch": 1.6322611617858858,
      "grad_norm": 1.265154242515564,
      "learning_rate": 4.836965914546328e-05,
      "loss": 0.4194,
      "step": 3400
    },
    {
      "epoch": 1.6802688430148822,
      "grad_norm": 1.381218433380127,
      "learning_rate": 4.832165146423428e-05,
      "loss": 0.4321,
      "step": 3500
    },
    {
      "epoch": 1.7282765242438791,
      "grad_norm": 1.3752981424331665,
      "learning_rate": 4.827364378300528e-05,
      "loss": 0.4151,
      "step": 3600
    },
    {
      "epoch": 1.7762842054728756,
      "grad_norm": 1.1908366680145264,
      "learning_rate": 4.822563610177629e-05,
      "loss": 0.4203,
      "step": 3700
    },
    {
      "epoch": 1.8242918867018723,
      "grad_norm": 1.2795414924621582,
      "learning_rate": 4.8177628420547285e-05,
      "loss": 0.414,
      "step": 3800
    },
    {
      "epoch": 1.872299567930869,
      "grad_norm": 1.2664345502853394,
      "learning_rate": 4.812962073931829e-05,
      "loss": 0.4102,
      "step": 3900
    },
    {
      "epoch": 1.9203072491598656,
      "grad_norm": 1.1444271802902222,
      "learning_rate": 4.8081613058089295e-05,
      "loss": 0.4066,
      "step": 4000
    },
    {
      "epoch": 1.9683149303888623,
      "grad_norm": 1.233847737312317,
      "learning_rate": 4.80336053768603e-05,
      "loss": 0.4235,
      "step": 4100
    },
    {
      "epoch": 2.0163226116178588,
      "grad_norm": 1.1085718870162964,
      "learning_rate": 4.79855976956313e-05,
      "loss": 0.3852,
      "step": 4200
    },
    {
      "epoch": 2.0643302928468557,
      "grad_norm": 1.2250890731811523,
      "learning_rate": 4.7937590014402304e-05,
      "loss": 0.334,
      "step": 4300
    },
    {
      "epoch": 2.112337974075852,
      "grad_norm": 1.3685556650161743,
      "learning_rate": 4.788958233317331e-05,
      "loss": 0.3273,
      "step": 4400
    },
    {
      "epoch": 2.160345655304849,
      "grad_norm": 1.2295827865600586,
      "learning_rate": 4.7841574651944314e-05,
      "loss": 0.3332,
      "step": 4500
    },
    {
      "epoch": 2.2083533365338455,
      "grad_norm": 1.3651865720748901,
      "learning_rate": 4.779356697071531e-05,
      "loss": 0.3355,
      "step": 4600
    },
    {
      "epoch": 2.256361017762842,
      "grad_norm": 1.414611577987671,
      "learning_rate": 4.774555928948632e-05,
      "loss": 0.3347,
      "step": 4700
    },
    {
      "epoch": 2.304368698991839,
      "grad_norm": 1.1170624494552612,
      "learning_rate": 4.769755160825732e-05,
      "loss": 0.3363,
      "step": 4800
    },
    {
      "epoch": 2.3523763802208353,
      "grad_norm": 1.4784187078475952,
      "learning_rate": 4.764954392702833e-05,
      "loss": 0.3497,
      "step": 4900
    },
    {
      "epoch": 2.4003840614498317,
      "grad_norm": 1.3222328424453735,
      "learning_rate": 4.7601536245799325e-05,
      "loss": 0.3319,
      "step": 5000
    },
    {
      "epoch": 2.4483917426788286,
      "grad_norm": 1.4248158931732178,
      "learning_rate": 4.755352856457033e-05,
      "loss": 0.3456,
      "step": 5100
    },
    {
      "epoch": 2.496399423907825,
      "grad_norm": 1.3521333932876587,
      "learning_rate": 4.7505520883341336e-05,
      "loss": 0.346,
      "step": 5200
    },
    {
      "epoch": 2.544407105136822,
      "grad_norm": 1.3070430755615234,
      "learning_rate": 4.745751320211234e-05,
      "loss": 0.338,
      "step": 5300
    },
    {
      "epoch": 2.5924147863658185,
      "grad_norm": 1.2667192220687866,
      "learning_rate": 4.7409505520883346e-05,
      "loss": 0.3508,
      "step": 5400
    },
    {
      "epoch": 2.6404224675948154,
      "grad_norm": 1.404773473739624,
      "learning_rate": 4.7361497839654344e-05,
      "loss": 0.3376,
      "step": 5500
    },
    {
      "epoch": 2.688430148823812,
      "grad_norm": 1.233578085899353,
      "learning_rate": 4.731349015842535e-05,
      "loss": 0.3432,
      "step": 5600
    },
    {
      "epoch": 2.7364378300528083,
      "grad_norm": 1.2811566591262817,
      "learning_rate": 4.7265482477196354e-05,
      "loss": 0.3498,
      "step": 5700
    },
    {
      "epoch": 2.784445511281805,
      "grad_norm": 1.1714744567871094,
      "learning_rate": 4.721747479596736e-05,
      "loss": 0.3451,
      "step": 5800
    },
    {
      "epoch": 2.8324531925108016,
      "grad_norm": 1.1416313648223877,
      "learning_rate": 4.716946711473836e-05,
      "loss": 0.3427,
      "step": 5900
    },
    {
      "epoch": 2.8804608737397985,
      "grad_norm": 1.3504301309585571,
      "learning_rate": 4.712145943350936e-05,
      "loss": 0.3477,
      "step": 6000
    },
    {
      "epoch": 2.928468554968795,
      "grad_norm": 1.4579728841781616,
      "learning_rate": 4.707345175228037e-05,
      "loss": 0.3473,
      "step": 6100
    },
    {
      "epoch": 2.976476236197792,
      "grad_norm": 1.1714892387390137,
      "learning_rate": 4.702544407105137e-05,
      "loss": 0.341,
      "step": 6200
    },
    {
      "epoch": 3.0244839174267883,
      "grad_norm": 1.200076699256897,
      "learning_rate": 4.697743638982237e-05,
      "loss": 0.2989,
      "step": 6300
    },
    {
      "epoch": 3.072491598655785,
      "grad_norm": 1.3686045408248901,
      "learning_rate": 4.6929428708593376e-05,
      "loss": 0.2606,
      "step": 6400
    },
    {
      "epoch": 3.1204992798847817,
      "grad_norm": 1.2546569108963013,
      "learning_rate": 4.688142102736438e-05,
      "loss": 0.2635,
      "step": 6500
    },
    {
      "epoch": 3.168506961113778,
      "grad_norm": 1.3534624576568604,
      "learning_rate": 4.6833413346135386e-05,
      "loss": 0.2606,
      "step": 6600
    },
    {
      "epoch": 3.2165146423427746,
      "grad_norm": 1.4877707958221436,
      "learning_rate": 4.678540566490639e-05,
      "loss": 0.2685,
      "step": 6700
    },
    {
      "epoch": 3.2645223235717715,
      "grad_norm": 1.4656774997711182,
      "learning_rate": 4.673739798367739e-05,
      "loss": 0.272,
      "step": 6800
    },
    {
      "epoch": 3.312530004800768,
      "grad_norm": 1.24815034866333,
      "learning_rate": 4.6689390302448394e-05,
      "loss": 0.2703,
      "step": 6900
    },
    {
      "epoch": 3.360537686029765,
      "grad_norm": 1.1822447776794434,
      "learning_rate": 4.66413826212194e-05,
      "loss": 0.2702,
      "step": 7000
    },
    {
      "epoch": 3.4085453672587613,
      "grad_norm": 1.4170218706130981,
      "learning_rate": 4.6593374939990404e-05,
      "loss": 0.2716,
      "step": 7100
    },
    {
      "epoch": 3.4565530484877582,
      "grad_norm": 1.360338568687439,
      "learning_rate": 4.65453672587614e-05,
      "loss": 0.2717,
      "step": 7200
    },
    {
      "epoch": 3.5045607297167547,
      "grad_norm": 1.4744322299957275,
      "learning_rate": 4.649735957753241e-05,
      "loss": 0.2795,
      "step": 7300
    },
    {
      "epoch": 3.552568410945751,
      "grad_norm": 1.5080569982528687,
      "learning_rate": 4.644935189630341e-05,
      "loss": 0.2753,
      "step": 7400
    },
    {
      "epoch": 3.600576092174748,
      "grad_norm": 1.5707645416259766,
      "learning_rate": 4.640134421507442e-05,
      "loss": 0.278,
      "step": 7500
    },
    {
      "epoch": 3.6485837734037445,
      "grad_norm": 1.4137930870056152,
      "learning_rate": 4.6353336533845416e-05,
      "loss": 0.2756,
      "step": 7600
    },
    {
      "epoch": 3.696591454632741,
      "grad_norm": 1.4156709909439087,
      "learning_rate": 4.630532885261642e-05,
      "loss": 0.2841,
      "step": 7700
    },
    {
      "epoch": 3.744599135861738,
      "grad_norm": 1.4720118045806885,
      "learning_rate": 4.6257321171387426e-05,
      "loss": 0.2767,
      "step": 7800
    },
    {
      "epoch": 3.7926068170907348,
      "grad_norm": 1.2695891857147217,
      "learning_rate": 4.620931349015843e-05,
      "loss": 0.2762,
      "step": 7900
    },
    {
      "epoch": 3.8406144983197312,
      "grad_norm": 1.439794659614563,
      "learning_rate": 4.6161305808929436e-05,
      "loss": 0.2799,
      "step": 8000
    },
    {
      "epoch": 3.8886221795487277,
      "grad_norm": 1.3036625385284424,
      "learning_rate": 4.611377820451272e-05,
      "loss": 0.2728,
      "step": 8100
    },
    {
      "epoch": 3.9366298607777246,
      "grad_norm": 1.3942623138427734,
      "learning_rate": 4.606577052328373e-05,
      "loss": 0.2779,
      "step": 8200
    },
    {
      "epoch": 3.984637542006721,
      "grad_norm": 1.3402965068817139,
      "learning_rate": 4.6017762842054726e-05,
      "loss": 0.2759,
      "step": 8300
    },
    {
      "epoch": 4.0326452232357175,
      "grad_norm": 1.2347018718719482,
      "learning_rate": 4.596975516082573e-05,
      "loss": 0.2239,
      "step": 8400
    },
    {
      "epoch": 4.080652904464714,
      "grad_norm": 1.2590075731277466,
      "learning_rate": 4.5922227556409024e-05,
      "loss": 0.1964,
      "step": 8500
    },
    {
      "epoch": 4.128660585693711,
      "grad_norm": 1.2734559774398804,
      "learning_rate": 4.587421987518003e-05,
      "loss": 0.2033,
      "step": 8600
    },
    {
      "epoch": 4.176668266922707,
      "grad_norm": 1.3685240745544434,
      "learning_rate": 4.5826212193951034e-05,
      "loss": 0.2039,
      "step": 8700
    },
    {
      "epoch": 4.224675948151704,
      "grad_norm": 1.2431707382202148,
      "learning_rate": 4.577820451272204e-05,
      "loss": 0.2031,
      "step": 8800
    },
    {
      "epoch": 4.272683629380701,
      "grad_norm": 1.3057847023010254,
      "learning_rate": 4.573019683149304e-05,
      "loss": 0.1993,
      "step": 8900
    },
    {
      "epoch": 4.320691310609698,
      "grad_norm": 1.3426510095596313,
      "learning_rate": 4.568218915026404e-05,
      "loss": 0.2103,
      "step": 9000
    },
    {
      "epoch": 4.368698991838694,
      "grad_norm": 1.4993879795074463,
      "learning_rate": 4.563418146903505e-05,
      "loss": 0.2038,
      "step": 9100
    },
    {
      "epoch": 4.416706673067691,
      "grad_norm": 1.1477452516555786,
      "learning_rate": 4.558617378780605e-05,
      "loss": 0.2087,
      "step": 9200
    },
    {
      "epoch": 4.464714354296688,
      "grad_norm": 1.2744262218475342,
      "learning_rate": 4.553816610657706e-05,
      "loss": 0.2128,
      "step": 9300
    },
    {
      "epoch": 4.512722035525684,
      "grad_norm": 1.2372483015060425,
      "learning_rate": 4.5490158425348056e-05,
      "loss": 0.2141,
      "step": 9400
    },
    {
      "epoch": 4.560729716754681,
      "grad_norm": 1.5185002088546753,
      "learning_rate": 4.544215074411906e-05,
      "loss": 0.2163,
      "step": 9500
    },
    {
      "epoch": 4.608737397983678,
      "grad_norm": 1.2318006753921509,
      "learning_rate": 4.5394143062890066e-05,
      "loss": 0.2142,
      "step": 9600
    },
    {
      "epoch": 4.656745079212674,
      "grad_norm": 1.0812604427337646,
      "learning_rate": 4.534613538166107e-05,
      "loss": 0.2179,
      "step": 9700
    },
    {
      "epoch": 4.704752760441671,
      "grad_norm": 1.460476279258728,
      "learning_rate": 4.529812770043207e-05,
      "loss": 0.2203,
      "step": 9800
    },
    {
      "epoch": 4.7527604416706675,
      "grad_norm": 1.4925509691238403,
      "learning_rate": 4.5250120019203074e-05,
      "loss": 0.2167,
      "step": 9900
    },
    {
      "epoch": 4.8007681228996635,
      "grad_norm": 1.4219658374786377,
      "learning_rate": 4.520211233797408e-05,
      "loss": 0.2163,
      "step": 10000
    },
    {
      "epoch": 4.84877580412866,
      "grad_norm": 1.0717830657958984,
      "learning_rate": 4.5154104656745084e-05,
      "loss": 0.2138,
      "step": 10100
    },
    {
      "epoch": 4.896783485357657,
      "grad_norm": 1.0514252185821533,
      "learning_rate": 4.510609697551608e-05,
      "loss": 0.2187,
      "step": 10200
    },
    {
      "epoch": 4.944791166586654,
      "grad_norm": 1.1718051433563232,
      "learning_rate": 4.505808929428709e-05,
      "loss": 0.2179,
      "step": 10300
    },
    {
      "epoch": 4.99279884781565,
      "grad_norm": 1.3955721855163574,
      "learning_rate": 4.501008161305809e-05,
      "loss": 0.2222,
      "step": 10400
    },
    {
      "epoch": 5.040806529044647,
      "grad_norm": 1.0917434692382812,
      "learning_rate": 4.49620739318291e-05,
      "loss": 0.1556,
      "step": 10500
    },
    {
      "epoch": 5.088814210273644,
      "grad_norm": 1.2517110109329224,
      "learning_rate": 4.49140662506001e-05,
      "loss": 0.1442,
      "step": 10600
    },
    {
      "epoch": 5.13682189150264,
      "grad_norm": 1.4139866828918457,
      "learning_rate": 4.48660585693711e-05,
      "loss": 0.1456,
      "step": 10700
    },
    {
      "epoch": 5.184829572731637,
      "grad_norm": 1.4004515409469604,
      "learning_rate": 4.4818050888142106e-05,
      "loss": 0.1475,
      "step": 10800
    },
    {
      "epoch": 5.232837253960634,
      "grad_norm": 1.3576511144638062,
      "learning_rate": 4.477004320691311e-05,
      "loss": 0.1475,
      "step": 10900
    },
    {
      "epoch": 5.280844935189631,
      "grad_norm": 1.2428905963897705,
      "learning_rate": 4.4722035525684116e-05,
      "loss": 0.1505,
      "step": 11000
    },
    {
      "epoch": 5.328852616418627,
      "grad_norm": 1.577462077140808,
      "learning_rate": 4.4674027844455114e-05,
      "loss": 0.1533,
      "step": 11100
    },
    {
      "epoch": 5.376860297647624,
      "grad_norm": 1.5575320720672607,
      "learning_rate": 4.462602016322612e-05,
      "loss": 0.1545,
      "step": 11200
    },
    {
      "epoch": 5.4248679788766205,
      "grad_norm": 1.6392663717269897,
      "learning_rate": 4.4578012481997124e-05,
      "loss": 0.1535,
      "step": 11300
    },
    {
      "epoch": 5.4728756601056165,
      "grad_norm": 1.5311086177825928,
      "learning_rate": 4.453000480076813e-05,
      "loss": 0.1579,
      "step": 11400
    },
    {
      "epoch": 5.5208833413346134,
      "grad_norm": 1.2279739379882812,
      "learning_rate": 4.448199711953913e-05,
      "loss": 0.1616,
      "step": 11500
    },
    {
      "epoch": 5.56889102256361,
      "grad_norm": 1.48116934299469,
      "learning_rate": 4.443398943831013e-05,
      "loss": 0.1603,
      "step": 11600
    },
    {
      "epoch": 5.616898703792607,
      "grad_norm": 1.2573692798614502,
      "learning_rate": 4.438598175708114e-05,
      "loss": 0.1615,
      "step": 11700
    },
    {
      "epoch": 5.664906385021603,
      "grad_norm": 1.3175160884857178,
      "learning_rate": 4.433797407585214e-05,
      "loss": 0.1618,
      "step": 11800
    },
    {
      "epoch": 5.7129140662506,
      "grad_norm": 1.3634432554244995,
      "learning_rate": 4.428996639462314e-05,
      "loss": 0.1634,
      "step": 11900
    },
    {
      "epoch": 5.760921747479597,
      "grad_norm": 1.4681994915008545,
      "learning_rate": 4.4241958713394146e-05,
      "loss": 0.1644,
      "step": 12000
    },
    {
      "epoch": 5.808929428708593,
      "grad_norm": 2.1259870529174805,
      "learning_rate": 4.419395103216515e-05,
      "loss": 0.1637,
      "step": 12100
    },
    {
      "epoch": 5.85693710993759,
      "grad_norm": 1.3507694005966187,
      "learning_rate": 4.4145943350936156e-05,
      "loss": 0.1676,
      "step": 12200
    },
    {
      "epoch": 5.904944791166587,
      "grad_norm": 1.3994966745376587,
      "learning_rate": 4.4097935669707155e-05,
      "loss": 0.1682,
      "step": 12300
    },
    {
      "epoch": 5.952952472395584,
      "grad_norm": 1.37490713596344,
      "learning_rate": 4.404992798847816e-05,
      "loss": 0.1632,
      "step": 12400
    }
  ],
  "logging_steps": 100,
  "max_steps": 104150,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 10000,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.186514987204802e+20,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null
}