{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 812,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0012315270935960591,
      "grad_norm": 0.2949771583080292,
      "learning_rate": 2.4390243902439027e-06,
      "loss": 0.9838,
      "step": 1
    },
    {
      "epoch": 0.006157635467980296,
      "grad_norm": 0.3101727366447449,
      "learning_rate": 1.2195121951219513e-05,
      "loss": 0.9644,
      "step": 5
    },
    {
      "epoch": 0.012315270935960592,
      "grad_norm": 0.3715027868747711,
      "learning_rate": 2.4390243902439026e-05,
      "loss": 0.9825,
      "step": 10
    },
    {
      "epoch": 0.01847290640394089,
      "grad_norm": 0.11249574273824692,
      "learning_rate": 3.6585365853658535e-05,
      "loss": 0.9393,
      "step": 15
    },
    {
      "epoch": 0.024630541871921183,
      "grad_norm": 0.08815101534128189,
      "learning_rate": 4.878048780487805e-05,
      "loss": 0.9236,
      "step": 20
    },
    {
      "epoch": 0.03078817733990148,
      "grad_norm": 0.10426054894924164,
      "learning_rate": 6.097560975609756e-05,
      "loss": 0.9237,
      "step": 25
    },
    {
      "epoch": 0.03694581280788178,
      "grad_norm": 0.1105574294924736,
      "learning_rate": 7.317073170731707e-05,
      "loss": 0.9153,
      "step": 30
    },
    {
      "epoch": 0.04310344827586207,
      "grad_norm": 0.09308202564716339,
      "learning_rate": 8.53658536585366e-05,
      "loss": 0.9281,
      "step": 35
    },
    {
      "epoch": 0.04926108374384237,
      "grad_norm": 0.09463588148355484,
      "learning_rate": 9.75609756097561e-05,
      "loss": 0.9157,
      "step": 40
    },
    {
      "epoch": 0.05541871921182266,
      "grad_norm": 0.11583919823169708,
      "learning_rate": 0.00010975609756097563,
      "loss": 0.8853,
      "step": 45
    },
    {
      "epoch": 0.06157635467980296,
      "grad_norm": 0.09693233668804169,
      "learning_rate": 0.00012195121951219512,
      "loss": 0.8707,
      "step": 50
    },
    {
      "epoch": 0.06773399014778325,
      "grad_norm": 0.0934528186917305,
      "learning_rate": 0.00013414634146341464,
      "loss": 0.8749,
      "step": 55
    },
    {
      "epoch": 0.07389162561576355,
      "grad_norm": 0.10069940984249115,
      "learning_rate": 0.00014634146341463414,
      "loss": 0.8685,
      "step": 60
    },
    {
      "epoch": 0.08004926108374384,
      "grad_norm": 0.09800374507904053,
      "learning_rate": 0.00015853658536585366,
      "loss": 0.8713,
      "step": 65
    },
    {
      "epoch": 0.08620689655172414,
      "grad_norm": 0.12216204404830933,
      "learning_rate": 0.0001707317073170732,
      "loss": 0.8685,
      "step": 70
    },
    {
      "epoch": 0.09236453201970443,
      "grad_norm": 0.09282781183719635,
      "learning_rate": 0.0001829268292682927,
      "loss": 0.8689,
      "step": 75
    },
    {
      "epoch": 0.09852216748768473,
      "grad_norm": 0.09639699012041092,
      "learning_rate": 0.0001951219512195122,
      "loss": 0.8815,
      "step": 80
    },
    {
      "epoch": 0.10467980295566502,
      "grad_norm": 0.0974295362830162,
      "learning_rate": 0.0001999916658654738,
      "loss": 0.8761,
      "step": 85
    },
    {
      "epoch": 0.11083743842364532,
      "grad_norm": 0.08732698857784271,
      "learning_rate": 0.0001999407400739705,
      "loss": 0.8938,
      "step": 90
    },
    {
      "epoch": 0.11699507389162561,
      "grad_norm": 0.09193898737430573,
      "learning_rate": 0.00019984354211555644,
      "loss": 0.851,
      "step": 95
    },
    {
      "epoch": 0.12315270935960591,
      "grad_norm": 0.09586363285779953,
      "learning_rate": 0.00019970011699250152,
      "loss": 0.9015,
      "step": 100
    },
    {
      "epoch": 0.12931034482758622,
      "grad_norm": 0.10461977124214172,
      "learning_rate": 0.00019951053111006976,
      "loss": 0.8593,
      "step": 105
    },
    {
      "epoch": 0.1354679802955665,
      "grad_norm": 0.10003778338432312,
      "learning_rate": 0.00019927487224577402,
      "loss": 0.8596,
      "step": 110
    },
    {
      "epoch": 0.1416256157635468,
      "grad_norm": 0.08891289681196213,
      "learning_rate": 0.0001989932495087353,
      "loss": 0.8813,
      "step": 115
    },
    {
      "epoch": 0.1477832512315271,
      "grad_norm": 0.09503619372844696,
      "learning_rate": 0.0001986657932891657,
      "loss": 0.8774,
      "step": 120
    },
    {
      "epoch": 0.1539408866995074,
      "grad_norm": 0.08597695827484131,
      "learning_rate": 0.0001982926551979982,
      "loss": 0.85,
      "step": 125
    },
    {
      "epoch": 0.16009852216748768,
      "grad_norm": 0.0924694836139679,
      "learning_rate": 0.00019787400799669154,
      "loss": 0.8549,
      "step": 130
    },
    {
      "epoch": 0.16625615763546797,
      "grad_norm": 0.10183464735746384,
      "learning_rate": 0.00019741004551724207,
      "loss": 0.8501,
      "step": 135
    },
    {
      "epoch": 0.1724137931034483,
      "grad_norm": 0.11476742476224899,
      "learning_rate": 0.00019690098257244064,
      "loss": 0.8757,
      "step": 140
    },
    {
      "epoch": 0.17857142857142858,
      "grad_norm": 0.10026735812425613,
      "learning_rate": 0.00019634705485641488,
      "loss": 0.8547,
      "step": 145
    },
    {
      "epoch": 0.18472906403940886,
      "grad_norm": 0.0951361283659935,
      "learning_rate": 0.00019574851883550395,
      "loss": 0.859,
      "step": 150
    },
    {
      "epoch": 0.19088669950738915,
      "grad_norm": 0.09267546981573105,
      "learning_rate": 0.00019510565162951537,
      "loss": 0.8635,
      "step": 155
    },
    {
      "epoch": 0.19704433497536947,
      "grad_norm": 0.10676441341638565,
      "learning_rate": 0.00019441875088341997,
      "loss": 0.8707,
      "step": 160
    },
    {
      "epoch": 0.20320197044334976,
      "grad_norm": 0.08962992578744888,
      "learning_rate": 0.00019368813462954316,
      "loss": 0.8341,
      "step": 165
    },
    {
      "epoch": 0.20935960591133004,
      "grad_norm": 0.10068422555923462,
      "learning_rate": 0.00019291414114031743,
      "loss": 0.8063,
      "step": 170
    },
    {
      "epoch": 0.21551724137931033,
      "grad_norm": 0.09436162561178207,
      "learning_rate": 0.00019209712877166349,
      "loss": 0.8477,
      "step": 175
    },
    {
      "epoch": 0.22167487684729065,
      "grad_norm": 0.0981898158788681,
      "learning_rate": 0.00019123747579707275,
      "loss": 0.8634,
      "step": 180
    },
    {
      "epoch": 0.22783251231527094,
      "grad_norm": 0.09050612151622772,
      "learning_rate": 0.00019033558023246844,
      "loss": 0.8693,
      "step": 185
    },
    {
      "epoch": 0.23399014778325122,
      "grad_norm": 0.09567181766033173,
      "learning_rate": 0.0001893918596519257,
      "loss": 0.8721,
      "step": 190
    },
    {
      "epoch": 0.24014778325123154,
      "grad_norm": 0.09379356354475021,
      "learning_rate": 0.00018840675099433636,
      "loss": 0.8572,
      "step": 195
    },
    {
      "epoch": 0.24630541871921183,
      "grad_norm": 0.09768428653478622,
      "learning_rate": 0.00018738071036110808,
      "loss": 0.8675,
      "step": 200
    },
    {
      "epoch": 0.2524630541871921,
      "grad_norm": 0.0965338945388794,
      "learning_rate": 0.00018631421280499116,
      "loss": 0.869,
      "step": 205
    },
    {
      "epoch": 0.25862068965517243,
      "grad_norm": 0.08635499328374863,
      "learning_rate": 0.00018520775211013093,
      "loss": 0.842,
      "step": 210
    },
    {
      "epoch": 0.2647783251231527,
      "grad_norm": 0.08754138648509979,
      "learning_rate": 0.00018406184056344782,
      "loss": 0.8305,
      "step": 215
    },
    {
      "epoch": 0.270935960591133,
      "grad_norm": 0.09839050471782684,
      "learning_rate": 0.00018287700871745036,
      "loss": 0.835,
      "step": 220
    },
    {
      "epoch": 0.2770935960591133,
      "grad_norm": 0.11136944591999054,
      "learning_rate": 0.0001816538051445916,
      "loss": 0.8588,
      "step": 225
    },
    {
      "epoch": 0.2832512315270936,
      "grad_norm": 0.0912737250328064,
      "learning_rate": 0.00018039279618328212,
      "loss": 0.8624,
      "step": 230
    },
    {
      "epoch": 0.2894088669950739,
      "grad_norm": 0.07793363183736801,
      "learning_rate": 0.00017909456567567772,
      "loss": 0.8345,
      "step": 235
    },
    {
      "epoch": 0.2955665024630542,
      "grad_norm": 0.10047736018896103,
      "learning_rate": 0.0001777597146973627,
      "loss": 0.8585,
      "step": 240
    },
    {
      "epoch": 0.3017241379310345,
      "grad_norm": 0.08737127482891083,
      "learning_rate": 0.00017638886127905427,
      "loss": 0.8397,
      "step": 245
    },
    {
      "epoch": 0.3078817733990148,
      "grad_norm": 0.1030949130654335,
      "learning_rate": 0.00017498264012045687,
      "loss": 0.8794,
      "step": 250
    },
    {
      "epoch": 0.31403940886699505,
      "grad_norm": 0.08828911185264587,
      "learning_rate": 0.00017354170229639856,
      "loss": 0.854,
      "step": 255
    },
    {
      "epoch": 0.32019704433497537,
      "grad_norm": 0.0845285952091217,
      "learning_rate": 0.00017206671495538612,
      "loss": 0.8413,
      "step": 260
    },
    {
      "epoch": 0.3263546798029557,
      "grad_norm": 0.09490133821964264,
      "learning_rate": 0.0001705583610107178,
      "loss": 0.8278,
      "step": 265
    },
    {
      "epoch": 0.33251231527093594,
      "grad_norm": 0.0908144861459732,
      "learning_rate": 0.0001690173388242972,
      "loss": 0.8589,
      "step": 270
    },
    {
      "epoch": 0.33866995073891626,
      "grad_norm": 0.09003310650587082,
      "learning_rate": 0.00016744436188329456,
      "loss": 0.8572,
      "step": 275
    },
    {
      "epoch": 0.3448275862068966,
      "grad_norm": 0.09844415634870529,
      "learning_rate": 0.0001658401584698049,
      "loss": 0.8628,
      "step": 280
    },
    {
      "epoch": 0.35098522167487683,
      "grad_norm": 0.09799636900424957,
      "learning_rate": 0.00016420547132365635,
      "loss": 0.86,
      "step": 285
    },
    {
      "epoch": 0.35714285714285715,
      "grad_norm": 0.0892237052321434,
      "learning_rate": 0.00016254105729852464,
      "loss": 0.8818,
      "step": 290
    },
    {
      "epoch": 0.3633004926108374,
      "grad_norm": 0.09125364571809769,
      "learning_rate": 0.00016084768701151263,
      "loss": 0.8788,
      "step": 295
    },
    {
      "epoch": 0.3694581280788177,
      "grad_norm": 0.09678944945335388,
      "learning_rate": 0.00015912614448635782,
      "loss": 0.8846,
      "step": 300
    },
    {
      "epoch": 0.37561576354679804,
      "grad_norm": 0.09397515654563904,
      "learning_rate": 0.00015737722679043248,
      "loss": 0.8566,
      "step": 305
    },
    {
      "epoch": 0.3817733990147783,
      "grad_norm": 0.08986654132604599,
      "learning_rate": 0.00015560174366570446,
      "loss": 0.8321,
      "step": 310
    },
    {
      "epoch": 0.3879310344827586,
      "grad_norm": 0.09454771876335144,
      "learning_rate": 0.00015380051715382996,
      "loss": 0.8826,
      "step": 315
    },
    {
      "epoch": 0.39408866995073893,
      "grad_norm": 0.092396080493927,
      "learning_rate": 0.0001519743812155516,
      "loss": 0.8526,
      "step": 320
    },
    {
      "epoch": 0.4002463054187192,
      "grad_norm": 0.10106166452169418,
      "learning_rate": 0.00015012418134457755,
      "loss": 0.847,
      "step": 325
    },
    {
      "epoch": 0.4064039408866995,
      "grad_norm": 0.091457299888134,
      "learning_rate": 0.00014825077417612186,
      "loss": 0.8469,
      "step": 330
    },
    {
      "epoch": 0.4125615763546798,
      "grad_norm": 0.09755238145589828,
      "learning_rate": 0.0001463550270902851,
      "loss": 0.8296,
      "step": 335
    },
    {
      "epoch": 0.4187192118226601,
      "grad_norm": 0.09282223135232925,
      "learning_rate": 0.00014443781781046136,
      "loss": 0.8452,
      "step": 340
    },
    {
      "epoch": 0.4248768472906404,
      "grad_norm": 0.09217504411935806,
      "learning_rate": 0.0001425000339969554,
      "loss": 0.8357,
      "step": 345
    },
    {
      "epoch": 0.43103448275862066,
      "grad_norm": 0.0990719348192215,
      "learning_rate": 0.00014054257283599973,
      "loss": 0.8402,
      "step": 350
    },
    {
      "epoch": 0.437192118226601,
      "grad_norm": 0.09021298587322235,
      "learning_rate": 0.0001385663406243607,
      "loss": 0.8454,
      "step": 355
    },
    {
      "epoch": 0.4433497536945813,
      "grad_norm": 0.09311360120773315,
      "learning_rate": 0.00013657225234972695,
      "loss": 0.8449,
      "step": 360
    },
    {
      "epoch": 0.44950738916256155,
      "grad_norm": 0.09712479263544083,
      "learning_rate": 0.00013456123126707334,
      "loss": 0.8718,
      "step": 365
    },
    {
      "epoch": 0.45566502463054187,
      "grad_norm": 0.098002128303051,
      "learning_rate": 0.00013253420847119803,
      "loss": 0.8467,
      "step": 370
    },
    {
      "epoch": 0.4618226600985222,
      "grad_norm": 0.08402260392904282,
      "learning_rate": 0.0001304921224656289,
      "loss": 0.8279,
      "step": 375
    },
    {
      "epoch": 0.46798029556650245,
      "grad_norm": 0.09465406835079193,
      "learning_rate": 0.0001284359187281004,
      "loss": 0.8749,
      "step": 380
    },
    {
      "epoch": 0.47413793103448276,
      "grad_norm": 0.08619461953639984,
      "learning_rate": 0.00012636654927280073,
      "loss": 0.8495,
      "step": 385
    },
    {
      "epoch": 0.4802955665024631,
      "grad_norm": 0.09889654070138931,
      "learning_rate": 0.0001242849722095936,
      "loss": 0.8447,
      "step": 390
    },
    {
      "epoch": 0.48645320197044334,
      "grad_norm": 0.0903097614645958,
      "learning_rate": 0.00012219215130041656,
      "loss": 0.8186,
      "step": 395
    },
    {
      "epoch": 0.49261083743842365,
      "grad_norm": 0.09543944150209427,
      "learning_rate": 0.00012008905551306356,
      "loss": 0.8497,
      "step": 400
    },
    {
      "epoch": 0.4987684729064039,
      "grad_norm": 0.09465156495571136,
      "learning_rate": 0.00011797665857255621,
      "loss": 0.8496,
      "step": 405
    },
    {
      "epoch": 0.5049261083743842,
      "grad_norm": 0.09152977913618088,
      "learning_rate": 0.00011585593851031347,
      "loss": 0.8406,
      "step": 410
    },
    {
      "epoch": 0.5110837438423645,
      "grad_norm": 0.09556221961975098,
      "learning_rate": 0.00011372787721132648,
      "loss": 0.851,
      "step": 415
    },
    {
      "epoch": 0.5172413793103449,
      "grad_norm": 0.09278736263513565,
      "learning_rate": 0.00011159345995955006,
      "loss": 0.8636,
      "step": 420
    },
    {
      "epoch": 0.5233990147783252,
      "grad_norm": 0.09471924602985382,
      "learning_rate": 0.00010945367498171993,
      "loss": 0.8628,
      "step": 425
    },
    {
      "epoch": 0.5295566502463054,
      "grad_norm": 0.09358783066272736,
      "learning_rate": 0.00010730951298980776,
      "loss": 0.864,
      "step": 430
    },
    {
      "epoch": 0.5357142857142857,
      "grad_norm": 0.10094843059778214,
      "learning_rate": 0.00010516196672232539,
      "loss": 0.8698,
      "step": 435
    },
    {
      "epoch": 0.541871921182266,
      "grad_norm": 0.09740354865789413,
      "learning_rate": 0.00010301203048469083,
      "loss": 0.8419,
      "step": 440
    },
    {
      "epoch": 0.5480295566502463,
      "grad_norm": 0.09911592304706573,
      "learning_rate": 0.00010086069968886885,
      "loss": 0.8524,
      "step": 445
    },
    {
      "epoch": 0.5541871921182266,
      "grad_norm": 0.09378519654273987,
      "learning_rate": 9.870897039249911e-05,
      "loss": 0.8673,
      "step": 450
    },
    {
      "epoch": 0.5603448275862069,
      "grad_norm": 0.0991242304444313,
      "learning_rate": 9.655783883772545e-05,
      "loss": 0.8624,
      "step": 455
    },
    {
      "epoch": 0.5665024630541872,
      "grad_norm": 0.09386149048805237,
      "learning_rate": 9.440830098993969e-05,
      "loss": 0.8161,
      "step": 460
    },
    {
      "epoch": 0.5726600985221675,
      "grad_norm": 0.09219907224178314,
      "learning_rate": 9.22613520766537e-05,
      "loss": 0.8519,
      "step": 465
    },
    {
      "epoch": 0.5788177339901478,
      "grad_norm": 0.09010721743106842,
      "learning_rate": 9.011798612671286e-05,
      "loss": 0.8489,
      "step": 470
    },
    {
      "epoch": 0.5849753694581281,
      "grad_norm": 0.10001237690448761,
      "learning_rate": 8.797919551006475e-05,
      "loss": 0.8526,
      "step": 475
    },
    {
      "epoch": 0.5911330049261084,
      "grad_norm": 0.09620565176010132,
      "learning_rate": 8.58459704782957e-05,
      "loss": 0.85,
      "step": 480
    },
    {
      "epoch": 0.5972906403940886,
      "grad_norm": 0.0897950753569603,
      "learning_rate": 8.371929870614833e-05,
      "loss": 0.8458,
      "step": 485
    },
    {
      "epoch": 0.603448275862069,
      "grad_norm": 0.08846917748451233,
      "learning_rate": 8.160016483423199e-05,
      "loss": 0.8512,
      "step": 490
    },
    {
      "epoch": 0.6096059113300493,
      "grad_norm": 0.09186448156833649,
      "learning_rate": 7.948955001313811e-05,
      "loss": 0.8699,
      "step": 495
    },
    {
      "epoch": 0.6157635467980296,
      "grad_norm": 0.10069604963064194,
      "learning_rate": 7.738843144917119e-05,
      "loss": 0.8398,
      "step": 500
    },
    {
      "epoch": 0.6219211822660099,
      "grad_norm": 0.09906591475009918,
      "learning_rate": 7.529778195190645e-05,
      "loss": 0.8378,
      "step": 505
    },
    {
      "epoch": 0.6280788177339901,
      "grad_norm": 0.09918715059757233,
      "learning_rate": 7.321856948378259e-05,
      "loss": 0.8584,
      "step": 510
    },
    {
      "epoch": 0.6342364532019704,
      "grad_norm": 0.09439591318368912,
      "learning_rate": 7.115175671193913e-05,
      "loss": 0.8519,
      "step": 515
    },
    {
      "epoch": 0.6403940886699507,
      "grad_norm": 0.0965607687830925,
      "learning_rate": 6.909830056250527e-05,
      "loss": 0.8607,
      "step": 520
    },
    {
      "epoch": 0.646551724137931,
      "grad_norm": 0.09937562048435211,
      "learning_rate": 6.7059151777547e-05,
      "loss": 0.8547,
      "step": 525
    },
    {
      "epoch": 0.6527093596059114,
      "grad_norm": 0.10041774064302444,
      "learning_rate": 6.503525447487715e-05,
      "loss": 0.8584,
      "step": 530
    },
    {
      "epoch": 0.6588669950738916,
      "grad_norm": 0.09252887219190598,
      "learning_rate": 6.30275457109327e-05,
      "loss": 0.8824,
      "step": 535
    },
    {
      "epoch": 0.6650246305418719,
      "grad_norm": 0.09451112896203995,
      "learning_rate": 6.103695504692122e-05,
      "loss": 0.8371,
      "step": 540
    },
    {
      "epoch": 0.6711822660098522,
      "grad_norm": 0.10130809247493744,
      "learning_rate": 5.906440411843787e-05,
      "loss": 0.8807,
      "step": 545
    },
    {
      "epoch": 0.6773399014778325,
      "grad_norm": 0.09514950215816498,
      "learning_rate": 5.7110806208751655e-05,
      "loss": 0.8571,
      "step": 550
    },
    {
      "epoch": 0.6834975369458128,
      "grad_norm": 0.0906277671456337,
      "learning_rate": 5.5177065825958966e-05,
      "loss": 0.8376,
      "step": 555
    },
    {
      "epoch": 0.6896551724137931,
      "grad_norm": 0.08796288818120956,
      "learning_rate": 5.326407828419979e-05,
      "loss": 0.8283,
      "step": 560
    },
    {
      "epoch": 0.6958128078817734,
      "grad_norm": 0.09175130724906921,
      "learning_rate": 5.137272928913097e-05,
      "loss": 0.8502,
      "step": 565
    },
    {
      "epoch": 0.7019704433497537,
      "grad_norm": 0.09584875404834747,
      "learning_rate": 4.9503894527847964e-05,
      "loss": 0.8398,
      "step": 570
    },
    {
      "epoch": 0.708128078817734,
      "grad_norm": 0.09284964203834534,
      "learning_rate": 4.7658439263445e-05,
      "loss": 0.8328,
      "step": 575
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 0.09421636164188385,
      "learning_rate": 4.583721793440188e-05,
      "loss": 0.8395,
      "step": 580
    },
    {
      "epoch": 0.7204433497536946,
      "grad_norm": 0.1052021011710167,
      "learning_rate": 4.4041073758982335e-05,
      "loss": 0.8579,
      "step": 585
    },
    {
      "epoch": 0.7266009852216748,
      "grad_norm": 0.09751743078231812,
      "learning_rate": 4.227083834482728e-05,
      "loss": 0.8348,
      "step": 590
    },
    {
      "epoch": 0.7327586206896551,
      "grad_norm": 0.09555874764919281,
      "learning_rate": 4.052733130392367e-05,
      "loss": 0.8386,
      "step": 595
    },
    {
      "epoch": 0.7389162561576355,
      "grad_norm": 0.09556646645069122,
      "learning_rate": 3.881135987312757e-05,
      "loss": 0.8858,
      "step": 600
    },
    {
      "epoch": 0.7450738916256158,
      "grad_norm": 0.0967402383685112,
      "learning_rate": 3.712371854041654e-05,
      "loss": 0.8306,
      "step": 605
    },
    {
      "epoch": 0.7512315270935961,
      "grad_norm": 0.1028241440653801,
      "learning_rate": 3.546518867704499e-05,
      "loss": 0.8657,
      "step": 610
    },
    {
      "epoch": 0.7573891625615764,
      "grad_norm": 0.09105245769023895,
      "learning_rate": 3.383653817577216e-05,
      "loss": 0.8398,
      "step": 615
    },
    {
      "epoch": 0.7635467980295566,
      "grad_norm": 0.09858682006597519,
      "learning_rate": 3.223852109533112e-05,
      "loss": 0.8218,
      "step": 620
    },
    {
      "epoch": 0.7697044334975369,
      "grad_norm": 0.09542707353830338,
      "learning_rate": 3.0671877311302244e-05,
      "loss": 0.8243,
      "step": 625
    },
    {
      "epoch": 0.7758620689655172,
      "grad_norm": 0.09767568111419678,
      "learning_rate": 2.9137332173554043e-05,
      "loss": 0.8449,
      "step": 630
    },
    {
      "epoch": 0.7820197044334976,
      "grad_norm": 0.10336579382419586,
      "learning_rate": 2.763559617040876e-05,
      "loss": 0.8441,
      "step": 635
    },
    {
      "epoch": 0.7881773399014779,
      "grad_norm": 0.09993448853492737,
      "learning_rate": 2.616736459968936e-05,
      "loss": 0.8388,
      "step": 640
    },
    {
      "epoch": 0.7943349753694581,
      "grad_norm": 0.09298530966043472,
      "learning_rate": 2.473331724679917e-05,
      "loss": 0.8385,
      "step": 645
    },
    {
      "epoch": 0.8004926108374384,
      "grad_norm": 0.09422852843999863,
      "learning_rate": 2.33341180699841e-05,
      "loss": 0.8178,
      "step": 650
    },
    {
      "epoch": 0.8066502463054187,
      "grad_norm": 0.09662608057260513,
      "learning_rate": 2.1970414892922442e-05,
      "loss": 0.8483,
      "step": 655
    },
    {
      "epoch": 0.812807881773399,
      "grad_norm": 0.09834180027246475,
      "learning_rate": 2.0642839104785272e-05,
      "loss": 0.8452,
      "step": 660
    },
    {
      "epoch": 0.8189655172413793,
      "grad_norm": 0.12449537217617035,
      "learning_rate": 1.9352005367905536e-05,
      "loss": 0.8379,
      "step": 665
    },
    {
      "epoch": 0.8251231527093597,
      "grad_norm": 0.10711503773927689,
      "learning_rate": 1.8098511333192024e-05,
      "loss": 0.8604,
      "step": 670
    },
    {
      "epoch": 0.8312807881773399,
      "grad_norm": 0.09627261012792587,
      "learning_rate": 1.6882937363419203e-05,
      "loss": 0.8234,
      "step": 675
    },
    {
      "epoch": 0.8374384236453202,
      "grad_norm": 0.10317688435316086,
      "learning_rate": 1.570584626452173e-05,
      "loss": 0.8325,
      "step": 680
    },
    {
      "epoch": 0.8435960591133005,
      "grad_norm": 0.10015459358692169,
      "learning_rate": 1.4567783025017301e-05,
      "loss": 0.8418,
      "step": 685
    },
    {
      "epoch": 0.8497536945812808,
      "grad_norm": 0.09650367498397827,
      "learning_rate": 1.3469274563679402e-05,
      "loss": 0.8248,
      "step": 690
    },
    {
      "epoch": 0.8559113300492611,
      "grad_norm": 0.10634295642375946,
      "learning_rate": 1.2410829485575704e-05,
      "loss": 0.8538,
      "step": 695
    },
    {
      "epoch": 0.8620689655172413,
      "grad_norm": 0.09615869075059891,
      "learning_rate": 1.1392937846586215e-05,
      "loss": 0.8669,
      "step": 700
    },
    {
      "epoch": 0.8682266009852216,
      "grad_norm": 0.09273919463157654,
      "learning_rate": 1.0416070926509113e-05,
      "loss": 0.8374,
      "step": 705
    },
    {
      "epoch": 0.874384236453202,
      "grad_norm": 0.10093870013952255,
      "learning_rate": 9.48068101086026e-06,
      "loss": 0.8638,
      "step": 710
    },
    {
      "epoch": 0.8805418719211823,
      "grad_norm": 0.09216069430112839,
      "learning_rate": 8.58720118146662e-06,
      "loss": 0.8302,
      "step": 715
    },
    {
      "epoch": 0.8866995073891626,
      "grad_norm": 0.09972187131643295,
      "learning_rate": 7.736045115951251e-06,
      "loss": 0.8679,
      "step": 720
    },
    {
      "epoch": 0.8928571428571429,
      "grad_norm": 0.09708079695701599,
      "learning_rate": 6.927606896202066e-06,
      "loss": 0.8348,
      "step": 725
    },
    {
      "epoch": 0.8990147783251231,
      "grad_norm": 0.09960056841373444,
      "learning_rate": 6.16226082591359e-06,
      "loss": 0.8403,
      "step": 730
    },
    {
      "epoch": 0.9051724137931034,
      "grad_norm": 0.09734474867582321,
      "learning_rate": 5.440361257285742e-06,
      "loss": 0.8567,
      "step": 735
    },
    {
      "epoch": 0.9113300492610837,
      "grad_norm": 0.0940781757235527,
      "learning_rate": 4.762242426960262e-06,
      "loss": 0.8456,
      "step": 740
    },
    {
      "epoch": 0.9174876847290641,
      "grad_norm": 0.09184621274471283,
      "learning_rate": 4.128218301270359e-06,
      "loss": 0.8516,
      "step": 745
    },
    {
      "epoch": 0.9236453201970444,
      "grad_norm": 0.095575712621212,
      "learning_rate": 3.5385824308756587e-06,
      "loss": 0.8433,
      "step": 750
    },
    {
      "epoch": 0.9298029556650246,
      "grad_norm": 0.09812497347593307,
      "learning_rate": 2.9936078148492973e-06,
      "loss": 0.865,
      "step": 755
    },
    {
      "epoch": 0.9359605911330049,
      "grad_norm": 0.09702542424201965,
      "learning_rate": 2.493546774280531e-06,
      "loss": 0.8249,
      "step": 760
    },
    {
      "epoch": 0.9421182266009852,
      "grad_norm": 0.09994158893823624,
      "learning_rate": 2.0386308354509942e-06,
      "loss": 0.8355,
      "step": 765
    },
    {
      "epoch": 0.9482758620689655,
      "grad_norm": 0.09425787627696991,
      "learning_rate": 1.6290706226390285e-06,
      "loss": 0.8094,
      "step": 770
    },
    {
      "epoch": 0.9544334975369458,
      "grad_norm": 0.10560785979032516,
      "learning_rate": 1.2650557606013635e-06,
      "loss": 0.8512,
      "step": 775
    },
    {
      "epoch": 0.9605911330049262,
      "grad_norm": 0.09790593385696411,
      "learning_rate": 9.46754786777726e-07,
      "loss": 0.8378,
      "step": 780
    },
    {
      "epoch": 0.9667487684729064,
      "grad_norm": 0.09701854735612869,
      "learning_rate": 6.74315073258569e-07,
      "loss": 0.8201,
      "step": 785
    },
    {
      "epoch": 0.9729064039408867,
      "grad_norm": 0.0912141278386116,
      "learning_rate": 4.4786275855247527e-07,
      "loss": 0.8304,
      "step": 790
    },
    {
      "epoch": 0.979064039408867,
      "grad_norm": 0.10074811428785324,
      "learning_rate": 2.675026891844512e-07,
      "loss": 0.8649,
      "step": 795
    },
    {
      "epoch": 0.9852216748768473,
      "grad_norm": 0.09760763496160507,
      "learning_rate": 1.333183711524133e-07,
      "loss": 0.8527,
      "step": 800
    },
    {
      "epoch": 0.9913793103448276,
      "grad_norm": 0.10889925807714462,
      "learning_rate": 4.5371931264270864e-08,
      "loss": 0.8688,
      "step": 805
    },
    {
      "epoch": 0.9975369458128078,
      "grad_norm": 0.0987965315580368,
      "learning_rate": 3.7040883734462683e-09,
      "loss": 0.842,
      "step": 810
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.241860270500183,
      "eval_runtime": 133.9183,
      "eval_samples_per_second": 8.625,
      "eval_steps_per_second": 0.545,
      "step": 812
    },
    {
      "epoch": 1.0,
      "step": 812,
      "total_flos": 2.0167297520684237e+18,
      "train_loss": 0.8562019185479639,
      "train_runtime": 32009.8427,
      "train_samples_per_second": 3.247,
      "train_steps_per_second": 0.025
    }
  ],
  "logging_steps": 5,
  "max_steps": 812,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 25,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.0167297520684237e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}