{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.25,
  "eval_steps": 100.0,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0025,
      "grad_norm": 0.09379793703556061,
      "learning_rate": 5.999999999999999e-06,
      "loss": 0.6799,
      "step": 5
    },
    {
      "epoch": 0.005,
      "grad_norm": 0.1399833709001541,
      "learning_rate": 1.3499999999999998e-05,
      "loss": 0.6954,
      "step": 10
    },
    {
      "epoch": 0.0075,
      "grad_norm": 0.08632303029298782,
      "learning_rate": 2.1e-05,
      "loss": 0.6921,
      "step": 15
    },
    {
      "epoch": 0.01,
      "grad_norm": 0.10006701201200485,
      "learning_rate": 2.8499999999999998e-05,
      "loss": 0.69,
      "step": 20
    },
    {
      "epoch": 0.0125,
      "grad_norm": 0.07633858919143677,
      "learning_rate": 3.5999999999999994e-05,
      "loss": 0.6722,
      "step": 25
    },
    {
      "epoch": 0.015,
      "grad_norm": 0.09399061650037766,
      "learning_rate": 4.3499999999999993e-05,
      "loss": 0.6453,
      "step": 30
    },
    {
      "epoch": 0.0175,
      "grad_norm": 0.0843738541007042,
      "learning_rate": 5.1e-05,
      "loss": 0.6276,
      "step": 35
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.08583351224660873,
      "learning_rate": 5.85e-05,
      "loss": 0.58,
      "step": 40
    },
    {
      "epoch": 0.0225,
      "grad_norm": 0.09571370482444763,
      "learning_rate": 6.599999999999999e-05,
      "loss": 0.6355,
      "step": 45
    },
    {
      "epoch": 0.025,
      "grad_norm": 0.1083935871720314,
      "learning_rate": 7.35e-05,
      "loss": 0.589,
      "step": 50
    },
    {
      "epoch": 0.0275,
      "grad_norm": 0.10387319326400757,
      "learning_rate": 8.1e-05,
      "loss": 0.6061,
      "step": 55
    },
    {
      "epoch": 0.03,
      "grad_norm": 0.11083361506462097,
      "learning_rate": 8.849999999999998e-05,
      "loss": 0.572,
      "step": 60
    },
    {
      "epoch": 0.0325,
      "grad_norm": 0.12665686011314392,
      "learning_rate": 9.599999999999999e-05,
      "loss": 0.5442,
      "step": 65
    },
    {
      "epoch": 0.035,
      "grad_norm": 0.1308053582906723,
      "learning_rate": 0.00010349999999999998,
      "loss": 0.6524,
      "step": 70
    },
    {
      "epoch": 0.0375,
      "grad_norm": 0.13535510003566742,
      "learning_rate": 0.00011099999999999999,
      "loss": 0.6404,
      "step": 75
    },
    {
      "epoch": 0.04,
      "grad_norm": 0.12833671271800995,
      "learning_rate": 0.0001185,
      "loss": 0.5717,
      "step": 80
    },
    {
      "epoch": 0.0425,
      "grad_norm": 0.11962099373340607,
      "learning_rate": 0.00012599999999999997,
      "loss": 0.6098,
      "step": 85
    },
    {
      "epoch": 0.045,
      "grad_norm": 0.13898271322250366,
      "learning_rate": 0.0001335,
      "loss": 0.6099,
      "step": 90
    },
    {
      "epoch": 0.0475,
      "grad_norm": 0.14486610889434814,
      "learning_rate": 0.00014099999999999998,
      "loss": 0.5744,
      "step": 95
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.1432138830423355,
      "learning_rate": 0.00014849999999999998,
      "loss": 0.5659,
      "step": 100
    },
    {
      "epoch": 0.0525,
      "grad_norm": 0.13487878441810608,
      "learning_rate": 0.000156,
      "loss": 0.5622,
      "step": 105
    },
    {
      "epoch": 0.055,
      "grad_norm": 0.12495309859514236,
      "learning_rate": 0.0001635,
      "loss": 0.5951,
      "step": 110
    },
    {
      "epoch": 0.0575,
      "grad_norm": 0.13011734187602997,
      "learning_rate": 0.00017099999999999998,
      "loss": 0.6249,
      "step": 115
    },
    {
      "epoch": 0.06,
      "grad_norm": 0.13987745344638824,
      "learning_rate": 0.00017849999999999997,
      "loss": 0.559,
      "step": 120
    },
    {
      "epoch": 0.0625,
      "grad_norm": 0.13373605906963348,
      "learning_rate": 0.000186,
      "loss": 0.5475,
      "step": 125
    },
    {
      "epoch": 0.065,
      "grad_norm": 0.12433867901563644,
      "learning_rate": 0.0001935,
      "loss": 0.5274,
      "step": 130
    },
    {
      "epoch": 0.0675,
      "grad_norm": 0.11097615957260132,
      "learning_rate": 0.000201,
      "loss": 0.678,
      "step": 135
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.1155027225613594,
      "learning_rate": 0.00020849999999999997,
      "loss": 0.5611,
      "step": 140
    },
    {
      "epoch": 0.0725,
      "grad_norm": 0.11431068181991577,
      "learning_rate": 0.00021599999999999996,
      "loss": 0.6054,
      "step": 145
    },
    {
      "epoch": 0.075,
      "grad_norm": 0.09796140342950821,
      "learning_rate": 0.00022349999999999998,
      "loss": 0.5472,
      "step": 150
    },
    {
      "epoch": 0.0775,
      "grad_norm": 0.09489257633686066,
      "learning_rate": 0.00023099999999999998,
      "loss": 0.4636,
      "step": 155
    },
    {
      "epoch": 0.08,
      "grad_norm": 0.10787788033485413,
      "learning_rate": 0.0002385,
      "loss": 0.6164,
      "step": 160
    },
    {
      "epoch": 0.0825,
      "grad_norm": 0.10261733084917068,
      "learning_rate": 0.00024599999999999996,
      "loss": 0.5408,
      "step": 165
    },
    {
      "epoch": 0.085,
      "grad_norm": 0.11870352178812027,
      "learning_rate": 0.0002535,
      "loss": 0.5268,
      "step": 170
    },
    {
      "epoch": 0.0875,
      "grad_norm": 0.11910569667816162,
      "learning_rate": 0.000261,
      "loss": 0.5461,
      "step": 175
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.10083702206611633,
      "learning_rate": 0.00026849999999999997,
      "loss": 0.4794,
      "step": 180
    },
    {
      "epoch": 0.0925,
      "grad_norm": 0.10453511029481888,
      "learning_rate": 0.000276,
      "loss": 0.5539,
      "step": 185
    },
    {
      "epoch": 0.095,
      "grad_norm": 0.101403146982193,
      "learning_rate": 0.00028349999999999995,
      "loss": 0.5346,
      "step": 190
    },
    {
      "epoch": 0.0975,
      "grad_norm": 0.10724789649248123,
      "learning_rate": 0.00029099999999999997,
      "loss": 0.6026,
      "step": 195
    },
    {
      "epoch": 0.1,
      "grad_norm": 0.1140277311205864,
      "learning_rate": 0.0002985,
      "loss": 0.5193,
      "step": 200
    },
    {
      "epoch": 0.1025,
      "grad_norm": 0.09706108272075653,
      "learning_rate": 0.0002999963446058092,
      "loss": 0.54,
      "step": 205
    },
    {
      "epoch": 0.105,
      "grad_norm": 0.10003062337636948,
      "learning_rate": 0.0002999814948722491,
      "loss": 0.5365,
      "step": 210
    },
    {
      "epoch": 0.1075,
      "grad_norm": 0.1078687533736229,
      "learning_rate": 0.00029995522346717746,
      "loss": 0.5889,
      "step": 215
    },
    {
      "epoch": 0.11,
      "grad_norm": 0.10538115352392197,
      "learning_rate": 0.0002999175323912636,
      "loss": 0.5611,
      "step": 220
    },
    {
      "epoch": 0.1125,
      "grad_norm": 0.1020808294415474,
      "learning_rate": 0.00029986842451482874,
      "loss": 0.6103,
      "step": 225
    },
    {
      "epoch": 0.115,
      "grad_norm": 0.09635835886001587,
      "learning_rate": 0.0002998079035776279,
      "loss": 0.5229,
      "step": 230
    },
    {
      "epoch": 0.1175,
      "grad_norm": 0.10287190228700638,
      "learning_rate": 0.0002997359741885648,
      "loss": 0.5312,
      "step": 235
    },
    {
      "epoch": 0.12,
      "grad_norm": 0.09160075336694717,
      "learning_rate": 0.0002996526418253408,
      "loss": 0.5673,
      "step": 240
    },
    {
      "epoch": 0.1225,
      "grad_norm": 0.08691006153821945,
      "learning_rate": 0.000299557912834038,
      "loss": 0.5326,
      "step": 245
    },
    {
      "epoch": 0.125,
      "grad_norm": 0.10096988826990128,
      "learning_rate": 0.00029945179442863594,
      "loss": 0.6004,
      "step": 250
    },
    {
      "epoch": 0.1275,
      "grad_norm": 0.09594204276800156,
      "learning_rate": 0.000299334294690462,
      "loss": 0.5516,
      "step": 255
    },
    {
      "epoch": 0.13,
      "grad_norm": 0.10281919687986374,
      "learning_rate": 0.00029920542256757607,
      "loss": 0.5515,
      "step": 260
    },
    {
      "epoch": 0.1325,
      "grad_norm": 0.08547840267419815,
      "learning_rate": 0.00029906518787408944,
      "loss": 0.5243,
      "step": 265
    },
    {
      "epoch": 0.135,
      "grad_norm": 0.10161560773849487,
      "learning_rate": 0.0002989136012894168,
      "loss": 0.5096,
      "step": 270
    },
    {
      "epoch": 0.1375,
      "grad_norm": 0.09101904183626175,
      "learning_rate": 0.0002987506743574635,
      "loss": 0.553,
      "step": 275
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.09769442677497864,
      "learning_rate": 0.0002985764194857463,
      "loss": 0.4953,
      "step": 280
    },
    {
      "epoch": 0.1425,
      "grad_norm": 0.10991579294204712,
      "learning_rate": 0.00029839084994444826,
      "loss": 0.5152,
      "step": 285
    },
    {
      "epoch": 0.145,
      "grad_norm": 0.09450916200876236,
      "learning_rate": 0.00029819397986540836,
      "loss": 0.5397,
      "step": 290
    },
    {
      "epoch": 0.1475,
      "grad_norm": 0.10876069217920303,
      "learning_rate": 0.0002979858242410454,
      "loss": 0.4858,
      "step": 295
    },
    {
      "epoch": 0.15,
      "grad_norm": 0.097995825111866,
      "learning_rate": 0.00029776639892321606,
      "loss": 0.5566,
      "step": 300
    },
    {
      "epoch": 0.1525,
      "grad_norm": 0.1145048514008522,
      "learning_rate": 0.0002975357206220079,
      "loss": 0.4531,
      "step": 305
    },
    {
      "epoch": 0.155,
      "grad_norm": 0.10271880775690079,
      "learning_rate": 0.00029729380690446654,
      "loss": 0.5199,
      "step": 310
    },
    {
      "epoch": 0.1575,
      "grad_norm": 0.11095371842384338,
      "learning_rate": 0.0002970406761932583,
      "loss": 0.5416,
      "step": 315
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.09949438273906708,
      "learning_rate": 0.00029677634776526673,
      "loss": 0.4841,
      "step": 320
    },
    {
      "epoch": 0.1625,
      "grad_norm": 0.1163724958896637,
      "learning_rate": 0.00029650084175012517,
      "loss": 0.4913,
      "step": 325
    },
    {
      "epoch": 0.165,
      "grad_norm": 0.10726840049028397,
      "learning_rate": 0.00029621417912868323,
      "loss": 0.5203,
      "step": 330
    },
    {
      "epoch": 0.1675,
      "grad_norm": 0.09609931707382202,
      "learning_rate": 0.00029591638173140947,
      "loss": 0.5607,
      "step": 335
    },
    {
      "epoch": 0.17,
      "grad_norm": 0.10824442654848099,
      "learning_rate": 0.0002956074722367286,
      "loss": 0.6004,
      "step": 340
    },
    {
      "epoch": 0.1725,
      "grad_norm": 0.10465679317712784,
      "learning_rate": 0.00029528747416929463,
      "loss": 0.5216,
      "step": 345
    },
    {
      "epoch": 0.175,
      "grad_norm": 0.10518354922533035,
      "learning_rate": 0.0002949564118981994,
      "loss": 0.499,
      "step": 350
    },
    {
      "epoch": 0.1775,
      "grad_norm": 0.0955279991030693,
      "learning_rate": 0.0002946143106351165,
      "loss": 0.5607,
      "step": 355
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.11159654706716537,
      "learning_rate": 0.0002942611964323817,
      "loss": 0.5204,
      "step": 360
    },
    {
      "epoch": 0.1825,
      "grad_norm": 0.09571187198162079,
      "learning_rate": 0.0002938970961810086,
      "loss": 0.6113,
      "step": 365
    },
    {
      "epoch": 0.185,
      "grad_norm": 0.11854679882526398,
      "learning_rate": 0.0002935220376086411,
      "loss": 0.5639,
      "step": 370
    },
    {
      "epoch": 0.1875,
      "grad_norm": 0.1050512045621872,
      "learning_rate": 0.0002931360492774415,
      "loss": 0.548,
      "step": 375
    },
    {
      "epoch": 0.19,
      "grad_norm": 0.1053968220949173,
      "learning_rate": 0.0002927391605819157,
      "loss": 0.5507,
      "step": 380
    },
    {
      "epoch": 0.1925,
      "grad_norm": 0.10567320138216019,
      "learning_rate": 0.00029233140174667445,
      "loss": 0.5312,
      "step": 385
    },
    {
      "epoch": 0.195,
      "grad_norm": 0.11914283782243729,
      "learning_rate": 0.0002919128038241318,
      "loss": 0.5961,
      "step": 390
    },
    {
      "epoch": 0.1975,
      "grad_norm": 0.09915795922279358,
      "learning_rate": 0.0002914833986921401,
      "loss": 0.5086,
      "step": 395
    },
    {
      "epoch": 0.2,
      "grad_norm": 0.10796502232551575,
      "learning_rate": 0.0002910432190515628,
      "loss": 0.5585,
      "step": 400
    },
    {
      "epoch": 0.2025,
      "grad_norm": 0.10748997330665588,
      "learning_rate": 0.00029059229842378373,
      "loss": 0.5466,
      "step": 405
    },
    {
      "epoch": 0.205,
      "grad_norm": 0.10696308314800262,
      "learning_rate": 0.0002901306711481544,
      "loss": 0.5513,
      "step": 410
    },
    {
      "epoch": 0.2075,
      "grad_norm": 0.10418657958507538,
      "learning_rate": 0.0002896583723793792,
      "loss": 0.5391,
      "step": 415
    },
    {
      "epoch": 0.21,
      "grad_norm": 0.16421550512313843,
      "learning_rate": 0.00028917543808483796,
      "loss": 0.4699,
      "step": 420
    },
    {
      "epoch": 0.2125,
      "grad_norm": 0.12929962575435638,
      "learning_rate": 0.00028868190504184696,
      "loss": 0.4984,
      "step": 425
    },
    {
      "epoch": 0.215,
      "grad_norm": 0.10469454526901245,
      "learning_rate": 0.00028817781083485816,
      "loss": 0.5119,
      "step": 430
    },
    {
      "epoch": 0.2175,
      "grad_norm": 0.0964970663189888,
      "learning_rate": 0.00028766319385259713,
      "loss": 0.5167,
      "step": 435
    },
    {
      "epoch": 0.22,
      "grad_norm": 0.12395574152469635,
      "learning_rate": 0.00028713809328513953,
      "loss": 0.5692,
      "step": 440
    },
    {
      "epoch": 0.2225,
      "grad_norm": 0.10189738124608994,
      "learning_rate": 0.0002866025491209265,
      "loss": 0.4628,
      "step": 445
    },
    {
      "epoch": 0.225,
      "grad_norm": 0.10433454066514969,
      "learning_rate": 0.0002860566021437197,
      "loss": 0.4869,
      "step": 450
    },
    {
      "epoch": 0.2275,
      "grad_norm": 0.13003456592559814,
      "learning_rate": 0.0002855002939294951,
      "loss": 0.5291,
      "step": 455
    },
    {
      "epoch": 0.23,
      "grad_norm": 0.11692202836275101,
      "learning_rate": 0.000284933666843277,
      "loss": 0.5229,
      "step": 460
    },
    {
      "epoch": 0.2325,
      "grad_norm": 0.10757846385240555,
      "learning_rate": 0.0002843567640359119,
      "loss": 0.435,
      "step": 465
    },
    {
      "epoch": 0.235,
      "grad_norm": 0.10775501281023026,
      "learning_rate": 0.00028376962944078206,
      "loss": 0.4418,
      "step": 470
    },
    {
      "epoch": 0.2375,
      "grad_norm": 0.11543692648410797,
      "learning_rate": 0.00028317230777046015,
      "loss": 0.4204,
      "step": 475
    },
    {
      "epoch": 0.24,
      "grad_norm": 0.10946698486804962,
      "learning_rate": 0.00028256484451330403,
      "loss": 0.49,
      "step": 480
    },
    {
      "epoch": 0.2425,
      "grad_norm": 0.11528221517801285,
      "learning_rate": 0.00028194728592999247,
      "loss": 0.4752,
      "step": 485
    },
    {
      "epoch": 0.245,
      "grad_norm": 0.10474205762147903,
      "learning_rate": 0.0002813196790500027,
      "loss": 0.4847,
      "step": 490
    },
    {
      "epoch": 0.2475,
      "grad_norm": 0.10768820345401764,
      "learning_rate": 0.00028068207166802837,
      "loss": 0.4664,
      "step": 495
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.12158560007810593,
      "learning_rate": 0.00028003451234034037,
      "loss": 0.4741,
      "step": 500
    }
  ],
  "logging_steps": 5,
  "max_steps": 2000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9223372036854775807,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6.57394539429888e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}