{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 30,
  "global_step": 1209,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.024829298572315334,
      "grad_norm": 2.199962615966797,
      "learning_rate": 1.487603305785124e-05,
      "loss": 1.2101,
      "step": 10
    },
    {
      "epoch": 0.04965859714463067,
      "grad_norm": 0.5984658598899841,
      "learning_rate": 3.1404958677685955e-05,
      "loss": 0.6936,
      "step": 20
    },
    {
      "epoch": 0.074487895716946,
      "grad_norm": 0.6005842089653015,
      "learning_rate": 4.793388429752066e-05,
      "loss": 0.5252,
      "step": 30
    },
    {
      "epoch": 0.074487895716946,
      "eval_loss": 0.4523814916610718,
      "eval_runtime": 63.8486,
      "eval_samples_per_second": 2.663,
      "eval_steps_per_second": 1.331,
      "step": 30
    },
    {
      "epoch": 0.09931719428926133,
      "grad_norm": 0.5528084635734558,
      "learning_rate": 6.446280991735537e-05,
      "loss": 0.3847,
      "step": 40
    },
    {
      "epoch": 0.12414649286157665,
      "grad_norm": 0.7472060322761536,
      "learning_rate": 8.099173553719009e-05,
      "loss": 0.3113,
      "step": 50
    },
    {
      "epoch": 0.148975791433892,
      "grad_norm": 0.5087178349494934,
      "learning_rate": 9.75206611570248e-05,
      "loss": 0.2469,
      "step": 60
    },
    {
      "epoch": 0.148975791433892,
      "eval_loss": 0.2289326786994934,
      "eval_runtime": 63.4197,
      "eval_samples_per_second": 2.681,
      "eval_steps_per_second": 1.34,
      "step": 60
    },
    {
      "epoch": 0.17380509000620734,
      "grad_norm": 0.5471166372299194,
      "learning_rate": 0.0001140495867768595,
      "loss": 0.2153,
      "step": 70
    },
    {
      "epoch": 0.19863438857852267,
      "grad_norm": 0.6101455688476562,
      "learning_rate": 0.00013057851239669423,
      "loss": 0.1896,
      "step": 80
    },
    {
      "epoch": 0.22346368715083798,
      "grad_norm": 0.4914955198764801,
      "learning_rate": 0.00014710743801652894,
      "loss": 0.1668,
      "step": 90
    },
    {
      "epoch": 0.22346368715083798,
      "eval_loss": 0.16067302227020264,
      "eval_runtime": 63.2227,
      "eval_samples_per_second": 2.689,
      "eval_steps_per_second": 1.344,
      "step": 90
    },
    {
      "epoch": 0.2482929857231533,
      "grad_norm": 0.577325701713562,
      "learning_rate": 0.00016363636363636366,
      "loss": 0.1684,
      "step": 100
    },
    {
      "epoch": 0.27312228429546864,
      "grad_norm": 0.670930027961731,
      "learning_rate": 0.00018016528925619835,
      "loss": 0.1403,
      "step": 110
    },
    {
      "epoch": 0.297951582867784,
      "grad_norm": 0.3254905343055725,
      "learning_rate": 0.0001966942148760331,
      "loss": 0.1523,
      "step": 120
    },
    {
      "epoch": 0.297951582867784,
      "eval_loss": 0.14328794181346893,
      "eval_runtime": 63.2048,
      "eval_samples_per_second": 2.69,
      "eval_steps_per_second": 1.345,
      "step": 120
    },
    {
      "epoch": 0.3227808814400993,
      "grad_norm": 0.3248271644115448,
      "learning_rate": 0.00019997332081116373,
      "loss": 0.1476,
      "step": 130
    },
    {
      "epoch": 0.34761018001241467,
      "grad_norm": 0.2937372028827667,
      "learning_rate": 0.00019986496100395275,
      "loss": 0.1527,
      "step": 140
    },
    {
      "epoch": 0.37243947858473,
      "grad_norm": 0.2765355408191681,
      "learning_rate": 0.000199673343399533,
      "loss": 0.1408,
      "step": 150
    },
    {
      "epoch": 0.37243947858473,
      "eval_loss": 0.1354692131280899,
      "eval_runtime": 63.3386,
      "eval_samples_per_second": 2.684,
      "eval_steps_per_second": 1.342,
      "step": 150
    },
    {
      "epoch": 0.39726877715704534,
      "grad_norm": 0.3119029998779297,
      "learning_rate": 0.00019939862775022893,
      "loss": 0.1447,
      "step": 160
    },
    {
      "epoch": 0.42209807572936064,
      "grad_norm": 0.40628781914711,
      "learning_rate": 0.0001990410430875205,
      "loss": 0.1394,
      "step": 170
    },
    {
      "epoch": 0.44692737430167595,
      "grad_norm": 0.3151319622993469,
      "learning_rate": 0.00019860088753109896,
      "loss": 0.1353,
      "step": 180
    },
    {
      "epoch": 0.44692737430167595,
      "eval_loss": 0.13339394330978394,
      "eval_runtime": 63.468,
      "eval_samples_per_second": 2.679,
      "eval_steps_per_second": 1.339,
      "step": 180
    },
    {
      "epoch": 0.4717566728739913,
      "grad_norm": 0.2475433200597763,
      "learning_rate": 0.00019807852804032305,
      "loss": 0.1333,
      "step": 190
    },
    {
      "epoch": 0.4965859714463066,
      "grad_norm": 0.23864243924617767,
      "learning_rate": 0.00019747440010828383,
      "loss": 0.1266,
      "step": 200
    },
    {
      "epoch": 0.521415270018622,
      "grad_norm": 0.45053115487098694,
      "learning_rate": 0.00019678900739873226,
      "loss": 0.1358,
      "step": 210
    },
    {
      "epoch": 0.521415270018622,
      "eval_loss": 0.12399247288703918,
      "eval_runtime": 63.2719,
      "eval_samples_per_second": 2.687,
      "eval_steps_per_second": 1.343,
      "step": 210
    },
    {
      "epoch": 0.5462445685909373,
      "grad_norm": 0.2610243856906891,
      "learning_rate": 0.000196022921326173,
      "loss": 0.1308,
      "step": 220
    },
    {
      "epoch": 0.5710738671632526,
      "grad_norm": 0.24870441854000092,
      "learning_rate": 0.00019517678057947384,
      "loss": 0.1341,
      "step": 230
    },
    {
      "epoch": 0.595903165735568,
      "grad_norm": 0.21340686082839966,
      "learning_rate": 0.00019425129058938832,
      "loss": 0.134,
      "step": 240
    },
    {
      "epoch": 0.595903165735568,
      "eval_loss": 0.12239952385425568,
      "eval_runtime": 63.3162,
      "eval_samples_per_second": 2.685,
      "eval_steps_per_second": 1.342,
      "step": 240
    },
    {
      "epoch": 0.6207324643078833,
      "grad_norm": 0.26844245195388794,
      "learning_rate": 0.00019324722294043558,
      "loss": 0.1266,
      "step": 250
    },
    {
      "epoch": 0.6455617628801986,
      "grad_norm": 0.1732800155878067,
      "learning_rate": 0.00019216541472762735,
      "loss": 0.1316,
      "step": 260
    },
    {
      "epoch": 0.6703910614525139,
      "grad_norm": 0.15882068872451782,
      "learning_rate": 0.0001910067678585786,
      "loss": 0.1228,
      "step": 270
    },
    {
      "epoch": 0.6703910614525139,
      "eval_loss": 0.1194252222776413,
      "eval_runtime": 63.2871,
      "eval_samples_per_second": 2.686,
      "eval_steps_per_second": 1.343,
      "step": 270
    },
    {
      "epoch": 0.6952203600248293,
      "grad_norm": 0.2936108708381653,
      "learning_rate": 0.0001897722483015838,
      "loss": 0.1161,
      "step": 280
    },
    {
      "epoch": 0.7200496585971446,
      "grad_norm": 0.14567315578460693,
      "learning_rate": 0.00018846288528028555,
      "loss": 0.126,
      "step": 290
    },
    {
      "epoch": 0.74487895716946,
      "grad_norm": 0.2370811104774475,
      "learning_rate": 0.0001870797704156067,
      "loss": 0.1283,
      "step": 300
    },
    {
      "epoch": 0.74487895716946,
      "eval_loss": 0.1176798865199089,
      "eval_runtime": 63.3181,
      "eval_samples_per_second": 2.685,
      "eval_steps_per_second": 1.342,
      "step": 300
    },
    {
      "epoch": 0.7697082557417753,
      "grad_norm": 0.16372860968112946,
      "learning_rate": 0.00018562405681566216,
      "loss": 0.1357,
      "step": 310
    },
    {
      "epoch": 0.7945375543140907,
      "grad_norm": 0.35256749391555786,
      "learning_rate": 0.00018409695811440796,
      "loss": 0.128,
      "step": 320
    },
    {
      "epoch": 0.819366852886406,
      "grad_norm": 0.16305528581142426,
      "learning_rate": 0.00018249974745983023,
      "loss": 0.1224,
      "step": 330
    },
    {
      "epoch": 0.819366852886406,
      "eval_loss": 0.11605502665042877,
      "eval_runtime": 63.296,
      "eval_samples_per_second": 2.686,
      "eval_steps_per_second": 1.343,
      "step": 330
    },
    {
      "epoch": 0.8441961514587213,
      "grad_norm": 0.14337457716464996,
      "learning_rate": 0.00018083375645251684,
      "loss": 0.1287,
      "step": 340
    },
    {
      "epoch": 0.8690254500310366,
      "grad_norm": 0.23693013191223145,
      "learning_rate": 0.00017910037403549693,
      "loss": 0.1298,
      "step": 350
    },
    {
      "epoch": 0.8938547486033519,
      "grad_norm": 0.11969780921936035,
      "learning_rate": 0.0001773010453362737,
      "loss": 0.1175,
      "step": 360
    },
    {
      "epoch": 0.8938547486033519,
      "eval_loss": 0.11470940709114075,
      "eval_runtime": 63.3643,
      "eval_samples_per_second": 2.683,
      "eval_steps_per_second": 1.341,
      "step": 360
    },
    {
      "epoch": 0.9186840471756673,
      "grad_norm": 0.11961532384157181,
      "learning_rate": 0.0001754372704620164,
      "loss": 0.1299,
      "step": 370
    },
    {
      "epoch": 0.9435133457479826,
      "grad_norm": 0.14891605079174042,
      "learning_rate": 0.00017351060324891502,
      "loss": 0.1266,
      "step": 380
    },
    {
      "epoch": 0.9683426443202979,
      "grad_norm": 0.12348289042711258,
      "learning_rate": 0.00017152264996674136,
      "loss": 0.1283,
      "step": 390
    },
    {
      "epoch": 0.9683426443202979,
      "eval_loss": 0.11494524031877518,
      "eval_runtime": 63.3601,
      "eval_samples_per_second": 2.683,
      "eval_steps_per_second": 1.342,
      "step": 390
    },
    {
      "epoch": 0.9931719428926132,
      "grad_norm": 0.21074527502059937,
      "learning_rate": 0.00016947506797969562,
      "loss": 0.114,
      "step": 400
    },
    {
      "epoch": 1.0173805090006207,
      "grad_norm": 0.2929309606552124,
      "learning_rate": 0.00016736956436465573,
      "loss": 0.1253,
      "step": 410
    },
    {
      "epoch": 1.042209807572936,
      "grad_norm": 0.15592433512210846,
      "learning_rate": 0.00016520789448798087,
      "loss": 0.1125,
      "step": 420
    },
    {
      "epoch": 1.042209807572936,
      "eval_loss": 0.1184433102607727,
      "eval_runtime": 63.5387,
      "eval_samples_per_second": 2.676,
      "eval_steps_per_second": 1.338,
      "step": 420
    },
    {
      "epoch": 1.0670391061452513,
      "grad_norm": 0.22566121816635132,
      "learning_rate": 0.00016299186054205577,
      "loss": 0.1208,
      "step": 430
    },
    {
      "epoch": 1.0918684047175666,
      "grad_norm": 0.22597947716712952,
      "learning_rate": 0.00016072331004279614,
      "loss": 0.1073,
      "step": 440
    },
    {
      "epoch": 1.1166977032898822,
      "grad_norm": 0.2692144811153412,
      "learning_rate": 0.00015840413428936767,
      "loss": 0.111,
      "step": 450
    },
    {
      "epoch": 1.1166977032898822,
      "eval_loss": 0.11594453454017639,
      "eval_runtime": 63.5354,
      "eval_samples_per_second": 2.676,
      "eval_steps_per_second": 1.338,
      "step": 450
    },
    {
      "epoch": 1.1415270018621975,
      "grad_norm": 0.11959823220968246,
      "learning_rate": 0.00015603626678740263,
      "loss": 0.1177,
      "step": 460
    },
    {
      "epoch": 1.1663563004345128,
      "grad_norm": 0.16115300357341766,
      "learning_rate": 0.000153621681637029,
      "loss": 0.1187,
      "step": 470
    },
    {
      "epoch": 1.191185599006828,
      "grad_norm": 0.12221992760896683,
      "learning_rate": 0.00015116239188705556,
      "loss": 0.1208,
      "step": 480
    },
    {
      "epoch": 1.191185599006828,
      "eval_loss": 0.11457835137844086,
      "eval_runtime": 63.6214,
      "eval_samples_per_second": 2.672,
      "eval_steps_per_second": 1.336,
      "step": 480
    },
    {
      "epoch": 1.2160148975791434,
      "grad_norm": 0.14025907218456268,
      "learning_rate": 0.00014866044785668563,
      "loss": 0.1252,
      "step": 490
    },
    {
      "epoch": 1.2408441961514587,
      "grad_norm": 0.2606123387813568,
      "learning_rate": 0.00014611793542615803,
      "loss": 0.1177,
      "step": 500
    },
    {
      "epoch": 1.265673494723774,
      "grad_norm": 0.1650104820728302,
      "learning_rate": 0.00014353697429774084,
      "loss": 0.1182,
      "step": 510
    },
    {
      "epoch": 1.265673494723774,
      "eval_loss": 0.11401313543319702,
      "eval_runtime": 63.6832,
      "eval_samples_per_second": 2.669,
      "eval_steps_per_second": 1.335,
      "step": 510
    },
    {
      "epoch": 1.2905027932960893,
      "grad_norm": 0.2529577910900116,
      "learning_rate": 0.0001409197162285275,
      "loss": 0.1136,
      "step": 520
    },
    {
      "epoch": 1.3153320918684046,
      "grad_norm": 0.10269193351268768,
      "learning_rate": 0.000138268343236509,
      "loss": 0.1163,
      "step": 530
    },
    {
      "epoch": 1.34016139044072,
      "grad_norm": 0.13571563363075256,
      "learning_rate": 0.00013558506578141682,
      "loss": 0.1251,
      "step": 540
    },
    {
      "epoch": 1.34016139044072,
      "eval_loss": 0.1113414540886879,
      "eval_runtime": 63.575,
      "eval_samples_per_second": 2.674,
      "eval_steps_per_second": 1.337,
      "step": 540
    },
    {
      "epoch": 1.3649906890130354,
      "grad_norm": 0.19955532252788544,
      "learning_rate": 0.00013287212092185464,
      "loss": 0.1091,
      "step": 550
    },
    {
      "epoch": 1.3898199875853507,
      "grad_norm": 0.134247288107872,
      "learning_rate": 0.00013013177045025374,
      "loss": 0.1168,
      "step": 560
    },
    {
      "epoch": 1.414649286157666,
      "grad_norm": 0.18950553238391876,
      "learning_rate": 0.0001273662990072083,
      "loss": 0.1226,
      "step": 570
    },
    {
      "epoch": 1.414649286157666,
      "eval_loss": 0.11281875520944595,
      "eval_runtime": 63.627,
      "eval_samples_per_second": 2.672,
      "eval_steps_per_second": 1.336,
      "step": 570
    },
    {
      "epoch": 1.4394785847299814,
      "grad_norm": 0.10673321038484573,
      "learning_rate": 0.00012457801217676182,
      "loss": 0.1217,
      "step": 580
    },
    {
      "epoch": 1.4643078833022967,
      "grad_norm": 0.13143132627010345,
      "learning_rate": 0.00012176923456423284,
      "loss": 0.1128,
      "step": 590
    },
    {
      "epoch": 1.489137181874612,
      "grad_norm": 0.14993837475776672,
      "learning_rate": 0.00011894230785818284,
      "loss": 0.1151,
      "step": 600
    },
    {
      "epoch": 1.489137181874612,
      "eval_loss": 0.10988790541887283,
      "eval_runtime": 63.6526,
      "eval_samples_per_second": 2.671,
      "eval_steps_per_second": 1.335,
      "step": 600
    },
    {
      "epoch": 1.5139664804469275,
      "grad_norm": 0.11081644892692566,
      "learning_rate": 0.00011609958887814129,
      "loss": 0.1224,
      "step": 610
    },
    {
      "epoch": 1.5387957790192428,
      "grad_norm": 0.08948010951280594,
      "learning_rate": 0.00011324344760971671,
      "loss": 0.1182,
      "step": 620
    },
    {
      "epoch": 1.563625077591558,
      "grad_norm": 0.11353158205747604,
      "learning_rate": 0.00011037626522873019,
      "loss": 0.1288,
      "step": 630
    },
    {
      "epoch": 1.563625077591558,
      "eval_loss": 0.10913822054862976,
      "eval_runtime": 63.4574,
      "eval_samples_per_second": 2.679,
      "eval_steps_per_second": 1.339,
      "step": 630
    },
    {
      "epoch": 1.5884543761638734,
      "grad_norm": 0.1256856471300125,
      "learning_rate": 0.00010750043211602045,
      "loss": 0.115,
      "step": 640
    },
    {
      "epoch": 1.6132836747361887,
      "grad_norm": 0.13873563706874847,
      "learning_rate": 0.00010461834586457398,
      "loss": 0.1165,
      "step": 650
    },
    {
      "epoch": 1.638112973308504,
      "grad_norm": 0.08335566520690918,
      "learning_rate": 0.00010173240928064285,
      "loss": 0.0987,
      "step": 660
    },
    {
      "epoch": 1.638112973308504,
      "eval_loss": 0.10997913777828217,
      "eval_runtime": 63.5501,
      "eval_samples_per_second": 2.675,
      "eval_steps_per_second": 1.338,
      "step": 660
    },
    {
      "epoch": 1.6629422718808193,
      "grad_norm": 0.0818580761551857,
      "learning_rate": 9.884502838051595e-05,
      "loss": 0.1156,
      "step": 670
    },
    {
      "epoch": 1.6877715704531346,
      "grad_norm": 0.2908185124397278,
      "learning_rate": 9.595861038461398e-05,
      "loss": 0.1141,
      "step": 680
    },
    {
      "epoch": 1.71260086902545,
      "grad_norm": 0.12958967685699463,
      "learning_rate": 9.307556171058085e-05,
      "loss": 0.1173,
      "step": 690
    },
    {
      "epoch": 1.71260086902545,
      "eval_loss": 0.10946697741746902,
      "eval_runtime": 63.6108,
      "eval_samples_per_second": 2.673,
      "eval_steps_per_second": 1.336,
      "step": 690
    },
    {
      "epoch": 1.7374301675977653,
      "grad_norm": 0.0967235118150711,
      "learning_rate": 9.019828596704394e-05,
      "loss": 0.1213,
      "step": 700
    },
    {
      "epoch": 1.7622594661700806,
      "grad_norm": 0.12068717181682587,
      "learning_rate": 8.732918194971664e-05,
      "loss": 0.1138,
      "step": 710
    },
    {
      "epoch": 1.7870887647423959,
      "grad_norm": 0.09991966933012009,
      "learning_rate": 8.447064164151304e-05,
      "loss": 0.1274,
      "step": 720
    },
    {
      "epoch": 1.7870887647423959,
      "eval_loss": 0.10868000984191895,
      "eval_runtime": 63.549,
      "eval_samples_per_second": 2.675,
      "eval_steps_per_second": 1.338,
      "step": 720
    },
    {
      "epoch": 1.8119180633147114,
      "grad_norm": 0.11946102976799011,
      "learning_rate": 8.162504821834295e-05,
      "loss": 0.1142,
      "step": 730
    },
    {
      "epoch": 1.8367473618870267,
      "grad_norm": 0.09517239034175873,
      "learning_rate": 7.879477406224894e-05,
      "loss": 0.117,
      "step": 740
    },
    {
      "epoch": 1.861576660459342,
      "grad_norm": 0.10799138993024826,
      "learning_rate": 7.598217878354237e-05,
      "loss": 0.1099,
      "step": 750
    },
    {
      "epoch": 1.861576660459342,
      "eval_loss": 0.10897944122552872,
      "eval_runtime": 63.6832,
      "eval_samples_per_second": 2.669,
      "eval_steps_per_second": 1.335,
      "step": 750
    },
    {
      "epoch": 1.8864059590316573,
      "grad_norm": 0.11809396743774414,
      "learning_rate": 7.318960725358741e-05,
      "loss": 0.1151,
      "step": 760
    },
    {
      "epoch": 1.9112352576039728,
      "grad_norm": 0.08294913917779922,
      "learning_rate": 7.041938764987297e-05,
      "loss": 0.1075,
      "step": 770
    },
    {
      "epoch": 1.9360645561762881,
      "grad_norm": 0.12175123393535614,
      "learning_rate": 6.767382951500204e-05,
      "loss": 0.1114,
      "step": 780
    },
    {
      "epoch": 1.9360645561762881,
      "eval_loss": 0.1087631806731224,
      "eval_runtime": 63.5302,
      "eval_samples_per_second": 2.676,
      "eval_steps_per_second": 1.338,
      "step": 780
    },
    {
      "epoch": 1.9608938547486034,
      "grad_norm": 0.10169629007577896,
      "learning_rate": 6.495522183121741e-05,
      "loss": 0.119,
      "step": 790
    },
    {
      "epoch": 1.9857231533209188,
      "grad_norm": 0.10031325370073318,
      "learning_rate": 6.226583111206856e-05,
      "loss": 0.1147,
      "step": 800
    },
    {
      "epoch": 2.009931719428926,
      "grad_norm": 0.09207529574632645,
      "learning_rate": 5.960789951281052e-05,
      "loss": 0.1054,
      "step": 810
    },
    {
      "epoch": 2.009931719428926,
      "eval_loss": 0.10779497772455215,
      "eval_runtime": 63.789,
      "eval_samples_per_second": 2.665,
      "eval_steps_per_second": 1.333,
      "step": 810
    },
    {
      "epoch": 2.0347610180012414,
      "grad_norm": 0.08689288049936295,
      "learning_rate": 5.698364296111056e-05,
      "loss": 0.1124,
      "step": 820
    },
    {
      "epoch": 2.0595903165735567,
      "grad_norm": 0.11001532524824142,
      "learning_rate": 5.43952493096211e-05,
      "loss": 0.1041,
      "step": 830
    },
    {
      "epoch": 2.084419615145872,
      "grad_norm": 0.08535470813512802,
      "learning_rate": 5.184487651195825e-05,
      "loss": 0.1123,
      "step": 840
    },
    {
      "epoch": 2.084419615145872,
      "eval_loss": 0.10820275545120239,
      "eval_runtime": 63.5597,
      "eval_samples_per_second": 2.675,
      "eval_steps_per_second": 1.337,
      "step": 840
    },
    {
      "epoch": 2.1092489137181873,
      "grad_norm": 0.09453848749399185,
      "learning_rate": 4.933465082360807e-05,
      "loss": 0.1191,
      "step": 850
    },
    {
      "epoch": 2.1340782122905027,
      "grad_norm": 0.09359566867351532,
      "learning_rate": 4.686666502925908e-05,
      "loss": 0.1116,
      "step": 860
    },
    {
      "epoch": 2.158907510862818,
      "grad_norm": 0.08426634222269058,
      "learning_rate": 4.444297669803981e-05,
      "loss": 0.111,
      "step": 870
    },
    {
      "epoch": 2.158907510862818,
      "eval_loss": 0.10807512700557709,
      "eval_runtime": 63.5269,
      "eval_samples_per_second": 2.676,
      "eval_steps_per_second": 1.338,
      "step": 870
    },
    {
      "epoch": 2.1837368094351333,
      "grad_norm": 0.12573853135108948,
      "learning_rate": 4.206560646811545e-05,
      "loss": 0.1177,
      "step": 880
    },
    {
      "epoch": 2.2085661080074486,
      "grad_norm": 0.1112317442893982,
      "learning_rate": 3.973653636207437e-05,
      "loss": 0.099,
      "step": 890
    },
    {
      "epoch": 2.2333954065797643,
      "grad_norm": 0.11728418618440628,
      "learning_rate": 3.745770813450824e-05,
      "loss": 0.1075,
      "step": 900
    },
    {
      "epoch": 2.2333954065797643,
      "eval_loss": 0.10825176537036896,
      "eval_runtime": 63.6811,
      "eval_samples_per_second": 2.67,
      "eval_steps_per_second": 1.335,
      "step": 900
    },
    {
      "epoch": 2.2582247051520796,
      "grad_norm": 0.09951410442590714,
      "learning_rate": 3.523102165316381e-05,
      "loss": 0.1065,
      "step": 910
    },
    {
      "epoch": 2.283054003724395,
      "grad_norm": 0.10445831716060638,
      "learning_rate": 3.3058333315016065e-05,
      "loss": 0.1052,
      "step": 920
    },
    {
      "epoch": 2.3078833022967102,
      "grad_norm": 0.13279516994953156,
      "learning_rate": 3.094145449858285e-05,
      "loss": 0.1101,
      "step": 930
    },
    {
      "epoch": 2.3078833022967102,
      "eval_loss": 0.10792894661426544,
      "eval_runtime": 63.7332,
      "eval_samples_per_second": 2.667,
      "eval_steps_per_second": 1.334,
      "step": 930
    },
    {
      "epoch": 2.3327126008690255,
      "grad_norm": 0.08349404484033585,
      "learning_rate": 2.8882150053771995e-05,
      "loss": 0.1031,
      "step": 940
    },
    {
      "epoch": 2.357541899441341,
      "grad_norm": 0.11196437478065491,
      "learning_rate": 2.688213683051892e-05,
      "loss": 0.1151,
      "step": 950
    },
    {
      "epoch": 2.382371198013656,
      "grad_norm": 0.1190405786037445,
      "learning_rate": 2.4943082247442585e-05,
      "loss": 0.1034,
      "step": 960
    },
    {
      "epoch": 2.382371198013656,
      "eval_loss": 0.10848245024681091,
      "eval_runtime": 63.7471,
      "eval_samples_per_second": 2.667,
      "eval_steps_per_second": 1.333,
      "step": 960
    },
    {
      "epoch": 2.4072004965859715,
      "grad_norm": 0.14327777922153473,
      "learning_rate": 2.3066602901712108e-05,
      "loss": 0.1093,
      "step": 970
    },
    {
      "epoch": 2.4320297951582868,
      "grad_norm": 0.10319961607456207,
      "learning_rate": 2.1254263221283654e-05,
      "loss": 0.1027,
      "step": 980
    },
    {
      "epoch": 2.456859093730602,
      "grad_norm": 0.11453425884246826,
      "learning_rate": 1.950757416063077e-05,
      "loss": 0.1041,
      "step": 990
    },
    {
      "epoch": 2.456859093730602,
      "eval_loss": 0.10789740085601807,
      "eval_runtime": 63.7392,
      "eval_samples_per_second": 2.667,
      "eval_steps_per_second": 1.334,
      "step": 990
    },
    {
      "epoch": 2.4816883923029174,
      "grad_norm": 0.13892674446105957,
      "learning_rate": 1.7827991941056177e-05,
      "loss": 0.1043,
      "step": 1000
    },
    {
      "epoch": 2.5065176908752327,
      "grad_norm": 0.11595698446035385,
      "learning_rate": 1.621691683663418e-05,
      "loss": 0.1154,
      "step": 1010
    },
    {
      "epoch": 2.531346989447548,
      "grad_norm": 0.10611863434314728,
      "learning_rate": 1.4675692006797137e-05,
      "loss": 0.1109,
      "step": 1020
    },
    {
      "epoch": 2.531346989447548,
      "eval_loss": 0.10792025178670883,
      "eval_runtime": 63.6053,
      "eval_samples_per_second": 2.673,
      "eval_steps_per_second": 1.336,
      "step": 1020
    },
    {
      "epoch": 2.5561762880198633,
      "grad_norm": 0.12383992969989777,
      "learning_rate": 1.3205602376538163e-05,
      "loss": 0.1161,
      "step": 1030
    },
    {
      "epoch": 2.5810055865921786,
      "grad_norm": 0.09889344125986099,
      "learning_rate": 1.1807873565164506e-05,
      "loss": 0.1109,
      "step": 1040
    },
    {
      "epoch": 2.6058348851644944,
      "grad_norm": 0.11996069550514221,
      "learning_rate": 1.0483670864493778e-05,
      "loss": 0.1144,
      "step": 1050
    },
    {
      "epoch": 2.6058348851644944,
      "eval_loss": 0.10760737955570221,
      "eval_runtime": 63.5808,
      "eval_samples_per_second": 2.674,
      "eval_steps_per_second": 1.337,
      "step": 1050
    },
    {
      "epoch": 2.630664183736809,
      "grad_norm": 0.09900493919849396,
      "learning_rate": 9.234098267345958e-06,
      "loss": 0.1075,
      "step": 1060
    },
    {
      "epoch": 2.655493482309125,
      "grad_norm": 0.14834974706172943,
      "learning_rate": 8.060197547140347e-06,
      "loss": 0.1024,
      "step": 1070
    },
    {
      "epoch": 2.68032278088144,
      "grad_norm": 0.08732602745294571,
      "learning_rate": 6.962947389365071e-06,
      "loss": 0.1055,
      "step": 1080
    },
    {
      "epoch": 2.68032278088144,
      "eval_loss": 0.1074790507555008,
      "eval_runtime": 63.5309,
      "eval_samples_per_second": 2.676,
      "eval_steps_per_second": 1.338,
      "step": 1080
    },
    {
      "epoch": 2.7051520794537556,
      "grad_norm": 0.13483889400959015,
      "learning_rate": 5.943262575643238e-06,
      "loss": 0.1202,
      "step": 1090
    },
    {
      "epoch": 2.729981378026071,
      "grad_norm": 0.1085759624838829,
      "learning_rate": 5.001993221076162e-06,
      "loss": 0.1121,
      "step": 1100
    },
    {
      "epoch": 2.754810676598386,
      "grad_norm": 0.11096607148647308,
      "learning_rate": 4.139924065499035e-06,
      "loss": 0.105,
      "step": 1110
    },
    {
      "epoch": 2.754810676598386,
      "eval_loss": 0.10723040252923965,
      "eval_runtime": 63.5635,
      "eval_samples_per_second": 2.674,
      "eval_steps_per_second": 1.337,
      "step": 1110
    },
    {
      "epoch": 2.7796399751707015,
      "grad_norm": 0.11648862063884735,
      "learning_rate": 3.3577738192404395e-06,
      "loss": 0.1035,
      "step": 1120
    },
    {
      "epoch": 2.804469273743017,
      "grad_norm": 0.12449430674314499,
      "learning_rate": 2.656194563930714e-06,
      "loss": 0.1079,
      "step": 1130
    },
    {
      "epoch": 2.829298572315332,
      "grad_norm": 0.10689777880907059,
      "learning_rate": 2.035771208859194e-06,
      "loss": 0.1053,
      "step": 1140
    },
    {
      "epoch": 2.829298572315332,
      "eval_loss": 0.10730743408203125,
      "eval_runtime": 63.6665,
      "eval_samples_per_second": 2.67,
      "eval_steps_per_second": 1.335,
      "step": 1140
    },
    {
      "epoch": 2.8541278708876474,
      "grad_norm": 0.12634721398353577,
      "learning_rate": 1.49702100333291e-06,
      "loss": 0.1031,
      "step": 1150
    },
    {
      "epoch": 2.8789571694599627,
      "grad_norm": 0.135261669754982,
      "learning_rate": 1.0403931054440374e-06,
      "loss": 0.1077,
      "step": 1160
    },
    {
      "epoch": 2.903786468032278,
      "grad_norm": 0.11396101117134094,
      "learning_rate": 6.662682076050031e-07,
      "loss": 0.0958,
      "step": 1170
    },
    {
      "epoch": 2.903786468032278,
      "eval_loss": 0.10724406689405441,
      "eval_runtime": 63.5911,
      "eval_samples_per_second": 2.673,
      "eval_steps_per_second": 1.337,
      "step": 1170
    },
    {
      "epoch": 2.9286157666045933,
      "grad_norm": 0.11092764139175415,
      "learning_rate": 3.7495821916382344e-07,
      "loss": 0.1165,
      "step": 1180
    },
    {
      "epoch": 2.9534450651769086,
      "grad_norm": 0.1551099419593811,
      "learning_rate": 1.6670600636403687e-07,
      "loss": 0.0921,
      "step": 1190
    },
    {
      "epoch": 2.978274363749224,
      "grad_norm": 0.09738525003194809,
      "learning_rate": 4.168518986628067e-08,
      "loss": 0.0971,
      "step": 1200
    },
    {
      "epoch": 2.978274363749224,
      "eval_loss": 0.10730230808258057,
      "eval_runtime": 63.5023,
      "eval_samples_per_second": 2.677,
      "eval_steps_per_second": 1.339,
      "step": 1200
    }
  ],
  "logging_steps": 10,
  "max_steps": 1209,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 4.438063984162099e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}