{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 252,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.003968253968253968,
      "grad_norm": 0.8016456961631775,
      "learning_rate": 7.692307692307694e-07,
      "loss": 0.1572,
      "step": 1
    },
    {
      "epoch": 0.007936507936507936,
      "grad_norm": 0.5781773924827576,
      "learning_rate": 1.5384615384615387e-06,
      "loss": 0.0899,
      "step": 2
    },
    {
      "epoch": 0.011904761904761904,
      "grad_norm": 0.5898916721343994,
      "learning_rate": 2.307692307692308e-06,
      "loss": 0.0867,
      "step": 3
    },
    {
      "epoch": 0.015873015873015872,
      "grad_norm": 0.7932422161102295,
      "learning_rate": 3.0769230769230774e-06,
      "loss": 0.1254,
      "step": 4
    },
    {
      "epoch": 0.01984126984126984,
      "grad_norm": 0.8549998998641968,
      "learning_rate": 3.846153846153847e-06,
      "loss": 0.1682,
      "step": 5
    },
    {
      "epoch": 0.023809523809523808,
      "grad_norm": 0.6565646529197693,
      "learning_rate": 4.615384615384616e-06,
      "loss": 0.0863,
      "step": 6
    },
    {
      "epoch": 0.027777777777777776,
      "grad_norm": 0.6592453718185425,
      "learning_rate": 5.384615384615385e-06,
      "loss": 0.092,
      "step": 7
    },
    {
      "epoch": 0.031746031746031744,
      "grad_norm": 0.613888144493103,
      "learning_rate": 6.153846153846155e-06,
      "loss": 0.0893,
      "step": 8
    },
    {
      "epoch": 0.03571428571428571,
      "grad_norm": 0.7832797765731812,
      "learning_rate": 6.923076923076923e-06,
      "loss": 0.1094,
      "step": 9
    },
    {
      "epoch": 0.03968253968253968,
      "grad_norm": 0.6986587643623352,
      "learning_rate": 7.692307692307694e-06,
      "loss": 0.0972,
      "step": 10
    },
    {
      "epoch": 0.04365079365079365,
      "grad_norm": 0.7623270153999329,
      "learning_rate": 8.461538461538462e-06,
      "loss": 0.1219,
      "step": 11
    },
    {
      "epoch": 0.047619047619047616,
      "grad_norm": 0.7717706561088562,
      "learning_rate": 9.230769230769232e-06,
      "loss": 0.1111,
      "step": 12
    },
    {
      "epoch": 0.051587301587301584,
      "grad_norm": 0.6941069960594177,
      "learning_rate": 1e-05,
      "loss": 0.0913,
      "step": 13
    },
    {
      "epoch": 0.05555555555555555,
      "grad_norm": 0.9567614793777466,
      "learning_rate": 9.999568045802216e-06,
      "loss": 0.1244,
      "step": 14
    },
    {
      "epoch": 0.05952380952380952,
      "grad_norm": 0.9038094282150269,
      "learning_rate": 9.99827225784264e-06,
      "loss": 0.1261,
      "step": 15
    },
    {
      "epoch": 0.06349206349206349,
      "grad_norm": 0.942318320274353,
      "learning_rate": 9.996112860009689e-06,
      "loss": 0.1154,
      "step": 16
    },
    {
      "epoch": 0.06746031746031746,
      "grad_norm": 1.2963838577270508,
      "learning_rate": 9.993090225407743e-06,
      "loss": 0.1179,
      "step": 17
    },
    {
      "epoch": 0.07142857142857142,
      "grad_norm": 0.8918065428733826,
      "learning_rate": 9.98920487629269e-06,
      "loss": 0.1136,
      "step": 18
    },
    {
      "epoch": 0.07539682539682539,
      "grad_norm": 0.8215576410293579,
      "learning_rate": 9.98445748398167e-06,
      "loss": 0.0897,
      "step": 19
    },
    {
      "epoch": 0.07936507936507936,
      "grad_norm": 0.9790538549423218,
      "learning_rate": 9.978848868737099e-06,
      "loss": 0.0951,
      "step": 20
    },
    {
      "epoch": 0.08333333333333333,
      "grad_norm": 0.9144031405448914,
      "learning_rate": 9.972379999624935e-06,
      "loss": 0.1015,
      "step": 21
    },
    {
      "epoch": 0.0873015873015873,
      "grad_norm": 2.1270599365234375,
      "learning_rate": 9.96505199434725e-06,
      "loss": 0.1525,
      "step": 22
    },
    {
      "epoch": 0.09126984126984126,
      "grad_norm": 0.9955826997756958,
      "learning_rate": 9.956866119049095e-06,
      "loss": 0.1295,
      "step": 23
    },
    {
      "epoch": 0.09523809523809523,
      "grad_norm": 1.271159052848816,
      "learning_rate": 9.947823788099754e-06,
      "loss": 0.1149,
      "step": 24
    },
    {
      "epoch": 0.0992063492063492,
      "grad_norm": 0.8611926436424255,
      "learning_rate": 9.937926563848345e-06,
      "loss": 0.109,
      "step": 25
    },
    {
      "epoch": 0.10317460317460317,
      "grad_norm": 0.9697916507720947,
      "learning_rate": 9.9271761563539e-06,
      "loss": 0.1274,
      "step": 26
    },
    {
      "epoch": 0.10714285714285714,
      "grad_norm": 1.031341791152954,
      "learning_rate": 9.915574423089872e-06,
      "loss": 0.1389,
      "step": 27
    },
    {
      "epoch": 0.1111111111111111,
      "grad_norm": 0.9682615399360657,
      "learning_rate": 9.903123368623216e-06,
      "loss": 0.1257,
      "step": 28
    },
    {
      "epoch": 0.11507936507936507,
      "grad_norm": 0.8228491544723511,
      "learning_rate": 9.889825144268029e-06,
      "loss": 0.0996,
      "step": 29
    },
    {
      "epoch": 0.11904761904761904,
      "grad_norm": 0.8693366646766663,
      "learning_rate": 9.875682047713847e-06,
      "loss": 0.1193,
      "step": 30
    },
    {
      "epoch": 0.12301587301587301,
      "grad_norm": 0.8517415523529053,
      "learning_rate": 9.860696522628638e-06,
      "loss": 0.1002,
      "step": 31
    },
    {
      "epoch": 0.12698412698412698,
      "grad_norm": 0.8783800005912781,
      "learning_rate": 9.84487115823659e-06,
      "loss": 0.1055,
      "step": 32
    },
    {
      "epoch": 0.13095238095238096,
      "grad_norm": 0.8121059536933899,
      "learning_rate": 9.828208688870736e-06,
      "loss": 0.0906,
      "step": 33
    },
    {
      "epoch": 0.1349206349206349,
      "grad_norm": 0.9300619959831238,
      "learning_rate": 9.810711993500506e-06,
      "loss": 0.132,
      "step": 34
    },
    {
      "epoch": 0.1388888888888889,
      "grad_norm": 0.7378125190734863,
      "learning_rate": 9.792384095234312e-06,
      "loss": 0.0821,
      "step": 35
    },
    {
      "epoch": 0.14285714285714285,
      "grad_norm": 0.9593575596809387,
      "learning_rate": 9.773228160797187e-06,
      "loss": 0.1413,
      "step": 36
    },
    {
      "epoch": 0.14682539682539683,
      "grad_norm": 0.8884281516075134,
      "learning_rate": 9.753247499983649e-06,
      "loss": 0.1197,
      "step": 37
    },
    {
      "epoch": 0.15079365079365079,
      "grad_norm": 0.9438914656639099,
      "learning_rate": 9.732445565085823e-06,
      "loss": 0.1173,
      "step": 38
    },
    {
      "epoch": 0.15476190476190477,
      "grad_norm": 0.9140347838401794,
      "learning_rate": 9.71082595029695e-06,
      "loss": 0.1369,
      "step": 39
    },
    {
      "epoch": 0.15873015873015872,
      "grad_norm": 0.9356802105903625,
      "learning_rate": 9.688392391090374e-06,
      "loss": 0.1345,
      "step": 40
    },
    {
      "epoch": 0.1626984126984127,
      "grad_norm": 0.9041910767555237,
      "learning_rate": 9.665148763574123e-06,
      "loss": 0.1288,
      "step": 41
    },
    {
      "epoch": 0.16666666666666666,
      "grad_norm": 1.086296558380127,
      "learning_rate": 9.64109908382119e-06,
      "loss": 0.1717,
      "step": 42
    },
    {
      "epoch": 0.17063492063492064,
      "grad_norm": 0.81577467918396,
      "learning_rate": 9.616247507175624e-06,
      "loss": 0.0939,
      "step": 43
    },
    {
      "epoch": 0.1746031746031746,
      "grad_norm": 0.7911767363548279,
      "learning_rate": 9.590598327534563e-06,
      "loss": 0.1011,
      "step": 44
    },
    {
      "epoch": 0.17857142857142858,
      "grad_norm": 1.0346065759658813,
      "learning_rate": 9.56415597660634e-06,
      "loss": 0.1359,
      "step": 45
    },
    {
      "epoch": 0.18253968253968253,
      "grad_norm": 0.9680626392364502,
      "learning_rate": 9.536925023144742e-06,
      "loss": 0.1411,
      "step": 46
    },
    {
      "epoch": 0.1865079365079365,
      "grad_norm": 1.1418447494506836,
      "learning_rate": 9.508910172159635e-06,
      "loss": 0.1447,
      "step": 47
    },
    {
      "epoch": 0.19047619047619047,
      "grad_norm": 1.0825251340866089,
      "learning_rate": 9.48011626410401e-06,
      "loss": 0.1203,
      "step": 48
    },
    {
      "epoch": 0.19444444444444445,
      "grad_norm": 0.8085933923721313,
      "learning_rate": 9.450548274037652e-06,
      "loss": 0.1118,
      "step": 49
    },
    {
      "epoch": 0.1984126984126984,
      "grad_norm": 1.0938574075698853,
      "learning_rate": 9.420211310767534e-06,
      "loss": 0.131,
      "step": 50
    },
    {
      "epoch": 0.20238095238095238,
      "grad_norm": 0.9817928075790405,
      "learning_rate": 9.389110615965102e-06,
      "loss": 0.1272,
      "step": 51
    },
    {
      "epoch": 0.20634920634920634,
      "grad_norm": 0.7266258597373962,
      "learning_rate": 9.35725156326063e-06,
      "loss": 0.1021,
      "step": 52
    },
    {
      "epoch": 0.21031746031746032,
      "grad_norm": 0.9259639382362366,
      "learning_rate": 9.324639657314742e-06,
      "loss": 0.1637,
      "step": 53
    },
    {
      "epoch": 0.21428571428571427,
      "grad_norm": 0.7401134371757507,
      "learning_rate": 9.291280532867301e-06,
      "loss": 0.1058,
      "step": 54
    },
    {
      "epoch": 0.21825396825396826,
      "grad_norm": 0.7972776889801025,
      "learning_rate": 9.257179953763846e-06,
      "loss": 0.119,
      "step": 55
    },
    {
      "epoch": 0.2222222222222222,
      "grad_norm": 0.7029431462287903,
      "learning_rate": 9.222343811959694e-06,
      "loss": 0.0905,
      "step": 56
    },
    {
      "epoch": 0.2261904761904762,
      "grad_norm": 0.9574322700500488,
      "learning_rate": 9.186778126501916e-06,
      "loss": 0.1559,
      "step": 57
    },
    {
      "epoch": 0.23015873015873015,
      "grad_norm": 0.8770625591278076,
      "learning_rate": 9.150489042489368e-06,
      "loss": 0.1117,
      "step": 58
    },
    {
      "epoch": 0.23412698412698413,
      "grad_norm": 0.798253059387207,
      "learning_rate": 9.113482830010918e-06,
      "loss": 0.0985,
      "step": 59
    },
    {
      "epoch": 0.23809523809523808,
      "grad_norm": 0.780923068523407,
      "learning_rate": 9.075765883062093e-06,
      "loss": 0.1185,
      "step": 60
    },
    {
      "epoch": 0.24206349206349206,
      "grad_norm": 0.9421917796134949,
      "learning_rate": 9.037344718440321e-06,
      "loss": 0.1397,
      "step": 61
    },
    {
      "epoch": 0.24603174603174602,
      "grad_norm": 0.9296698570251465,
      "learning_rate": 8.99822597461894e-06,
      "loss": 0.1439,
      "step": 62
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.989691972732544,
      "learning_rate": 8.958416410600188e-06,
      "loss": 0.1417,
      "step": 63
    },
    {
      "epoch": 0.25396825396825395,
      "grad_norm": 0.8695488572120667,
      "learning_rate": 8.917922904747385e-06,
      "loss": 0.1231,
      "step": 64
    },
    {
      "epoch": 0.25793650793650796,
      "grad_norm": 0.8557742834091187,
      "learning_rate": 8.876752453596462e-06,
      "loss": 0.1129,
      "step": 65
    },
    {
      "epoch": 0.2619047619047619,
      "grad_norm": 0.8572667837142944,
      "learning_rate": 8.834912170647102e-06,
      "loss": 0.1355,
      "step": 66
    },
    {
      "epoch": 0.26587301587301587,
      "grad_norm": 1.0003724098205566,
      "learning_rate": 8.792409285133644e-06,
      "loss": 0.1601,
      "step": 67
    },
    {
      "epoch": 0.2698412698412698,
      "grad_norm": 0.9211122989654541,
      "learning_rate": 8.749251140776016e-06,
      "loss": 0.0913,
      "step": 68
    },
    {
      "epoch": 0.27380952380952384,
      "grad_norm": 0.8586229681968689,
      "learning_rate": 8.705445194510868e-06,
      "loss": 0.1041,
      "step": 69
    },
    {
      "epoch": 0.2777777777777778,
      "grad_norm": 0.8599536418914795,
      "learning_rate": 8.660999015203152e-06,
      "loss": 0.1141,
      "step": 70
    },
    {
      "epoch": 0.28174603174603174,
      "grad_norm": 0.7690649032592773,
      "learning_rate": 8.615920282338355e-06,
      "loss": 0.0876,
      "step": 71
    },
    {
      "epoch": 0.2857142857142857,
      "grad_norm": 0.9678345918655396,
      "learning_rate": 8.570216784695637e-06,
      "loss": 0.1313,
      "step": 72
    },
    {
      "epoch": 0.2896825396825397,
      "grad_norm": 1.0103791952133179,
      "learning_rate": 8.52389641900206e-06,
      "loss": 0.1396,
      "step": 73
    },
    {
      "epoch": 0.29365079365079366,
      "grad_norm": 0.8074073195457458,
      "learning_rate": 8.476967188568187e-06,
      "loss": 0.1069,
      "step": 74
    },
    {
      "epoch": 0.2976190476190476,
      "grad_norm": 0.8165928721427917,
      "learning_rate": 8.429437201905254e-06,
      "loss": 0.0957,
      "step": 75
    },
    {
      "epoch": 0.30158730158730157,
      "grad_norm": 0.8850393891334534,
      "learning_rate": 8.38131467132416e-06,
      "loss": 0.1139,
      "step": 76
    },
    {
      "epoch": 0.3055555555555556,
      "grad_norm": 0.7706073522567749,
      "learning_rate": 8.332607911516545e-06,
      "loss": 0.0906,
      "step": 77
    },
    {
      "epoch": 0.30952380952380953,
      "grad_norm": 0.8396443128585815,
      "learning_rate": 8.283325338118154e-06,
      "loss": 0.1223,
      "step": 78
    },
    {
      "epoch": 0.3134920634920635,
      "grad_norm": 0.6697443127632141,
      "learning_rate": 8.233475466254766e-06,
      "loss": 0.0934,
      "step": 79
    },
    {
      "epoch": 0.31746031746031744,
      "grad_norm": 0.931776762008667,
      "learning_rate": 8.183066909070946e-06,
      "loss": 0.126,
      "step": 80
    },
    {
      "epoch": 0.32142857142857145,
      "grad_norm": 0.8448562026023865,
      "learning_rate": 8.132108376241849e-06,
      "loss": 0.1074,
      "step": 81
    },
    {
      "epoch": 0.3253968253968254,
      "grad_norm": 0.7601461410522461,
      "learning_rate": 8.08060867246834e-06,
      "loss": 0.1016,
      "step": 82
    },
    {
      "epoch": 0.32936507936507936,
      "grad_norm": 0.8407372832298279,
      "learning_rate": 8.028576695955711e-06,
      "loss": 0.0987,
      "step": 83
    },
    {
      "epoch": 0.3333333333333333,
      "grad_norm": 0.7016237378120422,
      "learning_rate": 7.976021436876232e-06,
      "loss": 0.0854,
      "step": 84
    },
    {
      "epoch": 0.3373015873015873,
      "grad_norm": 0.8386403322219849,
      "learning_rate": 7.92295197581581e-06,
      "loss": 0.1308,
      "step": 85
    },
    {
      "epoch": 0.3412698412698413,
      "grad_norm": 0.8006751537322998,
      "learning_rate": 7.869377482205042e-06,
      "loss": 0.1136,
      "step": 86
    },
    {
      "epoch": 0.34523809523809523,
      "grad_norm": 0.9047397375106812,
      "learning_rate": 7.815307212734888e-06,
      "loss": 0.1307,
      "step": 87
    },
    {
      "epoch": 0.3492063492063492,
      "grad_norm": 0.8037883043289185,
      "learning_rate": 7.7607505097573e-06,
      "loss": 0.1057,
      "step": 88
    },
    {
      "epoch": 0.3531746031746032,
      "grad_norm": 0.7339609861373901,
      "learning_rate": 7.705716799671019e-06,
      "loss": 0.0983,
      "step": 89
    },
    {
      "epoch": 0.35714285714285715,
      "grad_norm": 0.9146028757095337,
      "learning_rate": 7.650215591292888e-06,
      "loss": 0.1195,
      "step": 90
    },
    {
      "epoch": 0.3611111111111111,
      "grad_norm": 0.857414960861206,
      "learning_rate": 7.594256474214883e-06,
      "loss": 0.1124,
      "step": 91
    },
    {
      "epoch": 0.36507936507936506,
      "grad_norm": 0.775814414024353,
      "learning_rate": 7.537849117147212e-06,
      "loss": 0.0912,
      "step": 92
    },
    {
      "epoch": 0.36904761904761907,
      "grad_norm": 0.8781107664108276,
      "learning_rate": 7.481003266247745e-06,
      "loss": 0.1272,
      "step": 93
    },
    {
      "epoch": 0.373015873015873,
      "grad_norm": 0.7291442155838013,
      "learning_rate": 7.4237287434380485e-06,
      "loss": 0.1009,
      "step": 94
    },
    {
      "epoch": 0.376984126984127,
      "grad_norm": 0.8335267901420593,
      "learning_rate": 7.366035444706346e-06,
      "loss": 0.0911,
      "step": 95
    },
    {
      "epoch": 0.38095238095238093,
      "grad_norm": 0.9337305426597595,
      "learning_rate": 7.307933338397667e-06,
      "loss": 0.1116,
      "step": 96
    },
    {
      "epoch": 0.38492063492063494,
      "grad_norm": 0.9217402338981628,
      "learning_rate": 7.249432463491498e-06,
      "loss": 0.1299,
      "step": 97
    },
    {
      "epoch": 0.3888888888888889,
      "grad_norm": 0.9241219758987427,
      "learning_rate": 7.190542927867234e-06,
      "loss": 0.1247,
      "step": 98
    },
    {
      "epoch": 0.39285714285714285,
      "grad_norm": 0.9741147756576538,
      "learning_rate": 7.131274906557725e-06,
      "loss": 0.1123,
      "step": 99
    },
    {
      "epoch": 0.3968253968253968,
      "grad_norm": 0.9606366157531738,
      "learning_rate": 7.0716386399912075e-06,
      "loss": 0.1176,
      "step": 100
    },
    {
      "epoch": 0.4007936507936508,
      "grad_norm": 0.7203896641731262,
      "learning_rate": 7.0116444322219575e-06,
      "loss": 0.1118,
      "step": 101
    },
    {
      "epoch": 0.40476190476190477,
      "grad_norm": 0.8363652229309082,
      "learning_rate": 6.95130264914993e-06,
      "loss": 0.12,
      "step": 102
    },
    {
      "epoch": 0.4087301587301587,
      "grad_norm": 0.6669372916221619,
      "learning_rate": 6.890623716729724e-06,
      "loss": 0.0751,
      "step": 103
    },
    {
      "epoch": 0.4126984126984127,
      "grad_norm": 0.7728121280670166,
      "learning_rate": 6.829618119169169e-06,
      "loss": 0.1016,
      "step": 104
    },
    {
      "epoch": 0.4166666666666667,
      "grad_norm": 0.8097180724143982,
      "learning_rate": 6.768296397117848e-06,
      "loss": 0.1163,
      "step": 105
    },
    {
      "epoch": 0.42063492063492064,
      "grad_norm": 0.935376763343811,
      "learning_rate": 6.706669145845863e-06,
      "loss": 0.1407,
      "step": 106
    },
    {
      "epoch": 0.4246031746031746,
      "grad_norm": 0.7769891619682312,
      "learning_rate": 6.6447470134131685e-06,
      "loss": 0.0998,
      "step": 107
    },
    {
      "epoch": 0.42857142857142855,
      "grad_norm": 0.7420790195465088,
      "learning_rate": 6.5825406988297815e-06,
      "loss": 0.0974,
      "step": 108
    },
    {
      "epoch": 0.43253968253968256,
      "grad_norm": 0.8356537818908691,
      "learning_rate": 6.520060950207186e-06,
      "loss": 0.1368,
      "step": 109
    },
    {
      "epoch": 0.4365079365079365,
      "grad_norm": 0.8886961340904236,
      "learning_rate": 6.457318562901257e-06,
      "loss": 0.1258,
      "step": 110
    },
    {
      "epoch": 0.44047619047619047,
      "grad_norm": 0.8278570771217346,
      "learning_rate": 6.394324377647028e-06,
      "loss": 0.1207,
      "step": 111
    },
    {
      "epoch": 0.4444444444444444,
      "grad_norm": 0.7972725033760071,
      "learning_rate": 6.331089278685599e-06,
      "loss": 0.1233,
      "step": 112
    },
    {
      "epoch": 0.44841269841269843,
      "grad_norm": 0.7953070402145386,
      "learning_rate": 6.267624191883551e-06,
      "loss": 0.0925,
      "step": 113
    },
    {
      "epoch": 0.4523809523809524,
      "grad_norm": 0.8089295029640198,
      "learning_rate": 6.203940082845144e-06,
      "loss": 0.1058,
      "step": 114
    },
    {
      "epoch": 0.45634920634920634,
      "grad_norm": 0.9612205028533936,
      "learning_rate": 6.140047955017672e-06,
      "loss": 0.1254,
      "step": 115
    },
    {
      "epoch": 0.4603174603174603,
      "grad_norm": 0.7713960409164429,
      "learning_rate": 6.075958847790262e-06,
      "loss": 0.0923,
      "step": 116
    },
    {
      "epoch": 0.4642857142857143,
      "grad_norm": 0.8480079174041748,
      "learning_rate": 6.011683834586474e-06,
      "loss": 0.1245,
      "step": 117
    },
    {
      "epoch": 0.46825396825396826,
      "grad_norm": 0.7191230654716492,
      "learning_rate": 5.947234020951015e-06,
      "loss": 0.0957,
      "step": 118
    },
    {
      "epoch": 0.4722222222222222,
      "grad_norm": 0.8899836540222168,
      "learning_rate": 5.882620542630901e-06,
      "loss": 0.1155,
      "step": 119
    },
    {
      "epoch": 0.47619047619047616,
      "grad_norm": 0.813059389591217,
      "learning_rate": 5.817854563651415e-06,
      "loss": 0.1106,
      "step": 120
    },
    {
      "epoch": 0.4801587301587302,
      "grad_norm": 0.8137439489364624,
      "learning_rate": 5.752947274387147e-06,
      "loss": 0.1042,
      "step": 121
    },
    {
      "epoch": 0.48412698412698413,
      "grad_norm": 0.9558090567588806,
      "learning_rate": 5.687909889628529e-06,
      "loss": 0.1197,
      "step": 122
    },
    {
      "epoch": 0.4880952380952381,
      "grad_norm": 0.9763339757919312,
      "learning_rate": 5.622753646644102e-06,
      "loss": 0.1273,
      "step": 123
    },
    {
      "epoch": 0.49206349206349204,
      "grad_norm": 0.7899503111839294,
      "learning_rate": 5.557489803238934e-06,
      "loss": 0.0896,
      "step": 124
    },
    {
      "epoch": 0.49603174603174605,
      "grad_norm": 0.8102907538414001,
      "learning_rate": 5.492129635809473e-06,
      "loss": 0.0992,
      "step": 125
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.7076608538627625,
      "learning_rate": 5.426684437395196e-06,
      "loss": 0.123,
      "step": 126
    },
    {
      "epoch": 0.503968253968254,
      "grad_norm": 0.7774274945259094,
      "learning_rate": 5.361165515727374e-06,
      "loss": 0.1035,
      "step": 127
    },
    {
      "epoch": 0.5079365079365079,
      "grad_norm": 0.7213121056556702,
      "learning_rate": 5.295584191275308e-06,
      "loss": 0.0872,
      "step": 128
    },
    {
      "epoch": 0.5119047619047619,
      "grad_norm": 0.8430085182189941,
      "learning_rate": 5.229951795290353e-06,
      "loss": 0.1232,
      "step": 129
    },
    {
      "epoch": 0.5158730158730159,
      "grad_norm": 0.9626345634460449,
      "learning_rate": 5.164279667848094e-06,
      "loss": 0.1609,
      "step": 130
    },
    {
      "epoch": 0.5198412698412699,
      "grad_norm": 0.8764887452125549,
      "learning_rate": 5.0985791558889785e-06,
      "loss": 0.135,
      "step": 131
    },
    {
      "epoch": 0.5238095238095238,
      "grad_norm": 0.8301170468330383,
      "learning_rate": 5.032861611257783e-06,
      "loss": 0.1246,
      "step": 132
    },
    {
      "epoch": 0.5277777777777778,
      "grad_norm": 0.7681728601455688,
      "learning_rate": 4.967138388742218e-06,
      "loss": 0.103,
      "step": 133
    },
    {
      "epoch": 0.5317460317460317,
      "grad_norm": 0.7660509347915649,
      "learning_rate": 4.9014208441110215e-06,
      "loss": 0.1055,
      "step": 134
    },
    {
      "epoch": 0.5357142857142857,
      "grad_norm": 0.8104261755943298,
      "learning_rate": 4.835720332151907e-06,
      "loss": 0.1237,
      "step": 135
    },
    {
      "epoch": 0.5396825396825397,
      "grad_norm": 0.7689002156257629,
      "learning_rate": 4.770048204709648e-06,
      "loss": 0.1049,
      "step": 136
    },
    {
      "epoch": 0.5436507936507936,
      "grad_norm": 0.833803653717041,
      "learning_rate": 4.7044158087246926e-06,
      "loss": 0.1099,
      "step": 137
    },
    {
      "epoch": 0.5476190476190477,
      "grad_norm": 0.7744324207305908,
      "learning_rate": 4.6388344842726266e-06,
      "loss": 0.1009,
      "step": 138
    },
    {
      "epoch": 0.5515873015873016,
      "grad_norm": 0.8948577642440796,
      "learning_rate": 4.573315562604804e-06,
      "loss": 0.1226,
      "step": 139
    },
    {
      "epoch": 0.5555555555555556,
      "grad_norm": 0.8321415781974792,
      "learning_rate": 4.5078703641905275e-06,
      "loss": 0.0984,
      "step": 140
    },
    {
      "epoch": 0.5595238095238095,
      "grad_norm": 0.8380492925643921,
      "learning_rate": 4.442510196761068e-06,
      "loss": 0.1052,
      "step": 141
    },
    {
      "epoch": 0.5634920634920635,
      "grad_norm": 0.8537865877151489,
      "learning_rate": 4.377246353355899e-06,
      "loss": 0.1145,
      "step": 142
    },
    {
      "epoch": 0.5674603174603174,
      "grad_norm": 0.9566012620925903,
      "learning_rate": 4.312090110371473e-06,
      "loss": 0.1453,
      "step": 143
    },
    {
      "epoch": 0.5714285714285714,
      "grad_norm": 0.8509257435798645,
      "learning_rate": 4.247052725612853e-06,
      "loss": 0.0998,
      "step": 144
    },
    {
      "epoch": 0.5753968253968254,
      "grad_norm": 0.7241506576538086,
      "learning_rate": 4.182145436348587e-06,
      "loss": 0.0929,
      "step": 145
    },
    {
      "epoch": 0.5793650793650794,
      "grad_norm": 0.9016909003257751,
      "learning_rate": 4.1173794573691e-06,
      "loss": 0.1061,
      "step": 146
    },
    {
      "epoch": 0.5833333333333334,
      "grad_norm": 0.846988320350647,
      "learning_rate": 4.052765979048986e-06,
      "loss": 0.1237,
      "step": 147
    },
    {
      "epoch": 0.5873015873015873,
      "grad_norm": 0.9077289700508118,
      "learning_rate": 3.988316165413528e-06,
      "loss": 0.0911,
      "step": 148
    },
    {
      "epoch": 0.5912698412698413,
      "grad_norm": 0.8484727740287781,
      "learning_rate": 3.924041152209739e-06,
      "loss": 0.1028,
      "step": 149
    },
    {
      "epoch": 0.5952380952380952,
      "grad_norm": 0.7354507446289062,
      "learning_rate": 3.859952044982329e-06,
      "loss": 0.0847,
      "step": 150
    },
    {
      "epoch": 0.5992063492063492,
      "grad_norm": 0.644497275352478,
      "learning_rate": 3.7960599171548572e-06,
      "loss": 0.09,
      "step": 151
    },
    {
      "epoch": 0.6031746031746031,
      "grad_norm": 0.7260298728942871,
      "learning_rate": 3.732375808116451e-06,
      "loss": 0.0788,
      "step": 152
    },
    {
      "epoch": 0.6071428571428571,
      "grad_norm": 0.7378935217857361,
      "learning_rate": 3.6689107213144025e-06,
      "loss": 0.1021,
      "step": 153
    },
    {
      "epoch": 0.6111111111111112,
      "grad_norm": 0.644940197467804,
      "learning_rate": 3.6056756223529734e-06,
      "loss": 0.0769,
      "step": 154
    },
    {
      "epoch": 0.6150793650793651,
      "grad_norm": 0.8905737400054932,
      "learning_rate": 3.542681437098745e-06,
      "loss": 0.1694,
      "step": 155
    },
    {
      "epoch": 0.6190476190476191,
      "grad_norm": 0.7038963437080383,
      "learning_rate": 3.479939049792817e-06,
      "loss": 0.0968,
      "step": 156
    },
    {
      "epoch": 0.623015873015873,
      "grad_norm": 0.7893255352973938,
      "learning_rate": 3.4174593011702197e-06,
      "loss": 0.1158,
      "step": 157
    },
    {
      "epoch": 0.626984126984127,
      "grad_norm": 0.7545840740203857,
      "learning_rate": 3.3552529865868323e-06,
      "loss": 0.0929,
      "step": 158
    },
    {
      "epoch": 0.6309523809523809,
      "grad_norm": 0.7931913733482361,
      "learning_rate": 3.2933308541541365e-06,
      "loss": 0.1034,
      "step": 159
    },
    {
      "epoch": 0.6349206349206349,
      "grad_norm": 0.7249116897583008,
      "learning_rate": 3.2317036028821523e-06,
      "loss": 0.0874,
      "step": 160
    },
    {
      "epoch": 0.6388888888888888,
      "grad_norm": 0.706990122795105,
      "learning_rate": 3.1703818808308327e-06,
      "loss": 0.0882,
      "step": 161
    },
    {
      "epoch": 0.6428571428571429,
      "grad_norm": 0.6917218565940857,
      "learning_rate": 3.1093762832702775e-06,
      "loss": 0.0817,
      "step": 162
    },
    {
      "epoch": 0.6468253968253969,
      "grad_norm": 0.8852331042289734,
      "learning_rate": 3.048697350850073e-06,
      "loss": 0.1279,
      "step": 163
    },
    {
      "epoch": 0.6507936507936508,
      "grad_norm": 0.8701708316802979,
      "learning_rate": 2.988355567778043e-06,
      "loss": 0.1171,
      "step": 164
    },
    {
      "epoch": 0.6547619047619048,
      "grad_norm": 0.8259684443473816,
      "learning_rate": 2.9283613600087933e-06,
      "loss": 0.1141,
      "step": 165
    },
    {
      "epoch": 0.6587301587301587,
      "grad_norm": 0.8396156430244446,
      "learning_rate": 2.8687250934422774e-06,
      "loss": 0.1178,
      "step": 166
    },
    {
      "epoch": 0.6626984126984127,
      "grad_norm": 0.7612534761428833,
      "learning_rate": 2.809457072132766e-06,
      "loss": 0.1037,
      "step": 167
    },
    {
      "epoch": 0.6666666666666666,
      "grad_norm": 0.7613610029220581,
      "learning_rate": 2.750567536508504e-06,
      "loss": 0.0943,
      "step": 168
    },
    {
      "epoch": 0.6706349206349206,
      "grad_norm": 0.8023819923400879,
      "learning_rate": 2.692066661602333e-06,
      "loss": 0.0949,
      "step": 169
    },
    {
      "epoch": 0.6746031746031746,
      "grad_norm": 0.7530853748321533,
      "learning_rate": 2.633964555293654e-06,
      "loss": 0.0852,
      "step": 170
    },
    {
      "epoch": 0.6785714285714286,
      "grad_norm": 0.9524847269058228,
      "learning_rate": 2.576271256561953e-06,
      "loss": 0.0924,
      "step": 171
    },
    {
      "epoch": 0.6825396825396826,
      "grad_norm": 0.8371558785438538,
      "learning_rate": 2.5189967337522574e-06,
      "loss": 0.1211,
      "step": 172
    },
    {
      "epoch": 0.6865079365079365,
      "grad_norm": 0.676647961139679,
      "learning_rate": 2.46215088285279e-06,
      "loss": 0.0919,
      "step": 173
    },
    {
      "epoch": 0.6904761904761905,
      "grad_norm": 0.6709980368614197,
      "learning_rate": 2.4057435257851173e-06,
      "loss": 0.0745,
      "step": 174
    },
    {
      "epoch": 0.6944444444444444,
      "grad_norm": 0.7309594750404358,
      "learning_rate": 2.349784408707112e-06,
      "loss": 0.0967,
      "step": 175
    },
    {
      "epoch": 0.6984126984126984,
      "grad_norm": 0.7355422377586365,
      "learning_rate": 2.2942832003289823e-06,
      "loss": 0.1096,
      "step": 176
    },
    {
      "epoch": 0.7023809523809523,
      "grad_norm": 0.6394128203392029,
      "learning_rate": 2.2392494902427027e-06,
      "loss": 0.0876,
      "step": 177
    },
    {
      "epoch": 0.7063492063492064,
      "grad_norm": 0.7420768141746521,
      "learning_rate": 2.1846927872651135e-06,
      "loss": 0.1039,
      "step": 178
    },
    {
      "epoch": 0.7103174603174603,
      "grad_norm": 0.7211629152297974,
      "learning_rate": 2.1306225177949584e-06,
      "loss": 0.0875,
      "step": 179
    },
    {
      "epoch": 0.7142857142857143,
      "grad_norm": 0.7187921404838562,
      "learning_rate": 2.07704802418419e-06,
      "loss": 0.0885,
      "step": 180
    },
    {
      "epoch": 0.7182539682539683,
      "grad_norm": 0.7532177567481995,
      "learning_rate": 2.023978563123771e-06,
      "loss": 0.106,
      "step": 181
    },
    {
      "epoch": 0.7222222222222222,
      "grad_norm": 0.726718544960022,
      "learning_rate": 1.9714233040442915e-06,
      "loss": 0.092,
      "step": 182
    },
    {
      "epoch": 0.7261904761904762,
      "grad_norm": 0.6967416405677795,
      "learning_rate": 1.919391327531663e-06,
      "loss": 0.0771,
      "step": 183
    },
    {
      "epoch": 0.7301587301587301,
      "grad_norm": 0.7169931530952454,
      "learning_rate": 1.8678916237581524e-06,
      "loss": 0.1009,
      "step": 184
    },
    {
      "epoch": 0.7341269841269841,
      "grad_norm": 0.662554919719696,
      "learning_rate": 1.816933090929055e-06,
      "loss": 0.0727,
      "step": 185
    },
    {
      "epoch": 0.7380952380952381,
      "grad_norm": 0.9608215093612671,
      "learning_rate": 1.7665245337452368e-06,
      "loss": 0.1224,
      "step": 186
    },
    {
      "epoch": 0.7420634920634921,
      "grad_norm": 0.9227773547172546,
      "learning_rate": 1.716674661881848e-06,
      "loss": 0.1166,
      "step": 187
    },
    {
      "epoch": 0.746031746031746,
      "grad_norm": 0.8112769722938538,
      "learning_rate": 1.667392088483456e-06,
      "loss": 0.1101,
      "step": 188
    },
    {
      "epoch": 0.75,
      "grad_norm": 0.8895912766456604,
      "learning_rate": 1.6186853286758397e-06,
      "loss": 0.1162,
      "step": 189
    },
    {
      "epoch": 0.753968253968254,
      "grad_norm": 0.6724901795387268,
      "learning_rate": 1.570562798094747e-06,
      "loss": 0.0819,
      "step": 190
    },
    {
      "epoch": 0.7579365079365079,
      "grad_norm": 0.8350103497505188,
      "learning_rate": 1.5230328114318127e-06,
      "loss": 0.1021,
      "step": 191
    },
    {
      "epoch": 0.7619047619047619,
      "grad_norm": 0.9384281039237976,
      "learning_rate": 1.4761035809979395e-06,
      "loss": 0.135,
      "step": 192
    },
    {
      "epoch": 0.7658730158730159,
      "grad_norm": 0.894897997379303,
      "learning_rate": 1.4297832153043657e-06,
      "loss": 0.1075,
      "step": 193
    },
    {
      "epoch": 0.7698412698412699,
      "grad_norm": 0.7277584671974182,
      "learning_rate": 1.3840797176616467e-06,
      "loss": 0.0788,
      "step": 194
    },
    {
      "epoch": 0.7738095238095238,
      "grad_norm": 0.809172511100769,
      "learning_rate": 1.3390009847968505e-06,
      "loss": 0.0923,
      "step": 195
    },
    {
      "epoch": 0.7777777777777778,
      "grad_norm": 0.8635011911392212,
      "learning_rate": 1.2945548054891322e-06,
      "loss": 0.0895,
      "step": 196
    },
    {
      "epoch": 0.7817460317460317,
      "grad_norm": 0.8458127975463867,
      "learning_rate": 1.2507488592239848e-06,
      "loss": 0.1392,
      "step": 197
    },
    {
      "epoch": 0.7857142857142857,
      "grad_norm": 0.7588448524475098,
      "learning_rate": 1.2075907148663579e-06,
      "loss": 0.1068,
      "step": 198
    },
    {
      "epoch": 0.7896825396825397,
      "grad_norm": 0.6866970062255859,
      "learning_rate": 1.1650878293528994e-06,
      "loss": 0.0875,
      "step": 199
    },
    {
      "epoch": 0.7936507936507936,
      "grad_norm": 0.7142922878265381,
      "learning_rate": 1.1232475464035386e-06,
      "loss": 0.0892,
      "step": 200
    },
    {
      "epoch": 0.7976190476190477,
      "grad_norm": 0.6719024777412415,
      "learning_rate": 1.0820770952526155e-06,
      "loss": 0.0904,
      "step": 201
    },
    {
      "epoch": 0.8015873015873016,
      "grad_norm": 0.6220860481262207,
      "learning_rate": 1.0415835893998116e-06,
      "loss": 0.0773,
      "step": 202
    },
    {
      "epoch": 0.8055555555555556,
      "grad_norm": 0.5566734075546265,
      "learning_rate": 1.0017740253810608e-06,
      "loss": 0.0704,
      "step": 203
    },
    {
      "epoch": 0.8095238095238095,
      "grad_norm": 0.6570054888725281,
      "learning_rate": 9.62655281559679e-07,
      "loss": 0.0852,
      "step": 204
    },
    {
      "epoch": 0.8134920634920635,
      "grad_norm": 0.6448202729225159,
      "learning_rate": 9.242341169379077e-07,
      "loss": 0.0703,
      "step": 205
    },
    {
      "epoch": 0.8174603174603174,
      "grad_norm": 0.7391513586044312,
      "learning_rate": 8.865171699890835e-07,
      "loss": 0.1028,
      "step": 206
    },
    {
      "epoch": 0.8214285714285714,
      "grad_norm": 0.7353842854499817,
      "learning_rate": 8.495109575106331e-07,
      "loss": 0.0968,
      "step": 207
    },
    {
      "epoch": 0.8253968253968254,
      "grad_norm": 0.8170803785324097,
      "learning_rate": 8.132218734980852e-07,
      "loss": 0.113,
      "step": 208
    },
    {
      "epoch": 0.8293650793650794,
      "grad_norm": 0.7208415269851685,
      "learning_rate": 7.776561880403072e-07,
      "loss": 0.0937,
      "step": 209
    },
    {
      "epoch": 0.8333333333333334,
      "grad_norm": 0.792476236820221,
      "learning_rate": 7.42820046236154e-07,
      "loss": 0.117,
      "step": 210
    },
    {
      "epoch": 0.8373015873015873,
      "grad_norm": 0.7033000588417053,
      "learning_rate": 7.087194671326986e-07,
      "loss": 0.0836,
      "step": 211
    },
    {
      "epoch": 0.8412698412698413,
      "grad_norm": 0.6652337908744812,
      "learning_rate": 6.753603426852589e-07,
      "loss": 0.0754,
      "step": 212
    },
    {
      "epoch": 0.8452380952380952,
      "grad_norm": 0.9486038684844971,
      "learning_rate": 6.427484367393699e-07,
      "loss": 0.1304,
      "step": 213
    },
    {
      "epoch": 0.8492063492063492,
      "grad_norm": 0.8958883285522461,
      "learning_rate": 6.108893840348995e-07,
      "loss": 0.0917,
      "step": 214
    },
    {
      "epoch": 0.8531746031746031,
      "grad_norm": 0.6590295433998108,
      "learning_rate": 5.797886892324695e-07,
      "loss": 0.0759,
      "step": 215
    },
    {
      "epoch": 0.8571428571428571,
      "grad_norm": 0.789568305015564,
      "learning_rate": 5.494517259623478e-07,
      "loss": 0.0966,
      "step": 216
    },
    {
      "epoch": 0.8611111111111112,
      "grad_norm": 0.8645297288894653,
      "learning_rate": 5.198837358959901e-07,
      "loss": 0.1092,
      "step": 217
    },
    {
      "epoch": 0.8650793650793651,
      "grad_norm": 0.8162879347801208,
      "learning_rate": 4.91089827840367e-07,
      "loss": 0.1082,
      "step": 218
    },
    {
      "epoch": 0.8690476190476191,
      "grad_norm": 0.9198590517044067,
      "learning_rate": 4.6307497685525894e-07,
      "loss": 0.1278,
      "step": 219
    },
    {
      "epoch": 0.873015873015873,
      "grad_norm": 0.6241840124130249,
      "learning_rate": 4.3584402339366174e-07,
      "loss": 0.0662,
      "step": 220
    },
    {
      "epoch": 0.876984126984127,
      "grad_norm": 0.8222904801368713,
      "learning_rate": 4.0940167246543595e-07,
      "loss": 0.1049,
      "step": 221
    },
    {
      "epoch": 0.8809523809523809,
      "grad_norm": 0.7959262132644653,
      "learning_rate": 3.8375249282437743e-07,
      "loss": 0.0972,
      "step": 222
    },
    {
      "epoch": 0.8849206349206349,
      "grad_norm": 0.6456685066223145,
      "learning_rate": 3.589009161788104e-07,
      "loss": 0.0774,
      "step": 223
    },
    {
      "epoch": 0.8888888888888888,
      "grad_norm": 0.9123170375823975,
      "learning_rate": 3.3485123642587657e-07,
      "loss": 0.1471,
      "step": 224
    },
    {
      "epoch": 0.8928571428571429,
      "grad_norm": 0.6460578441619873,
      "learning_rate": 3.116076089096265e-07,
      "loss": 0.0684,
      "step": 225
    },
    {
      "epoch": 0.8968253968253969,
      "grad_norm": 0.6745744347572327,
      "learning_rate": 2.8917404970305096e-07,
      "loss": 0.0848,
      "step": 226
    },
    {
      "epoch": 0.9007936507936508,
      "grad_norm": 0.7377632260322571,
      "learning_rate": 2.6755443491417786e-07,
      "loss": 0.0897,
      "step": 227
    },
    {
      "epoch": 0.9047619047619048,
      "grad_norm": 0.7380819916725159,
      "learning_rate": 2.467525000163523e-07,
      "loss": 0.1127,
      "step": 228
    },
    {
      "epoch": 0.9087301587301587,
      "grad_norm": 0.6314770579338074,
      "learning_rate": 2.2677183920281342e-07,
      "loss": 0.0775,
      "step": 229
    },
    {
      "epoch": 0.9126984126984127,
      "grad_norm": 0.7447143197059631,
      "learning_rate": 2.0761590476568893e-07,
      "loss": 0.1098,
      "step": 230
    },
    {
      "epoch": 0.9166666666666666,
      "grad_norm": 0.6674031615257263,
      "learning_rate": 1.892880064994934e-07,
      "loss": 0.0729,
      "step": 231
    },
    {
      "epoch": 0.9206349206349206,
      "grad_norm": 0.7818775773048401,
      "learning_rate": 1.7179131112926628e-07,
      "loss": 0.0965,
      "step": 232
    },
    {
      "epoch": 0.9246031746031746,
      "grad_norm": 0.6574113368988037,
      "learning_rate": 1.551288417634106e-07,
      "loss": 0.0833,
      "step": 233
    },
    {
      "epoch": 0.9285714285714286,
      "grad_norm": 0.7490445971488953,
      "learning_rate": 1.3930347737136195e-07,
      "loss": 0.0865,
      "step": 234
    },
    {
      "epoch": 0.9325396825396826,
      "grad_norm": 0.8491647839546204,
      "learning_rate": 1.2431795228615372e-07,
      "loss": 0.1268,
      "step": 235
    },
    {
      "epoch": 0.9365079365079365,
      "grad_norm": 0.7804898023605347,
      "learning_rate": 1.1017485573197151e-07,
      "loss": 0.0975,
      "step": 236
    },
    {
      "epoch": 0.9404761904761905,
      "grad_norm": 0.8244890570640564,
      "learning_rate": 9.687663137678605e-08,
      "loss": 0.1113,
      "step": 237
    },
    {
      "epoch": 0.9444444444444444,
      "grad_norm": 0.803353488445282,
      "learning_rate": 8.442557691013042e-08,
      "loss": 0.1053,
      "step": 238
    },
    {
      "epoch": 0.9484126984126984,
      "grad_norm": 0.7855432629585266,
      "learning_rate": 7.282384364610207e-08,
      "loss": 0.1012,
      "step": 239
    },
    {
      "epoch": 0.9523809523809523,
      "grad_norm": 0.80164635181427,
      "learning_rate": 6.207343615165562e-08,
      "loss": 0.1138,
      "step": 240
    },
    {
      "epoch": 0.9563492063492064,
      "grad_norm": 0.6952555179595947,
      "learning_rate": 5.21762119002478e-08,
      "loss": 0.0786,
      "step": 241
    },
    {
      "epoch": 0.9603174603174603,
      "grad_norm": 0.7810098528862,
      "learning_rate": 4.31338809509052e-08,
      "loss": 0.0923,
      "step": 242
    },
    {
      "epoch": 0.9642857142857143,
      "grad_norm": 0.8230253458023071,
      "learning_rate": 3.494800565275125e-08,
      "loss": 0.1149,
      "step": 243
    },
    {
      "epoch": 0.9682539682539683,
      "grad_norm": 0.8179909586906433,
      "learning_rate": 2.7620000375064848e-08,
      "loss": 0.0936,
      "step": 244
    },
    {
      "epoch": 0.9722222222222222,
      "grad_norm": 0.9115637540817261,
      "learning_rate": 2.115113126290258e-08,
      "loss": 0.1169,
      "step": 245
    },
    {
      "epoch": 0.9761904761904762,
      "grad_norm": 0.6795814633369446,
      "learning_rate": 1.554251601833201e-08,
      "loss": 0.0689,
      "step": 246
    },
    {
      "epoch": 0.9801587301587301,
      "grad_norm": 0.7916927337646484,
      "learning_rate": 1.0795123707312283e-08,
      "loss": 0.1116,
      "step": 247
    },
    {
      "epoch": 0.9841269841269841,
      "grad_norm": 0.6779837012290955,
      "learning_rate": 6.9097745922580564e-09,
      "loss": 0.0853,
      "step": 248
    },
    {
      "epoch": 0.9880952380952381,
      "grad_norm": 0.6872785091400146,
      "learning_rate": 3.887139990313427e-09,
      "loss": 0.0926,
      "step": 249
    },
    {
      "epoch": 0.9920634920634921,
      "grad_norm": 0.6764498949050903,
      "learning_rate": 1.7277421573608234e-09,
      "loss": 0.0831,
      "step": 250
    },
    {
      "epoch": 0.996031746031746,
      "grad_norm": 0.7526364326477051,
      "learning_rate": 4.3195419778319095e-10,
      "loss": 0.0966,
      "step": 251
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.7119873762130737,
      "learning_rate": 0.0,
      "loss": 0.1041,
      "step": 252
    },
    {
      "epoch": 1.0,
      "step": 252,
      "total_flos": 27436118114304.0,
      "train_loss": 0.1079725922040996,
      "train_runtime": 1946.0648,
      "train_samples_per_second": 2.067,
      "train_steps_per_second": 0.129
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 252,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": false,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 27436118114304.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}