{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 51.97127937336815,
  "eval_steps": 500,
  "global_step": 1200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.4177545691906005,
      "grad_norm": 0.28227752447128296,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 4.1508,
      "step": 10
    },
    {
      "epoch": 0.835509138381201,
      "grad_norm": 0.31433430314064026,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 4.1593,
      "step": 20
    },
    {
      "epoch": 1.2532637075718016,
      "grad_norm": 0.3350953161716461,
      "learning_rate": 8.999999999999999e-05,
      "loss": 4.0414,
      "step": 30
    },
    {
      "epoch": 1.671018276762402,
      "grad_norm": 0.2885706126689911,
      "learning_rate": 0.00011999999999999999,
      "loss": 3.8411,
      "step": 40
    },
    {
      "epoch": 2.0887728459530024,
      "grad_norm": 0.23711609840393066,
      "learning_rate": 0.00015,
      "loss": 3.6434,
      "step": 50
    },
    {
      "epoch": 2.506527415143603,
      "grad_norm": 0.21583135426044464,
      "learning_rate": 0.00017999999999999998,
      "loss": 3.4636,
      "step": 60
    },
    {
      "epoch": 2.9242819843342036,
      "grad_norm": 0.18754692375659943,
      "learning_rate": 0.00020999999999999998,
      "loss": 3.3154,
      "step": 70
    },
    {
      "epoch": 3.342036553524804,
      "grad_norm": 0.15951760113239288,
      "learning_rate": 0.00023999999999999998,
      "loss": 3.2195,
      "step": 80
    },
    {
      "epoch": 3.759791122715405,
      "grad_norm": 0.14639759063720703,
      "learning_rate": 0.00027,
      "loss": 3.122,
      "step": 90
    },
    {
      "epoch": 4.177545691906005,
      "grad_norm": 0.1860765665769577,
      "learning_rate": 0.0003,
      "loss": 3.0677,
      "step": 100
    },
    {
      "epoch": 4.595300261096606,
      "grad_norm": 0.1737535446882248,
      "learning_rate": 0.000285,
      "loss": 2.9992,
      "step": 110
    },
    {
      "epoch": 5.013054830287206,
      "grad_norm": 0.181383416056633,
      "learning_rate": 0.00027,
      "loss": 2.9761,
      "step": 120
    },
    {
      "epoch": 5.430809399477806,
      "grad_norm": 0.1873219609260559,
      "learning_rate": 0.00025499999999999996,
      "loss": 2.9281,
      "step": 130
    },
    {
      "epoch": 5.848563968668407,
      "grad_norm": 0.19864186644554138,
      "learning_rate": 0.00023999999999999998,
      "loss": 2.9168,
      "step": 140
    },
    {
      "epoch": 6.266318537859008,
      "grad_norm": 0.22326301038265228,
      "learning_rate": 0.000225,
      "loss": 2.8549,
      "step": 150
    },
    {
      "epoch": 6.684073107049608,
      "grad_norm": 0.2200121283531189,
      "learning_rate": 0.00020999999999999998,
      "loss": 2.855,
      "step": 160
    },
    {
      "epoch": 7.101827676240209,
      "grad_norm": 0.2546086311340332,
      "learning_rate": 0.000195,
      "loss": 2.8509,
      "step": 170
    },
    {
      "epoch": 7.51958224543081,
      "grad_norm": 0.26345309615135193,
      "learning_rate": 0.00017999999999999998,
      "loss": 2.8144,
      "step": 180
    },
    {
      "epoch": 7.93733681462141,
      "grad_norm": 0.21533280611038208,
      "learning_rate": 0.000165,
      "loss": 2.8006,
      "step": 190
    },
    {
      "epoch": 8.35509138381201,
      "grad_norm": 0.2510657012462616,
      "learning_rate": 0.00015,
      "loss": 2.7816,
      "step": 200
    },
    {
      "epoch": 8.77284595300261,
      "grad_norm": 0.23468665778636932,
      "learning_rate": 0.000135,
      "loss": 2.7762,
      "step": 210
    },
    {
      "epoch": 9.190600522193211,
      "grad_norm": 0.23014432191848755,
      "learning_rate": 0.00011999999999999999,
      "loss": 2.7731,
      "step": 220
    },
    {
      "epoch": 9.608355091383812,
      "grad_norm": 0.247611865401268,
      "learning_rate": 0.00010499999999999999,
      "loss": 2.742,
      "step": 230
    },
    {
      "epoch": 10.026109660574413,
      "grad_norm": 0.2899376451969147,
      "learning_rate": 8.999999999999999e-05,
      "loss": 2.763,
      "step": 240
    },
    {
      "epoch": 10.443864229765014,
      "grad_norm": 0.24601446092128754,
      "learning_rate": 7.5e-05,
      "loss": 2.7529,
      "step": 250
    },
    {
      "epoch": 10.861618798955613,
      "grad_norm": 0.2344890832901001,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 2.7373,
      "step": 260
    },
    {
      "epoch": 11.279373368146214,
      "grad_norm": 0.22882166504859924,
      "learning_rate": 4.4999999999999996e-05,
      "loss": 2.7427,
      "step": 270
    },
    {
      "epoch": 11.697127937336814,
      "grad_norm": 0.26199406385421753,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 2.6814,
      "step": 280
    },
    {
      "epoch": 12.114882506527415,
      "grad_norm": 0.2374505251646042,
      "learning_rate": 1.4999999999999999e-05,
      "loss": 2.758,
      "step": 290
    },
    {
      "epoch": 12.532637075718016,
      "grad_norm": 0.2393040806055069,
      "learning_rate": 0.0,
      "loss": 2.7284,
      "step": 300
    },
    {
      "epoch": 13.459530026109661,
      "grad_norm": 0.2965029180049896,
      "learning_rate": 0.0002668421052631579,
      "loss": 2.7219,
      "step": 310
    },
    {
      "epoch": 13.877284595300262,
      "grad_norm": 0.2831190526485443,
      "learning_rate": 0.0002652631578947368,
      "loss": 2.7316,
      "step": 320
    },
    {
      "epoch": 14.295039164490861,
      "grad_norm": 0.29041996598243713,
      "learning_rate": 0.00026368421052631576,
      "loss": 2.6869,
      "step": 330
    },
    {
      "epoch": 14.712793733681462,
      "grad_norm": 0.27796632051467896,
      "learning_rate": 0.0002621052631578947,
      "loss": 2.7045,
      "step": 340
    },
    {
      "epoch": 15.130548302872063,
      "grad_norm": 0.30092301964759827,
      "learning_rate": 0.0002605263157894737,
      "loss": 2.6589,
      "step": 350
    },
    {
      "epoch": 15.548302872062663,
      "grad_norm": 0.33648282289505005,
      "learning_rate": 0.0002589473684210526,
      "loss": 2.6811,
      "step": 360
    },
    {
      "epoch": 15.966057441253264,
      "grad_norm": 0.3513476550579071,
      "learning_rate": 0.00025736842105263157,
      "loss": 2.6424,
      "step": 370
    },
    {
      "epoch": 16.383812010443865,
      "grad_norm": 0.366802453994751,
      "learning_rate": 0.0002557894736842105,
      "loss": 2.6225,
      "step": 380
    },
    {
      "epoch": 16.801566579634464,
      "grad_norm": 0.3507522642612457,
      "learning_rate": 0.00025421052631578945,
      "loss": 2.6693,
      "step": 390
    },
    {
      "epoch": 17.219321148825067,
      "grad_norm": 0.32098060846328735,
      "learning_rate": 0.00025263157894736836,
      "loss": 2.6372,
      "step": 400
    },
    {
      "epoch": 17.637075718015666,
      "grad_norm": 0.34954994916915894,
      "learning_rate": 0.0002510526315789474,
      "loss": 2.6057,
      "step": 410
    },
    {
      "epoch": 18.054830287206265,
      "grad_norm": 0.3401590585708618,
      "learning_rate": 0.0002494736842105263,
      "loss": 2.6224,
      "step": 420
    },
    {
      "epoch": 18.472584856396868,
      "grad_norm": 0.3732793927192688,
      "learning_rate": 0.00024789473684210526,
      "loss": 2.5845,
      "step": 430
    },
    {
      "epoch": 18.890339425587467,
      "grad_norm": 0.3447878956794739,
      "learning_rate": 0.00024631578947368417,
      "loss": 2.5766,
      "step": 440
    },
    {
      "epoch": 19.30809399477807,
      "grad_norm": 0.4215945899486542,
      "learning_rate": 0.00024473684210526314,
      "loss": 2.5784,
      "step": 450
    },
    {
      "epoch": 19.72584856396867,
      "grad_norm": 0.44425827264785767,
      "learning_rate": 0.00024315789473684207,
      "loss": 2.5752,
      "step": 460
    },
    {
      "epoch": 20.143603133159267,
      "grad_norm": 0.38389045000076294,
      "learning_rate": 0.000241578947368421,
      "loss": 2.5828,
      "step": 470
    },
    {
      "epoch": 20.56135770234987,
      "grad_norm": 0.3717693090438843,
      "learning_rate": 0.00023999999999999998,
      "loss": 2.5507,
      "step": 480
    },
    {
      "epoch": 20.97911227154047,
      "grad_norm": 0.3933301568031311,
      "learning_rate": 0.00023842105263157895,
      "loss": 2.5479,
      "step": 490
    },
    {
      "epoch": 21.39686684073107,
      "grad_norm": 0.3996904790401459,
      "learning_rate": 0.00023684210526315788,
      "loss": 2.5408,
      "step": 500
    },
    {
      "epoch": 21.81462140992167,
      "grad_norm": 0.3934177756309509,
      "learning_rate": 0.00023526315789473682,
      "loss": 2.5463,
      "step": 510
    },
    {
      "epoch": 22.232375979112273,
      "grad_norm": 0.42467737197875977,
      "learning_rate": 0.00023368421052631576,
      "loss": 2.5392,
      "step": 520
    },
    {
      "epoch": 22.650130548302872,
      "grad_norm": 0.38297030329704285,
      "learning_rate": 0.0002321052631578947,
      "loss": 2.5204,
      "step": 530
    },
    {
      "epoch": 23.06788511749347,
      "grad_norm": 0.39583373069763184,
      "learning_rate": 0.00023052631578947364,
      "loss": 2.5195,
      "step": 540
    },
    {
      "epoch": 23.485639686684074,
      "grad_norm": 0.3692266345024109,
      "learning_rate": 0.00022894736842105263,
      "loss": 2.5091,
      "step": 550
    },
    {
      "epoch": 23.903394255874673,
      "grad_norm": 0.34597283601760864,
      "learning_rate": 0.00022736842105263157,
      "loss": 2.5011,
      "step": 560
    },
    {
      "epoch": 24.321148825065276,
      "grad_norm": 0.3991779088973999,
      "learning_rate": 0.0002257894736842105,
      "loss": 2.4919,
      "step": 570
    },
    {
      "epoch": 24.738903394255875,
      "grad_norm": 0.37865138053894043,
      "learning_rate": 0.00022421052631578945,
      "loss": 2.4943,
      "step": 580
    },
    {
      "epoch": 25.156657963446474,
      "grad_norm": 0.41416704654693604,
      "learning_rate": 0.0002226315789473684,
      "loss": 2.4847,
      "step": 590
    },
    {
      "epoch": 25.574412532637076,
      "grad_norm": 0.37662285566329956,
      "learning_rate": 0.00022105263157894733,
      "loss": 2.4836,
      "step": 600
    },
    {
      "epoch": 25.992167101827675,
      "grad_norm": 0.4186669588088989,
      "learning_rate": 0.00021947368421052632,
      "loss": 2.4627,
      "step": 610
    },
    {
      "epoch": 26.409921671018278,
      "grad_norm": 0.3905445337295532,
      "learning_rate": 0.00021789473684210526,
      "loss": 2.4616,
      "step": 620
    },
    {
      "epoch": 26.827676240208877,
      "grad_norm": 0.45327094197273254,
      "learning_rate": 0.0002163157894736842,
      "loss": 2.4777,
      "step": 630
    },
    {
      "epoch": 27.245430809399476,
      "grad_norm": 0.43680539727211,
      "learning_rate": 0.00021473684210526314,
      "loss": 2.4654,
      "step": 640
    },
    {
      "epoch": 27.66318537859008,
      "grad_norm": 0.36886611580848694,
      "learning_rate": 0.00021315789473684208,
      "loss": 2.4511,
      "step": 650
    },
    {
      "epoch": 28.080939947780678,
      "grad_norm": 0.36019206047058105,
      "learning_rate": 0.00021157894736842102,
      "loss": 2.4343,
      "step": 660
    },
    {
      "epoch": 28.49869451697128,
      "grad_norm": 0.39306387305259705,
      "learning_rate": 0.00020999999999999998,
      "loss": 2.443,
      "step": 670
    },
    {
      "epoch": 28.91644908616188,
      "grad_norm": 0.38716623187065125,
      "learning_rate": 0.00020842105263157895,
      "loss": 2.4417,
      "step": 680
    },
    {
      "epoch": 29.334203655352482,
      "grad_norm": 0.38376671075820923,
      "learning_rate": 0.0002068421052631579,
      "loss": 2.4234,
      "step": 690
    },
    {
      "epoch": 29.75195822454308,
      "grad_norm": 0.39722415804862976,
      "learning_rate": 0.00020526315789473683,
      "loss": 2.4229,
      "step": 700
    },
    {
      "epoch": 30.8355091383812,
      "grad_norm": 0.39840930700302124,
      "learning_rate": 0.00020368421052631576,
      "loss": 2.4198,
      "step": 710
    },
    {
      "epoch": 31.2532637075718,
      "grad_norm": 0.3548623323440552,
      "learning_rate": 0.0002021052631578947,
      "loss": 2.415,
      "step": 720
    },
    {
      "epoch": 31.671018276762403,
      "grad_norm": 0.37791335582733154,
      "learning_rate": 0.00020052631578947367,
      "loss": 2.4184,
      "step": 730
    },
    {
      "epoch": 32.088772845953,
      "grad_norm": 0.4041711688041687,
      "learning_rate": 0.0001989473684210526,
      "loss": 2.3749,
      "step": 740
    },
    {
      "epoch": 32.5065274151436,
      "grad_norm": 0.37801074981689453,
      "learning_rate": 0.00019736842105263157,
      "loss": 2.3959,
      "step": 750
    },
    {
      "epoch": 32.9242819843342,
      "grad_norm": 0.3841933310031891,
      "learning_rate": 0.0001957894736842105,
      "loss": 2.4193,
      "step": 760
    },
    {
      "epoch": 33.34203655352481,
      "grad_norm": 0.4326969385147095,
      "learning_rate": 0.00019421052631578945,
      "loss": 2.3801,
      "step": 770
    },
    {
      "epoch": 33.759791122715406,
      "grad_norm": 0.41781577467918396,
      "learning_rate": 0.0001926315789473684,
      "loss": 2.3941,
      "step": 780
    },
    {
      "epoch": 34.177545691906005,
      "grad_norm": 0.3655799925327301,
      "learning_rate": 0.00019105263157894736,
      "loss": 2.3998,
      "step": 790
    },
    {
      "epoch": 34.595300261096604,
      "grad_norm": 0.41886845231056213,
      "learning_rate": 0.0001894736842105263,
      "loss": 2.3642,
      "step": 800
    },
    {
      "epoch": 35.0130548302872,
      "grad_norm": 0.4033879041671753,
      "learning_rate": 0.00018789473684210524,
      "loss": 2.3635,
      "step": 810
    },
    {
      "epoch": 35.43080939947781,
      "grad_norm": 0.40223613381385803,
      "learning_rate": 0.0001863157894736842,
      "loss": 2.3686,
      "step": 820
    },
    {
      "epoch": 35.84856396866841,
      "grad_norm": 0.42818790674209595,
      "learning_rate": 0.00018473684210526314,
      "loss": 2.3665,
      "step": 830
    },
    {
      "epoch": 36.26631853785901,
      "grad_norm": 0.4297718405723572,
      "learning_rate": 0.00018315789473684208,
      "loss": 2.3625,
      "step": 840
    },
    {
      "epoch": 36.684073107049606,
      "grad_norm": 0.41340720653533936,
      "learning_rate": 0.00018157894736842105,
      "loss": 2.3729,
      "step": 850
    },
    {
      "epoch": 37.10182767624021,
      "grad_norm": 0.4357605576515198,
      "learning_rate": 0.00017999999999999998,
      "loss": 2.3362,
      "step": 860
    },
    {
      "epoch": 37.51958224543081,
      "grad_norm": 0.4296557605266571,
      "learning_rate": 0.00017842105263157892,
      "loss": 2.3099,
      "step": 870
    },
    {
      "epoch": 37.93733681462141,
      "grad_norm": 0.4468984305858612,
      "learning_rate": 0.00017684210526315786,
      "loss": 2.3713,
      "step": 880
    },
    {
      "epoch": 38.35509138381201,
      "grad_norm": 0.4298686683177948,
      "learning_rate": 0.00017526315789473683,
      "loss": 2.3607,
      "step": 890
    },
    {
      "epoch": 38.77284595300261,
      "grad_norm": 0.4179350435733795,
      "learning_rate": 0.0001736842105263158,
      "loss": 2.3186,
      "step": 900
    },
    {
      "epoch": 39.190600522193215,
      "grad_norm": 0.3968249261379242,
      "learning_rate": 0.00017210526315789473,
      "loss": 2.3505,
      "step": 910
    },
    {
      "epoch": 39.608355091383814,
      "grad_norm": 0.40423065423965454,
      "learning_rate": 0.00017052631578947367,
      "loss": 2.3414,
      "step": 920
    },
    {
      "epoch": 40.02610966057441,
      "grad_norm": 0.42887982726097107,
      "learning_rate": 0.0001689473684210526,
      "loss": 2.3221,
      "step": 930
    },
    {
      "epoch": 40.44386422976501,
      "grad_norm": 0.4936973452568054,
      "learning_rate": 0.00016736842105263155,
      "loss": 2.3082,
      "step": 940
    },
    {
      "epoch": 40.86161879895561,
      "grad_norm": 0.39211633801460266,
      "learning_rate": 0.00016578947368421052,
      "loss": 2.3249,
      "step": 950
    },
    {
      "epoch": 41.27937336814622,
      "grad_norm": 0.3981688916683197,
      "learning_rate": 0.00016421052631578948,
      "loss": 2.2999,
      "step": 960
    },
    {
      "epoch": 41.697127937336816,
      "grad_norm": 0.42434969544410706,
      "learning_rate": 0.00016263157894736842,
      "loss": 2.3225,
      "step": 970
    },
    {
      "epoch": 42.114882506527415,
      "grad_norm": 0.4085942208766937,
      "learning_rate": 0.00016105263157894736,
      "loss": 2.3113,
      "step": 980
    },
    {
      "epoch": 42.532637075718014,
      "grad_norm": 0.44574835896492004,
      "learning_rate": 0.0001594736842105263,
      "loss": 2.3211,
      "step": 990
    },
    {
      "epoch": 42.95039164490861,
      "grad_norm": 0.4391286075115204,
      "learning_rate": 0.00015789473684210524,
      "loss": 2.2893,
      "step": 1000
    },
    {
      "epoch": 43.36814621409922,
      "grad_norm": 0.4376542866230011,
      "learning_rate": 0.00015631578947368418,
      "loss": 2.2774,
      "step": 1010
    },
    {
      "epoch": 43.78590078328982,
      "grad_norm": 0.47575268149375916,
      "learning_rate": 0.00015473684210526317,
      "loss": 2.3193,
      "step": 1020
    },
    {
      "epoch": 44.20365535248042,
      "grad_norm": 0.46462905406951904,
      "learning_rate": 0.0001531578947368421,
      "loss": 2.2713,
      "step": 1030
    },
    {
      "epoch": 44.62140992167102,
      "grad_norm": 0.42894020676612854,
      "learning_rate": 0.00015157894736842105,
      "loss": 2.3082,
      "step": 1040
    },
    {
      "epoch": 45.039164490861616,
      "grad_norm": 0.41724729537963867,
      "learning_rate": 0.00015,
      "loss": 2.282,
      "step": 1050
    },
    {
      "epoch": 45.45691906005222,
      "grad_norm": 0.4280991554260254,
      "learning_rate": 0.00014842105263157893,
      "loss": 2.2724,
      "step": 1060
    },
    {
      "epoch": 45.87467362924282,
      "grad_norm": 0.4132491946220398,
      "learning_rate": 0.0001468421052631579,
      "loss": 2.2808,
      "step": 1070
    },
    {
      "epoch": 46.29242819843342,
      "grad_norm": 0.41796839237213135,
      "learning_rate": 0.00014526315789473683,
      "loss": 2.2805,
      "step": 1080
    },
    {
      "epoch": 46.71018276762402,
      "grad_norm": 0.44347137212753296,
      "learning_rate": 0.00014368421052631577,
      "loss": 2.2488,
      "step": 1090
    },
    {
      "epoch": 47.12793733681462,
      "grad_norm": 0.44872087240219116,
      "learning_rate": 0.0001421052631578947,
      "loss": 2.2868,
      "step": 1100
    },
    {
      "epoch": 48.21148825065274,
      "grad_norm": 0.4230802655220032,
      "learning_rate": 0.00014052631578947367,
      "loss": 2.2684,
      "step": 1110
    },
    {
      "epoch": 48.62924281984334,
      "grad_norm": 0.4060306251049042,
      "learning_rate": 0.00013894736842105261,
      "loss": 2.2471,
      "step": 1120
    },
    {
      "epoch": 49.04699738903394,
      "grad_norm": 0.4556446671485901,
      "learning_rate": 0.00013736842105263155,
      "loss": 2.294,
      "step": 1130
    },
    {
      "epoch": 49.46475195822455,
      "grad_norm": 0.4339825510978699,
      "learning_rate": 0.00013578947368421052,
      "loss": 2.242,
      "step": 1140
    },
    {
      "epoch": 49.882506527415146,
      "grad_norm": 0.4528680741786957,
      "learning_rate": 0.00013421052631578946,
      "loss": 2.2637,
      "step": 1150
    },
    {
      "epoch": 50.300261096605745,
      "grad_norm": 0.4693712294101715,
      "learning_rate": 0.0001326315789473684,
      "loss": 2.2601,
      "step": 1160
    },
    {
      "epoch": 50.718015665796344,
      "grad_norm": 0.4447150230407715,
      "learning_rate": 0.00013105263157894736,
      "loss": 2.2732,
      "step": 1170
    },
    {
      "epoch": 51.13577023498694,
      "grad_norm": 0.41829752922058105,
      "learning_rate": 0.0001294736842105263,
      "loss": 2.2269,
      "step": 1180
    },
    {
      "epoch": 51.55352480417755,
      "grad_norm": 0.48798778653144836,
      "learning_rate": 0.00012789473684210524,
      "loss": 2.2448,
      "step": 1190
    },
    {
      "epoch": 51.97127937336815,
      "grad_norm": 0.4357990622520447,
      "learning_rate": 0.00012631578947368418,
      "loss": 2.2459,
      "step": 1200
    }
  ],
  "logging_steps": 10,
  "max_steps": 2000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 87,
  "save_steps": 100,
  "total_flos": 1.9374365904611328e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}