{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 0,
  "global_step": 118,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00847457627118644,
      "grad_norm": 35.74056625366211,
      "learning_rate": 1.008e-05,
      "loss": 2.2767,
      "step": 1
    },
    {
      "epoch": 0.01694915254237288,
      "grad_norm": 13.115900039672852,
      "learning_rate": 1.0078213880062201e-05,
      "loss": 1.6586,
      "step": 2
    },
    {
      "epoch": 0.025423728813559324,
      "grad_norm": 8.92249870300293,
      "learning_rate": 1.007285678621088e-05,
      "loss": 1.1126,
      "step": 3
    },
    {
      "epoch": 0.03389830508474576,
      "grad_norm": 12.179636001586914,
      "learning_rate": 1.0063932515434981e-05,
      "loss": 0.9935,
      "step": 4
    },
    {
      "epoch": 0.0423728813559322,
      "grad_norm": 4.910240650177002,
      "learning_rate": 1.005144739305909e-05,
      "loss": 0.8416,
      "step": 5
    },
    {
      "epoch": 0.05084745762711865,
      "grad_norm": 2.3368802070617676,
      "learning_rate": 1.0035410268260196e-05,
      "loss": 0.8057,
      "step": 6
    },
    {
      "epoch": 0.059322033898305086,
      "grad_norm": 3.5738086700439453,
      "learning_rate": 1.0015832507795577e-05,
      "loss": 0.7107,
      "step": 7
    },
    {
      "epoch": 0.06779661016949153,
      "grad_norm": 2.041591167449951,
      "learning_rate": 9.992727987946306e-06,
      "loss": 0.7948,
      "step": 8
    },
    {
      "epoch": 0.07627118644067797,
      "grad_norm": 1.9192763566970825,
      "learning_rate": 9.966113084682048e-06,
      "loss": 0.7323,
      "step": 9
    },
    {
      "epoch": 0.0847457627118644,
      "grad_norm": 2.181330919265747,
      "learning_rate": 9.936006662054137e-06,
      "loss": 0.7378,
      "step": 10
    },
    {
      "epoch": 0.09322033898305085,
      "grad_norm": 1.7582520246505737,
      "learning_rate": 9.902430058825162e-06,
      "loss": 0.7146,
      "step": 11
    },
    {
      "epoch": 0.1016949152542373,
      "grad_norm": 1.3104244470596313,
      "learning_rate": 9.865407073344506e-06,
      "loss": 0.6574,
      "step": 12
    },
    {
      "epoch": 0.11016949152542373,
      "grad_norm": 1.1520931720733643,
      "learning_rate": 9.824963946680641e-06,
      "loss": 0.674,
      "step": 13
    },
    {
      "epoch": 0.11864406779661017,
      "grad_norm": 1.3915627002716064,
      "learning_rate": 9.781129344022008e-06,
      "loss": 0.675,
      "step": 14
    },
    {
      "epoch": 0.1271186440677966,
      "grad_norm": 1.3260822296142578,
      "learning_rate": 9.733934334359786e-06,
      "loss": 0.6156,
      "step": 15
    },
    {
      "epoch": 0.13559322033898305,
      "grad_norm": 1.1522308588027954,
      "learning_rate": 9.683412368466881e-06,
      "loss": 0.6107,
      "step": 16
    },
    {
      "epoch": 0.1440677966101695,
      "grad_norm": 1.0242621898651123,
      "learning_rate": 9.629599255188763e-06,
      "loss": 0.544,
      "step": 17
    },
    {
      "epoch": 0.15254237288135594,
      "grad_norm": 1.5411088466644287,
      "learning_rate": 9.572533136062945e-06,
      "loss": 0.6265,
      "step": 18
    },
    {
      "epoch": 0.16101694915254236,
      "grad_norm": 1.2036213874816895,
      "learning_rate": 9.51225445828512e-06,
      "loss": 0.6261,
      "step": 19
    },
    {
      "epoch": 0.1694915254237288,
      "grad_norm": 1.004852533340454,
      "learning_rate": 9.448805946041084e-06,
      "loss": 0.5895,
      "step": 20
    },
    {
      "epoch": 0.17796610169491525,
      "grad_norm": 1.0301568508148193,
      "learning_rate": 9.382232570224783e-06,
      "loss": 0.6057,
      "step": 21
    },
    {
      "epoch": 0.1864406779661017,
      "grad_norm": 1.0345704555511475,
      "learning_rate": 9.312581516563942e-06,
      "loss": 0.5539,
      "step": 22
    },
    {
      "epoch": 0.19491525423728814,
      "grad_norm": 1.0441306829452515,
      "learning_rate": 9.239902152175876e-06,
      "loss": 0.5699,
      "step": 23
    },
    {
      "epoch": 0.2033898305084746,
      "grad_norm": 1.1769165992736816,
      "learning_rate": 9.164245990577172e-06,
      "loss": 0.573,
      "step": 24
    },
    {
      "epoch": 0.211864406779661,
      "grad_norm": 1.0624712705612183,
      "learning_rate": 9.085666655172057e-06,
      "loss": 0.634,
      "step": 25
    },
    {
      "epoch": 0.22033898305084745,
      "grad_norm": 1.0083361864089966,
      "learning_rate": 9.004219841245318e-06,
      "loss": 0.5617,
      "step": 26
    },
    {
      "epoch": 0.2288135593220339,
      "grad_norm": 1.0477335453033447,
      "learning_rate": 8.919963276486727e-06,
      "loss": 0.5579,
      "step": 27
    },
    {
      "epoch": 0.23728813559322035,
      "grad_norm": 1.229489803314209,
      "learning_rate": 8.832956680074937e-06,
      "loss": 0.6022,
      "step": 28
    },
    {
      "epoch": 0.2457627118644068,
      "grad_norm": 1.093316912651062,
      "learning_rate": 8.74326172034986e-06,
      "loss": 0.6176,
      "step": 29
    },
    {
      "epoch": 0.2542372881355932,
      "grad_norm": 1.174489974975586,
      "learning_rate": 8.650941971103509e-06,
      "loss": 0.5965,
      "step": 30
    },
    {
      "epoch": 0.2627118644067797,
      "grad_norm": 0.9779760837554932,
      "learning_rate": 8.556062866520323e-06,
      "loss": 0.5618,
      "step": 31
    },
    {
      "epoch": 0.2711864406779661,
      "grad_norm": 1.1349478960037231,
      "learning_rate": 8.458691654798865e-06,
      "loss": 0.6187,
      "step": 32
    },
    {
      "epoch": 0.2796610169491525,
      "grad_norm": 1.027665138244629,
      "learning_rate": 8.358897350487796e-06,
      "loss": 0.5923,
      "step": 33
    },
    {
      "epoch": 0.288135593220339,
      "grad_norm": 1.2860366106033325,
      "learning_rate": 8.256750685569892e-06,
      "loss": 0.6993,
      "step": 34
    },
    {
      "epoch": 0.2966101694915254,
      "grad_norm": 1.019265055656433,
      "learning_rate": 8.152324059328807e-06,
      "loss": 0.561,
      "step": 35
    },
    {
      "epoch": 0.3050847457627119,
      "grad_norm": 1.0647754669189453,
      "learning_rate": 8.045691487034047e-06,
      "loss": 0.5601,
      "step": 36
    },
    {
      "epoch": 0.3135593220338983,
      "grad_norm": 0.9456817507743835,
      "learning_rate": 7.936928547480611e-06,
      "loss": 0.5355,
      "step": 37
    },
    {
      "epoch": 0.3220338983050847,
      "grad_norm": 0.9935976266860962,
      "learning_rate": 7.826112329420414e-06,
      "loss": 0.5562,
      "step": 38
    },
    {
      "epoch": 0.3305084745762712,
      "grad_norm": 1.0699551105499268,
      "learning_rate": 7.713321376923502e-06,
      "loss": 0.5859,
      "step": 39
    },
    {
      "epoch": 0.3389830508474576,
      "grad_norm": 0.9607884287834167,
      "learning_rate": 7.598635633707753e-06,
      "loss": 0.5634,
      "step": 40
    },
    {
      "epoch": 0.3474576271186441,
      "grad_norm": 1.007079839706421,
      "learning_rate": 7.482136386476557e-06,
      "loss": 0.5743,
      "step": 41
    },
    {
      "epoch": 0.3559322033898305,
      "grad_norm": 1.0889322757720947,
      "learning_rate": 7.3639062073046025e-06,
      "loss": 0.6155,
      "step": 42
    },
    {
      "epoch": 0.3644067796610169,
      "grad_norm": 1.0192927122116089,
      "learning_rate": 7.244028895112632e-06,
      "loss": 0.5434,
      "step": 43
    },
    {
      "epoch": 0.3728813559322034,
      "grad_norm": 1.0717029571533203,
      "learning_rate": 7.122589416272626e-06,
      "loss": 0.5939,
      "step": 44
    },
    {
      "epoch": 0.3813559322033898,
      "grad_norm": 0.958198606967926,
      "learning_rate": 6.999673844385537e-06,
      "loss": 0.6275,
      "step": 45
    },
    {
      "epoch": 0.3898305084745763,
      "grad_norm": 1.0603076219558716,
      "learning_rate": 6.875369299274224e-06,
      "loss": 0.5774,
      "step": 46
    },
    {
      "epoch": 0.3983050847457627,
      "grad_norm": 1.0000678300857544,
      "learning_rate": 6.7497638852348716e-06,
      "loss": 0.569,
      "step": 47
    },
    {
      "epoch": 0.4067796610169492,
      "grad_norm": 1.054443359375,
      "learning_rate": 6.622946628590623e-06,
      "loss": 0.6047,
      "step": 48
    },
    {
      "epoch": 0.4152542372881356,
      "grad_norm": 1.0552459955215454,
      "learning_rate": 6.49500741459169e-06,
      "loss": 0.5411,
      "step": 49
    },
    {
      "epoch": 0.423728813559322,
      "grad_norm": 1.0494464635849,
      "learning_rate": 6.366036923706707e-06,
      "loss": 0.5714,
      "step": 50
    },
    {
      "epoch": 0.4322033898305085,
      "grad_norm": 0.915934681892395,
      "learning_rate": 6.23612656735042e-06,
      "loss": 0.5167,
      "step": 51
    },
    {
      "epoch": 0.4406779661016949,
      "grad_norm": 1.5039361715316772,
      "learning_rate": 6.105368423093307e-06,
      "loss": 0.6265,
      "step": 52
    },
    {
      "epoch": 0.4491525423728814,
      "grad_norm": 1.0584355592727661,
      "learning_rate": 5.973855169399057e-06,
      "loss": 0.5589,
      "step": 53
    },
    {
      "epoch": 0.4576271186440678,
      "grad_norm": 1.064541220664978,
      "learning_rate": 5.841680019936123e-06,
      "loss": 0.5228,
      "step": 54
    },
    {
      "epoch": 0.4661016949152542,
      "grad_norm": 1.0165396928787231,
      "learning_rate": 5.708936657509959e-06,
      "loss": 0.6116,
      "step": 55
    },
    {
      "epoch": 0.4745762711864407,
      "grad_norm": 1.0315970182418823,
      "learning_rate": 5.5757191676627215e-06,
      "loss": 0.5637,
      "step": 56
    },
    {
      "epoch": 0.4830508474576271,
      "grad_norm": 0.9197196960449219,
      "learning_rate": 5.442121971987539e-06,
      "loss": 0.5194,
      "step": 57
    },
    {
      "epoch": 0.4915254237288136,
      "grad_norm": 0.9129324555397034,
      "learning_rate": 5.3082397612045805e-06,
      "loss": 0.5541,
      "step": 58
    },
    {
      "epoch": 0.5,
      "grad_norm": 1.0417673587799072,
      "learning_rate": 5.174167428046385e-06,
      "loss": 0.5888,
      "step": 59
    },
    {
      "epoch": 0.5084745762711864,
      "grad_norm": 0.9219163656234741,
      "learning_rate": 5.04e-06,
      "loss": 0.5358,
      "step": 60
    },
    {
      "epoch": 0.5169491525423728,
      "grad_norm": 0.998306393623352,
      "learning_rate": 4.905832571953615e-06,
      "loss": 0.5352,
      "step": 61
    },
    {
      "epoch": 0.5254237288135594,
      "grad_norm": 1.092921495437622,
      "learning_rate": 4.771760238795421e-06,
      "loss": 0.5527,
      "step": 62
    },
    {
      "epoch": 0.5338983050847458,
      "grad_norm": 1.0643359422683716,
      "learning_rate": 4.637878028012461e-06,
      "loss": 0.6151,
      "step": 63
    },
    {
      "epoch": 0.5423728813559322,
      "grad_norm": 1.1080788373947144,
      "learning_rate": 4.504280832337279e-06,
      "loss": 0.5372,
      "step": 64
    },
    {
      "epoch": 0.5508474576271186,
      "grad_norm": 0.9985234141349792,
      "learning_rate": 4.371063342490041e-06,
      "loss": 0.5466,
      "step": 65
    },
    {
      "epoch": 0.559322033898305,
      "grad_norm": 1.0811630487442017,
      "learning_rate": 4.238319980063878e-06,
      "loss": 0.5848,
      "step": 66
    },
    {
      "epoch": 0.5677966101694916,
      "grad_norm": 1.0621752738952637,
      "learning_rate": 4.106144830600945e-06,
      "loss": 0.5546,
      "step": 67
    },
    {
      "epoch": 0.576271186440678,
      "grad_norm": 1.1586964130401611,
      "learning_rate": 3.974631576906693e-06,
      "loss": 0.5268,
      "step": 68
    },
    {
      "epoch": 0.5847457627118644,
      "grad_norm": 0.9288192987442017,
      "learning_rate": 3.843873432649581e-06,
      "loss": 0.564,
      "step": 69
    },
    {
      "epoch": 0.5932203389830508,
      "grad_norm": 0.9237483739852905,
      "learning_rate": 3.713963076293294e-06,
      "loss": 0.5849,
      "step": 70
    },
    {
      "epoch": 0.6016949152542372,
      "grad_norm": 1.2002196311950684,
      "learning_rate": 3.5849925854083115e-06,
      "loss": 0.5766,
      "step": 71
    },
    {
      "epoch": 0.6101694915254238,
      "grad_norm": 0.9985136389732361,
      "learning_rate": 3.4570533714093793e-06,
      "loss": 0.5443,
      "step": 72
    },
    {
      "epoch": 0.6186440677966102,
      "grad_norm": 1.052380084991455,
      "learning_rate": 3.3302361147651272e-06,
      "loss": 0.5679,
      "step": 73
    },
    {
      "epoch": 0.6271186440677966,
      "grad_norm": 1.0863173007965088,
      "learning_rate": 3.204630700725776e-06,
      "loss": 0.5995,
      "step": 74
    },
    {
      "epoch": 0.635593220338983,
      "grad_norm": 0.9472033381462097,
      "learning_rate": 3.080326155614464e-06,
      "loss": 0.5524,
      "step": 75
    },
    {
      "epoch": 0.6440677966101694,
      "grad_norm": 0.9481269717216492,
      "learning_rate": 2.9574105837273754e-06,
      "loss": 0.5578,
      "step": 76
    },
    {
      "epoch": 0.652542372881356,
      "grad_norm": 0.8579362630844116,
      "learning_rate": 2.835971104887369e-06,
      "loss": 0.5076,
      "step": 77
    },
    {
      "epoch": 0.6610169491525424,
      "grad_norm": 0.8677413463592529,
      "learning_rate": 2.7160937926953967e-06,
      "loss": 0.513,
      "step": 78
    },
    {
      "epoch": 0.6694915254237288,
      "grad_norm": 1.145965814590454,
      "learning_rate": 2.5978636135234437e-06,
      "loss": 0.5288,
      "step": 79
    },
    {
      "epoch": 0.6779661016949152,
      "grad_norm": 1.1086536645889282,
      "learning_rate": 2.481364366292248e-06,
      "loss": 0.6084,
      "step": 80
    },
    {
      "epoch": 0.6864406779661016,
      "grad_norm": 1.011196494102478,
      "learning_rate": 2.3666786230764983e-06,
      "loss": 0.6131,
      "step": 81
    },
    {
      "epoch": 0.6949152542372882,
      "grad_norm": 0.903338611125946,
      "learning_rate": 2.253887670579587e-06,
      "loss": 0.5771,
      "step": 82
    },
    {
      "epoch": 0.7033898305084746,
      "grad_norm": 1.0274643898010254,
      "learning_rate": 2.1430714525193897e-06,
      "loss": 0.5836,
      "step": 83
    },
    {
      "epoch": 0.711864406779661,
      "grad_norm": 1.0280394554138184,
      "learning_rate": 2.0343085129659523e-06,
      "loss": 0.5476,
      "step": 84
    },
    {
      "epoch": 0.7203389830508474,
      "grad_norm": 0.8955240845680237,
      "learning_rate": 1.9276759406711933e-06,
      "loss": 0.5065,
      "step": 85
    },
    {
      "epoch": 0.7288135593220338,
      "grad_norm": 0.9722875952720642,
      "learning_rate": 1.823249314430108e-06,
      "loss": 0.5386,
      "step": 86
    },
    {
      "epoch": 0.7372881355932204,
      "grad_norm": 1.010117769241333,
      "learning_rate": 1.7211026495122058e-06,
      "loss": 0.5898,
      "step": 87
    },
    {
      "epoch": 0.7457627118644068,
      "grad_norm": 0.9292120933532715,
      "learning_rate": 1.6213083452011349e-06,
      "loss": 0.5245,
      "step": 88
    },
    {
      "epoch": 0.7542372881355932,
      "grad_norm": 0.8837317228317261,
      "learning_rate": 1.5239371334796776e-06,
      "loss": 0.5642,
      "step": 89
    },
    {
      "epoch": 0.7627118644067796,
      "grad_norm": 1.1663438081741333,
      "learning_rate": 1.4290580288964914e-06,
      "loss": 0.593,
      "step": 90
    },
    {
      "epoch": 0.7711864406779662,
      "grad_norm": 1.0487372875213623,
      "learning_rate": 1.336738279650142e-06,
      "loss": 0.5621,
      "step": 91
    },
    {
      "epoch": 0.7796610169491526,
      "grad_norm": 1.05958092212677,
      "learning_rate": 1.247043319925062e-06,
      "loss": 0.5409,
      "step": 92
    },
    {
      "epoch": 0.788135593220339,
      "grad_norm": 0.839719831943512,
      "learning_rate": 1.1600367235132717e-06,
      "loss": 0.5105,
      "step": 93
    },
    {
      "epoch": 0.7966101694915254,
      "grad_norm": 1.0145214796066284,
      "learning_rate": 1.075780158754682e-06,
      "loss": 0.5873,
      "step": 94
    },
    {
      "epoch": 0.8050847457627118,
      "grad_norm": 1.016517996788025,
      "learning_rate": 9.943333448279442e-07,
      "loss": 0.564,
      "step": 95
    },
    {
      "epoch": 0.8135593220338984,
      "grad_norm": 0.8958804607391357,
      "learning_rate": 9.157540094228282e-07,
      "loss": 0.58,
      "step": 96
    },
    {
      "epoch": 0.8220338983050848,
      "grad_norm": 0.916645884513855,
      "learning_rate": 8.400978478241241e-07,
      "loss": 0.4649,
      "step": 97
    },
    {
      "epoch": 0.8305084745762712,
      "grad_norm": 0.8990329504013062,
      "learning_rate": 7.674184834360587e-07,
      "loss": 0.5424,
      "step": 98
    },
    {
      "epoch": 0.8389830508474576,
      "grad_norm": 0.9331949949264526,
      "learning_rate": 6.977674297752169e-07,
      "loss": 0.5427,
      "step": 99
    },
    {
      "epoch": 0.847457627118644,
      "grad_norm": 0.9024314284324646,
      "learning_rate": 6.311940539589152e-07,
      "loss": 0.588,
      "step": 100
    },
    {
      "epoch": 0.8559322033898306,
      "grad_norm": 0.9858136177062988,
      "learning_rate": 5.677455417148799e-07,
      "loss": 0.6236,
      "step": 101
    },
    {
      "epoch": 0.864406779661017,
      "grad_norm": 0.924796462059021,
      "learning_rate": 5.074668639370556e-07,
      "loss": 0.5011,
      "step": 102
    },
    {
      "epoch": 0.8728813559322034,
      "grad_norm": 0.9715655446052551,
      "learning_rate": 4.504007448112373e-07,
      "loss": 0.568,
      "step": 103
    },
    {
      "epoch": 0.8813559322033898,
      "grad_norm": 0.9612396955490112,
      "learning_rate": 3.9658763153311863e-07,
      "loss": 0.5957,
      "step": 104
    },
    {
      "epoch": 0.8898305084745762,
      "grad_norm": 0.8345690369606018,
      "learning_rate": 3.460656656402143e-07,
      "loss": 0.5547,
      "step": 105
    },
    {
      "epoch": 0.8983050847457628,
      "grad_norm": 0.9040005207061768,
      "learning_rate": 2.9887065597799247e-07,
      "loss": 0.5367,
      "step": 106
    },
    {
      "epoch": 0.9067796610169492,
      "grad_norm": 0.7813464403152466,
      "learning_rate": 2.5503605331935833e-07,
      "loss": 0.5397,
      "step": 107
    },
    {
      "epoch": 0.9152542372881356,
      "grad_norm": 1.0138887166976929,
      "learning_rate": 2.145929266554925e-07,
      "loss": 0.5478,
      "step": 108
    },
    {
      "epoch": 0.923728813559322,
      "grad_norm": 1.2826368808746338,
      "learning_rate": 1.7756994117483972e-07,
      "loss": 0.5731,
      "step": 109
    },
    {
      "epoch": 0.9322033898305084,
      "grad_norm": 1.0911537408828735,
      "learning_rate": 1.4399333794586233e-07,
      "loss": 0.585,
      "step": 110
    },
    {
      "epoch": 0.940677966101695,
      "grad_norm": 0.9331554174423218,
      "learning_rate": 1.1388691531795196e-07,
      "loss": 0.5553,
      "step": 111
    },
    {
      "epoch": 0.9491525423728814,
      "grad_norm": 1.0142954587936401,
      "learning_rate": 8.727201205369431e-08,
      "loss": 0.5371,
      "step": 112
    },
    {
      "epoch": 0.9576271186440678,
      "grad_norm": 0.9292749762535095,
      "learning_rate": 6.416749220442344e-08,
      "loss": 0.5288,
      "step": 113
    },
    {
      "epoch": 0.9661016949152542,
      "grad_norm": 0.8662624955177307,
      "learning_rate": 4.4589731739804295e-08,
      "loss": 0.5724,
      "step": 114
    },
    {
      "epoch": 0.9745762711864406,
      "grad_norm": 0.8610790371894836,
      "learning_rate": 2.855260694090905e-08,
      "loss": 0.4941,
      "step": 115
    },
    {
      "epoch": 0.9830508474576272,
      "grad_norm": 0.8874425292015076,
      "learning_rate": 1.6067484565019372e-08,
      "loss": 0.5518,
      "step": 116
    },
    {
      "epoch": 0.9915254237288136,
      "grad_norm": 0.9031229615211487,
      "learning_rate": 7.1432137891198e-09,
      "loss": 0.5562,
      "step": 117
    },
    {
      "epoch": 1.0,
      "grad_norm": 0.8897117972373962,
      "learning_rate": 1.7861199377987623e-09,
      "loss": 0.562,
      "step": 118
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 118,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 0,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1.3048368484214374e+17,
  "train_batch_size": 3,
  "trial_name": null,
  "trial_params": null
}