{
  "best_metric": 1.3127143383026123,
  "best_model_checkpoint": "./results/checkpoint-12000",
  "epoch": 2.9239766081871346,
  "eval_steps": 1000,
  "global_step": 12000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    { "epoch": 0.02, "grad_norm": 33.17779541015625, "learning_rate": 7.711038961038962e-07, "loss": 3.4711, "step": 100 },
    { "epoch": 0.05, "grad_norm": 37.28006362915039, "learning_rate": 1.5827922077922078e-06, "loss": 3.5437, "step": 200 },
    { "epoch": 0.07, "grad_norm": 36.98271942138672, "learning_rate": 2.3944805194805195e-06, "loss": 3.3504, "step": 300 },
    { "epoch": 0.1, "grad_norm": 40.87253952026367, "learning_rate": 3.2061688311688315e-06, "loss": 3.0778, "step": 400 },
    { "epoch": 0.12, "grad_norm": 38.7576904296875, "learning_rate": 4.017857142857143e-06, "loss": 2.7729, "step": 500 },
    { "epoch": 0.15, "grad_norm": 32.913177490234375, "learning_rate": 4.829545454545455e-06, "loss": 2.4744, "step": 600 },
    { "epoch": 0.17, "grad_norm": 25.826562881469727, "learning_rate": 5.641233766233767e-06, "loss": 2.0562, "step": 700 },
    { "epoch": 0.19, "grad_norm": 18.770437240600586, "learning_rate": 6.452922077922078e-06, "loss": 1.8168, "step": 800 },
    { "epoch": 0.22, "grad_norm": 25.943300247192383, "learning_rate": 7.264610389610391e-06, "loss": 1.6557, "step": 900 },
    { "epoch": 0.24, "grad_norm": 19.324600219726562, "learning_rate": 8.076298701298701e-06, "loss": 1.6403, "step": 1000 },
    { "epoch": 0.24, "eval_loss": 1.5921334028244019, "eval_runtime": 26.2796, "eval_samples_per_second": 138.815, "eval_steps_per_second": 17.352, "step": 1000 },
    { "epoch": 0.27, "grad_norm": 16.94064712524414, "learning_rate": 8.887987012987014e-06, "loss": 1.6253, "step": 1100 },
    { "epoch": 0.29, "grad_norm": 30.26157569885254, "learning_rate": 9.699675324675324e-06, "loss": 1.5771, "step": 1200 },
    { "epoch": 0.32, "grad_norm": 16.245685577392578, "learning_rate": 9.999202318014557e-06, "loss": 1.5458, "step": 1300 },
    { "epoch": 0.34, "grad_norm": 26.97745132446289, "learning_rate": 9.994661021925825e-06, "loss": 1.5736, "step": 1400 },
    { "epoch": 0.37, "grad_norm": 16.87709617614746, "learning_rate": 9.986104618610859e-06, "loss": 1.5701, "step": 1500 },
    { "epoch": 0.39, "grad_norm": 17.941173553466797, "learning_rate": 9.973539986389656e-06, "loss": 1.624, "step": 1600 },
    { "epoch": 0.41, "grad_norm": 19.09428596496582, "learning_rate": 9.956977225716559e-06, "loss": 1.574, "step": 1700 },
    { "epoch": 0.44, "grad_norm": 20.125308990478516, "learning_rate": 9.936429651060717e-06, "loss": 1.4985, "step": 1800 },
    { "epoch": 0.46, "grad_norm": 20.364625930786133, "learning_rate": 9.911913780202837e-06, "loss": 1.6142, "step": 1900 },
    { "epoch": 0.49, "grad_norm": 41.57521438598633, "learning_rate": 9.883449320956886e-06, "loss": 1.5333, "step": 2000 },
    { "epoch": 0.49, "eval_loss": 1.4966720342636108, "eval_runtime": 26.2841, "eval_samples_per_second": 138.791, "eval_steps_per_second": 17.349, "step": 2000 },
    { "epoch": 0.51, "grad_norm": 11.074485778808594, "learning_rate": 9.851402406341606e-06, "loss": 1.5664, "step": 2100 },
    { "epoch": 0.54, "grad_norm": 20.685998916625977, "learning_rate": 9.815151429615968e-06, "loss": 1.5363, "step": 2200 },
    { "epoch": 0.56, "grad_norm": 26.39971923828125, "learning_rate": 9.775029649803677e-06, "loss": 1.4989, "step": 2300 },
    { "epoch": 0.58, "grad_norm": 20.003503799438477, "learning_rate": 9.731069319994049e-06, "loss": 1.4553, "step": 2400 },
    { "epoch": 0.61, "grad_norm": 15.458146095275879, "learning_rate": 9.683305779009301e-06, "loss": 1.4732, "step": 2500 },
    { "epoch": 0.63, "grad_norm": 13.668593406677246, "learning_rate": 9.631777422996384e-06, "loss": 1.4907, "step": 2600 },
    { "epoch": 0.66, "grad_norm": 18.606111526489258, "learning_rate": 9.576525674561088e-06, "loss": 1.5343, "step": 2700 },
    { "epoch": 0.68, "grad_norm": 23.913843154907227, "learning_rate": 9.517594949469258e-06, "loss": 1.4561, "step": 2800 },
    { "epoch": 0.71, "grad_norm": 23.21706771850586, "learning_rate": 9.45503262094184e-06, "loss": 1.4558, "step": 2900 },
    { "epoch": 0.73, "grad_norm": 18.72185516357422, "learning_rate": 9.388888981572521e-06, "loss": 1.4404, "step": 3000 },
    { "epoch": 0.73, "eval_loss": 1.4381717443466187, "eval_runtime": 26.2702, "eval_samples_per_second": 138.865, "eval_steps_per_second": 17.358, "step": 3000 },
    { "epoch": 0.76, "grad_norm": 21.71692657470703, "learning_rate": 9.319217202898511e-06, "loss": 1.46, "step": 3100 },
    { "epoch": 0.78, "grad_norm": 23.30307960510254, "learning_rate": 9.246073292657036e-06, "loss": 1.4993, "step": 3200 },
    { "epoch": 0.8, "grad_norm": 17.032285690307617, "learning_rate": 9.169516049761827e-06, "loss": 1.426, "step": 3300 },
    { "epoch": 0.83, "grad_norm": 25.84226417541504, "learning_rate": 9.089607017035875e-06, "loss": 1.45, "step": 3400 },
    { "epoch": 0.85, "grad_norm": 15.991667747497559, "learning_rate": 9.006410431738393e-06, "loss": 1.4416, "step": 3500 },
    { "epoch": 0.88, "grad_norm": 25.301294326782227, "learning_rate": 8.919993173925775e-06, "loss": 1.4338, "step": 3600 },
    { "epoch": 0.9, "grad_norm": 21.2309513092041, "learning_rate": 8.830424712688075e-06, "loss": 1.4441, "step": 3700 },
    { "epoch": 0.93, "grad_norm": 27.830860137939453, "learning_rate": 8.737777050304201e-06, "loss": 1.4336, "step": 3800 },
    { "epoch": 0.95, "grad_norm": 13.40440845489502, "learning_rate": 8.642124664360743e-06, "loss": 1.4462, "step": 3900 },
    { "epoch": 0.97, "grad_norm": 12.035055160522461, "learning_rate": 8.543544447880932e-06, "loss": 1.3731, "step": 4000 },
    { "epoch": 0.97, "eval_loss": 1.4002097845077515, "eval_runtime": 26.2861, "eval_samples_per_second": 138.78, "eval_steps_per_second": 17.348, "step": 4000 },
    { "epoch": 1.0, "grad_norm": 26.902008056640625, "learning_rate": 8.442115647511902e-06, "loss": 1.3534, "step": 4100 },
    { "epoch": 1.02, "grad_norm": 22.86239242553711, "learning_rate": 8.338975181963125e-06, "loss": 1.423, "step": 4200 },
    { "epoch": 1.05, "grad_norm": 14.839469909667969, "learning_rate": 8.232122458995769e-06, "loss": 1.4046, "step": 4300 },
    { "epoch": 1.07, "grad_norm": 11.413098335266113, "learning_rate": 8.122671497992996e-06, "loss": 1.4214, "step": 4400 },
    { "epoch": 1.1, "grad_norm": 12.68773078918457, "learning_rate": 8.010710284374138e-06, "loss": 1.3689, "step": 4500 },
    { "epoch": 1.12, "grad_norm": 26.15570640563965, "learning_rate": 7.896328821499958e-06, "loss": 1.4359, "step": 4600 },
    { "epoch": 1.15, "grad_norm": 17.21908950805664, "learning_rate": 7.779619058320773e-06, "loss": 1.3365, "step": 4700 },
    { "epoch": 1.17, "grad_norm": 13.629748344421387, "learning_rate": 7.660674815460536e-06, "loss": 1.4184, "step": 4800 },
    { "epoch": 1.19, "grad_norm": 18.672447204589844, "learning_rate": 7.539591709796332e-06, "loss": 1.4249, "step": 4900 },
    { "epoch": 1.22, "grad_norm": 19.526599884033203, "learning_rate": 7.4164670775939064e-06, "loss": 1.4118, "step": 5000 },
    { "epoch": 1.22, "eval_loss": 1.371161699295044, "eval_runtime": 26.2776, "eval_samples_per_second": 138.826, "eval_steps_per_second": 17.353, "step": 5000 },
    { "epoch": 1.24, "grad_norm": 21.340396881103516, "learning_rate": 7.291399896260997e-06, "loss": 1.4033, "step": 5100 },
    { "epoch": 1.27, "grad_norm": 23.687089920043945, "learning_rate": 7.164490704781396e-06, "loss": 1.3883, "step": 5200 },
    { "epoch": 1.29, "grad_norm": 24.734172821044922, "learning_rate": 7.035841522893689e-06, "loss": 1.4099, "step": 5300 },
    { "epoch": 1.32, "grad_norm": 10.669279098510742, "learning_rate": 6.90555576907965e-06, "loss": 1.3431, "step": 5400 },
    { "epoch": 1.34, "grad_norm": 14.781868934631348, "learning_rate": 6.7737381774282e-06, "loss": 1.3807, "step": 5500 },
    { "epoch": 1.36, "grad_norm": 25.798255920410156, "learning_rate": 6.640494713441796e-06, "loss": 1.3509, "step": 5600 },
    { "epoch": 1.39, "grad_norm": 24.66495704650879, "learning_rate": 6.505932488852898e-06, "loss": 1.3313, "step": 5700 },
    { "epoch": 1.41, "grad_norm": 18.16162872314453, "learning_rate": 6.370159675519001e-06, "loss": 1.3439, "step": 5800 },
    { "epoch": 1.44, "grad_norm": 19.407123565673828, "learning_rate": 6.233285418465477e-06, "loss": 1.386, "step": 5900 },
    { "epoch": 1.46, "grad_norm": 17.162641525268555, "learning_rate": 6.095419748146076e-06, "loss": 1.3701, "step": 6000 },
    { "epoch": 1.46, "eval_loss": 1.3507885932922363, "eval_runtime": 26.3071, "eval_samples_per_second": 138.67, "eval_steps_per_second": 17.334, "step": 6000 },
    { "epoch": 1.49, "grad_norm": 17.1251220703125, "learning_rate": 5.9566734919916746e-06, "loss": 1.3748, "step": 6100 },
    { "epoch": 1.51, "grad_norm": 13.51869010925293, "learning_rate": 5.817158185318335e-06, "loss": 1.3594, "step": 6200 },
    { "epoch": 1.54, "grad_norm": 13.84580135345459, "learning_rate": 5.678390585745784e-06, "loss": 1.3541, "step": 6300 },
    { "epoch": 1.56, "grad_norm": 13.059226036071777, "learning_rate": 5.537679049589568e-06, "loss": 1.3485, "step": 6400 },
    { "epoch": 1.58, "grad_norm": 15.174361228942871, "learning_rate": 5.396535284093278e-06, "loss": 1.3676, "step": 6500 },
    { "epoch": 1.61, "grad_norm": 27.069568634033203, "learning_rate": 5.255072751882363e-06, "loss": 1.3789, "step": 6600 },
    { "epoch": 1.63, "grad_norm": 18.00585174560547, "learning_rate": 5.113405171832404e-06, "loss": 1.3779, "step": 6700 },
    { "epoch": 1.66, "grad_norm": 16.768413543701172, "learning_rate": 4.971646427652806e-06, "loss": 1.3338, "step": 6800 },
    { "epoch": 1.68, "grad_norm": 24.71240234375, "learning_rate": 4.829910476337972e-06, "loss": 1.405, "step": 6900 },
    { "epoch": 1.71, "grad_norm": 11.74875259399414, "learning_rate": 4.688311256559587e-06, "loss": 1.3563, "step": 7000 },
    { "epoch": 1.71, "eval_loss": 1.3359144926071167, "eval_runtime": 26.3043, "eval_samples_per_second": 138.684, "eval_steps_per_second": 17.336, "step": 7000 },
    { "epoch": 1.73, "grad_norm": 20.35810661315918, "learning_rate": 4.546962597073607e-06, "loss": 1.3103, "step": 7100 },
    { "epoch": 1.75, "grad_norm": 19.12173080444336, "learning_rate": 4.405978125215627e-06, "loss": 1.3983, "step": 7200 },
    { "epoch": 1.78, "grad_norm": 24.56060218811035, "learning_rate": 4.265471175558156e-06, "loss": 1.3357, "step": 7300 },
    { "epoch": 1.8, "grad_norm": 17.104286193847656, "learning_rate": 4.125554698803241e-06, "loss": 1.3546, "step": 7400 },
    { "epoch": 1.83, "grad_norm": 18.4068603515625, "learning_rate": 3.986341170983672e-06, "loss": 1.3774, "step": 7500 },
    { "epoch": 1.85, "grad_norm": 15.030558586120605, "learning_rate": 3.847942503045776e-06, "loss": 1.3573, "step": 7600 },
    { "epoch": 1.88, "grad_norm": 25.187389373779297, "learning_rate": 3.7104699508864606e-06, "loss": 1.3029, "step": 7700 },
    { "epoch": 1.9, "grad_norm": 14.766794204711914, "learning_rate": 3.5740340259168383e-06, "loss": 1.3441, "step": 7800 },
    { "epoch": 1.92, "grad_norm": 15.01491928100586, "learning_rate": 3.4387444062243453e-06, "loss": 1.3704, "step": 7900 },
    { "epoch": 1.95, "grad_norm": 14.977212905883789, "learning_rate": 3.3047098484047314e-06, "loss": 1.3027, "step": 8000 },
    { "epoch": 1.95, "eval_loss": 1.3263477087020874, "eval_runtime": 26.2892, "eval_samples_per_second": 138.764, "eval_steps_per_second": 17.346, "step": 8000 },
    { "epoch": 1.97, "grad_norm": 22.42038917541504, "learning_rate": 3.172038100134823e-06, "loss": 1.3301, "step": 8100 },
    { "epoch": 2.0, "grad_norm": 9.849152565002441, "learning_rate": 3.040835813556352e-06, "loss": 1.3057, "step": 8200 },
    { "epoch": 2.02, "grad_norm": 13.574536323547363, "learning_rate": 2.911208459540442e-06, "loss": 1.3523, "step": 8300 },
    { "epoch": 2.05, "grad_norm": 21.791500091552734, "learning_rate": 2.783260242901694e-06, "loss": 1.3743, "step": 8400 },
    { "epoch": 2.07, "grad_norm": 19.42542266845703, "learning_rate": 2.6583465257615547e-06, "loss": 1.3198, "step": 8500 },
    { "epoch": 2.1, "grad_norm": 10.656351089477539, "learning_rate": 2.5340443850538414e-06, "loss": 1.3312, "step": 8600 },
    { "epoch": 2.12, "grad_norm": 15.180091857910156, "learning_rate": 2.4117245763133403e-06, "loss": 1.3036, "step": 8700 },
    { "epoch": 2.14, "grad_norm": 11.437941551208496, "learning_rate": 2.2914854299664442e-06, "loss": 1.3206, "step": 8800 },
    { "epoch": 2.17, "grad_norm": 12.555315017700195, "learning_rate": 2.173423603837027e-06, "loss": 1.3197, "step": 8900 },
    { "epoch": 2.19, "grad_norm": 11.639187812805176, "learning_rate": 2.0576340054451755e-06, "loss": 1.3416, "step": 9000 },
    { "epoch": 2.19, "eval_loss": 1.319271206855774, "eval_runtime": 26.3038, "eval_samples_per_second": 138.687, "eval_steps_per_second": 17.336, "step": 9000 },
    { "epoch": 2.22, "grad_norm": 23.691478729248047, "learning_rate": 1.944209715712927e-06, "loss": 1.3242, "step": 9100 },
    { "epoch": 2.24, "grad_norm": 15.768092155456543, "learning_rate": 1.8332419141384222e-06, "loss": 1.3714, "step": 9200 },
    { "epoch": 2.27, "grad_norm": 14.769400596618652, "learning_rate": 1.7248198054985233e-06, "loss": 1.3876, "step": 9300 },
    { "epoch": 2.29, "grad_norm": 14.024765014648438, "learning_rate": 1.6190305481389102e-06, "loss": 1.3597, "step": 9400 },
    { "epoch": 2.31, "grad_norm": 14.97281265258789, "learning_rate": 1.5159591839092319e-06, "loss": 1.3216, "step": 9500 },
    { "epoch": 2.34, "grad_norm": 27.76974868774414, "learning_rate": 1.415688569799686e-06, "loss": 1.3462, "step": 9600 },
    { "epoch": 2.36, "grad_norm": 23.970733642578125, "learning_rate": 1.3182993113339553e-06, "loss": 1.3334, "step": 9700 },
    { "epoch": 2.39, "grad_norm": 15.778496742248535, "learning_rate": 1.223869697772052e-06, "loss": 1.3404, "step": 9800 },
    { "epoch": 2.41, "grad_norm": 16.48326873779297, "learning_rate": 1.1324756391751658e-06, "loss": 1.3361, "step": 9900 },
    { "epoch": 2.44, "grad_norm": 19.057518005371094, "learning_rate": 1.0441906053830887e-06, "loss": 1.2863, "step": 10000 },
    { "epoch": 2.44, "eval_loss": 1.315341830253601, "eval_runtime": 26.3084, "eval_samples_per_second": 138.663, "eval_steps_per_second": 17.333, "step": 10000 },
    { "epoch": 2.46, "grad_norm": 15.053688049316406, "learning_rate": 9.590855669533e-07, "loss": 1.3567, "step": 10100 },
    { "epoch": 2.49, "grad_norm": 16.501331329345703, "learning_rate": 8.77228938109167e-07, "loss": 1.336, "step": 10200 },
    { "epoch": 2.51, "grad_norm": 10.03864860534668, "learning_rate": 7.986865217431261e-07, "loss": 1.3121, "step": 10300 },
    { "epoch": 2.53, "grad_norm": 14.122137069702148, "learning_rate": 7.235214565190696e-07, "loss": 1.3092, "step": 10400 },
    { "epoch": 2.56, "grad_norm": 25.29042625427246, "learning_rate": 6.517941661164445e-07, "loss": 1.3221, "step": 10500 },
    { "epoch": 2.58, "grad_norm": 17.808534622192383, "learning_rate": 5.835623106568783e-07, "loss": 1.3403, "step": 10600 },
    { "epoch": 2.61, "grad_norm": 15.087233543395996, "learning_rate": 5.188807403523721e-07, "loss": 1.3124, "step": 10700 },
    { "epoch": 2.63, "grad_norm": 15.44356918334961, "learning_rate": 4.5780145141231526e-07, "loss": 1.3397, "step": 10800 },
    { "epoch": 2.66, "grad_norm": 18.34058952331543, "learning_rate": 4.0037354424478926e-07, "loss": 1.3471, "step": 10900 },
    { "epoch": 2.68, "grad_norm": 13.286628723144531, "learning_rate": 3.466431839857326e-07, "loss": 1.329, "step": 11000 },
    { "epoch": 2.68, "eval_loss": 1.3130227327346802, "eval_runtime": 26.3079, "eval_samples_per_second": 138.665, "eval_steps_per_second": 17.333, "step": 11000 },
    { "epoch": 2.7, "grad_norm": 25.197416305541992, "learning_rate": 2.9713480847324947e-07, "loss": 1.3656, "step": 11100 },
    { "epoch": 2.73, "grad_norm": 16.662456512451172, "learning_rate": 2.508881149660197e-07, "loss": 1.343, "step": 11200 },
    { "epoch": 2.75, "grad_norm": 19.900924682617188, "learning_rate": 2.0845913668792794e-07, "loss": 1.3524, "step": 11300 },
    { "epoch": 2.78, "grad_norm": 12.059335708618164, "learning_rate": 1.698819814385927e-07, "loss": 1.3436, "step": 11400 },
    { "epoch": 2.8, "grad_norm": 20.73145294189453, "learning_rate": 1.3518766061480726e-07, "loss": 1.3223, "step": 11500 },
    { "epoch": 2.83, "grad_norm": 13.213232040405273, "learning_rate": 1.0440406428111116e-07, "loss": 1.3015, "step": 11600 },
    { "epoch": 2.85, "grad_norm": 17.85313606262207, "learning_rate": 7.755593874952505e-08, "loss": 1.2938, "step": 11700 },
    { "epoch": 2.88, "grad_norm": 18.61409568786621, "learning_rate": 5.4664866686491845e-08, "loss": 1.3169, "step": 11800 },
    { "epoch": 2.9, "grad_norm": 17.018802642822266, "learning_rate": 3.574924976300742e-08, "loss": 1.2538, "step": 11900 },
    { "epoch": 2.92, "grad_norm": 22.11118507385254, "learning_rate": 2.082429386188578e-08, "loss": 1.3278, "step": 12000 },
    { "epoch": 2.92, "eval_loss": 1.3127143383026123, "eval_runtime": 26.3216, "eval_samples_per_second": 138.593, "eval_steps_per_second": 17.324, "step": 12000 }
  ],
  "logging_steps": 100,
  "max_steps": 12312,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 1000,
  "total_flos": 3.087179791853568e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}