{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.0,
  "eval_steps": 500,
  "global_step": 20000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "grad_norm": 5.078273773193359,
      "learning_rate": 4.000000000000001e-06,
      "loss": 1.8583,
      "step": 100
    },
    {
      "epoch": 0.04,
      "grad_norm": 6.52927303314209,
      "learning_rate": 8.000000000000001e-06,
      "loss": 1.7022,
      "step": 200
    },
    {
      "epoch": 0.06,
      "grad_norm": 4.726405620574951,
      "learning_rate": 1.2e-05,
      "loss": 1.7109,
      "step": 300
    },
    {
      "epoch": 0.08,
      "grad_norm": 6.379272937774658,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 1.6689,
      "step": 400
    },
    {
      "epoch": 0.1,
      "grad_norm": 3.252293109893799,
      "learning_rate": 2e-05,
      "loss": 1.6766,
      "step": 500
    },
    {
      "epoch": 0.1,
      "eval_loss": 1.6459439992904663,
      "eval_runtime": 21.8542,
      "eval_samples_per_second": 45.758,
      "eval_steps_per_second": 5.72,
      "step": 500
    },
    {
      "epoch": 0.12,
      "grad_norm": 3.9442427158355713,
      "learning_rate": 1.9932203389830512e-05,
      "loss": 1.6865,
      "step": 600
    },
    {
      "epoch": 0.14,
      "grad_norm": 2.5662379264831543,
      "learning_rate": 1.986440677966102e-05,
      "loss": 1.6535,
      "step": 700
    },
    {
      "epoch": 0.16,
      "grad_norm": 4.087454795837402,
      "learning_rate": 1.9796610169491527e-05,
      "loss": 1.6463,
      "step": 800
    },
    {
      "epoch": 0.18,
      "grad_norm": 3.5358777046203613,
      "learning_rate": 1.9728813559322034e-05,
      "loss": 1.6238,
      "step": 900
    },
    {
      "epoch": 0.2,
      "grad_norm": 4.294543743133545,
      "learning_rate": 1.9661016949152545e-05,
      "loss": 1.614,
      "step": 1000
    },
    {
      "epoch": 0.2,
      "eval_loss": 1.587148666381836,
      "eval_runtime": 21.7559,
      "eval_samples_per_second": 45.965,
      "eval_steps_per_second": 5.746,
      "step": 1000
    },
    {
      "epoch": 0.22,
      "grad_norm": 4.340688705444336,
      "learning_rate": 1.9593220338983052e-05,
      "loss": 1.5889,
      "step": 1100
    },
    {
      "epoch": 0.24,
      "grad_norm": 3.321453094482422,
      "learning_rate": 1.9525423728813562e-05,
      "loss": 1.5936,
      "step": 1200
    },
    {
      "epoch": 0.26,
      "grad_norm": 2.8375635147094727,
      "learning_rate": 1.945762711864407e-05,
      "loss": 1.5839,
      "step": 1300
    },
    {
      "epoch": 0.28,
      "grad_norm": 4.362390518188477,
      "learning_rate": 1.938983050847458e-05,
      "loss": 1.5912,
      "step": 1400
    },
    {
      "epoch": 0.3,
      "grad_norm": 2.9954068660736084,
      "learning_rate": 1.9322033898305087e-05,
      "loss": 1.5593,
      "step": 1500
    },
    {
      "epoch": 0.3,
      "eval_loss": 1.5585581064224243,
      "eval_runtime": 21.6908,
      "eval_samples_per_second": 46.102,
      "eval_steps_per_second": 5.763,
      "step": 1500
    },
    {
      "epoch": 0.32,
      "grad_norm": 3.2162888050079346,
      "learning_rate": 1.9254237288135595e-05,
      "loss": 1.5627,
      "step": 1600
    },
    {
      "epoch": 0.34,
      "grad_norm": 4.054400444030762,
      "learning_rate": 1.9186440677966102e-05,
      "loss": 1.5486,
      "step": 1700
    },
    {
      "epoch": 0.36,
      "grad_norm": 3.2316372394561768,
      "learning_rate": 1.9118644067796613e-05,
      "loss": 1.536,
      "step": 1800
    },
    {
      "epoch": 0.38,
      "grad_norm": 3.4788625240325928,
      "learning_rate": 1.905084745762712e-05,
      "loss": 1.5398,
      "step": 1900
    },
    {
      "epoch": 0.4,
      "grad_norm": 2.387064218521118,
      "learning_rate": 1.898305084745763e-05,
      "loss": 1.5452,
      "step": 2000
    },
    {
      "epoch": 0.4,
      "eval_loss": 1.5381073951721191,
      "eval_runtime": 21.6995,
      "eval_samples_per_second": 46.084,
      "eval_steps_per_second": 5.761,
      "step": 2000
    },
    {
      "epoch": 0.42,
      "grad_norm": 3.577282428741455,
      "learning_rate": 1.8915254237288138e-05,
      "loss": 1.5287,
      "step": 2100
    },
    {
      "epoch": 0.44,
      "grad_norm": 2.2378621101379395,
      "learning_rate": 1.8847457627118645e-05,
      "loss": 1.5211,
      "step": 2200
    },
    {
      "epoch": 0.46,
      "grad_norm": 3.0354368686676025,
      "learning_rate": 1.878033898305085e-05,
      "loss": 1.5209,
      "step": 2300
    },
    {
      "epoch": 0.48,
      "grad_norm": 2.8990373611450195,
      "learning_rate": 1.8712542372881358e-05,
      "loss": 1.5072,
      "step": 2400
    },
    {
      "epoch": 0.5,
      "grad_norm": 2.9740238189697266,
      "learning_rate": 1.8644745762711865e-05,
      "loss": 1.496,
      "step": 2500
    },
    {
      "epoch": 0.5,
      "eval_loss": 1.5467579364776611,
      "eval_runtime": 21.7136,
      "eval_samples_per_second": 46.054,
      "eval_steps_per_second": 5.757,
      "step": 2500
    },
    {
      "epoch": 0.52,
      "grad_norm": 2.4562160968780518,
      "learning_rate": 1.8576949152542373e-05,
      "loss": 1.5311,
      "step": 2600
    },
    {
      "epoch": 0.54,
      "grad_norm": 2.0434107780456543,
      "learning_rate": 1.8509152542372883e-05,
      "loss": 1.5107,
      "step": 2700
    },
    {
      "epoch": 0.56,
      "grad_norm": 3.7302677631378174,
      "learning_rate": 1.844135593220339e-05,
      "loss": 1.4883,
      "step": 2800
    },
    {
      "epoch": 0.58,
      "grad_norm": 2.7409582138061523,
      "learning_rate": 1.83735593220339e-05,
      "loss": 1.4936,
      "step": 2900
    },
    {
      "epoch": 0.6,
      "grad_norm": 3.0569841861724854,
      "learning_rate": 1.8305762711864408e-05,
      "loss": 1.4633,
      "step": 3000
    },
    {
      "epoch": 0.6,
      "eval_loss": 1.4889390468597412,
      "eval_runtime": 21.6749,
      "eval_samples_per_second": 46.136,
      "eval_steps_per_second": 5.767,
      "step": 3000
    },
    {
      "epoch": 0.62,
      "grad_norm": 3.209660768508911,
      "learning_rate": 1.823796610169492e-05,
      "loss": 1.468,
      "step": 3100
    },
    {
      "epoch": 0.64,
      "grad_norm": 2.617828130722046,
      "learning_rate": 1.8170169491525426e-05,
      "loss": 1.4635,
      "step": 3200
    },
    {
      "epoch": 0.66,
      "grad_norm": 3.617861747741699,
      "learning_rate": 1.8102372881355933e-05,
      "loss": 1.4716,
      "step": 3300
    },
    {
      "epoch": 0.68,
      "grad_norm": 3.5618486404418945,
      "learning_rate": 1.803457627118644e-05,
      "loss": 1.4736,
      "step": 3400
    },
    {
      "epoch": 0.7,
      "grad_norm": 3.6922543048858643,
      "learning_rate": 1.796677966101695e-05,
      "loss": 1.4669,
      "step": 3500
    },
    {
      "epoch": 0.7,
      "eval_loss": 1.4697989225387573,
      "eval_runtime": 21.6816,
      "eval_samples_per_second": 46.122,
      "eval_steps_per_second": 5.765,
      "step": 3500
    },
    {
      "epoch": 0.72,
      "grad_norm": 2.977830171585083,
      "learning_rate": 1.789898305084746e-05,
      "loss": 1.4782,
      "step": 3600
    },
    {
      "epoch": 0.74,
      "grad_norm": 2.5865039825439453,
      "learning_rate": 1.783118644067797e-05,
      "loss": 1.4519,
      "step": 3700
    },
    {
      "epoch": 0.76,
      "grad_norm": 2.9079766273498535,
      "learning_rate": 1.7763389830508476e-05,
      "loss": 1.4447,
      "step": 3800
    },
    {
      "epoch": 0.78,
      "grad_norm": 2.9517862796783447,
      "learning_rate": 1.7695593220338983e-05,
      "loss": 1.4631,
      "step": 3900
    },
    {
      "epoch": 0.8,
      "grad_norm": 3.2457385063171387,
      "learning_rate": 1.762779661016949e-05,
      "loss": 1.4366,
      "step": 4000
    },
    {
      "epoch": 0.8,
      "eval_loss": 1.4445842504501343,
      "eval_runtime": 21.6792,
      "eval_samples_per_second": 46.127,
      "eval_steps_per_second": 5.766,
      "step": 4000
    },
    {
      "epoch": 0.82,
      "grad_norm": 3.206425189971924,
      "learning_rate": 1.756e-05,
      "loss": 1.4466,
      "step": 4100
    },
    {
      "epoch": 0.84,
      "grad_norm": 2.559532403945923,
      "learning_rate": 1.749220338983051e-05,
      "loss": 1.433,
      "step": 4200
    },
    {
      "epoch": 0.86,
      "grad_norm": 3.243443012237549,
      "learning_rate": 1.742440677966102e-05,
      "loss": 1.437,
      "step": 4300
    },
    {
      "epoch": 0.88,
      "grad_norm": 3.06264066696167,
      "learning_rate": 1.7357288135593222e-05,
      "loss": 1.4512,
      "step": 4400
    },
    {
      "epoch": 0.9,
      "grad_norm": 3.4852590560913086,
      "learning_rate": 1.728949152542373e-05,
      "loss": 1.4121,
      "step": 4500
    },
    {
      "epoch": 0.9,
      "eval_loss": 1.4205034971237183,
      "eval_runtime": 21.6783,
      "eval_samples_per_second": 46.129,
      "eval_steps_per_second": 5.766,
      "step": 4500
    },
    {
      "epoch": 0.92,
      "grad_norm": 3.3655240535736084,
      "learning_rate": 1.722169491525424e-05,
      "loss": 1.4101,
      "step": 4600
    },
    {
      "epoch": 0.94,
      "grad_norm": 2.9424891471862793,
      "learning_rate": 1.7154576271186442e-05,
      "loss": 1.4278,
      "step": 4700
    },
    {
      "epoch": 0.96,
      "grad_norm": 2.7033684253692627,
      "learning_rate": 1.708677966101695e-05,
      "loss": 1.4321,
      "step": 4800
    },
    {
      "epoch": 0.98,
      "grad_norm": 2.4086625576019287,
      "learning_rate": 1.701898305084746e-05,
      "loss": 1.4152,
      "step": 4900
    },
    {
      "epoch": 1.0,
      "grad_norm": 3.7739360332489014,
      "learning_rate": 1.6951186440677967e-05,
      "loss": 1.3941,
      "step": 5000
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.4043042659759521,
      "eval_runtime": 21.6957,
      "eval_samples_per_second": 46.092,
      "eval_steps_per_second": 5.762,
      "step": 5000
    },
    {
      "epoch": 1.02,
      "grad_norm": 2.722904682159424,
      "learning_rate": 1.6883389830508478e-05,
      "loss": 1.3979,
      "step": 5100
    },
    {
      "epoch": 1.04,
      "grad_norm": 2.617483615875244,
      "learning_rate": 1.6815593220338985e-05,
      "loss": 1.3741,
      "step": 5200
    },
    {
      "epoch": 1.06,
      "grad_norm": 4.135140419006348,
      "learning_rate": 1.6747796610169492e-05,
      "loss": 1.3929,
      "step": 5300
    },
    {
      "epoch": 1.08,
      "grad_norm": 2.5010900497436523,
      "learning_rate": 1.668e-05,
      "loss": 1.3895,
      "step": 5400
    },
    {
      "epoch": 1.1,
      "grad_norm": 2.8484292030334473,
      "learning_rate": 1.661220338983051e-05,
      "loss": 1.4055,
      "step": 5500
    },
    {
      "epoch": 1.1,
      "eval_loss": 1.4005763530731201,
      "eval_runtime": 21.7476,
      "eval_samples_per_second": 45.982,
      "eval_steps_per_second": 5.748,
      "step": 5500
    },
    {
      "epoch": 1.12,
      "grad_norm": 3.469156503677368,
      "learning_rate": 1.6544406779661017e-05,
      "loss": 1.3843,
      "step": 5600
    },
    {
      "epoch": 1.14,
      "grad_norm": 2.569080352783203,
      "learning_rate": 1.6476610169491528e-05,
      "loss": 1.3812,
      "step": 5700
    },
    {
      "epoch": 1.16,
      "grad_norm": 2.751028299331665,
      "learning_rate": 1.6408813559322035e-05,
      "loss": 1.3811,
      "step": 5800
    },
    {
      "epoch": 1.18,
      "grad_norm": 3.277967691421509,
      "learning_rate": 1.6341016949152542e-05,
      "loss": 1.3601,
      "step": 5900
    },
    {
      "epoch": 1.2,
      "grad_norm": 2.5392448902130127,
      "learning_rate": 1.627322033898305e-05,
      "loss": 1.359,
      "step": 6000
    },
    {
      "epoch": 1.2,
      "eval_loss": 1.3665966987609863,
      "eval_runtime": 21.7115,
      "eval_samples_per_second": 46.059,
      "eval_steps_per_second": 5.757,
      "step": 6000
    },
    {
      "epoch": 1.22,
      "grad_norm": 2.700984477996826,
      "learning_rate": 1.620542372881356e-05,
      "loss": 1.3541,
      "step": 6100
    },
    {
      "epoch": 1.24,
      "grad_norm": 1.9332221746444702,
      "learning_rate": 1.6137627118644068e-05,
      "loss": 1.3674,
      "step": 6200
    },
    {
      "epoch": 1.26,
      "grad_norm": 2.326171636581421,
      "learning_rate": 1.6069830508474578e-05,
      "loss": 1.3703,
      "step": 6300
    },
    {
      "epoch": 1.28,
      "grad_norm": 2.8028652667999268,
      "learning_rate": 1.600203389830509e-05,
      "loss": 1.3662,
      "step": 6400
    },
    {
      "epoch": 1.3,
      "grad_norm": 3.253483295440674,
      "learning_rate": 1.5934237288135596e-05,
      "loss": 1.348,
      "step": 6500
    },
    {
      "epoch": 1.3,
      "eval_loss": 1.388239860534668,
      "eval_runtime": 21.7372,
      "eval_samples_per_second": 46.004,
      "eval_steps_per_second": 5.751,
      "step": 6500
    },
    {
      "epoch": 1.32,
      "grad_norm": 2.5033419132232666,
      "learning_rate": 1.5866440677966103e-05,
      "loss": 1.3552,
      "step": 6600
    },
    {
      "epoch": 1.34,
      "grad_norm": 3.955854892730713,
      "learning_rate": 1.579864406779661e-05,
      "loss": 1.364,
      "step": 6700
    },
    {
      "epoch": 1.36,
      "grad_norm": 6.7678422927856445,
      "learning_rate": 1.573084745762712e-05,
      "loss": 1.3657,
      "step": 6800
    },
    {
      "epoch": 1.38,
      "grad_norm": 3.65767502784729,
      "learning_rate": 1.5663050847457628e-05,
      "loss": 1.37,
      "step": 6900
    },
    {
      "epoch": 1.4,
      "grad_norm": 3.6031250953674316,
      "learning_rate": 1.559525423728814e-05,
      "loss": 1.3703,
      "step": 7000
    },
    {
      "epoch": 1.4,
      "eval_loss": 1.40153169631958,
      "eval_runtime": 21.6996,
      "eval_samples_per_second": 46.084,
      "eval_steps_per_second": 5.76,
      "step": 7000
    },
    {
      "epoch": 1.42,
      "grad_norm": 3.184861421585083,
      "learning_rate": 1.5527457627118646e-05,
      "loss": 1.3393,
      "step": 7100
    },
    {
      "epoch": 1.44,
      "grad_norm": 2.626770496368408,
      "learning_rate": 1.546033898305085e-05,
      "loss": 1.3341,
      "step": 7200
    },
    {
      "epoch": 1.46,
      "grad_norm": 3.0047240257263184,
      "learning_rate": 1.5392542372881356e-05,
      "loss": 1.3362,
      "step": 7300
    },
    {
      "epoch": 1.48,
      "grad_norm": 2.881134033203125,
      "learning_rate": 1.5324745762711867e-05,
      "loss": 1.3449,
      "step": 7400
    },
    {
      "epoch": 1.5,
      "grad_norm": 3.289036512374878,
      "learning_rate": 1.5256949152542376e-05,
      "loss": 1.3317,
      "step": 7500
    },
    {
      "epoch": 1.5,
      "eval_loss": 1.372812271118164,
      "eval_runtime": 21.7039,
      "eval_samples_per_second": 46.075,
      "eval_steps_per_second": 5.759,
      "step": 7500
    },
    {
      "epoch": 1.52,
      "grad_norm": 2.3777339458465576,
      "learning_rate": 1.5189152542372883e-05,
      "loss": 1.3556,
      "step": 7600
    },
    {
      "epoch": 1.54,
      "grad_norm": 2.7420947551727295,
      "learning_rate": 1.5121355932203392e-05,
      "loss": 1.3268,
      "step": 7700
    },
    {
      "epoch": 1.56,
      "grad_norm": 4.065169334411621,
      "learning_rate": 1.5053559322033899e-05,
      "loss": 1.3289,
      "step": 7800
    },
    {
      "epoch": 1.58,
      "grad_norm": 2.720052719116211,
      "learning_rate": 1.498576271186441e-05,
      "loss": 1.324,
      "step": 7900
    },
    {
      "epoch": 1.6,
      "grad_norm": 2.457838773727417,
      "learning_rate": 1.4917966101694917e-05,
      "loss": 1.3361,
      "step": 8000
    },
    {
      "epoch": 1.6,
      "eval_loss": 1.298282265663147,
      "eval_runtime": 21.7632,
      "eval_samples_per_second": 45.949,
      "eval_steps_per_second": 5.744,
      "step": 8000
    },
    {
      "epoch": 1.62,
      "grad_norm": 3.103018045425415,
      "learning_rate": 1.4850169491525426e-05,
      "loss": 1.3192,
      "step": 8100
    },
    {
      "epoch": 1.64,
      "grad_norm": 2.3513784408569336,
      "learning_rate": 1.4782372881355933e-05,
      "loss": 1.3202,
      "step": 8200
    },
    {
      "epoch": 1.66,
      "grad_norm": 2.4736735820770264,
      "learning_rate": 1.4714576271186442e-05,
      "loss": 1.3378,
      "step": 8300
    },
    {
      "epoch": 1.68,
      "grad_norm": 2.902289628982544,
      "learning_rate": 1.464677966101695e-05,
      "loss": 1.3158,
      "step": 8400
    },
    {
      "epoch": 1.7,
      "grad_norm": 4.001801490783691,
      "learning_rate": 1.457898305084746e-05,
      "loss": 1.317,
      "step": 8500
    },
    {
      "epoch": 1.7,
      "eval_loss": 1.3023865222930908,
      "eval_runtime": 21.713,
      "eval_samples_per_second": 46.055,
      "eval_steps_per_second": 5.757,
      "step": 8500
    },
    {
      "epoch": 1.72,
      "grad_norm": 3.5500471591949463,
      "learning_rate": 1.4511186440677967e-05,
      "loss": 1.3322,
      "step": 8600
    },
    {
      "epoch": 1.74,
      "grad_norm": 5.206698894500732,
      "learning_rate": 1.4443389830508476e-05,
      "loss": 1.3038,
      "step": 8700
    },
    {
      "epoch": 1.76,
      "grad_norm": 3.068844795227051,
      "learning_rate": 1.4375593220338983e-05,
      "loss": 1.3209,
      "step": 8800
    },
    {
      "epoch": 1.78,
      "grad_norm": 2.7392385005950928,
      "learning_rate": 1.4307796610169494e-05,
      "loss": 1.3151,
      "step": 8900
    },
    {
      "epoch": 1.8,
      "grad_norm": 2.984192132949829,
      "learning_rate": 1.4240000000000001e-05,
      "loss": 1.3201,
      "step": 9000
    },
    {
      "epoch": 1.8,
      "eval_loss": 1.2849023342132568,
      "eval_runtime": 21.7137,
      "eval_samples_per_second": 46.054,
      "eval_steps_per_second": 5.757,
      "step": 9000
    },
    {
      "epoch": 1.82,
      "grad_norm": 2.816089630126953,
      "learning_rate": 1.417220338983051e-05,
      "loss": 1.3105,
      "step": 9100
    },
    {
      "epoch": 1.84,
      "grad_norm": 2.776275634765625,
      "learning_rate": 1.4104406779661017e-05,
      "loss": 1.3145,
      "step": 9200
    },
    {
      "epoch": 1.86,
      "grad_norm": 3.164463758468628,
      "learning_rate": 1.4036610169491528e-05,
      "loss": 1.3104,
      "step": 9300
    },
    {
      "epoch": 1.88,
      "grad_norm": 4.110715866088867,
      "learning_rate": 1.3968813559322035e-05,
      "loss": 1.2873,
      "step": 9400
    },
    {
      "epoch": 1.9,
      "grad_norm": 3.3705735206604004,
      "learning_rate": 1.3901016949152544e-05,
      "loss": 1.2955,
      "step": 9500
    },
    {
      "epoch": 1.9,
      "eval_loss": 1.317952275276184,
      "eval_runtime": 21.6894,
      "eval_samples_per_second": 46.105,
      "eval_steps_per_second": 5.763,
      "step": 9500
    },
    {
      "epoch": 1.92,
      "grad_norm": 2.7719411849975586,
      "learning_rate": 1.3833220338983051e-05,
      "loss": 1.2867,
      "step": 9600
    },
    {
      "epoch": 1.94,
      "grad_norm": 3.694045305252075,
      "learning_rate": 1.376542372881356e-05,
      "loss": 1.3052,
      "step": 9700
    },
    {
      "epoch": 1.96,
      "grad_norm": 3.4335978031158447,
      "learning_rate": 1.3697627118644067e-05,
      "loss": 1.2788,
      "step": 9800
    },
    {
      "epoch": 1.98,
      "grad_norm": 2.300137758255005,
      "learning_rate": 1.3629830508474578e-05,
      "loss": 1.2964,
      "step": 9900
    },
    {
      "epoch": 2.0,
      "grad_norm": 3.1676926612854004,
      "learning_rate": 1.3562033898305085e-05,
      "loss": 1.2852,
      "step": 10000
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.3348548412322998,
      "eval_runtime": 21.6828,
      "eval_samples_per_second": 46.12,
      "eval_steps_per_second": 5.765,
      "step": 10000
    },
    {
      "epoch": 2.02,
      "grad_norm": 2.558415651321411,
      "learning_rate": 1.3494237288135594e-05,
      "loss": 1.2674,
      "step": 10100
    },
    {
      "epoch": 2.04,
      "grad_norm": 2.86045241355896,
      "learning_rate": 1.3426440677966105e-05,
      "loss": 1.2711,
      "step": 10200
    },
    {
      "epoch": 2.06,
      "grad_norm": 4.301458358764648,
      "learning_rate": 1.3358644067796612e-05,
      "loss": 1.2672,
      "step": 10300
    },
    {
      "epoch": 2.08,
      "grad_norm": 2.699399709701538,
      "learning_rate": 1.329084745762712e-05,
      "loss": 1.2781,
      "step": 10400
    },
    {
      "epoch": 2.1,
      "grad_norm": 2.920149087905884,
      "learning_rate": 1.3223050847457628e-05,
      "loss": 1.2323,
      "step": 10500
    },
    {
      "epoch": 2.1,
      "eval_loss": 1.2642160654067993,
      "eval_runtime": 21.6839,
      "eval_samples_per_second": 46.117,
      "eval_steps_per_second": 5.765,
      "step": 10500
    },
    {
      "epoch": 2.12,
      "grad_norm": 3.0505363941192627,
      "learning_rate": 1.3155254237288137e-05,
      "loss": 1.2486,
      "step": 10600
    },
    {
      "epoch": 2.14,
      "grad_norm": 3.141021490097046,
      "learning_rate": 1.3087457627118644e-05,
      "loss": 1.2725,
      "step": 10700
    },
    {
      "epoch": 2.16,
      "grad_norm": 2.433058977127075,
      "learning_rate": 1.3019661016949155e-05,
      "loss": 1.2387,
      "step": 10800
    },
    {
      "epoch": 2.18,
      "grad_norm": 2.907503366470337,
      "learning_rate": 1.2951864406779662e-05,
      "loss": 1.2391,
      "step": 10900
    },
    {
      "epoch": 2.2,
      "grad_norm": 2.7510807514190674,
      "learning_rate": 1.2884067796610171e-05,
      "loss": 1.2601,
      "step": 11000
    },
    {
      "epoch": 2.2,
      "eval_loss": 1.3154401779174805,
      "eval_runtime": 21.6833,
      "eval_samples_per_second": 46.119,
      "eval_steps_per_second": 5.765,
      "step": 11000
    },
    {
      "epoch": 2.22,
      "grad_norm": 4.341352462768555,
      "learning_rate": 1.2816949152542375e-05,
      "loss": 1.2722,
      "step": 11100
    },
    {
      "epoch": 2.24,
      "grad_norm": 3.5941874980926514,
      "learning_rate": 1.2749152542372882e-05,
      "loss": 1.2785,
      "step": 11200
    },
    {
      "epoch": 2.26,
      "grad_norm": 2.437255382537842,
      "learning_rate": 1.2681355932203391e-05,
      "loss": 1.224,
      "step": 11300
    },
    {
      "epoch": 2.28,
      "grad_norm": 2.8359203338623047,
      "learning_rate": 1.2613559322033899e-05,
      "loss": 1.2363,
      "step": 11400
    },
    {
      "epoch": 2.3,
      "grad_norm": 2.7164149284362793,
      "learning_rate": 1.2545762711864409e-05,
      "loss": 1.2596,
      "step": 11500
    },
    {
      "epoch": 2.3,
      "eval_loss": 1.3095141649246216,
      "eval_runtime": 21.6842,
      "eval_samples_per_second": 46.117,
      "eval_steps_per_second": 5.765,
      "step": 11500
    },
    {
      "epoch": 2.32,
      "grad_norm": 3.826754570007324,
      "learning_rate": 1.2477966101694916e-05,
      "loss": 1.2449,
      "step": 11600
    },
    {
      "epoch": 2.34,
      "grad_norm": 3.325650691986084,
      "learning_rate": 1.2410169491525425e-05,
      "loss": 1.2449,
      "step": 11700
    },
    {
      "epoch": 2.36,
      "grad_norm": 3.709676504135132,
      "learning_rate": 1.2342372881355933e-05,
      "loss": 1.2362,
      "step": 11800
    },
    {
      "epoch": 2.38,
      "grad_norm": 3.0213212966918945,
      "learning_rate": 1.2274576271186443e-05,
      "loss": 1.2403,
      "step": 11900
    },
    {
      "epoch": 2.4,
      "grad_norm": 3.0036745071411133,
      "learning_rate": 1.220677966101695e-05,
      "loss": 1.2504,
      "step": 12000
    },
    {
      "epoch": 2.4,
      "eval_loss": 1.2542531490325928,
      "eval_runtime": 21.677,
      "eval_samples_per_second": 46.132,
      "eval_steps_per_second": 5.766,
      "step": 12000
    },
    {
      "epoch": 2.42,
      "grad_norm": 3.031303882598877,
      "learning_rate": 1.213898305084746e-05,
      "loss": 1.2479,
      "step": 12100
    },
    {
      "epoch": 2.44,
      "grad_norm": 2.805032968521118,
      "learning_rate": 1.2071864406779664e-05,
      "loss": 1.2266,
      "step": 12200
    },
    {
      "epoch": 2.46,
      "grad_norm": 3.027392625808716,
      "learning_rate": 1.200406779661017e-05,
      "loss": 1.2298,
      "step": 12300
    },
    {
      "epoch": 2.48,
      "grad_norm": 3.9545884132385254,
      "learning_rate": 1.193627118644068e-05,
      "loss": 1.2506,
      "step": 12400
    },
    {
      "epoch": 2.5,
      "grad_norm": 2.7534937858581543,
      "learning_rate": 1.1868474576271187e-05,
      "loss": 1.2424,
      "step": 12500
    },
    {
      "epoch": 2.5,
      "eval_loss": 1.274129867553711,
      "eval_runtime": 21.6835,
      "eval_samples_per_second": 46.118,
      "eval_steps_per_second": 5.765,
      "step": 12500
    },
    {
      "epoch": 2.52,
      "grad_norm": 3.8661746978759766,
      "learning_rate": 1.1800677966101698e-05,
      "loss": 1.2244,
      "step": 12600
    },
    {
      "epoch": 2.54,
      "grad_norm": 3.3711798191070557,
      "learning_rate": 1.1732881355932205e-05,
      "loss": 1.218,
      "step": 12700
    },
    {
      "epoch": 2.56,
      "grad_norm": 2.4355905055999756,
      "learning_rate": 1.1665084745762714e-05,
      "loss": 1.2145,
      "step": 12800
    },
    {
      "epoch": 2.58,
      "grad_norm": 3.467618942260742,
      "learning_rate": 1.1597288135593221e-05,
      "loss": 1.2331,
      "step": 12900
    },
    {
      "epoch": 2.6,
      "grad_norm": 2.9236645698547363,
      "learning_rate": 1.152949152542373e-05,
      "loss": 1.2457,
      "step": 13000
    },
    {
      "epoch": 2.6,
      "eval_loss": 1.278497576713562,
      "eval_runtime": 21.6894,
      "eval_samples_per_second": 46.105,
      "eval_steps_per_second": 5.763,
      "step": 13000
    },
    {
      "epoch": 2.62,
      "grad_norm": 2.343590259552002,
      "learning_rate": 1.1461694915254237e-05,
      "loss": 1.227,
      "step": 13100
    },
    {
      "epoch": 2.64,
      "grad_norm": 3.2813720703125,
      "learning_rate": 1.1393898305084748e-05,
      "loss": 1.2429,
      "step": 13200
    },
    {
      "epoch": 2.66,
      "grad_norm": 3.157639503479004,
      "learning_rate": 1.1326101694915255e-05,
      "loss": 1.2018,
      "step": 13300
    },
    {
      "epoch": 2.68,
      "grad_norm": 3.017834424972534,
      "learning_rate": 1.1258305084745764e-05,
      "loss": 1.2358,
      "step": 13400
    },
    {
      "epoch": 2.7,
      "grad_norm": 2.672147274017334,
      "learning_rate": 1.1190508474576271e-05,
      "loss": 1.2224,
      "step": 13500
    },
    {
      "epoch": 2.7,
      "eval_loss": 1.2449040412902832,
      "eval_runtime": 21.7033,
      "eval_samples_per_second": 46.076,
      "eval_steps_per_second": 5.759,
      "step": 13500
    },
    {
      "epoch": 2.72,
      "grad_norm": 3.6956965923309326,
      "learning_rate": 1.1122711864406782e-05,
      "loss": 1.2373,
      "step": 13600
    },
    {
      "epoch": 2.74,
      "grad_norm": 3.9109997749328613,
      "learning_rate": 1.1054915254237289e-05,
      "loss": 1.2181,
      "step": 13700
    },
    {
      "epoch": 2.76,
      "grad_norm": 2.4544878005981445,
      "learning_rate": 1.0987118644067798e-05,
      "loss": 1.2264,
      "step": 13800
    },
    {
      "epoch": 2.78,
      "grad_norm": 3.372176170349121,
      "learning_rate": 1.0919322033898305e-05,
      "loss": 1.2231,
      "step": 13900
    },
    {
      "epoch": 2.8,
      "grad_norm": 2.1014392375946045,
      "learning_rate": 1.0851525423728814e-05,
      "loss": 1.2189,
      "step": 14000
    },
    {
      "epoch": 2.8,
      "eval_loss": 1.2467758655548096,
      "eval_runtime": 21.68,
      "eval_samples_per_second": 46.125,
      "eval_steps_per_second": 5.766,
      "step": 14000
    },
    {
      "epoch": 2.82,
      "grad_norm": 3.0673794746398926,
      "learning_rate": 1.0783728813559321e-05,
      "loss": 1.2277,
      "step": 14100
    },
    {
      "epoch": 2.84,
      "grad_norm": 3.052077531814575,
      "learning_rate": 1.0715932203389832e-05,
      "loss": 1.2175,
      "step": 14200
    },
    {
      "epoch": 2.86,
      "grad_norm": 3.2878353595733643,
      "learning_rate": 1.0648135593220339e-05,
      "loss": 1.1976,
      "step": 14300
    },
    {
      "epoch": 2.88,
      "grad_norm": 3.1332175731658936,
      "learning_rate": 1.0580338983050848e-05,
      "loss": 1.2191,
      "step": 14400
    },
    {
      "epoch": 2.9,
      "grad_norm": 3.573296308517456,
      "learning_rate": 1.0512542372881355e-05,
      "loss": 1.2118,
      "step": 14500
    },
    {
      "epoch": 2.9,
      "eval_loss": 1.2437331676483154,
      "eval_runtime": 21.7028,
      "eval_samples_per_second": 46.077,
      "eval_steps_per_second": 5.76,
      "step": 14500
    },
    {
      "epoch": 2.92,
      "grad_norm": 2.835487127304077,
      "learning_rate": 1.0444745762711866e-05,
      "loss": 1.2057,
      "step": 14600
    },
    {
      "epoch": 2.94,
      "grad_norm": 3.7973036766052246,
      "learning_rate": 1.0376949152542373e-05,
      "loss": 1.2043,
      "step": 14700
    },
    {
      "epoch": 2.96,
      "grad_norm": 3.0734457969665527,
      "learning_rate": 1.0309152542372882e-05,
      "loss": 1.1986,
      "step": 14800
    },
    {
      "epoch": 2.98,
      "grad_norm": 2.8893818855285645,
      "learning_rate": 1.0241355932203391e-05,
      "loss": 1.2152,
      "step": 14900
    },
    {
      "epoch": 3.0,
      "grad_norm": 3.576925039291382,
      "learning_rate": 1.01735593220339e-05,
      "loss": 1.2324,
      "step": 15000
    },
    {
      "epoch": 3.0,
      "eval_loss": 1.2094933986663818,
      "eval_runtime": 21.6901,
      "eval_samples_per_second": 46.104,
      "eval_steps_per_second": 5.763,
      "step": 15000
    },
    {
      "epoch": 3.02,
      "grad_norm": 2.6094210147857666,
      "learning_rate": 1.0105762711864409e-05,
      "loss": 1.1757,
      "step": 15100
    },
    {
      "epoch": 3.04,
      "grad_norm": 3.7070980072021484,
      "learning_rate": 1.0037966101694916e-05,
      "loss": 1.1799,
      "step": 15200
    },
    {
      "epoch": 3.06,
      "grad_norm": 3.4685747623443604,
      "learning_rate": 9.970169491525425e-06,
      "loss": 1.1715,
      "step": 15300
    },
    {
      "epoch": 3.08,
      "grad_norm": 2.8845009803771973,
      "learning_rate": 9.902372881355932e-06,
      "loss": 1.1536,
      "step": 15400
    },
    {
      "epoch": 3.1,
      "grad_norm": 3.583442211151123,
      "learning_rate": 9.834576271186441e-06,
      "loss": 1.1663,
      "step": 15500
    },
    {
      "epoch": 3.1,
      "eval_loss": 1.2352399826049805,
      "eval_runtime": 21.7081,
      "eval_samples_per_second": 46.066,
      "eval_steps_per_second": 5.758,
      "step": 15500
    },
    {
      "epoch": 3.12,
      "grad_norm": 3.343341827392578,
      "learning_rate": 9.76677966101695e-06,
      "loss": 1.169,
      "step": 15600
    },
    {
      "epoch": 3.14,
      "grad_norm": 3.7276506423950195,
      "learning_rate": 9.698983050847457e-06,
      "loss": 1.16,
      "step": 15700
    },
    {
      "epoch": 3.16,
      "grad_norm": 2.9194743633270264,
      "learning_rate": 9.631864406779662e-06,
      "loss": 1.1731,
      "step": 15800
    },
    {
      "epoch": 3.18,
      "grad_norm": 3.365588426589966,
      "learning_rate": 9.56406779661017e-06,
      "loss": 1.1727,
      "step": 15900
    },
    {
      "epoch": 3.2,
      "grad_norm": 4.017807483673096,
      "learning_rate": 9.49627118644068e-06,
      "loss": 1.153,
      "step": 16000
    },
    {
      "epoch": 3.2,
      "eval_loss": 1.2391220331192017,
      "eval_runtime": 21.7087,
      "eval_samples_per_second": 46.064,
      "eval_steps_per_second": 5.758,
      "step": 16000
    },
    {
      "epoch": 3.22,
      "grad_norm": 3.4895777702331543,
      "learning_rate": 9.428474576271187e-06,
      "loss": 1.1423,
      "step": 16100
    },
    {
      "epoch": 3.24,
      "grad_norm": 3.7175400257110596,
      "learning_rate": 9.360677966101696e-06,
      "loss": 1.1687,
      "step": 16200
    },
    {
      "epoch": 3.26,
      "grad_norm": 4.337020397186279,
      "learning_rate": 9.292881355932204e-06,
      "loss": 1.1395,
      "step": 16300
    },
    {
      "epoch": 3.28,
      "grad_norm": 2.255091905593872,
      "learning_rate": 9.225084745762712e-06,
      "loss": 1.1648,
      "step": 16400
    },
    {
      "epoch": 3.3,
      "grad_norm": 2.8030898571014404,
      "learning_rate": 9.15728813559322e-06,
      "loss": 1.1507,
      "step": 16500
    },
    {
      "epoch": 3.3,
      "eval_loss": 1.2618576288223267,
      "eval_runtime": 21.7474,
      "eval_samples_per_second": 45.982,
      "eval_steps_per_second": 5.748,
      "step": 16500
    },
    {
      "epoch": 3.32,
      "grad_norm": 3.8894705772399902,
      "learning_rate": 9.08949152542373e-06,
      "loss": 1.1533,
      "step": 16600
    },
    {
      "epoch": 3.34,
      "grad_norm": 3.553032398223877,
      "learning_rate": 9.021694915254238e-06,
      "loss": 1.1472,
      "step": 16700
    },
    {
      "epoch": 3.36,
      "grad_norm": 4.201476097106934,
      "learning_rate": 8.953898305084746e-06,
      "loss": 1.1577,
      "step": 16800
    },
    {
      "epoch": 3.38,
      "grad_norm": 3.702571153640747,
      "learning_rate": 8.886101694915255e-06,
      "loss": 1.1598,
      "step": 16900
    },
    {
      "epoch": 3.4,
      "grad_norm": 4.799776077270508,
      "learning_rate": 8.818305084745764e-06,
      "loss": 1.1653,
      "step": 17000
    },
    {
      "epoch": 3.4,
      "eval_loss": 1.236145257949829,
      "eval_runtime": 21.7006,
      "eval_samples_per_second": 46.082,
      "eval_steps_per_second": 5.76,
      "step": 17000
    },
    {
      "epoch": 3.42,
      "grad_norm": 3.39278244972229,
      "learning_rate": 8.75050847457627e-06,
      "loss": 1.1573,
      "step": 17100
    },
    {
      "epoch": 3.44,
      "grad_norm": 3.210745334625244,
      "learning_rate": 8.68271186440678e-06,
      "loss": 1.1597,
      "step": 17200
    },
    {
      "epoch": 3.46,
      "grad_norm": 3.6098508834838867,
      "learning_rate": 8.614915254237289e-06,
      "loss": 1.1487,
      "step": 17300
    },
    {
      "epoch": 3.48,
      "grad_norm": 2.9269042015075684,
      "learning_rate": 8.547118644067798e-06,
      "loss": 1.1511,
      "step": 17400
    },
    {
      "epoch": 3.5,
      "grad_norm": 2.969214677810669,
      "learning_rate": 8.479322033898306e-06,
      "loss": 1.1619,
      "step": 17500
    },
    {
      "epoch": 3.5,
      "eval_loss": 1.2679368257522583,
      "eval_runtime": 21.6891,
      "eval_samples_per_second": 46.106,
      "eval_steps_per_second": 5.763,
      "step": 17500
    },
    {
      "epoch": 3.52,
      "grad_norm": 3.1029062271118164,
      "learning_rate": 8.411525423728815e-06,
      "loss": 1.1475,
      "step": 17600
    },
    {
      "epoch": 3.54,
      "grad_norm": 3.8454771041870117,
      "learning_rate": 8.343728813559323e-06,
      "loss": 1.1606,
      "step": 17700
    },
    {
      "epoch": 3.56,
      "grad_norm": 3.526176929473877,
      "learning_rate": 8.275932203389832e-06,
      "loss": 1.1378,
      "step": 17800
    },
    {
      "epoch": 3.58,
      "grad_norm": 3.3350830078125,
      "learning_rate": 8.20813559322034e-06,
      "loss": 1.1394,
      "step": 17900
    },
    {
      "epoch": 3.6,
      "grad_norm": 3.0847814083099365,
      "learning_rate": 8.140338983050848e-06,
      "loss": 1.1326,
      "step": 18000
    },
    {
      "epoch": 3.6,
      "eval_loss": 1.1844274997711182,
      "eval_runtime": 21.679,
      "eval_samples_per_second": 46.128,
      "eval_steps_per_second": 5.766,
      "step": 18000
    },
    {
      "epoch": 3.62,
      "grad_norm": 3.5144150257110596,
      "learning_rate": 8.072542372881357e-06,
      "loss": 1.1651,
      "step": 18100
    },
    {
      "epoch": 3.64,
      "grad_norm": 3.8517796993255615,
      "learning_rate": 8.004745762711866e-06,
      "loss": 1.141,
      "step": 18200
    },
    {
      "epoch": 3.66,
      "grad_norm": 3.909298896789551,
      "learning_rate": 7.936949152542374e-06,
      "loss": 1.1452,
      "step": 18300
    },
    {
      "epoch": 3.68,
      "grad_norm": 4.035452365875244,
      "learning_rate": 7.869152542372882e-06,
      "loss": 1.1231,
      "step": 18400
    },
    {
      "epoch": 3.7,
      "grad_norm": 4.456254005432129,
      "learning_rate": 7.80135593220339e-06,
      "loss": 1.1399,
      "step": 18500
    },
    {
      "epoch": 3.7,
      "eval_loss": 1.1855617761611938,
      "eval_runtime": 21.7513,
      "eval_samples_per_second": 45.974,
      "eval_steps_per_second": 5.747,
      "step": 18500
    },
    {
      "epoch": 3.72,
      "grad_norm": 3.6256027221679688,
      "learning_rate": 7.7335593220339e-06,
      "loss": 1.1358,
      "step": 18600
    },
    {
      "epoch": 3.74,
      "grad_norm": 3.213271379470825,
      "learning_rate": 7.665762711864407e-06,
      "loss": 1.1656,
      "step": 18700
    },
    {
      "epoch": 3.76,
      "grad_norm": 3.707228660583496,
      "learning_rate": 7.597966101694916e-06,
      "loss": 1.1242,
      "step": 18800
    },
    {
      "epoch": 3.78,
      "grad_norm": 4.290109634399414,
      "learning_rate": 7.530169491525425e-06,
      "loss": 1.1259,
      "step": 18900
    },
    {
      "epoch": 3.8,
      "grad_norm": 3.573901653289795,
      "learning_rate": 7.462372881355933e-06,
      "loss": 1.1479,
      "step": 19000
    },
    {
      "epoch": 3.8,
      "eval_loss": 1.2305423021316528,
      "eval_runtime": 21.7032,
      "eval_samples_per_second": 46.076,
      "eval_steps_per_second": 5.76,
      "step": 19000
    },
    {
      "epoch": 3.82,
      "grad_norm": 2.7192249298095703,
      "learning_rate": 7.394576271186441e-06,
      "loss": 1.1305,
      "step": 19100
    },
    {
      "epoch": 3.84,
      "grad_norm": 3.500718355178833,
      "learning_rate": 7.32677966101695e-06,
      "loss": 1.1393,
      "step": 19200
    },
    {
      "epoch": 3.86,
      "grad_norm": 3.137812852859497,
      "learning_rate": 7.258983050847458e-06,
      "loss": 1.1469,
      "step": 19300
    },
    {
      "epoch": 3.88,
      "grad_norm": 3.7159972190856934,
      "learning_rate": 7.191186440677967e-06,
      "loss": 1.1334,
      "step": 19400
    },
    {
      "epoch": 3.9,
      "grad_norm": 3.60858416557312,
      "learning_rate": 7.123389830508475e-06,
      "loss": 1.1508,
      "step": 19500
    },
    {
      "epoch": 3.9,
      "eval_loss": 1.2171376943588257,
      "eval_runtime": 21.677,
      "eval_samples_per_second": 46.132,
      "eval_steps_per_second": 5.766,
      "step": 19500
    },
    {
      "epoch": 3.92,
      "grad_norm": 3.1851789951324463,
      "learning_rate": 7.055593220338983e-06,
      "loss": 1.1485,
      "step": 19600
    },
    {
      "epoch": 3.94,
      "grad_norm": 4.118582248687744,
      "learning_rate": 6.987796610169492e-06,
      "loss": 1.1243,
      "step": 19700
    },
    {
      "epoch": 3.96,
      "grad_norm": 2.8532917499542236,
      "learning_rate": 6.92e-06,
      "loss": 1.1052,
      "step": 19800
    },
    {
      "epoch": 3.98,
      "grad_norm": 4.4881815910339355,
      "learning_rate": 6.852881355932204e-06,
      "loss": 1.1387,
      "step": 19900
    },
    {
      "epoch": 4.0,
      "grad_norm": 3.3558459281921387,
      "learning_rate": 6.785084745762712e-06,
      "loss": 1.1294,
      "step": 20000
    },
    {
      "epoch": 4.0,
      "eval_loss": 1.1705737113952637,
      "eval_runtime": 21.6721,
      "eval_samples_per_second": 46.142,
      "eval_steps_per_second": 5.768,
      "step": 20000
    }
  ],
  "logging_steps": 100,
  "max_steps": 30000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 6,
  "save_steps": 500,
  "total_flos": 9.4402189983744e+17,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}