{
  "best_metric": 3.7249155044555664,
  "best_model_checkpoint": "./results/models/checkpoint-58650",
  "epoch": 30.0,
  "eval_steps": 500,
  "global_step": 58650,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2557544757033248,
      "grad_norm": 0.047607421875,
      "learning_rate": 0.003979539641943734,
      "loss": 1.9283,
      "step": 500
    },
    {
      "epoch": 0.5115089514066496,
      "grad_norm": 0.05810546875,
      "learning_rate": 0.003959079283887468,
      "loss": 1.89,
      "step": 1000
    },
    {
      "epoch": 0.7672634271099744,
      "grad_norm": 0.048828125,
      "learning_rate": 0.003938618925831202,
      "loss": 1.8823,
      "step": 1500
    },
    {
      "epoch": 1.0,
      "eval_loss": 3.7650158405303955,
      "eval_runtime": 1.1856,
      "eval_samples_per_second": 421.713,
      "eval_steps_per_second": 0.843,
      "step": 1955
    },
    {
      "epoch": 1.0230179028132993,
      "grad_norm": 0.048583984375,
      "learning_rate": 0.003918158567774936,
      "loss": 1.8791,
      "step": 2000
    },
    {
      "epoch": 1.278772378516624,
      "grad_norm": 0.0458984375,
      "learning_rate": 0.00389769820971867,
      "loss": 1.8765,
      "step": 2500
    },
    {
      "epoch": 1.5345268542199488,
      "grad_norm": 0.04443359375,
      "learning_rate": 0.003877237851662404,
      "loss": 1.8758,
      "step": 3000
    },
    {
      "epoch": 1.7902813299232738,
      "grad_norm": 0.04638671875,
      "learning_rate": 0.003856777493606138,
      "loss": 1.874,
      "step": 3500
    },
    {
      "epoch": 2.0,
      "eval_loss": 3.755614757537842,
      "eval_runtime": 1.2689,
      "eval_samples_per_second": 394.03,
      "eval_steps_per_second": 0.788,
      "step": 3910
    },
    {
      "epoch": 2.0460358056265986,
      "grad_norm": 0.0322265625,
      "learning_rate": 0.0038363171355498722,
      "loss": 1.8728,
      "step": 4000
    },
    {
      "epoch": 2.3017902813299234,
      "grad_norm": 0.03857421875,
      "learning_rate": 0.0038158567774936062,
      "loss": 1.8721,
      "step": 4500
    },
    {
      "epoch": 2.557544757033248,
      "grad_norm": 0.051025390625,
      "learning_rate": 0.0037953964194373403,
      "loss": 1.8714,
      "step": 5000
    },
    {
      "epoch": 2.813299232736573,
      "grad_norm": 0.043701171875,
      "learning_rate": 0.0037749360613810743,
      "loss": 1.8704,
      "step": 5500
    },
    {
      "epoch": 3.0,
      "eval_loss": 3.749542474746704,
      "eval_runtime": 1.1996,
      "eval_samples_per_second": 416.822,
      "eval_steps_per_second": 0.834,
      "step": 5865
    },
    {
      "epoch": 3.0690537084398977,
      "grad_norm": 0.045654296875,
      "learning_rate": 0.0037544757033248083,
      "loss": 1.8699,
      "step": 6000
    },
    {
      "epoch": 3.3248081841432224,
      "grad_norm": 0.038818359375,
      "learning_rate": 0.0037340153452685423,
      "loss": 1.8695,
      "step": 6500
    },
    {
      "epoch": 3.580562659846547,
      "grad_norm": 0.04541015625,
      "learning_rate": 0.0037135549872122763,
      "loss": 1.8688,
      "step": 7000
    },
    {
      "epoch": 3.836317135549872,
      "grad_norm": 0.037109375,
      "learning_rate": 0.0036930946291560103,
      "loss": 1.8686,
      "step": 7500
    },
    {
      "epoch": 4.0,
      "eval_loss": 3.7466166019439697,
      "eval_runtime": 1.1726,
      "eval_samples_per_second": 426.391,
      "eval_steps_per_second": 0.853,
      "step": 7820
    },
    {
      "epoch": 4.092071611253197,
      "grad_norm": 0.04833984375,
      "learning_rate": 0.0036726342710997444,
      "loss": 1.8681,
      "step": 8000
    },
    {
      "epoch": 4.3478260869565215,
      "grad_norm": 0.047607421875,
      "learning_rate": 0.003652173913043478,
      "loss": 1.8678,
      "step": 8500
    },
    {
      "epoch": 4.603580562659847,
      "grad_norm": 0.056640625,
      "learning_rate": 0.0036317135549872124,
      "loss": 1.8674,
      "step": 9000
    },
    {
      "epoch": 4.859335038363171,
      "grad_norm": 0.0400390625,
      "learning_rate": 0.0036112531969309464,
      "loss": 1.867,
      "step": 9500
    },
    {
      "epoch": 5.0,
      "eval_loss": 3.7444469928741455,
      "eval_runtime": 1.1971,
      "eval_samples_per_second": 417.668,
      "eval_steps_per_second": 0.835,
      "step": 9775
    },
    {
      "epoch": 5.115089514066496,
      "grad_norm": 0.04736328125,
      "learning_rate": 0.0035907928388746804,
      "loss": 1.8668,
      "step": 10000
    },
    {
      "epoch": 5.370843989769821,
      "grad_norm": 0.03564453125,
      "learning_rate": 0.0035703324808184144,
      "loss": 1.8668,
      "step": 10500
    },
    {
      "epoch": 5.626598465473146,
      "grad_norm": 0.047119140625,
      "learning_rate": 0.003549872122762148,
      "loss": 1.866,
      "step": 11000
    },
    {
      "epoch": 5.882352941176471,
      "grad_norm": 0.04052734375,
      "learning_rate": 0.0035294117647058825,
      "loss": 1.8662,
      "step": 11500
    },
    {
      "epoch": 6.0,
      "eval_loss": 3.742023229598999,
      "eval_runtime": 1.1689,
      "eval_samples_per_second": 427.744,
      "eval_steps_per_second": 0.855,
      "step": 11730
    },
    {
      "epoch": 6.138107416879795,
      "grad_norm": 0.047119140625,
      "learning_rate": 0.0035089514066496165,
      "loss": 1.8652,
      "step": 12000
    },
    {
      "epoch": 6.3938618925831205,
      "grad_norm": 0.041259765625,
      "learning_rate": 0.0034884910485933505,
      "loss": 1.8656,
      "step": 12500
    },
    {
      "epoch": 6.649616368286445,
      "grad_norm": 0.040283203125,
      "learning_rate": 0.0034680306905370845,
      "loss": 1.8655,
      "step": 13000
    },
    {
      "epoch": 6.90537084398977,
      "grad_norm": 0.04736328125,
      "learning_rate": 0.003447570332480818,
      "loss": 1.8653,
      "step": 13500
    },
    {
      "epoch": 7.0,
      "eval_loss": 3.7410786151885986,
      "eval_runtime": 1.2028,
      "eval_samples_per_second": 415.68,
      "eval_steps_per_second": 0.831,
      "step": 13685
    },
    {
      "epoch": 7.161125319693094,
      "grad_norm": 0.0439453125,
      "learning_rate": 0.0034271099744245526,
      "loss": 1.865,
      "step": 14000
    },
    {
      "epoch": 7.41687979539642,
      "grad_norm": 0.04052734375,
      "learning_rate": 0.0034066496163682866,
      "loss": 1.8648,
      "step": 14500
    },
    {
      "epoch": 7.672634271099744,
      "grad_norm": 0.060302734375,
      "learning_rate": 0.0033861892583120206,
      "loss": 1.865,
      "step": 15000
    },
    {
      "epoch": 7.928388746803069,
      "grad_norm": 0.0400390625,
      "learning_rate": 0.0033657289002557546,
      "loss": 1.8641,
      "step": 15500
    },
    {
      "epoch": 8.0,
      "eval_loss": 3.7378389835357666,
      "eval_runtime": 1.1927,
      "eval_samples_per_second": 419.217,
      "eval_steps_per_second": 0.838,
      "step": 15640
    },
    {
      "epoch": 8.184143222506394,
      "grad_norm": 0.04345703125,
      "learning_rate": 0.003345268542199488,
      "loss": 1.8644,
      "step": 16000
    },
    {
      "epoch": 8.43989769820972,
      "grad_norm": 0.04736328125,
      "learning_rate": 0.0033248081841432226,
      "loss": 1.8638,
      "step": 16500
    },
    {
      "epoch": 8.695652173913043,
      "grad_norm": 0.05126953125,
      "learning_rate": 0.0033043478260869567,
      "loss": 1.8639,
      "step": 17000
    },
    {
      "epoch": 8.951406649616368,
      "grad_norm": 0.05224609375,
      "learning_rate": 0.0032838874680306907,
      "loss": 1.8641,
      "step": 17500
    },
    {
      "epoch": 9.0,
      "eval_loss": 3.7366719245910645,
      "eval_runtime": 1.2549,
      "eval_samples_per_second": 398.446,
      "eval_steps_per_second": 0.797,
      "step": 17595
    },
    {
      "epoch": 9.207161125319693,
      "grad_norm": 0.06396484375,
      "learning_rate": 0.0032634271099744247,
      "loss": 1.8636,
      "step": 18000
    },
    {
      "epoch": 9.462915601023019,
      "grad_norm": 0.03857421875,
      "learning_rate": 0.0032429667519181583,
      "loss": 1.8655,
      "step": 18500
    },
    {
      "epoch": 9.718670076726342,
      "grad_norm": 0.038818359375,
      "learning_rate": 0.0032225063938618927,
      "loss": 1.8655,
      "step": 19000
    },
    {
      "epoch": 9.974424552429667,
      "grad_norm": 0.04150390625,
      "learning_rate": 0.0032020460358056268,
      "loss": 1.8646,
      "step": 19500
    },
    {
      "epoch": 10.0,
      "eval_loss": 3.7368271350860596,
      "eval_runtime": 1.1654,
      "eval_samples_per_second": 429.02,
      "eval_steps_per_second": 0.858,
      "step": 19550
    },
    {
      "epoch": 10.230179028132993,
      "grad_norm": 0.051513671875,
      "learning_rate": 0.0031815856777493608,
      "loss": 1.8637,
      "step": 20000
    },
    {
      "epoch": 10.485933503836318,
      "grad_norm": 0.048095703125,
      "learning_rate": 0.003161125319693095,
      "loss": 1.8641,
      "step": 20500
    },
    {
      "epoch": 10.741687979539641,
      "grad_norm": 0.037841796875,
      "learning_rate": 0.0031406649616368284,
      "loss": 1.8633,
      "step": 21000
    },
    {
      "epoch": 10.997442455242966,
      "grad_norm": 0.04931640625,
      "learning_rate": 0.003120204603580563,
      "loss": 1.8633,
      "step": 21500
    },
    {
      "epoch": 11.0,
      "eval_loss": 3.736281156539917,
      "eval_runtime": 1.3206,
      "eval_samples_per_second": 378.604,
      "eval_steps_per_second": 0.757,
      "step": 21505
    },
    {
      "epoch": 11.253196930946292,
      "grad_norm": 0.04833984375,
      "learning_rate": 0.003099744245524297,
      "loss": 1.8636,
      "step": 22000
    },
    {
      "epoch": 11.508951406649617,
      "grad_norm": 0.0478515625,
      "learning_rate": 0.003079283887468031,
      "loss": 1.863,
      "step": 22500
    },
    {
      "epoch": 11.764705882352942,
      "grad_norm": 0.048583984375,
      "learning_rate": 0.0030588235294117644,
      "loss": 1.8629,
      "step": 23000
    },
    {
      "epoch": 12.0,
      "eval_loss": 3.734565496444702,
      "eval_runtime": 1.1835,
      "eval_samples_per_second": 422.479,
      "eval_steps_per_second": 0.845,
      "step": 23460
    },
    {
      "epoch": 12.020460358056265,
      "grad_norm": 0.044189453125,
      "learning_rate": 0.0030383631713554985,
      "loss": 1.8625,
      "step": 23500
    },
    {
      "epoch": 12.27621483375959,
      "grad_norm": 0.048828125,
      "learning_rate": 0.003017902813299233,
      "loss": 1.8629,
      "step": 24000
    },
    {
      "epoch": 12.531969309462916,
      "grad_norm": 0.04931640625,
      "learning_rate": 0.002997442455242967,
      "loss": 1.8624,
      "step": 24500
    },
    {
      "epoch": 12.787723785166241,
      "grad_norm": 0.04345703125,
      "learning_rate": 0.002976982097186701,
      "loss": 1.8621,
      "step": 25000
    },
    {
      "epoch": 13.0,
      "eval_loss": 3.7329983711242676,
      "eval_runtime": 1.3627,
      "eval_samples_per_second": 366.916,
      "eval_steps_per_second": 0.734,
      "step": 25415
    },
    {
      "epoch": 13.043478260869565,
      "grad_norm": 0.0546875,
      "learning_rate": 0.0029565217391304345,
      "loss": 1.8624,
      "step": 25500
    },
    {
      "epoch": 13.29923273657289,
      "grad_norm": 0.048095703125,
      "learning_rate": 0.002936061381074169,
      "loss": 1.8624,
      "step": 26000
    },
    {
      "epoch": 13.554987212276215,
      "grad_norm": 0.05517578125,
      "learning_rate": 0.002915601023017903,
      "loss": 1.8618,
      "step": 26500
    },
    {
      "epoch": 13.81074168797954,
      "grad_norm": 0.04443359375,
      "learning_rate": 0.002895140664961637,
      "loss": 1.8619,
      "step": 27000
    },
    {
      "epoch": 14.0,
      "eval_loss": 3.7325527667999268,
      "eval_runtime": 1.1565,
      "eval_samples_per_second": 432.347,
      "eval_steps_per_second": 0.865,
      "step": 27370
    },
    {
      "epoch": 14.066496163682864,
      "grad_norm": 0.043212890625,
      "learning_rate": 0.002874680306905371,
      "loss": 1.862,
      "step": 27500
    },
    {
      "epoch": 14.322250639386189,
      "grad_norm": 0.06640625,
      "learning_rate": 0.0028542199488491046,
      "loss": 1.8616,
      "step": 28000
    },
    {
      "epoch": 14.578005115089514,
      "grad_norm": 0.050048828125,
      "learning_rate": 0.002833759590792839,
      "loss": 1.8615,
      "step": 28500
    },
    {
      "epoch": 14.83375959079284,
      "grad_norm": 0.046875,
      "learning_rate": 0.002813299232736573,
      "loss": 1.8623,
      "step": 29000
    },
    {
      "epoch": 15.0,
      "eval_loss": 3.729966402053833,
      "eval_runtime": 1.1923,
      "eval_samples_per_second": 419.349,
      "eval_steps_per_second": 0.839,
      "step": 29325
    },
    {
      "epoch": 15.089514066496164,
      "grad_norm": 0.0458984375,
      "learning_rate": 0.002792838874680307,
      "loss": 1.8615,
      "step": 29500
    },
    {
      "epoch": 15.345268542199488,
      "grad_norm": 0.042236328125,
      "learning_rate": 0.002772378516624041,
      "loss": 1.8616,
      "step": 30000
    },
    {
      "epoch": 15.601023017902813,
      "grad_norm": 0.037353515625,
      "learning_rate": 0.0027519181585677747,
      "loss": 1.8618,
      "step": 30500
    },
    {
      "epoch": 15.856777493606138,
      "grad_norm": 0.06591796875,
      "learning_rate": 0.002731457800511509,
      "loss": 1.8613,
      "step": 31000
    },
    {
      "epoch": 16.0,
      "eval_loss": 3.7326087951660156,
      "eval_runtime": 1.1839,
      "eval_samples_per_second": 422.34,
      "eval_steps_per_second": 0.845,
      "step": 31280
    },
    {
      "epoch": 16.11253196930946,
      "grad_norm": 0.038818359375,
      "learning_rate": 0.002710997442455243,
      "loss": 1.8615,
      "step": 31500
    },
    {
      "epoch": 16.36828644501279,
      "grad_norm": 0.0458984375,
      "learning_rate": 0.002690537084398977,
      "loss": 1.8619,
      "step": 32000
    },
    {
      "epoch": 16.624040920716112,
      "grad_norm": 0.044677734375,
      "learning_rate": 0.002670076726342711,
      "loss": 1.8616,
      "step": 32500
    },
    {
      "epoch": 16.87979539641944,
      "grad_norm": 0.04541015625,
      "learning_rate": 0.0026496163682864448,
      "loss": 1.8616,
      "step": 33000
    },
    {
      "epoch": 17.0,
      "eval_loss": 3.729959011077881,
      "eval_runtime": 1.2063,
      "eval_samples_per_second": 414.478,
      "eval_steps_per_second": 0.829,
      "step": 33235
    },
    {
      "epoch": 17.135549872122763,
      "grad_norm": 0.04541015625,
      "learning_rate": 0.0026291560102301792,
      "loss": 1.8616,
      "step": 33500
    },
    {
      "epoch": 17.391304347826086,
      "grad_norm": 0.048583984375,
      "learning_rate": 0.0026086956521739132,
      "loss": 1.8608,
      "step": 34000
    },
    {
      "epoch": 17.647058823529413,
      "grad_norm": 0.047119140625,
      "learning_rate": 0.0025882352941176473,
      "loss": 1.8609,
      "step": 34500
    },
    {
      "epoch": 17.902813299232736,
      "grad_norm": 0.041259765625,
      "learning_rate": 0.002567774936061381,
      "loss": 1.8606,
      "step": 35000
    },
    {
      "epoch": 18.0,
      "eval_loss": 3.7307207584381104,
      "eval_runtime": 1.1724,
      "eval_samples_per_second": 426.468,
      "eval_steps_per_second": 0.853,
      "step": 35190
    },
    {
      "epoch": 18.15856777493606,
      "grad_norm": 0.04052734375,
      "learning_rate": 0.002547314578005115,
      "loss": 1.8613,
      "step": 35500
    },
    {
      "epoch": 18.414322250639387,
      "grad_norm": 0.042236328125,
      "learning_rate": 0.0025268542199488493,
      "loss": 1.8608,
      "step": 36000
    },
    {
      "epoch": 18.67007672634271,
      "grad_norm": 0.046142578125,
      "learning_rate": 0.0025063938618925833,
      "loss": 1.8607,
      "step": 36500
    },
    {
      "epoch": 18.925831202046037,
      "grad_norm": 0.03955078125,
      "learning_rate": 0.0024859335038363174,
      "loss": 1.8606,
      "step": 37000
    },
    {
      "epoch": 19.0,
      "eval_loss": 3.729092836380005,
      "eval_runtime": 1.1564,
      "eval_samples_per_second": 432.369,
      "eval_steps_per_second": 0.865,
      "step": 37145
    },
    {
      "epoch": 19.18158567774936,
      "grad_norm": 0.041259765625,
      "learning_rate": 0.002465473145780051,
      "loss": 1.8604,
      "step": 37500
    },
    {
      "epoch": 19.437340153452684,
      "grad_norm": 0.047607421875,
      "learning_rate": 0.002445012787723785,
      "loss": 1.8607,
      "step": 38000
    },
    {
      "epoch": 19.69309462915601,
      "grad_norm": 0.056396484375,
      "learning_rate": 0.0024245524296675194,
      "loss": 1.8605,
      "step": 38500
    },
    {
      "epoch": 19.948849104859335,
      "grad_norm": 0.05517578125,
      "learning_rate": 0.0024040920716112534,
      "loss": 1.8606,
      "step": 39000
    },
    {
      "epoch": 20.0,
      "eval_loss": 3.727680206298828,
      "eval_runtime": 1.1607,
      "eval_samples_per_second": 430.785,
      "eval_steps_per_second": 0.862,
      "step": 39100
    },
    {
      "epoch": 20.20460358056266,
      "grad_norm": 0.043212890625,
      "learning_rate": 0.0023836317135549874,
      "loss": 1.8605,
      "step": 39500
    },
    {
      "epoch": 20.460358056265985,
      "grad_norm": 0.04345703125,
      "learning_rate": 0.002363171355498721,
      "loss": 1.8605,
      "step": 40000
    },
    {
      "epoch": 20.71611253196931,
      "grad_norm": 0.037841796875,
      "learning_rate": 0.002342710997442455,
      "loss": 1.8606,
      "step": 40500
    },
    {
      "epoch": 20.971867007672635,
      "grad_norm": 0.051513671875,
      "learning_rate": 0.0023222506393861895,
      "loss": 1.8601,
      "step": 41000
    },
    {
      "epoch": 21.0,
      "eval_loss": 3.7270610332489014,
      "eval_runtime": 1.1618,
      "eval_samples_per_second": 430.348,
      "eval_steps_per_second": 0.861,
      "step": 41055
    },
    {
      "epoch": 21.22762148337596,
      "grad_norm": 0.05126953125,
      "learning_rate": 0.0023017902813299235,
      "loss": 1.8601,
      "step": 41500
    },
    {
      "epoch": 21.483375959079282,
      "grad_norm": 0.056884765625,
      "learning_rate": 0.0022813299232736575,
      "loss": 1.8601,
      "step": 42000
    },
    {
      "epoch": 21.73913043478261,
      "grad_norm": 0.045166015625,
      "learning_rate": 0.002260869565217391,
      "loss": 1.8602,
      "step": 42500
    },
    {
      "epoch": 21.994884910485933,
      "grad_norm": 0.036376953125,
      "learning_rate": 0.002240409207161125,
      "loss": 1.86,
      "step": 43000
    },
    {
      "epoch": 22.0,
      "eval_loss": 3.72686505317688,
      "eval_runtime": 1.2281,
      "eval_samples_per_second": 407.144,
      "eval_steps_per_second": 0.814,
      "step": 43010
    },
    {
      "epoch": 22.25063938618926,
      "grad_norm": 0.04931640625,
      "learning_rate": 0.0022199488491048596,
      "loss": 1.86,
      "step": 43500
    },
    {
      "epoch": 22.506393861892583,
      "grad_norm": 0.0380859375,
      "learning_rate": 0.0021994884910485936,
      "loss": 1.8596,
      "step": 44000
    },
    {
      "epoch": 22.762148337595907,
      "grad_norm": 0.039306640625,
      "learning_rate": 0.0021790281329923276,
      "loss": 1.8602,
      "step": 44500
    },
    {
      "epoch": 23.0,
      "eval_loss": 3.7282092571258545,
      "eval_runtime": 1.1696,
      "eval_samples_per_second": 427.495,
      "eval_steps_per_second": 0.855,
      "step": 44965
    },
    {
      "epoch": 23.017902813299234,
      "grad_norm": 0.046142578125,
      "learning_rate": 0.002158567774936061,
      "loss": 1.86,
      "step": 45000
    },
    {
      "epoch": 23.273657289002557,
      "grad_norm": 0.039306640625,
      "learning_rate": 0.002138107416879795,
      "loss": 1.8598,
      "step": 45500
    },
    {
      "epoch": 23.529411764705884,
      "grad_norm": 0.04296875,
      "learning_rate": 0.0021176470588235297,
      "loss": 1.86,
      "step": 46000
    },
    {
      "epoch": 23.785166240409207,
      "grad_norm": 0.054931640625,
      "learning_rate": 0.0020971867007672637,
      "loss": 1.8596,
      "step": 46500
    },
    {
      "epoch": 24.0,
      "eval_loss": 3.7264657020568848,
      "eval_runtime": 1.1709,
      "eval_samples_per_second": 427.022,
      "eval_steps_per_second": 0.854,
      "step": 46920
    },
    {
      "epoch": 24.04092071611253,
      "grad_norm": 0.0419921875,
      "learning_rate": 0.0020767263427109977,
      "loss": 1.8599,
      "step": 47000
    },
    {
      "epoch": 24.296675191815858,
      "grad_norm": 0.043701171875,
      "learning_rate": 0.0020562659846547313,
      "loss": 1.8603,
      "step": 47500
    },
    {
      "epoch": 24.55242966751918,
      "grad_norm": 0.06884765625,
      "learning_rate": 0.0020358056265984653,
      "loss": 1.8592,
      "step": 48000
    },
    {
      "epoch": 24.808184143222505,
      "grad_norm": 0.037353515625,
      "learning_rate": 0.0020153452685421997,
      "loss": 1.8596,
      "step": 48500
    },
    {
      "epoch": 25.0,
      "eval_loss": 3.7268757820129395,
      "eval_runtime": 1.2077,
      "eval_samples_per_second": 414.025,
      "eval_steps_per_second": 0.828,
      "step": 48875
    },
    {
      "epoch": 25.06393861892583,
      "grad_norm": 0.0498046875,
      "learning_rate": 0.0019948849104859333,
      "loss": 1.8595,
      "step": 49000
    },
    {
      "epoch": 25.319693094629155,
      "grad_norm": 0.0419921875,
      "learning_rate": 0.0019744245524296678,
      "loss": 1.8592,
      "step": 49500
    },
    {
      "epoch": 25.575447570332482,
      "grad_norm": 0.0478515625,
      "learning_rate": 0.001953964194373402,
      "loss": 1.8595,
      "step": 50000
    },
    {
      "epoch": 25.831202046035806,
      "grad_norm": 0.04052734375,
      "learning_rate": 0.0019335038363171356,
      "loss": 1.8598,
      "step": 50500
    },
    {
      "epoch": 26.0,
      "eval_loss": 3.7266719341278076,
      "eval_runtime": 1.1718,
      "eval_samples_per_second": 426.711,
      "eval_steps_per_second": 0.853,
      "step": 50830
    },
    {
      "epoch": 26.08695652173913,
      "grad_norm": 0.046142578125,
      "learning_rate": 0.0019130434782608696,
      "loss": 1.8595,
      "step": 51000
    },
    {
      "epoch": 26.342710997442456,
      "grad_norm": 0.043701171875,
      "learning_rate": 0.0018925831202046036,
      "loss": 1.8595,
      "step": 51500
    },
    {
      "epoch": 26.59846547314578,
      "grad_norm": 0.03369140625,
      "learning_rate": 0.0018721227621483376,
      "loss": 1.8593,
      "step": 52000
    },
    {
      "epoch": 26.854219948849106,
      "grad_norm": 0.03662109375,
      "learning_rate": 0.0018516624040920714,
      "loss": 1.8595,
      "step": 52500
    },
    {
      "epoch": 27.0,
      "eval_loss": 3.72537899017334,
      "eval_runtime": 1.2057,
      "eval_samples_per_second": 414.687,
      "eval_steps_per_second": 0.829,
      "step": 52785
    },
    {
      "epoch": 27.10997442455243,
      "grad_norm": 0.044921875,
      "learning_rate": 0.0018312020460358057,
      "loss": 1.8596,
      "step": 53000
    },
    {
      "epoch": 27.365728900255753,
      "grad_norm": 0.04248046875,
      "learning_rate": 0.0018107416879795397,
      "loss": 1.8592,
      "step": 53500
    },
    {
      "epoch": 27.62148337595908,
      "grad_norm": 0.04833984375,
      "learning_rate": 0.0017902813299232737,
      "loss": 1.8598,
      "step": 54000
    },
    {
      "epoch": 27.877237851662404,
      "grad_norm": 0.0400390625,
      "learning_rate": 0.0017698209718670077,
      "loss": 1.8592,
      "step": 54500
    },
    {
      "epoch": 28.0,
      "eval_loss": 3.7256040573120117,
      "eval_runtime": 1.1731,
      "eval_samples_per_second": 426.237,
      "eval_steps_per_second": 0.852,
      "step": 54740
    },
    {
      "epoch": 28.132992327365727,
      "grad_norm": 0.045654296875,
      "learning_rate": 0.0017493606138107415,
      "loss": 1.8593,
      "step": 55000
    },
    {
      "epoch": 28.388746803069054,
      "grad_norm": 0.0439453125,
      "learning_rate": 0.0017289002557544758,
      "loss": 1.8589,
      "step": 55500
    },
    {
      "epoch": 28.644501278772378,
      "grad_norm": 0.047607421875,
      "learning_rate": 0.0017084398976982098,
      "loss": 1.8591,
      "step": 56000
    },
    {
      "epoch": 28.900255754475705,
      "grad_norm": 0.05029296875,
      "learning_rate": 0.0016879795396419438,
      "loss": 1.8592,
      "step": 56500
    },
    {
      "epoch": 29.0,
      "eval_loss": 3.7257955074310303,
      "eval_runtime": 1.1794,
      "eval_samples_per_second": 423.928,
      "eval_steps_per_second": 0.848,
      "step": 56695
    },
    {
      "epoch": 29.156010230179028,
      "grad_norm": 0.045166015625,
      "learning_rate": 0.0016675191815856778,
      "loss": 1.8589,
      "step": 57000
    },
    {
      "epoch": 29.41176470588235,
      "grad_norm": 0.037109375,
      "learning_rate": 0.0016470588235294116,
      "loss": 1.8591,
      "step": 57500
    },
    {
      "epoch": 29.66751918158568,
      "grad_norm": 0.0390625,
      "learning_rate": 0.0016265984654731459,
      "loss": 1.8591,
      "step": 58000
    },
    {
      "epoch": 29.923273657289002,
      "grad_norm": 0.052734375,
      "learning_rate": 0.0016061381074168799,
      "loss": 1.8596,
      "step": 58500
    },
    {
      "epoch": 30.0,
      "eval_loss": 3.7249155044555664,
      "eval_runtime": 1.1467,
      "eval_samples_per_second": 436.021,
      "eval_steps_per_second": 0.872,
      "step": 58650
    }
  ],
  "logging_steps": 500,
  "max_steps": 97750,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 500,
  "stateful_callbacks": {
    "EarlyStoppingCallback": {
      "args": {
        "early_stopping_patience": 3,
        "early_stopping_threshold": 0.0
      },
      "attributes": {
        "early_stopping_patience_counter": 0
      }
    },
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 8.390099826432e+16,
  "train_batch_size": 512,
  "trial_name": null,
  "trial_params": null
}