{
  "best_metric": 0.9716248512268066,
  "best_model_checkpoint": "/scratch/gpfs/BG11/char-model/char-model-linear-30-V2/checkpoint-244800",
  "epoch": 30.0,
  "eval_steps": 1600,
  "global_step": 245640,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.19540791402051783,
      "grad_norm": 1.4876313209533691,
      "learning_rate": 8e-05,
      "loss": 2.5789,
      "step": 1600
    },
    {
      "epoch": 0.19540791402051783,
      "eval_loss": 2.3971974849700928,
      "eval_runtime": 114.1562,
      "eval_samples_per_second": 255.019,
      "eval_steps_per_second": 7.972,
      "step": 1600
    },
    {
      "epoch": 0.39081582804103565,
      "grad_norm": 0.6994622349739075,
      "learning_rate": 0.00016,
      "loss": 2.2769,
      "step": 3200
    },
    {
      "epoch": 0.39081582804103565,
      "eval_loss": 2.1549973487854004,
      "eval_runtime": 114.8328,
      "eval_samples_per_second": 253.516,
      "eval_steps_per_second": 7.925,
      "step": 3200
    },
    {
      "epoch": 0.5862237420615535,
      "grad_norm": 0.6388512849807739,
      "learning_rate": 0.00019999459110993603,
      "loss": 2.093,
      "step": 4800
    },
    {
      "epoch": 0.5862237420615535,
      "eval_loss": 1.955593228340149,
      "eval_runtime": 114.1957,
      "eval_samples_per_second": 254.931,
      "eval_steps_per_second": 7.969,
      "step": 4800
    },
    {
      "epoch": 0.7816316560820713,
      "grad_norm": 0.5748873353004456,
      "learning_rate": 0.00019995132350009195,
      "loss": 1.9338,
      "step": 6400
    },
    {
      "epoch": 0.7816316560820713,
      "eval_loss": 1.803764820098877,
      "eval_runtime": 114.1068,
      "eval_samples_per_second": 255.129,
      "eval_steps_per_second": 7.975,
      "step": 6400
    },
    {
      "epoch": 0.9770395701025891,
      "grad_norm": 0.5466416478157043,
      "learning_rate": 0.0001998648070022771,
      "loss": 1.83,
      "step": 8000
    },
    {
      "epoch": 0.9770395701025891,
      "eval_loss": 1.705185890197754,
      "eval_runtime": 114.5361,
      "eval_samples_per_second": 254.173,
      "eval_steps_per_second": 7.945,
      "step": 8000
    },
    {
      "epoch": 1.172447484123107,
      "grad_norm": 0.5952168107032776,
      "learning_rate": 0.00019973507905213703,
      "loss": 1.7599,
      "step": 9600
    },
    {
      "epoch": 1.172447484123107,
      "eval_loss": 1.640653133392334,
      "eval_runtime": 114.2365,
      "eval_samples_per_second": 254.84,
      "eval_steps_per_second": 7.966,
      "step": 9600
    },
    {
      "epoch": 1.3678553981436248,
      "grad_norm": 0.5351431965827942,
      "learning_rate": 0.00019956219578289123,
      "loss": 1.7148,
      "step": 11200
    },
    {
      "epoch": 1.3678553981436248,
      "eval_loss": 1.593337059020996,
      "eval_runtime": 114.2436,
      "eval_samples_per_second": 254.824,
      "eval_steps_per_second": 7.965,
      "step": 11200
    },
    {
      "epoch": 1.5632633121641426,
      "grad_norm": 0.5703535079956055,
      "learning_rate": 0.0001993462320010443,
      "loss": 1.6811,
      "step": 12800
    },
    {
      "epoch": 1.5632633121641426,
      "eval_loss": 1.563097357749939,
      "eval_runtime": 114.0333,
      "eval_samples_per_second": 255.294,
      "eval_steps_per_second": 7.98,
      "step": 12800
    },
    {
      "epoch": 1.7586712261846604,
      "grad_norm": 0.5037602186203003,
      "learning_rate": 0.00019908728115401733,
      "loss": 1.6543,
      "step": 14400
    },
    {
      "epoch": 1.7586712261846604,
      "eval_loss": 1.532173752784729,
      "eval_runtime": 114.2165,
      "eval_samples_per_second": 254.884,
      "eval_steps_per_second": 7.967,
      "step": 14400
    },
    {
      "epoch": 1.9540791402051783,
      "grad_norm": 0.5285059213638306,
      "learning_rate": 0.00019878545528971298,
      "loss": 1.6323,
      "step": 16000
    },
    {
      "epoch": 1.9540791402051783,
      "eval_loss": 1.5123432874679565,
      "eval_runtime": 114.0419,
      "eval_samples_per_second": 255.275,
      "eval_steps_per_second": 7.98,
      "step": 16000
    },
    {
      "epoch": 2.149487054225696,
      "grad_norm": 1.3733247518539429,
      "learning_rate": 0.00019844088500803263,
      "loss": 1.6121,
      "step": 17600
    },
    {
      "epoch": 2.149487054225696,
      "eval_loss": 1.4912608861923218,
      "eval_runtime": 114.2353,
      "eval_samples_per_second": 254.842,
      "eval_steps_per_second": 7.966,
      "step": 17600
    },
    {
      "epoch": 2.344894968246214,
      "grad_norm": 0.5754392147064209,
      "learning_rate": 0.00019805371940436587,
      "loss": 1.5963,
      "step": 19200
    },
    {
      "epoch": 2.344894968246214,
      "eval_loss": 1.4811656475067139,
      "eval_runtime": 113.8361,
      "eval_samples_per_second": 255.736,
      "eval_steps_per_second": 7.994,
      "step": 19200
    },
    {
      "epoch": 2.5403028822667317,
      "grad_norm": 0.5134813189506531,
      "learning_rate": 0.00019762412600507676,
      "loss": 1.5805,
      "step": 20800
    },
    {
      "epoch": 2.5403028822667317,
      "eval_loss": 1.4569112062454224,
      "eval_runtime": 113.8761,
      "eval_samples_per_second": 255.646,
      "eval_steps_per_second": 7.991,
      "step": 20800
    },
    {
      "epoch": 2.7357107962872496,
      "grad_norm": 0.5872719287872314,
      "learning_rate": 0.0001971522906950156,
      "loss": 1.5672,
      "step": 22400
    },
    {
      "epoch": 2.7357107962872496,
      "eval_loss": 1.4468671083450317,
      "eval_runtime": 114.3556,
      "eval_samples_per_second": 254.574,
      "eval_steps_per_second": 7.958,
      "step": 22400
    },
    {
      "epoch": 2.9311187103077674,
      "grad_norm": 0.5536867380142212,
      "learning_rate": 0.0001966384176370864,
      "loss": 1.5522,
      "step": 24000
    },
    {
      "epoch": 2.9311187103077674,
      "eval_loss": 1.4352666139602661,
      "eval_runtime": 114.2848,
      "eval_samples_per_second": 254.732,
      "eval_steps_per_second": 7.963,
      "step": 24000
    },
    {
      "epoch": 3.126526624328285,
      "grad_norm": 0.5568290948867798,
      "learning_rate": 0.00019608272918390576,
      "loss": 1.544,
      "step": 25600
    },
    {
      "epoch": 3.126526624328285,
      "eval_loss": 1.4248650074005127,
      "eval_runtime": 114.5178,
      "eval_samples_per_second": 254.214,
      "eval_steps_per_second": 7.946,
      "step": 25600
    },
    {
      "epoch": 3.321934538348803,
      "grad_norm": 0.643578052520752,
      "learning_rate": 0.00019548546578159097,
      "loss": 1.5315,
      "step": 27200
    },
    {
      "epoch": 3.321934538348803,
      "eval_loss": 1.4156839847564697,
      "eval_runtime": 114.7135,
      "eval_samples_per_second": 253.78,
      "eval_steps_per_second": 7.933,
      "step": 27200
    },
    {
      "epoch": 3.517342452369321,
      "grad_norm": 0.5677089691162109,
      "learning_rate": 0.00019484688586571922,
      "loss": 1.5212,
      "step": 28800
    },
    {
      "epoch": 3.517342452369321,
      "eval_loss": 1.3995802402496338,
      "eval_runtime": 114.5983,
      "eval_samples_per_second": 254.035,
      "eval_steps_per_second": 7.941,
      "step": 28800
    },
    {
      "epoch": 3.7127503663898387,
      "grad_norm": 0.6246566772460938,
      "learning_rate": 0.00019416726574950254,
      "loss": 1.5141,
      "step": 30400
    },
    {
      "epoch": 3.7127503663898387,
      "eval_loss": 1.390926718711853,
      "eval_runtime": 114.3382,
      "eval_samples_per_second": 254.613,
      "eval_steps_per_second": 7.959,
      "step": 30400
    },
    {
      "epoch": 3.9081582804103565,
      "grad_norm": 0.5574250221252441,
      "learning_rate": 0.0001934468995042272,
      "loss": 1.5031,
      "step": 32000
    },
    {
      "epoch": 3.9081582804103565,
      "eval_loss": 1.3884520530700684,
      "eval_runtime": 114.5913,
      "eval_samples_per_second": 254.051,
      "eval_steps_per_second": 7.941,
      "step": 32000
    },
    {
      "epoch": 4.103566194430875,
      "grad_norm": 0.6128900051116943,
      "learning_rate": 0.00019268609883200935,
      "loss": 1.4957,
      "step": 33600
    },
    {
      "epoch": 4.103566194430875,
      "eval_loss": 1.3740371465682983,
      "eval_runtime": 114.6971,
      "eval_samples_per_second": 253.816,
      "eval_steps_per_second": 7.934,
      "step": 33600
    },
    {
      "epoch": 4.298974108451392,
      "grad_norm": 0.5937383770942688,
      "learning_rate": 0.00019188519293092138,
      "loss": 1.4874,
      "step": 35200
    },
    {
      "epoch": 4.298974108451392,
      "eval_loss": 1.3618671894073486,
      "eval_runtime": 114.3011,
      "eval_samples_per_second": 254.696,
      "eval_steps_per_second": 7.961,
      "step": 35200
    },
    {
      "epoch": 4.49438202247191,
      "grad_norm": 0.5859814286231995,
      "learning_rate": 0.00019104452835254848,
      "loss": 1.4788,
      "step": 36800
    },
    {
      "epoch": 4.49438202247191,
      "eval_loss": 1.3512194156646729,
      "eval_runtime": 114.1714,
      "eval_samples_per_second": 254.985,
      "eval_steps_per_second": 7.97,
      "step": 36800
    },
    {
      "epoch": 4.689789936492428,
      "grad_norm": 0.6147826910018921,
      "learning_rate": 0.00019016446885203558,
      "loss": 1.4728,
      "step": 38400
    },
    {
      "epoch": 4.689789936492428,
      "eval_loss": 1.346225619316101,
      "eval_runtime": 114.1824,
      "eval_samples_per_second": 254.961,
      "eval_steps_per_second": 7.97,
      "step": 38400
    },
    {
      "epoch": 4.885197850512946,
      "grad_norm": 0.6011592745780945,
      "learning_rate": 0.00018924539523069092,
      "loss": 1.4642,
      "step": 40000
    },
    {
      "epoch": 4.885197850512946,
      "eval_loss": 1.3416181802749634,
      "eval_runtime": 114.2305,
      "eval_samples_per_second": 254.853,
      "eval_steps_per_second": 7.966,
      "step": 40000
    },
    {
      "epoch": 5.0806057645334635,
      "grad_norm": 0.5911765098571777,
      "learning_rate": 0.00018828770517121334,
      "loss": 1.4573,
      "step": 41600
    },
    {
      "epoch": 5.0806057645334635,
      "eval_loss": 1.3351951837539673,
      "eval_runtime": 113.6497,
      "eval_samples_per_second": 256.156,
      "eval_steps_per_second": 8.007,
      "step": 41600
    },
    {
      "epoch": 5.276013678553982,
      "grad_norm": 0.6570513844490051,
      "learning_rate": 0.00018729181306561534,
      "loss": 1.4516,
      "step": 43200
    },
    {
      "epoch": 5.276013678553982,
      "eval_loss": 1.3280487060546875,
      "eval_runtime": 113.3621,
      "eval_samples_per_second": 256.806,
      "eval_steps_per_second": 8.027,
      "step": 43200
    },
    {
      "epoch": 5.471421592574499,
      "grad_norm": 0.6237516403198242,
      "learning_rate": 0.00018625814983591572,
      "loss": 1.445,
      "step": 44800
    },
    {
      "epoch": 5.471421592574499,
      "eval_loss": 1.3218315839767456,
      "eval_runtime": 113.4,
      "eval_samples_per_second": 256.72,
      "eval_steps_per_second": 8.025,
      "step": 44800
    },
    {
      "epoch": 5.666829506595017,
      "grad_norm": 0.6138856410980225,
      "learning_rate": 0.00018518716274767993,
      "loss": 1.4403,
      "step": 46400
    },
    {
      "epoch": 5.666829506595017,
      "eval_loss": 1.3159008026123047,
      "eval_runtime": 113.486,
      "eval_samples_per_second": 256.525,
      "eval_steps_per_second": 8.019,
      "step": 46400
    },
    {
      "epoch": 5.862237420615535,
      "grad_norm": 0.6461225152015686,
      "learning_rate": 0.0001840793152164884,
      "loss": 1.4338,
      "step": 48000
    },
    {
      "epoch": 5.862237420615535,
      "eval_loss": 1.307564377784729,
      "eval_runtime": 113.2007,
      "eval_samples_per_second": 257.171,
      "eval_steps_per_second": 8.039,
      "step": 48000
    },
    {
      "epoch": 6.057645334636053,
      "grad_norm": 0.6271090507507324,
      "learning_rate": 0.0001829350866074169,
      "loss": 1.4241,
      "step": 49600
    },
    {
      "epoch": 6.057645334636053,
      "eval_loss": 1.3019992113113403,
      "eval_runtime": 113.8204,
      "eval_samples_per_second": 255.771,
      "eval_steps_per_second": 7.995,
      "step": 49600
    },
    {
      "epoch": 6.25305324865657,
      "grad_norm": 0.6223775148391724,
      "learning_rate": 0.0001817549720276156,
      "loss": 1.4204,
      "step": 51200
    },
    {
      "epoch": 6.25305324865657,
      "eval_loss": 1.2931100130081177,
      "eval_runtime": 113.6749,
      "eval_samples_per_second": 256.099,
      "eval_steps_per_second": 8.005,
      "step": 51200
    },
    {
      "epoch": 6.448461162677089,
      "grad_norm": 0.652824342250824,
      "learning_rate": 0.00018053948211207624,
      "loss": 1.4125,
      "step": 52800
    },
    {
      "epoch": 6.448461162677089,
      "eval_loss": 1.2813327312469482,
      "eval_runtime": 113.1715,
      "eval_samples_per_second": 257.238,
      "eval_steps_per_second": 8.041,
      "step": 52800
    },
    {
      "epoch": 6.643869076697606,
      "grad_norm": 0.6772754788398743,
      "learning_rate": 0.0001792891428026808,
      "loss": 1.406,
      "step": 54400
    },
    {
      "epoch": 6.643869076697606,
      "eval_loss": 1.2822933197021484,
      "eval_runtime": 113.3373,
      "eval_samples_per_second": 256.862,
      "eval_steps_per_second": 8.029,
      "step": 54400
    },
    {
      "epoch": 6.839276990718124,
      "grad_norm": 0.6514716148376465,
      "learning_rate": 0.0001780044951206266,
      "loss": 1.3993,
      "step": 56000
    },
    {
      "epoch": 6.839276990718124,
      "eval_loss": 1.2731530666351318,
      "eval_runtime": 113.4336,
      "eval_samples_per_second": 256.643,
      "eval_steps_per_second": 8.022,
      "step": 56000
    },
    {
      "epoch": 7.034684904738642,
      "grad_norm": 0.7061969041824341,
      "learning_rate": 0.00017668609493232685,
      "loss": 1.3954,
      "step": 57600
    },
    {
      "epoch": 7.034684904738642,
      "eval_loss": 1.2732563018798828,
      "eval_runtime": 113.5204,
      "eval_samples_per_second": 256.447,
      "eval_steps_per_second": 8.016,
      "step": 57600
    },
    {
      "epoch": 7.23009281875916,
      "grad_norm": 0.6283796429634094,
      "learning_rate": 0.00017533451270888733,
      "loss": 1.3885,
      "step": 59200
    },
    {
      "epoch": 7.23009281875916,
      "eval_loss": 1.2614556550979614,
      "eval_runtime": 113.9439,
      "eval_samples_per_second": 255.494,
      "eval_steps_per_second": 7.986,
      "step": 59200
    },
    {
      "epoch": 7.425500732779677,
      "grad_norm": 0.701786994934082,
      "learning_rate": 0.00017395033327926394,
      "loss": 1.3839,
      "step": 60800
    },
    {
      "epoch": 7.425500732779677,
      "eval_loss": 1.2564245462417603,
      "eval_runtime": 113.6613,
      "eval_samples_per_second": 256.129,
      "eval_steps_per_second": 8.006,
      "step": 60800
    },
    {
      "epoch": 7.620908646800196,
      "grad_norm": 0.6613229513168335,
      "learning_rate": 0.0001725341555772075,
      "loss": 1.379,
      "step": 62400
    },
    {
      "epoch": 7.620908646800196,
      "eval_loss": 1.248888373374939,
      "eval_runtime": 113.7924,
      "eval_samples_per_second": 255.834,
      "eval_steps_per_second": 7.997,
      "step": 62400
    },
    {
      "epoch": 7.816316560820713,
      "grad_norm": 0.6857264041900635,
      "learning_rate": 0.00017108659238210543,
      "loss": 1.3737,
      "step": 64000
    },
    {
      "epoch": 7.816316560820713,
      "eval_loss": 1.2474324703216553,
      "eval_runtime": 114.1649,
      "eval_samples_per_second": 255.0,
      "eval_steps_per_second": 7.971,
      "step": 64000
    },
    {
      "epoch": 8.01172447484123,
      "grad_norm": 0.6922156810760498,
      "learning_rate": 0.00016960827005383234,
      "loss": 1.3686,
      "step": 65600
    },
    {
      "epoch": 8.01172447484123,
      "eval_loss": 1.2473887205123901,
      "eval_runtime": 113.4996,
      "eval_samples_per_second": 256.494,
      "eval_steps_per_second": 8.018,
      "step": 65600
    },
    {
      "epoch": 8.20713238886175,
      "grad_norm": 0.6817461252212524,
      "learning_rate": 0.00016809982826172446,
      "loss": 1.3603,
      "step": 67200
    },
    {
      "epoch": 8.20713238886175,
      "eval_loss": 1.2385778427124023,
      "eval_runtime": 113.3075,
      "eval_samples_per_second": 256.929,
      "eval_steps_per_second": 8.031,
      "step": 67200
    },
    {
      "epoch": 8.402540302882267,
      "grad_norm": 0.6611928343772888,
      "learning_rate": 0.00016656191970779508,
      "loss": 1.3582,
      "step": 68800
    },
    {
      "epoch": 8.402540302882267,
      "eval_loss": 1.232322096824646,
      "eval_runtime": 113.1998,
      "eval_samples_per_second": 257.174,
      "eval_steps_per_second": 8.039,
      "step": 68800
    },
    {
      "epoch": 8.597948216902784,
      "grad_norm": 0.7477986812591553,
      "learning_rate": 0.0001649952098443106,
      "loss": 1.3533,
      "step": 70400
    },
    {
      "epoch": 8.597948216902784,
      "eval_loss": 1.2227420806884766,
      "eval_runtime": 113.2449,
      "eval_samples_per_second": 257.071,
      "eval_steps_per_second": 8.036,
      "step": 70400
    },
    {
      "epoch": 8.793356130923302,
      "grad_norm": 0.6740342378616333,
      "learning_rate": 0.00016340037658584987,
      "loss": 1.3479,
      "step": 72000
    },
    {
      "epoch": 8.793356130923302,
      "eval_loss": 1.2295479774475098,
      "eval_runtime": 113.3877,
      "eval_samples_per_second": 256.747,
      "eval_steps_per_second": 8.026,
      "step": 72000
    },
    {
      "epoch": 8.98876404494382,
      "grad_norm": 0.6922385096549988,
      "learning_rate": 0.00016177811001597065,
      "loss": 1.3436,
      "step": 73600
    },
    {
      "epoch": 8.98876404494382,
      "eval_loss": 1.211731195449829,
      "eval_runtime": 113.5374,
      "eval_samples_per_second": 256.409,
      "eval_steps_per_second": 8.015,
      "step": 73600
    },
    {
      "epoch": 9.184171958964338,
      "grad_norm": 0.711102306842804,
      "learning_rate": 0.00016012911208861095,
      "loss": 1.3379,
      "step": 75200
    },
    {
      "epoch": 9.184171958964338,
      "eval_loss": 1.2126644849777222,
      "eval_runtime": 113.5611,
      "eval_samples_per_second": 256.355,
      "eval_steps_per_second": 8.013,
      "step": 75200
    },
    {
      "epoch": 9.379579872984856,
      "grad_norm": 0.6939982175827026,
      "learning_rate": 0.00015845409632435383,
      "loss": 1.3308,
      "step": 76800
    },
    {
      "epoch": 9.379579872984856,
      "eval_loss": 1.2125294208526611,
      "eval_runtime": 113.3442,
      "eval_samples_per_second": 256.846,
      "eval_steps_per_second": 8.029,
      "step": 76800
    },
    {
      "epoch": 9.574987787005373,
      "grad_norm": 0.7124617695808411,
      "learning_rate": 0.0001567537875016875,
      "loss": 1.328,
      "step": 78400
    },
    {
      "epoch": 9.574987787005373,
      "eval_loss": 1.2044532299041748,
      "eval_runtime": 113.4468,
      "eval_samples_per_second": 256.614,
      "eval_steps_per_second": 8.021,
      "step": 78400
    },
    {
      "epoch": 9.770395701025892,
      "grad_norm": 0.7330592274665833,
      "learning_rate": 0.00015502892134339392,
      "loss": 1.3218,
      "step": 80000
    },
    {
      "epoch": 9.770395701025892,
      "eval_loss": 1.1967058181762695,
      "eval_runtime": 113.2601,
      "eval_samples_per_second": 257.037,
      "eval_steps_per_second": 8.035,
      "step": 80000
    },
    {
      "epoch": 9.96580361504641,
      "grad_norm": 0.6967942714691162,
      "learning_rate": 0.00015328024419820202,
      "loss": 1.3183,
      "step": 81600
    },
    {
      "epoch": 9.96580361504641,
      "eval_loss": 1.1870883703231812,
      "eval_runtime": 113.4792,
      "eval_samples_per_second": 256.541,
      "eval_steps_per_second": 8.019,
      "step": 81600
    },
    {
      "epoch": 10.161211529066927,
      "grad_norm": 0.7507591843605042,
      "learning_rate": 0.00015150851271784278,
      "loss": 1.3141,
      "step": 83200
    },
    {
      "epoch": 10.161211529066927,
      "eval_loss": 1.1836739778518677,
      "eval_runtime": 113.0477,
      "eval_samples_per_second": 257.52,
      "eval_steps_per_second": 8.05,
      "step": 83200
    },
    {
      "epoch": 10.356619443087444,
      "grad_norm": 0.700063169002533,
      "learning_rate": 0.00014971449352964674,
      "loss": 1.3069,
      "step": 84800
    },
    {
      "epoch": 10.356619443087444,
      "eval_loss": 1.1819714307785034,
      "eval_runtime": 113.2705,
      "eval_samples_per_second": 257.013,
      "eval_steps_per_second": 8.034,
      "step": 84800
    },
    {
      "epoch": 10.552027357107963,
      "grad_norm": 0.7527259588241577,
      "learning_rate": 0.00014789896290482433,
      "loss": 1.3023,
      "step": 86400
    },
    {
      "epoch": 10.552027357107963,
      "eval_loss": 1.180099606513977,
      "eval_runtime": 113.5043,
      "eval_samples_per_second": 256.484,
      "eval_steps_per_second": 8.017,
      "step": 86400
    },
    {
      "epoch": 10.74743527112848,
      "grad_norm": 0.7384325861930847,
      "learning_rate": 0.00014606270642257408,
      "loss": 1.3004,
      "step": 88000
    },
    {
      "epoch": 10.74743527112848,
      "eval_loss": 1.1687953472137451,
      "eval_runtime": 113.2405,
      "eval_samples_per_second": 257.081,
      "eval_steps_per_second": 8.036,
      "step": 88000
    },
    {
      "epoch": 10.942843185148998,
      "grad_norm": 0.7243154644966125,
      "learning_rate": 0.00014420651863016263,
      "loss": 1.2952,
      "step": 89600
    },
    {
      "epoch": 10.942843185148998,
      "eval_loss": 1.1710914373397827,
      "eval_runtime": 113.1239,
      "eval_samples_per_second": 257.346,
      "eval_steps_per_second": 8.044,
      "step": 89600
    },
    {
      "epoch": 11.138251099169516,
      "grad_norm": 0.7848924994468689,
      "learning_rate": 0.0001423312026991247,
      "loss": 1.2897,
      "step": 91200
    },
    {
      "epoch": 11.138251099169516,
      "eval_loss": 1.169708013534546,
      "eval_runtime": 113.4134,
      "eval_samples_per_second": 256.689,
      "eval_steps_per_second": 8.024,
      "step": 91200
    },
    {
      "epoch": 11.333659013190035,
      "grad_norm": 0.7706081867218018,
      "learning_rate": 0.00014043757007773094,
      "loss": 1.2844,
      "step": 92800
    },
    {
      "epoch": 11.333659013190035,
      "eval_loss": 1.1610310077667236,
      "eval_runtime": 113.6197,
      "eval_samples_per_second": 256.223,
      "eval_steps_per_second": 8.009,
      "step": 92800
    },
    {
      "epoch": 11.529066927210552,
      "grad_norm": 0.7377546429634094,
      "learning_rate": 0.00013852644013987489,
      "loss": 1.2803,
      "step": 94400
    },
    {
      "epoch": 11.529066927210552,
      "eval_loss": 1.1572864055633545,
      "eval_runtime": 113.4329,
      "eval_samples_per_second": 256.645,
      "eval_steps_per_second": 8.022,
      "step": 94400
    },
    {
      "epoch": 11.72447484123107,
      "grad_norm": 0.7930458188056946,
      "learning_rate": 0.00013659863983053026,
      "loss": 1.2777,
      "step": 96000
    },
    {
      "epoch": 11.72447484123107,
      "eval_loss": 1.1565219163894653,
      "eval_runtime": 113.3539,
      "eval_samples_per_second": 256.824,
      "eval_steps_per_second": 8.028,
      "step": 96000
    },
    {
      "epoch": 11.919882755251587,
      "grad_norm": 0.7491051554679871,
      "learning_rate": 0.00013465500330793215,
      "loss": 1.2734,
      "step": 97600
    },
    {
      "epoch": 11.919882755251587,
      "eval_loss": 1.148636817932129,
      "eval_runtime": 113.4666,
      "eval_samples_per_second": 256.569,
      "eval_steps_per_second": 8.02,
      "step": 97600
    },
    {
      "epoch": 12.115290669272106,
      "grad_norm": 0.765771746635437,
      "learning_rate": 0.0001326963715826373,
      "loss": 1.2688,
      "step": 99200
    },
    {
      "epoch": 12.115290669272106,
      "eval_loss": 1.1487972736358643,
      "eval_runtime": 113.1177,
      "eval_samples_per_second": 257.36,
      "eval_steps_per_second": 8.045,
      "step": 99200
    },
    {
      "epoch": 12.310698583292623,
      "grad_norm": 0.7452722191810608,
      "learning_rate": 0.00013072359215361915,
      "loss": 1.2646,
      "step": 100800
    },
    {
      "epoch": 12.310698583292623,
      "eval_loss": 1.1423231363296509,
      "eval_runtime": 113.0385,
      "eval_samples_per_second": 257.541,
      "eval_steps_per_second": 8.05,
      "step": 100800
    },
    {
      "epoch": 12.50610649731314,
      "grad_norm": 0.8033236265182495,
      "learning_rate": 0.00012873751864155556,
      "loss": 1.2604,
      "step": 102400
    },
    {
      "epoch": 12.50610649731314,
      "eval_loss": 1.135223627090454,
      "eval_runtime": 113.5509,
      "eval_samples_per_second": 256.378,
      "eval_steps_per_second": 8.014,
      "step": 102400
    },
    {
      "epoch": 12.701514411333658,
      "grad_norm": 0.7842743396759033,
      "learning_rate": 0.0001267390104194675,
      "loss": 1.2566,
      "step": 104000
    },
    {
      "epoch": 12.701514411333658,
      "eval_loss": 1.1383661031723022,
      "eval_runtime": 113.3823,
      "eval_samples_per_second": 256.76,
      "eval_steps_per_second": 8.026,
      "step": 104000
    },
    {
      "epoch": 12.896922325354177,
      "grad_norm": 0.7673171758651733,
      "learning_rate": 0.00012472893224086873,
      "loss": 1.2534,
      "step": 105600
    },
    {
      "epoch": 12.896922325354177,
      "eval_loss": 1.1316802501678467,
      "eval_runtime": 113.8092,
      "eval_samples_per_second": 255.797,
      "eval_steps_per_second": 7.996,
      "step": 105600
    },
    {
      "epoch": 13.092330239374695,
      "grad_norm": 4.324079513549805,
      "learning_rate": 0.00012270815386558753,
      "loss": 1.2495,
      "step": 107200
    },
    {
      "epoch": 13.092330239374695,
      "eval_loss": 1.1293587684631348,
      "eval_runtime": 113.1334,
      "eval_samples_per_second": 257.324,
      "eval_steps_per_second": 8.044,
      "step": 107200
    },
    {
      "epoch": 13.287738153395212,
      "grad_norm": 0.8010779023170471,
      "learning_rate": 0.00012067754968342186,
      "loss": 1.2429,
      "step": 108800
    },
    {
      "epoch": 13.287738153395212,
      "eval_loss": 1.1205388307571411,
      "eval_runtime": 113.0887,
      "eval_samples_per_second": 257.426,
      "eval_steps_per_second": 8.047,
      "step": 108800
    },
    {
      "epoch": 13.48314606741573,
      "grad_norm": 0.7941340804100037,
      "learning_rate": 0.00011863799833579153,
      "loss": 1.2403,
      "step": 110400
    },
    {
      "epoch": 13.48314606741573,
      "eval_loss": 1.1161994934082031,
      "eval_runtime": 112.9454,
      "eval_samples_per_second": 257.753,
      "eval_steps_per_second": 8.057,
      "step": 110400
    },
    {
      "epoch": 13.678553981436249,
      "grad_norm": 0.746963381767273,
      "learning_rate": 0.00011659038233555033,
      "loss": 1.2386,
      "step": 112000
    },
    {
      "epoch": 13.678553981436249,
      "eval_loss": 1.1152887344360352,
      "eval_runtime": 113.0036,
      "eval_samples_per_second": 257.62,
      "eval_steps_per_second": 8.053,
      "step": 112000
    },
    {
      "epoch": 13.873961895456766,
      "grad_norm": 0.7765190601348877,
      "learning_rate": 0.00011453558768512322,
      "loss": 1.2332,
      "step": 113600
    },
    {
      "epoch": 13.873961895456766,
      "eval_loss": 1.1129028797149658,
      "eval_runtime": 113.3982,
      "eval_samples_per_second": 256.724,
      "eval_steps_per_second": 8.025,
      "step": 113600
    },
    {
      "epoch": 14.069369809477283,
      "grad_norm": 0.8052350282669067,
      "learning_rate": 0.00011247450349313363,
      "loss": 1.2326,
      "step": 115200
    },
    {
      "epoch": 14.069369809477283,
      "eval_loss": 1.107361078262329,
      "eval_runtime": 113.042,
      "eval_samples_per_second": 257.533,
      "eval_steps_per_second": 8.05,
      "step": 115200
    },
    {
      "epoch": 14.2647777234978,
      "grad_norm": 0.7904307246208191,
      "learning_rate": 0.00011040802158968633,
      "loss": 1.2251,
      "step": 116800
    },
    {
      "epoch": 14.2647777234978,
      "eval_loss": 1.109073281288147,
      "eval_runtime": 113.3982,
      "eval_samples_per_second": 256.724,
      "eval_steps_per_second": 8.025,
      "step": 116800
    },
    {
      "epoch": 14.46018563751832,
      "grad_norm": 0.8042516112327576,
      "learning_rate": 0.00010833703614047328,
      "loss": 1.2231,
      "step": 118400
    },
    {
      "epoch": 14.46018563751832,
      "eval_loss": 1.1034891605377197,
      "eval_runtime": 113.4506,
      "eval_samples_per_second": 256.605,
      "eval_steps_per_second": 8.021,
      "step": 118400
    },
    {
      "epoch": 14.655593551538837,
      "grad_norm": 0.8420639038085938,
      "learning_rate": 0.00010626244325986843,
      "loss": 1.2205,
      "step": 120000
    },
    {
      "epoch": 14.655593551538837,
      "eval_loss": 1.101244330406189,
      "eval_runtime": 113.3452,
      "eval_samples_per_second": 256.844,
      "eval_steps_per_second": 8.029,
      "step": 120000
    },
    {
      "epoch": 14.851001465559355,
      "grad_norm": 0.8175898790359497,
      "learning_rate": 0.00010418514062317943,
      "loss": 1.2158,
      "step": 121600
    },
    {
      "epoch": 14.851001465559355,
      "eval_loss": 1.1000375747680664,
      "eval_runtime": 113.0595,
      "eval_samples_per_second": 257.493,
      "eval_steps_per_second": 8.049,
      "step": 121600
    },
    {
      "epoch": 15.046409379579872,
      "grad_norm": 0.8033199906349182,
      "learning_rate": 0.00010210602707822416,
      "loss": 1.2126,
      "step": 123200
    },
    {
      "epoch": 15.046409379579872,
      "eval_loss": 1.09498131275177,
      "eval_runtime": 113.2967,
      "eval_samples_per_second": 256.954,
      "eval_steps_per_second": 8.032,
      "step": 123200
    },
    {
      "epoch": 15.241817293600391,
      "grad_norm": 0.84133380651474,
      "learning_rate": 0.00010002600225639952,
      "loss": 1.2065,
      "step": 124800
    },
    {
      "epoch": 15.241817293600391,
      "eval_loss": 1.0932313203811646,
      "eval_runtime": 113.5069,
      "eval_samples_per_second": 256.478,
      "eval_steps_per_second": 8.017,
      "step": 124800
    },
    {
      "epoch": 15.437225207620909,
      "grad_norm": 0.8350341320037842,
      "learning_rate": 9.794596618341145e-05,
      "loss": 1.2052,
      "step": 126400
    },
    {
      "epoch": 15.437225207620909,
      "eval_loss": 1.0876601934432983,
      "eval_runtime": 113.1095,
      "eval_samples_per_second": 257.379,
      "eval_steps_per_second": 8.045,
      "step": 126400
    },
    {
      "epoch": 15.632633121641426,
      "grad_norm": 0.8618020415306091,
      "learning_rate": 9.586681888983431e-05,
      "loss": 1.2018,
      "step": 128000
    },
    {
      "epoch": 15.632633121641426,
      "eval_loss": 1.0861952304840088,
      "eval_runtime": 113.5585,
      "eval_samples_per_second": 256.361,
      "eval_steps_per_second": 8.013,
      "step": 128000
    },
    {
      "epoch": 15.828041035661943,
      "grad_norm": 0.8563352227210999,
      "learning_rate": 9.378946002166804e-05,
      "loss": 1.1988,
      "step": 129600
    },
    {
      "epoch": 15.828041035661943,
      "eval_loss": 1.0840609073638916,
      "eval_runtime": 113.2623,
      "eval_samples_per_second": 257.032,
      "eval_steps_per_second": 8.034,
      "step": 129600
    },
    {
      "epoch": 16.02344894968246,
      "grad_norm": 0.8290483951568604,
      "learning_rate": 9.171478845106179e-05,
      "loss": 1.1932,
      "step": 131200
    },
    {
      "epoch": 16.02344894968246,
      "eval_loss": 1.0800191164016724,
      "eval_runtime": 113.4589,
      "eval_samples_per_second": 256.586,
      "eval_steps_per_second": 8.021,
      "step": 131200
    },
    {
      "epoch": 16.21885686370298,
      "grad_norm": 0.84539794921875,
      "learning_rate": 8.964370188737233e-05,
      "loss": 1.1907,
      "step": 132800
    },
    {
      "epoch": 16.21885686370298,
      "eval_loss": 1.0787382125854492,
      "eval_runtime": 113.3297,
      "eval_samples_per_second": 256.879,
      "eval_steps_per_second": 8.03,
      "step": 132800
    },
    {
      "epoch": 16.4142647777235,
      "grad_norm": 0.81632000207901,
      "learning_rate": 8.757709648872583e-05,
      "loss": 1.1873,
      "step": 134400
    },
    {
      "epoch": 16.4142647777235,
      "eval_loss": 1.0750751495361328,
      "eval_runtime": 113.6133,
      "eval_samples_per_second": 256.238,
      "eval_steps_per_second": 8.01,
      "step": 134400
| }, | |
| { | |
| "epoch": 16.609672691744017, | |
| "grad_norm": 0.8491987586021423, | |
| "learning_rate": 8.551586647425051e-05, | |
| "loss": 1.1819, | |
| "step": 136000 | |
| }, | |
| { | |
| "epoch": 16.609672691744017, | |
| "eval_loss": 1.0718854665756226, | |
| "eval_runtime": 113.4371, | |
| "eval_samples_per_second": 256.636, | |
| "eval_steps_per_second": 8.022, | |
| "step": 136000 | |
| }, | |
| { | |
| "epoch": 16.805080605764534, | |
| "grad_norm": 0.8197298645973206, | |
| "learning_rate": 8.346090373714858e-05, | |
| "loss": 1.1802, | |
| "step": 137600 | |
| }, | |
| { | |
| "epoch": 16.805080605764534, | |
| "eval_loss": 1.0670559406280518, | |
| "eval_runtime": 113.4213, | |
| "eval_samples_per_second": 256.671, | |
| "eval_steps_per_second": 8.023, | |
| "step": 137600 | |
| }, | |
| { | |
| "epoch": 17.00048851978505, | |
| "grad_norm": 0.8388739824295044, | |
| "learning_rate": 8.141309745877437e-05, | |
| "loss": 1.1768, | |
| "step": 139200 | |
| }, | |
| { | |
| "epoch": 17.00048851978505, | |
| "eval_loss": 1.0644892454147339, | |
| "eval_runtime": 112.6464, | |
| "eval_samples_per_second": 258.437, | |
| "eval_steps_per_second": 8.078, | |
| "step": 139200 | |
| }, | |
| { | |
| "epoch": 17.19589643380557, | |
| "grad_norm": 0.9059694409370422, | |
| "learning_rate": 7.93733337238861e-05, | |
| "loss": 1.1738, | |
| "step": 140800 | |
| }, | |
| { | |
| "epoch": 17.19589643380557, | |
| "eval_loss": 1.0664381980895996, | |
| "eval_runtime": 112.556, | |
| "eval_samples_per_second": 258.645, | |
| "eval_steps_per_second": 8.085, | |
| "step": 140800 | |
| }, | |
| { | |
| "epoch": 17.391304347826086, | |
| "grad_norm": 0.9374914765357971, | |
| "learning_rate": 7.734249513723749e-05, | |
| "loss": 1.1699, | |
| "step": 142400 | |
| }, | |
| { | |
| "epoch": 17.391304347826086, | |
| "eval_loss": 1.0624644756317139, | |
| "eval_runtime": 112.922, | |
| "eval_samples_per_second": 257.806, | |
| "eval_steps_per_second": 8.059, | |
| "step": 142400 | |
| }, | |
| { | |
| "epoch": 17.586712261846603, | |
| "grad_norm": 0.8572972416877747, | |
| "learning_rate": 7.532146044167501e-05, | |
| "loss": 1.1667, | |
| "step": 144000 | |
| }, | |
| { | |
| "epoch": 17.586712261846603, | |
| "eval_loss": 1.0545681715011597, | |
| "eval_runtime": 112.6964, | |
| "eval_samples_per_second": 258.322, | |
| "eval_steps_per_second": 8.075, | |
| "step": 144000 | |
| }, | |
| { | |
| "epoch": 17.78212017586712, | |
| "grad_norm": 0.8783763647079468, | |
| "learning_rate": 7.33111041379063e-05, | |
| "loss": 1.164, | |
| "step": 145600 | |
| }, | |
| { | |
| "epoch": 17.78212017586712, | |
| "eval_loss": 1.0523958206176758, | |
| "eval_runtime": 112.8245, | |
| "eval_samples_per_second": 258.029, | |
| "eval_steps_per_second": 8.066, | |
| "step": 145600 | |
| }, | |
| { | |
| "epoch": 17.97752808988764, | |
| "grad_norm": 0.8747693300247192, | |
| "learning_rate": 7.131229610610423e-05, | |
| "loss": 1.1595, | |
| "step": 147200 | |
| }, | |
| { | |
| "epoch": 17.97752808988764, | |
| "eval_loss": 1.0538229942321777, | |
| "eval_runtime": 112.8739, | |
| "eval_samples_per_second": 257.916, | |
| "eval_steps_per_second": 8.062, | |
| "step": 147200 | |
| }, | |
| { | |
| "epoch": 18.17293600390816, | |
| "grad_norm": 0.8842704892158508, | |
| "learning_rate": 6.932590122951006e-05, | |
| "loss": 1.1571, | |
| "step": 148800 | |
| }, | |
| { | |
| "epoch": 18.17293600390816, | |
| "eval_loss": 1.051466941833496, | |
| "eval_runtime": 113.4259, | |
| "eval_samples_per_second": 256.661, | |
| "eval_steps_per_second": 8.023, | |
| "step": 148800 | |
| }, | |
| { | |
| "epoch": 18.368343917928676, | |
| "grad_norm": 0.8712752461433411, | |
| "learning_rate": 6.735277902019914e-05, | |
| "loss": 1.1552, | |
| "step": 150400 | |
| }, | |
| { | |
| "epoch": 18.368343917928676, | |
| "eval_loss": 1.0449974536895752, | |
| "eval_runtime": 113.4246, | |
| "eval_samples_per_second": 256.664, | |
| "eval_steps_per_second": 8.023, | |
| "step": 150400 | |
| }, | |
| { | |
| "epoch": 18.563751831949194, | |
| "grad_norm": 0.8981324434280396, | |
| "learning_rate": 6.539378324717007e-05, | |
| "loss": 1.1503, | |
| "step": 152000 | |
| }, | |
| { | |
| "epoch": 18.563751831949194, | |
| "eval_loss": 1.044567584991455, | |
| "eval_runtime": 113.3988, | |
| "eval_samples_per_second": 256.722, | |
| "eval_steps_per_second": 8.025, | |
| "step": 152000 | |
| }, | |
| { | |
| "epoch": 18.75915974596971, | |
| "grad_norm": 0.9067941904067993, | |
| "learning_rate": 6.344976156691964e-05, | |
| "loss": 1.1478, | |
| "step": 153600 | |
| }, | |
| { | |
| "epoch": 18.75915974596971, | |
| "eval_loss": 1.045156478881836, | |
| "eval_runtime": 112.9533, | |
| "eval_samples_per_second": 257.735, | |
| "eval_steps_per_second": 8.056, | |
| "step": 153600 | |
| }, | |
| { | |
| "epoch": 18.95456765999023, | |
| "grad_norm": 0.8807551264762878, | |
| "learning_rate": 6.152155515666206e-05, | |
| "loss": 1.145, | |
| "step": 155200 | |
| }, | |
| { | |
| "epoch": 18.95456765999023, | |
| "eval_loss": 1.0368510484695435, | |
| "eval_runtime": 113.1918, | |
| "eval_samples_per_second": 257.192, | |
| "eval_steps_per_second": 8.039, | |
| "step": 155200 | |
| }, | |
| { | |
| "epoch": 19.149975574010746, | |
| "grad_norm": 0.8794445991516113, | |
| "learning_rate": 5.96099983503521e-05, | |
| "loss": 1.1403, | |
| "step": 156800 | |
| }, | |
| { | |
| "epoch": 19.149975574010746, | |
| "eval_loss": 1.0389199256896973, | |
| "eval_runtime": 113.1353, | |
| "eval_samples_per_second": 257.32, | |
| "eval_steps_per_second": 8.043, | |
| "step": 156800 | |
| }, | |
| { | |
| "epoch": 19.345383488031267, | |
| "grad_norm": 0.9533663392066956, | |
| "learning_rate": 5.771591827766929e-05, | |
| "loss": 1.1403, | |
| "step": 158400 | |
| }, | |
| { | |
| "epoch": 19.345383488031267, | |
| "eval_loss": 1.034336805343628, | |
| "eval_runtime": 113.0076, | |
| "eval_samples_per_second": 257.611, | |
| "eval_steps_per_second": 8.053, | |
| "step": 158400 | |
| }, | |
| { | |
| "epoch": 19.540791402051784, | |
| "grad_norm": 0.9403147101402283, | |
| "learning_rate": 5.5840134506119026e-05, | |
| "loss": 1.1355, | |
| "step": 160000 | |
| }, | |
| { | |
| "epoch": 19.540791402051784, | |
| "eval_loss": 1.032446026802063, | |
| "eval_runtime": 112.7939, | |
| "eval_samples_per_second": 258.099, | |
| "eval_steps_per_second": 8.068, | |
| "step": 160000 | |
| }, | |
| { | |
| "epoch": 19.7361993160723, | |
| "grad_norm": 0.8987004160881042, | |
| "learning_rate": 5.398345868640643e-05, | |
| "loss": 1.1331, | |
| "step": 161600 | |
| }, | |
| { | |
| "epoch": 19.7361993160723, | |
| "eval_loss": 1.0282906293869019, | |
| "eval_runtime": 113.1606, | |
| "eval_samples_per_second": 257.263, | |
| "eval_steps_per_second": 8.042, | |
| "step": 161600 | |
| }, | |
| { | |
| "epoch": 19.93160723009282, | |
| "grad_norm": 0.9014713764190674, | |
| "learning_rate": 5.2146694201235327e-05, | |
| "loss": 1.1295, | |
| "step": 163200 | |
| }, | |
| { | |
| "epoch": 19.93160723009282, | |
| "eval_loss": 1.028027057647705, | |
| "eval_runtime": 113.1223, | |
| "eval_samples_per_second": 257.35, | |
| "eval_steps_per_second": 8.044, | |
| "step": 163200 | |
| }, | |
| { | |
| "epoch": 20.127015144113336, | |
| "grad_norm": 0.9117501378059387, | |
| "learning_rate": 5.033063581768499e-05, | |
| "loss": 1.1283, | |
| "step": 164800 | |
| }, | |
| { | |
| "epoch": 20.127015144113336, | |
| "eval_loss": 1.0252844095230103, | |
| "eval_runtime": 113.2628, | |
| "eval_samples_per_second": 257.031, | |
| "eval_steps_per_second": 8.034, | |
| "step": 164800 | |
| }, | |
| { | |
| "epoch": 20.322423058133854, | |
| "grad_norm": 0.9267609119415283, | |
| "learning_rate": 4.8536069343314827e-05, | |
| "loss": 1.1249, | |
| "step": 166400 | |
| }, | |
| { | |
| "epoch": 20.322423058133854, | |
| "eval_loss": 1.0217244625091553, | |
| "eval_runtime": 113.3099, | |
| "eval_samples_per_second": 256.924, | |
| "eval_steps_per_second": 8.031, | |
| "step": 166400 | |
| }, | |
| { | |
| "epoch": 20.51783097215437, | |
| "grad_norm": 0.9269524812698364, | |
| "learning_rate": 4.676377128614583e-05, | |
| "loss": 1.1209, | |
| "step": 168000 | |
| }, | |
| { | |
| "epoch": 20.51783097215437, | |
| "eval_loss": 1.0205986499786377, | |
| "eval_runtime": 113.1734, | |
| "eval_samples_per_second": 257.234, | |
| "eval_steps_per_second": 8.041, | |
| "step": 168000 | |
| }, | |
| { | |
| "epoch": 20.71323888617489, | |
| "grad_norm": 0.8992347121238708, | |
| "learning_rate": 4.501450851866593e-05, | |
| "loss": 1.1199, | |
| "step": 169600 | |
| }, | |
| { | |
| "epoch": 20.71323888617489, | |
| "eval_loss": 1.0179848670959473, | |
| "eval_runtime": 113.2921, | |
| "eval_samples_per_second": 256.964, | |
| "eval_steps_per_second": 8.032, | |
| "step": 169600 | |
| }, | |
| { | |
| "epoch": 20.90864680019541, | |
| "grad_norm": 0.9611796736717224, | |
| "learning_rate": 4.3289037946004674e-05, | |
| "loss": 1.1164, | |
| "step": 171200 | |
| }, | |
| { | |
| "epoch": 20.90864680019541, | |
| "eval_loss": 1.015058994293213, | |
| "eval_runtime": 113.2965, | |
| "eval_samples_per_second": 256.954, | |
| "eval_steps_per_second": 8.032, | |
| "step": 171200 | |
| }, | |
| { | |
| "epoch": 21.104054714215927, | |
| "grad_norm": 0.9261363744735718, | |
| "learning_rate": 4.158810617842075e-05, | |
| "loss": 1.1125, | |
| "step": 172800 | |
| }, | |
| { | |
| "epoch": 21.104054714215927, | |
| "eval_loss": 1.0152846574783325, | |
| "eval_runtime": 113.2272, | |
| "eval_samples_per_second": 257.111, | |
| "eval_steps_per_second": 8.037, | |
| "step": 172800 | |
| }, | |
| { | |
| "epoch": 21.299462628236444, | |
| "grad_norm": 0.9231986999511719, | |
| "learning_rate": 3.9912449208244075e-05, | |
| "loss": 1.1104, | |
| "step": 174400 | |
| }, | |
| { | |
| "epoch": 21.299462628236444, | |
| "eval_loss": 1.015738606452942, | |
| "eval_runtime": 113.0864, | |
| "eval_samples_per_second": 257.431, | |
| "eval_steps_per_second": 8.047, | |
| "step": 174400 | |
| }, | |
| { | |
| "epoch": 21.49487054225696, | |
| "grad_norm": 0.9297599792480469, | |
| "learning_rate": 3.826279209141231e-05, | |
| "loss": 1.1087, | |
| "step": 176000 | |
| }, | |
| { | |
| "epoch": 21.49487054225696, | |
| "eval_loss": 1.0116798877716064, | |
| "eval_runtime": 113.0246, | |
| "eval_samples_per_second": 257.572, | |
| "eval_steps_per_second": 8.051, | |
| "step": 176000 | |
| }, | |
| { | |
| "epoch": 21.69027845627748, | |
| "grad_norm": 0.9401277303695679, | |
| "learning_rate": 3.663984863373953e-05, | |
| "loss": 1.1052, | |
| "step": 177600 | |
| }, | |
| { | |
| "epoch": 21.69027845627748, | |
| "eval_loss": 1.007462978363037, | |
| "eval_runtime": 113.0923, | |
| "eval_samples_per_second": 257.418, | |
| "eval_steps_per_second": 8.047, | |
| "step": 177600 | |
| }, | |
| { | |
| "epoch": 21.885686370297996, | |
| "grad_norm": 0.9512894749641418, | |
| "learning_rate": 3.504432108205271e-05, | |
| "loss": 1.1037, | |
| "step": 179200 | |
| }, | |
| { | |
| "epoch": 21.885686370297996, | |
| "eval_loss": 1.0074025392532349, | |
| "eval_runtime": 113.1625, | |
| "eval_samples_per_second": 257.258, | |
| "eval_steps_per_second": 8.042, | |
| "step": 179200 | |
| }, | |
| { | |
| "epoch": 22.081094284318514, | |
| "grad_norm": 0.94953453540802, | |
| "learning_rate": 3.347689982033e-05, | |
| "loss": 1.1, | |
| "step": 180800 | |
| }, | |
| { | |
| "epoch": 22.081094284318514, | |
| "eval_loss": 1.0040948390960693, | |
| "eval_runtime": 113.5667, | |
| "eval_samples_per_second": 256.343, | |
| "eval_steps_per_second": 8.013, | |
| "step": 180800 | |
| }, | |
| { | |
| "epoch": 22.27650219833903, | |
| "grad_norm": 1.0053319931030273, | |
| "learning_rate": 3.193826307097183e-05, | |
| "loss": 1.097, | |
| "step": 182400 | |
| }, | |
| { | |
| "epoch": 22.27650219833903, | |
| "eval_loss": 1.0029879808425903, | |
| "eval_runtime": 113.4145, | |
| "eval_samples_per_second": 256.687, | |
| "eval_steps_per_second": 8.024, | |
| "step": 182400 | |
| }, | |
| { | |
| "epoch": 22.471910112359552, | |
| "grad_norm": 0.9804225564002991, | |
| "learning_rate": 3.042907660133447e-05, | |
| "loss": 1.0965, | |
| "step": 184000 | |
| }, | |
| { | |
| "epoch": 22.471910112359552, | |
| "eval_loss": 1.0013761520385742, | |
| "eval_runtime": 113.1629, | |
| "eval_samples_per_second": 257.258, | |
| "eval_steps_per_second": 8.042, | |
| "step": 184000 | |
| }, | |
| { | |
| "epoch": 22.66731802638007, | |
| "grad_norm": 0.9840267896652222, | |
| "learning_rate": 2.89499934356528e-05, | |
| "loss": 1.0941, | |
| "step": 185600 | |
| }, | |
| { | |
| "epoch": 22.66731802638007, | |
| "eval_loss": 1.001320719718933, | |
| "eval_runtime": 113.395, | |
| "eval_samples_per_second": 256.731, | |
| "eval_steps_per_second": 8.025, | |
| "step": 185600 | |
| }, | |
| { | |
| "epoch": 22.862725940400587, | |
| "grad_norm": 0.9260748028755188, | |
| "learning_rate": 2.7501653572476948e-05, | |
| "loss": 1.0902, | |
| "step": 187200 | |
| }, | |
| { | |
| "epoch": 22.862725940400587, | |
| "eval_loss": 0.998078465461731, | |
| "eval_runtime": 113.1711, | |
| "eval_samples_per_second": 257.239, | |
| "eval_steps_per_second": 8.041, | |
| "step": 187200 | |
| }, | |
| { | |
| "epoch": 23.058133854421104, | |
| "grad_norm": 0.9746040105819702, | |
| "learning_rate": 2.6084683707745506e-05, | |
| "loss": 1.0887, | |
| "step": 188800 | |
| }, | |
| { | |
| "epoch": 23.058133854421104, | |
| "eval_loss": 0.9981781244277954, | |
| "eval_runtime": 112.8918, | |
| "eval_samples_per_second": 257.875, | |
| "eval_steps_per_second": 8.061, | |
| "step": 188800 | |
| }, | |
| { | |
| "epoch": 23.25354176844162, | |
| "grad_norm": 0.9964615106582642, | |
| "learning_rate": 2.4699696963614248e-05, | |
| "loss": 1.087, | |
| "step": 190400 | |
| }, | |
| { | |
| "epoch": 23.25354176844162, | |
| "eval_loss": 0.9973635673522949, | |
| "eval_runtime": 113.0464, | |
| "eval_samples_per_second": 257.523, | |
| "eval_steps_per_second": 8.05, | |
| "step": 190400 | |
| }, | |
| { | |
| "epoch": 23.44894968246214, | |
| "grad_norm": 0.9735720157623291, | |
| "learning_rate": 2.334729262315859e-05, | |
| "loss": 1.0847, | |
| "step": 192000 | |
| }, | |
| { | |
| "epoch": 23.44894968246214, | |
| "eval_loss": 0.9933942556381226, | |
| "eval_runtime": 112.8565, | |
| "eval_samples_per_second": 257.956, | |
| "eval_steps_per_second": 8.063, | |
| "step": 192000 | |
| }, | |
| { | |
| "epoch": 23.644357596482656, | |
| "grad_norm": 1.0612947940826416, | |
| "learning_rate": 2.2028055871064014e-05, | |
| "loss": 1.0811, | |
| "step": 193600 | |
| }, | |
| { | |
| "epoch": 23.644357596482656, | |
| "eval_loss": 0.9925423860549927, | |
| "eval_runtime": 112.8304, | |
| "eval_samples_per_second": 258.016, | |
| "eval_steps_per_second": 8.065, | |
| "step": 193600 | |
| }, | |
| { | |
| "epoch": 23.839765510503174, | |
| "grad_norm": 0.9771234393119812, | |
| "learning_rate": 2.0742557540417086e-05, | |
| "loss": 1.0806, | |
| "step": 195200 | |
| }, | |
| { | |
| "epoch": 23.839765510503174, | |
| "eval_loss": 0.9896884560585022, | |
| "eval_runtime": 112.7262, | |
| "eval_samples_per_second": 258.254, | |
| "eval_steps_per_second": 8.073, | |
| "step": 195200 | |
| }, | |
| { | |
| "epoch": 24.035173424523695, | |
| "grad_norm": 1.0061612129211426, | |
| "learning_rate": 1.9491353865706208e-05, | |
| "loss": 1.0787, | |
| "step": 196800 | |
| }, | |
| { | |
| "epoch": 24.035173424523695, | |
| "eval_loss": 0.9893516898155212, | |
| "eval_runtime": 113.0708, | |
| "eval_samples_per_second": 257.467, | |
| "eval_steps_per_second": 8.048, | |
| "step": 196800 | |
| }, | |
| { | |
| "epoch": 24.230581338544212, | |
| "grad_norm": 0.9902617931365967, | |
| "learning_rate": 1.8274986242139203e-05, | |
| "loss": 1.0755, | |
| "step": 198400 | |
| }, | |
| { | |
| "epoch": 24.230581338544212, | |
| "eval_loss": 0.9926121234893799, | |
| "eval_runtime": 113.3137, | |
| "eval_samples_per_second": 256.915, | |
| "eval_steps_per_second": 8.031, | |
| "step": 198400 | |
| }, | |
| { | |
| "epoch": 24.42598925256473, | |
| "grad_norm": 0.9563286900520325, | |
| "learning_rate": 1.7093980991381786e-05, | |
| "loss": 1.0759, | |
| "step": 200000 | |
| }, | |
| { | |
| "epoch": 24.42598925256473, | |
| "eval_loss": 0.9888877272605896, | |
| "eval_runtime": 113.167, | |
| "eval_samples_per_second": 257.248, | |
| "eval_steps_per_second": 8.041, | |
| "step": 200000 | |
| }, | |
| { | |
| "epoch": 24.621397166585247, | |
| "grad_norm": 0.9523786902427673, | |
| "learning_rate": 1.5948849133818656e-05, | |
| "loss": 1.0719, | |
| "step": 201600 | |
| }, | |
| { | |
| "epoch": 24.621397166585247, | |
| "eval_loss": 0.9879076480865479, | |
| "eval_runtime": 113.1405, | |
| "eval_samples_per_second": 257.308, | |
| "eval_steps_per_second": 8.043, | |
| "step": 201600 | |
| }, | |
| { | |
| "epoch": 24.816805080605764, | |
| "grad_norm": 0.996898889541626, | |
| "learning_rate": 1.4840086167435107e-05, | |
| "loss": 1.0688, | |
| "step": 203200 | |
| }, | |
| { | |
| "epoch": 24.816805080605764, | |
| "eval_loss": 0.9834213852882385, | |
| "eval_runtime": 113.246, | |
| "eval_samples_per_second": 257.069, | |
| "eval_steps_per_second": 8.036, | |
| "step": 203200 | |
| }, | |
| { | |
| "epoch": 25.01221299462628, | |
| "grad_norm": 1.096548318862915, | |
| "learning_rate": 1.376817185341529e-05, | |
| "loss": 1.0676, | |
| "step": 204800 | |
| }, | |
| { | |
| "epoch": 25.01221299462628, | |
| "eval_loss": 0.9849600791931152, | |
| "eval_runtime": 113.3085, | |
| "eval_samples_per_second": 256.927, | |
| "eval_steps_per_second": 8.031, | |
| "step": 204800 | |
| }, | |
| { | |
| "epoch": 25.2076209086468, | |
| "grad_norm": 1.0107381343841553, | |
| "learning_rate": 1.2733570008549767e-05, | |
| "loss": 1.067, | |
| "step": 206400 | |
| }, | |
| { | |
| "epoch": 25.2076209086468, | |
| "eval_loss": 0.9812195301055908, | |
| "eval_runtime": 113.3157, | |
| "eval_samples_per_second": 256.911, | |
| "eval_steps_per_second": 8.031, | |
| "step": 206400 | |
| }, | |
| { | |
| "epoch": 25.403028822667316, | |
| "grad_norm": 1.0007399320602417, | |
| "learning_rate": 1.1736728304542287e-05, | |
| "loss": 1.0621, | |
| "step": 208000 | |
| }, | |
| { | |
| "epoch": 25.403028822667316, | |
| "eval_loss": 0.9833276271820068, | |
| "eval_runtime": 113.2967, | |
| "eval_samples_per_second": 256.954, | |
| "eval_steps_per_second": 8.032, | |
| "step": 208000 | |
| }, | |
| { | |
| "epoch": 25.598436736687837, | |
| "grad_norm": 1.0498933792114258, | |
| "learning_rate": 1.0778078074302412e-05, | |
| "loss": 1.0627, | |
| "step": 209600 | |
| }, | |
| { | |
| "epoch": 25.598436736687837, | |
| "eval_loss": 0.9808743596076965, | |
| "eval_runtime": 113.2258, | |
| "eval_samples_per_second": 257.114, | |
| "eval_steps_per_second": 8.037, | |
| "step": 209600 | |
| }, | |
| { | |
| "epoch": 25.793844650708355, | |
| "grad_norm": 1.1033276319503784, | |
| "learning_rate": 9.85803412530808e-06, | |
| "loss": 1.0624, | |
| "step": 211200 | |
| }, | |
| { | |
| "epoch": 25.793844650708355, | |
| "eval_loss": 0.9798229336738586, | |
| "eval_runtime": 113.2698, | |
| "eval_samples_per_second": 257.015, | |
| "eval_steps_per_second": 8.034, | |
| "step": 211200 | |
| }, | |
| { | |
| "epoch": 25.989252564728872, | |
| "grad_norm": 1.0208085775375366, | |
| "learning_rate": 8.976994560118401e-06, | |
| "loss": 1.0614, | |
| "step": 212800 | |
| }, | |
| { | |
| "epoch": 25.989252564728872, | |
| "eval_loss": 0.9794700145721436, | |
| "eval_runtime": 113.4354, | |
| "eval_samples_per_second": 256.64, | |
| "eval_steps_per_second": 8.022, | |
| "step": 212800 | |
| }, | |
| { | |
| "epoch": 26.18466047874939, | |
| "grad_norm": 1.050437092781067, | |
| "learning_rate": 8.135340604115083e-06, | |
| "loss": 1.0602, | |
| "step": 214400 | |
| }, | |
| { | |
| "epoch": 26.18466047874939, | |
| "eval_loss": 0.9784888029098511, | |
| "eval_runtime": 113.3908, | |
| "eval_samples_per_second": 256.74, | |
| "eval_steps_per_second": 8.025, | |
| "step": 214400 | |
| }, | |
| { | |
| "epoch": 26.380068392769907, | |
| "grad_norm": 1.0029643774032593, | |
| "learning_rate": 7.333436440546337e-06, | |
| "loss": 1.0551, | |
| "step": 216000 | |
| }, | |
| { | |
| "epoch": 26.380068392769907, | |
| "eval_loss": 0.978079617023468, | |
| "eval_runtime": 113.3592, | |
| "eval_samples_per_second": 256.812, | |
| "eval_steps_per_second": 8.028, | |
| "step": 216000 | |
| }, | |
| { | |
| "epoch": 26.575476306790424, | |
| "grad_norm": 0.9882290959358215, | |
| "learning_rate": 6.571629052944928e-06, | |
| "loss": 1.0554, | |
| "step": 217600 | |
| }, | |
| { | |
| "epoch": 26.575476306790424, | |
| "eval_loss": 0.9771565794944763, | |
| "eval_runtime": 113.2886, | |
| "eval_samples_per_second": 256.972, | |
| "eval_steps_per_second": 8.033, | |
| "step": 217600 | |
| }, | |
| { | |
| "epoch": 26.77088422081094, | |
| "grad_norm": 1.0697990655899048, | |
| "learning_rate": 5.850248074988618e-06, | |
| "loss": 1.0539, | |
| "step": 219200 | |
| }, | |
| { | |
| "epoch": 26.77088422081094, | |
| "eval_loss": 0.9780036211013794, | |
| "eval_runtime": 113.0821, | |
| "eval_samples_per_second": 257.441, | |
| "eval_steps_per_second": 8.047, | |
| "step": 219200 | |
| }, | |
| { | |
| "epoch": 26.96629213483146, | |
| "grad_norm": 1.0118811130523682, | |
| "learning_rate": 5.169605647867792e-06, | |
| "loss": 1.0533, | |
| "step": 220800 | |
| }, | |
| { | |
| "epoch": 26.96629213483146, | |
| "eval_loss": 0.9773528575897217, | |
| "eval_runtime": 113.4381, | |
| "eval_samples_per_second": 256.633, | |
| "eval_steps_per_second": 8.022, | |
| "step": 220800 | |
| }, | |
| { | |
| "epoch": 27.16170004885198, | |
| "grad_norm": 1.0081678628921509, | |
| "learning_rate": 4.5299962852221935e-06, | |
| "loss": 1.0505, | |
| "step": 222400 | |
| }, | |
| { | |
| "epoch": 27.16170004885198, | |
| "eval_loss": 0.976798951625824, | |
| "eval_runtime": 113.7993, | |
| "eval_samples_per_second": 255.819, | |
| "eval_steps_per_second": 7.997, | |
| "step": 222400 | |
| }, | |
| { | |
| "epoch": 27.357107962872497, | |
| "grad_norm": 1.0098705291748047, | |
| "learning_rate": 3.931696745704927e-06, | |
| "loss": 1.0493, | |
| "step": 224000 | |
| }, | |
| { | |
| "epoch": 27.357107962872497, | |
| "eval_loss": 0.9746509790420532, | |
| "eval_runtime": 113.5227, | |
| "eval_samples_per_second": 256.442, | |
| "eval_steps_per_second": 8.016, | |
| "step": 224000 | |
| }, | |
| { | |
| "epoch": 27.552515876893015, | |
| "grad_norm": 0.9799443483352661, | |
| "learning_rate": 3.374965913229211e-06, | |
| "loss": 1.0499, | |
| "step": 225600 | |
| }, | |
| { | |
| "epoch": 27.552515876893015, | |
| "eval_loss": 0.9754048585891724, | |
| "eval_runtime": 113.9041, | |
| "eval_samples_per_second": 255.583, | |
| "eval_steps_per_second": 7.989, | |
| "step": 225600 | |
| }, | |
| { | |
| "epoch": 27.747923790913532, | |
| "grad_norm": 1.1257084608078003, | |
| "learning_rate": 2.8600446849493812e-06, | |
| "loss": 1.0466, | |
| "step": 227200 | |
| }, | |
| { | |
| "epoch": 27.747923790913532, | |
| "eval_loss": 0.9729560017585754, | |
| "eval_runtime": 114.0446, | |
| "eval_samples_per_second": 255.269, | |
| "eval_steps_per_second": 7.979, | |
| "step": 227200 | |
| }, | |
| { | |
| "epoch": 27.94333170493405, | |
| "grad_norm": 1.0175942182540894, | |
| "learning_rate": 2.3871558670248374e-06, | |
| "loss": 1.0451, | |
| "step": 228800 | |
| }, | |
| { | |
| "epoch": 27.94333170493405, | |
| "eval_loss": 0.9726097583770752, | |
| "eval_runtime": 113.8136, | |
| "eval_samples_per_second": 255.787, | |
| "eval_steps_per_second": 7.996, | |
| "step": 228800 | |
| }, | |
| { | |
| "epoch": 28.138739618954567, | |
| "grad_norm": 1.0665676593780518, | |
| "learning_rate": 1.9565040782119183e-06, | |
| "loss": 1.046, | |
| "step": 230400 | |
| }, | |
| { | |
| "epoch": 28.138739618954567, | |
| "eval_loss": 0.9738107919692993, | |
| "eval_runtime": 113.6919, | |
| "eval_samples_per_second": 256.06, | |
| "eval_steps_per_second": 8.004, | |
| "step": 230400 | |
| }, | |
| { | |
| "epoch": 28.334147532975084, | |
| "grad_norm": 0.9979843497276306, | |
| "learning_rate": 1.5682756613254578e-06, | |
| "loss": 1.0461, | |
| "step": 232000 | |
| }, | |
| { | |
| "epoch": 28.334147532975084, | |
| "eval_loss": 0.9727683663368225, | |
| "eval_runtime": 113.5319, | |
| "eval_samples_per_second": 256.421, | |
| "eval_steps_per_second": 8.015, | |
| "step": 232000 | |
| }, | |
| { | |
| "epoch": 28.5295554469956, | |
| "grad_norm": 1.0315582752227783, | |
| "learning_rate": 1.2226386026083835e-06, | |
| "loss": 1.044, | |
| "step": 233600 | |
| }, | |
| { | |
| "epoch": 28.5295554469956, | |
| "eval_loss": 0.9729660749435425, | |
| "eval_runtime": 113.6603, | |
| "eval_samples_per_second": 256.132, | |
| "eval_steps_per_second": 8.006, | |
| "step": 233600 | |
| }, | |
| { | |
| "epoch": 28.724963361016123, | |
| "grad_norm": 1.0685278177261353, | |
| "learning_rate": 9.19742459044104e-07, | |
| "loss": 1.0445, | |
| "step": 235200 | |
| }, | |
| { | |
| "epoch": 28.724963361016123, | |
| "eval_loss": 0.9717814326286316, | |
| "eval_runtime": 113.7616, | |
| "eval_samples_per_second": 255.904, | |
| "eval_steps_per_second": 7.999, | |
| "step": 235200 | |
| }, | |
| { | |
| "epoch": 28.92037127503664, | |
| "grad_norm": 1.077459454536438, | |
| "learning_rate": 6.597182936433189e-07, | |
| "loss": 1.0412, | |
| "step": 236800 | |
| }, | |
| { | |
| "epoch": 28.92037127503664, | |
| "eval_loss": 0.9729886054992676, | |
| "eval_runtime": 113.5403, | |
| "eval_samples_per_second": 256.402, | |
| "eval_steps_per_second": 8.015, | |
| "step": 236800 | |
| }, | |
| { | |
| "epoch": 29.115779189057157, | |
| "grad_norm": 1.0783263444900513, | |
| "learning_rate": 4.426786187330612e-07, | |
| "loss": 1.042, | |
| "step": 238400 | |
| }, | |
| { | |
| "epoch": 29.115779189057157, | |
| "eval_loss": 0.9737541675567627, | |
| "eval_runtime": 113.3216, | |
| "eval_samples_per_second": 256.897, | |
| "eval_steps_per_second": 8.03, | |
| "step": 238400 | |
| }, | |
| { | |
| "epoch": 29.311187103077675, | |
| "grad_norm": 1.0996705293655396, | |
| "learning_rate": 2.6871734727274e-07, | |
| "loss": 1.04, | |
| "step": 240000 | |
| }, | |
| { | |
| "epoch": 29.311187103077675, | |
| "eval_loss": 0.9717140197753906, | |
| "eval_runtime": 113.3888, | |
| "eval_samples_per_second": 256.745, | |
| "eval_steps_per_second": 8.025, | |
| "step": 240000 | |
| }, | |
| { | |
| "epoch": 29.506595017098192, | |
| "grad_norm": 1.12758207321167, | |
| "learning_rate": 1.3790975221799062e-07, | |
| "loss": 1.0402, | |
| "step": 241600 | |
| }, | |
| { | |
| "epoch": 29.506595017098192, | |
| "eval_loss": 0.9729249477386475, | |
| "eval_runtime": 113.1761, | |
| "eval_samples_per_second": 257.227, | |
| "eval_steps_per_second": 8.041, | |
| "step": 241600 | |
| }, | |
| { | |
| "epoch": 29.70200293111871, | |
| "grad_norm": 1.1545348167419434, | |
| "learning_rate": 5.03124339501504e-08, | |
| "loss": 1.0398, | |
| "step": 243200 | |
| }, | |
| { | |
| "epoch": 29.70200293111871, | |
| "eval_loss": 0.9734637141227722, | |
| "eval_runtime": 113.3684, | |
| "eval_samples_per_second": 256.791, | |
| "eval_steps_per_second": 8.027, | |
| "step": 243200 | |
| }, | |
| { | |
| "epoch": 29.897410845139227, | |
| "grad_norm": 1.0583094358444214, | |
| "learning_rate": 5.963295785271772e-09, | |
| "loss": 1.038, | |
| "step": 244800 | |
| }, | |
| { | |
| "epoch": 29.897410845139227, | |
| "eval_loss": 0.9716248512268066, | |
| "eval_runtime": 113.626, | |
| "eval_samples_per_second": 256.209, | |
| "eval_steps_per_second": 8.009, | |
| "step": 244800 | |
| } | |
| ], | |
| "logging_steps": 1600, | |
| "max_steps": 245640, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 30, | |
| "save_steps": 1600, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 7.783639331402416e+18, | |
| "train_batch_size": 32, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |