{
  "best_metric": 0.03188331797719002,
  "best_model_checkpoint": "saves/psy-course/MentaLLaMA-chat-7B/train/fold6/checkpoint-1900",
  "epoch": 4.995305164319249,
  "eval_steps": 50,
  "global_step": 3325,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.015023474178403756,
      "grad_norm": 1.6643978357315063,
      "learning_rate": 3.003003003003003e-06,
      "loss": 1.6617,
      "step": 10
    },
    {
      "epoch": 0.03004694835680751,
      "grad_norm": 1.970920443534851,
      "learning_rate": 6.006006006006006e-06,
      "loss": 1.7717,
      "step": 20
    },
    {
      "epoch": 0.04507042253521127,
      "grad_norm": 2.2089600563049316,
      "learning_rate": 9.00900900900901e-06,
      "loss": 1.6755,
      "step": 30
    },
    {
      "epoch": 0.06009389671361502,
      "grad_norm": 3.677905797958374,
      "learning_rate": 1.2012012012012012e-05,
      "loss": 1.3133,
      "step": 40
    },
    {
      "epoch": 0.07511737089201878,
      "grad_norm": 1.0802251100540161,
      "learning_rate": 1.5015015015015016e-05,
      "loss": 0.8292,
      "step": 50
    },
    {
      "epoch": 0.07511737089201878,
      "eval_loss": 0.6473308205604553,
      "eval_runtime": 207.0843,
      "eval_samples_per_second": 5.717,
      "eval_steps_per_second": 5.717,
      "step": 50
    },
    {
      "epoch": 0.09014084507042254,
      "grad_norm": 0.8090672492980957,
      "learning_rate": 1.801801801801802e-05,
      "loss": 0.6816,
      "step": 60
    },
    {
      "epoch": 0.10516431924882629,
      "grad_norm": 0.7934019565582275,
      "learning_rate": 2.102102102102102e-05,
      "loss": 0.4595,
      "step": 70
    },
    {
      "epoch": 0.12018779342723004,
      "grad_norm": 0.7469967007637024,
      "learning_rate": 2.4024024024024024e-05,
      "loss": 0.2868,
      "step": 80
    },
    {
      "epoch": 0.1352112676056338,
      "grad_norm": 0.6989622712135315,
      "learning_rate": 2.702702702702703e-05,
      "loss": 0.1691,
      "step": 90
    },
    {
      "epoch": 0.15023474178403756,
      "grad_norm": 0.714993417263031,
      "learning_rate": 3.0030030030030033e-05,
      "loss": 0.1595,
      "step": 100
    },
    {
      "epoch": 0.15023474178403756,
      "eval_loss": 0.11690635979175568,
      "eval_runtime": 208.0685,
      "eval_samples_per_second": 5.69,
      "eval_steps_per_second": 5.69,
      "step": 100
    },
    {
      "epoch": 0.1652582159624413,
      "grad_norm": 0.6915186643600464,
      "learning_rate": 3.3033033033033035e-05,
      "loss": 0.1358,
      "step": 110
    },
    {
      "epoch": 0.18028169014084508,
      "grad_norm": 0.7759954333305359,
      "learning_rate": 3.603603603603604e-05,
      "loss": 0.1063,
      "step": 120
    },
    {
      "epoch": 0.19530516431924883,
      "grad_norm": 0.8066917061805725,
      "learning_rate": 3.903903903903904e-05,
      "loss": 0.0996,
      "step": 130
    },
    {
      "epoch": 0.21032863849765257,
      "grad_norm": 0.4547995924949646,
      "learning_rate": 4.204204204204204e-05,
      "loss": 0.1047,
      "step": 140
    },
    {
      "epoch": 0.22535211267605634,
      "grad_norm": 0.7537536025047302,
      "learning_rate": 4.5045045045045046e-05,
      "loss": 0.0933,
      "step": 150
    },
    {
      "epoch": 0.22535211267605634,
      "eval_loss": 0.07271095365285873,
      "eval_runtime": 207.977,
      "eval_samples_per_second": 5.693,
      "eval_steps_per_second": 5.693,
      "step": 150
    },
    {
      "epoch": 0.2403755868544601,
      "grad_norm": 0.652043879032135,
      "learning_rate": 4.804804804804805e-05,
      "loss": 0.0726,
      "step": 160
    },
    {
      "epoch": 0.25539906103286386,
      "grad_norm": 0.7913267016410828,
      "learning_rate": 5.105105105105106e-05,
      "loss": 0.0503,
      "step": 170
    },
    {
      "epoch": 0.2704225352112676,
      "grad_norm": 0.6111043095588684,
      "learning_rate": 5.405405405405406e-05,
      "loss": 0.0682,
      "step": 180
    },
    {
      "epoch": 0.28544600938967135,
      "grad_norm": 0.6898893713951111,
      "learning_rate": 5.705705705705706e-05,
      "loss": 0.0651,
      "step": 190
    },
    {
      "epoch": 0.3004694835680751,
      "grad_norm": 0.5223749876022339,
      "learning_rate": 6.0060060060060066e-05,
      "loss": 0.0512,
      "step": 200
    },
    {
      "epoch": 0.3004694835680751,
      "eval_loss": 0.05808287858963013,
      "eval_runtime": 207.9535,
      "eval_samples_per_second": 5.694,
      "eval_steps_per_second": 5.694,
      "step": 200
    },
    {
      "epoch": 0.3154929577464789,
      "grad_norm": 0.7966965436935425,
      "learning_rate": 6.306306306306306e-05,
      "loss": 0.0562,
      "step": 210
    },
    {
      "epoch": 0.3305164319248826,
      "grad_norm": 0.4759034514427185,
      "learning_rate": 6.606606606606607e-05,
      "loss": 0.0591,
      "step": 220
    },
    {
      "epoch": 0.3455399061032864,
      "grad_norm": 0.421832412481308,
      "learning_rate": 6.906906906906907e-05,
      "loss": 0.0504,
      "step": 230
    },
    {
      "epoch": 0.36056338028169016,
      "grad_norm": 0.6044343113899231,
      "learning_rate": 7.207207207207208e-05,
      "loss": 0.0609,
      "step": 240
    },
    {
      "epoch": 0.3755868544600939,
      "grad_norm": 0.702263355255127,
      "learning_rate": 7.507507507507507e-05,
      "loss": 0.0619,
      "step": 250
    },
    {
      "epoch": 0.3755868544600939,
      "eval_loss": 0.04737601429224014,
      "eval_runtime": 207.9514,
      "eval_samples_per_second": 5.694,
      "eval_steps_per_second": 5.694,
      "step": 250
    },
    {
      "epoch": 0.39061032863849765,
      "grad_norm": 0.4086180627346039,
      "learning_rate": 7.807807807807808e-05,
      "loss": 0.0473,
      "step": 260
    },
    {
      "epoch": 0.4056338028169014,
      "grad_norm": 0.3927551209926605,
      "learning_rate": 8.108108108108109e-05,
      "loss": 0.0552,
      "step": 270
    },
    {
      "epoch": 0.42065727699530514,
      "grad_norm": 0.26502013206481934,
      "learning_rate": 8.408408408408409e-05,
      "loss": 0.0567,
      "step": 280
    },
    {
      "epoch": 0.4356807511737089,
      "grad_norm": 0.8767386674880981,
      "learning_rate": 8.70870870870871e-05,
      "loss": 0.0572,
      "step": 290
    },
    {
      "epoch": 0.4507042253521127,
      "grad_norm": 0.4874660074710846,
      "learning_rate": 9.009009009009009e-05,
      "loss": 0.0395,
      "step": 300
    },
    {
      "epoch": 0.4507042253521127,
      "eval_loss": 0.04596845433115959,
      "eval_runtime": 207.5439,
      "eval_samples_per_second": 5.705,
      "eval_steps_per_second": 5.705,
      "step": 300
    },
    {
      "epoch": 0.46572769953051646,
      "grad_norm": 0.43807005882263184,
      "learning_rate": 9.30930930930931e-05,
      "loss": 0.0444,
      "step": 310
    },
    {
      "epoch": 0.4807511737089202,
      "grad_norm": 0.7260940074920654,
      "learning_rate": 9.60960960960961e-05,
      "loss": 0.063,
      "step": 320
    },
    {
      "epoch": 0.49577464788732395,
      "grad_norm": 0.40789633989334106,
      "learning_rate": 9.90990990990991e-05,
      "loss": 0.0497,
      "step": 330
    },
    {
      "epoch": 0.5107981220657277,
      "grad_norm": 0.5078976154327393,
      "learning_rate": 9.999864944989638e-05,
      "loss": 0.058,
      "step": 340
    },
    {
      "epoch": 0.5258215962441315,
      "grad_norm": 0.3737334907054901,
      "learning_rate": 9.999203468625017e-05,
      "loss": 0.0476,
      "step": 350
    },
    {
      "epoch": 0.5258215962441315,
      "eval_loss": 0.045401681214571,
      "eval_runtime": 207.0191,
      "eval_samples_per_second": 5.719,
      "eval_steps_per_second": 5.719,
      "step": 350
    },
    {
      "epoch": 0.5408450704225352,
      "grad_norm": 0.5752517580986023,
      "learning_rate": 9.997990837719421e-05,
      "loss": 0.055,
      "step": 360
    },
    {
      "epoch": 0.5558685446009389,
      "grad_norm": 0.28732818365097046,
      "learning_rate": 9.996227185963554e-05,
      "loss": 0.0412,
      "step": 370
    },
    {
      "epoch": 0.5708920187793427,
      "grad_norm": 0.41930803656578064,
      "learning_rate": 9.993912707797329e-05,
      "loss": 0.0448,
      "step": 380
    },
    {
      "epoch": 0.5859154929577465,
      "grad_norm": 0.35693687200546265,
      "learning_rate": 9.99104765838842e-05,
      "loss": 0.0423,
      "step": 390
    },
    {
      "epoch": 0.6009389671361502,
      "grad_norm": 0.11278364062309265,
      "learning_rate": 9.987632353604151e-05,
      "loss": 0.0444,
      "step": 400
    },
    {
      "epoch": 0.6009389671361502,
      "eval_loss": 0.0407099612057209,
      "eval_runtime": 207.012,
      "eval_samples_per_second": 5.719,
      "eval_steps_per_second": 5.719,
      "step": 400
    },
    {
      "epoch": 0.615962441314554,
      "grad_norm": 0.41261032223701477,
      "learning_rate": 9.98366716997665e-05,
      "loss": 0.0478,
      "step": 410
    },
    {
      "epoch": 0.6309859154929578,
      "grad_norm": 0.4349944293498993,
      "learning_rate": 9.979152544661354e-05,
      "loss": 0.0381,
      "step": 420
    },
    {
      "epoch": 0.6460093896713615,
      "grad_norm": 0.609887957572937,
      "learning_rate": 9.974088975388802e-05,
      "loss": 0.0435,
      "step": 430
    },
    {
      "epoch": 0.6610328638497652,
      "grad_norm": 0.37665075063705444,
      "learning_rate": 9.968477020409766e-05,
      "loss": 0.0378,
      "step": 440
    },
    {
      "epoch": 0.676056338028169,
      "grad_norm": 0.30656006932258606,
      "learning_rate": 9.962317298433705e-05,
      "loss": 0.0543,
      "step": 450
    },
    {
      "epoch": 0.676056338028169,
      "eval_loss": 0.042538370937108994,
      "eval_runtime": 207.26,
      "eval_samples_per_second": 5.713,
      "eval_steps_per_second": 5.713,
      "step": 450
    },
    {
      "epoch": 0.6910798122065728,
      "grad_norm": 0.2620968520641327,
      "learning_rate": 9.955610488560551e-05,
      "loss": 0.0529,
      "step": 460
    },
    {
      "epoch": 0.7061032863849765,
      "grad_norm": 0.4962412118911743,
      "learning_rate": 9.948357330205842e-05,
      "loss": 0.0488,
      "step": 470
    },
    {
      "epoch": 0.7211267605633803,
      "grad_norm": 0.11893105506896973,
      "learning_rate": 9.940558623019201e-05,
      "loss": 0.0282,
      "step": 480
    },
    {
      "epoch": 0.7361502347417841,
      "grad_norm": 0.24164587259292603,
      "learning_rate": 9.932215226796172e-05,
      "loss": 0.0377,
      "step": 490
    },
    {
      "epoch": 0.7511737089201878,
      "grad_norm": 0.23037177324295044,
      "learning_rate": 9.923328061383435e-05,
      "loss": 0.0454,
      "step": 500
    },
    {
      "epoch": 0.7511737089201878,
      "eval_loss": 0.0371524840593338,
      "eval_runtime": 207.3218,
      "eval_samples_per_second": 5.711,
      "eval_steps_per_second": 5.711,
      "step": 500
    },
    {
      "epoch": 0.7661971830985915,
      "grad_norm": 0.1442870795726776,
      "learning_rate": 9.913898106577393e-05,
      "loss": 0.0382,
      "step": 510
    },
    {
      "epoch": 0.7812206572769953,
      "grad_norm": 0.2532236576080322,
      "learning_rate": 9.903926402016153e-05,
      "loss": 0.0344,
      "step": 520
    },
    {
      "epoch": 0.7962441314553991,
      "grad_norm": 0.14402006566524506,
      "learning_rate": 9.893414047064897e-05,
      "loss": 0.0398,
      "step": 530
    },
    {
      "epoch": 0.8112676056338028,
      "grad_norm": 0.2404378205537796,
      "learning_rate": 9.88236220069469e-05,
      "loss": 0.0381,
      "step": 540
    },
    {
      "epoch": 0.8262910798122066,
      "grad_norm": 0.27102166414260864,
      "learning_rate": 9.870772081354705e-05,
      "loss": 0.0562,
      "step": 550
    },
    {
      "epoch": 0.8262910798122066,
      "eval_loss": 0.03766689449548721,
      "eval_runtime": 207.5601,
      "eval_samples_per_second": 5.704,
      "eval_steps_per_second": 5.704,
      "step": 550
    },
    {
      "epoch": 0.8413145539906103,
      "grad_norm": 0.2679292857646942,
      "learning_rate": 9.858644966837878e-05,
      "loss": 0.0406,
      "step": 560
    },
    {
      "epoch": 0.856338028169014,
      "grad_norm": 0.0837322548031807,
      "learning_rate": 9.845982194140051e-05,
      "loss": 0.0381,
      "step": 570
    },
    {
      "epoch": 0.8713615023474178,
      "grad_norm": 0.1658477932214737,
      "learning_rate": 9.832785159312559e-05,
      "loss": 0.0404,
      "step": 580
    },
    {
      "epoch": 0.8863849765258216,
      "grad_norm": 0.2504975199699402,
      "learning_rate": 9.819055317308317e-05,
      "loss": 0.0365,
      "step": 590
    },
    {
      "epoch": 0.9014084507042254,
      "grad_norm": 0.24881432950496674,
      "learning_rate": 9.804794181821422e-05,
      "loss": 0.0336,
      "step": 600
    },
    {
      "epoch": 0.9014084507042254,
      "eval_loss": 0.036089226603507996,
      "eval_runtime": 208.6413,
      "eval_samples_per_second": 5.675,
      "eval_steps_per_second": 5.675,
      "step": 600
    },
    {
      "epoch": 0.9164319248826291,
      "grad_norm": 0.20400799810886383,
      "learning_rate": 9.790003325120261e-05,
      "loss": 0.0375,
      "step": 610
    },
    {
      "epoch": 0.9314553990610329,
      "grad_norm": 0.4139688313007355,
      "learning_rate": 9.774684377874178e-05,
      "loss": 0.0466,
      "step": 620
    },
    {
      "epoch": 0.9464788732394366,
      "grad_norm": 0.302133172750473,
      "learning_rate": 9.758839028973692e-05,
      "loss": 0.0308,
      "step": 630
    },
    {
      "epoch": 0.9615023474178404,
      "grad_norm": 0.26886606216430664,
      "learning_rate": 9.742469025344298e-05,
      "loss": 0.0416,
      "step": 640
    },
    {
      "epoch": 0.9765258215962441,
      "grad_norm": 0.22003549337387085,
      "learning_rate": 9.725576171753874e-05,
      "loss": 0.0494,
      "step": 650
    },
    {
      "epoch": 0.9765258215962441,
      "eval_loss": 0.03678149729967117,
      "eval_runtime": 209.7916,
      "eval_samples_per_second": 5.644,
      "eval_steps_per_second": 5.644,
      "step": 650
    },
    {
      "epoch": 0.9915492957746479,
      "grad_norm": 0.686739981174469,
      "learning_rate": 9.708162330613708e-05,
      "loss": 0.0367,
      "step": 660
    },
    {
      "epoch": 1.0065727699530516,
      "grad_norm": 0.1825464963912964,
      "learning_rate": 9.690229421773167e-05,
      "loss": 0.0369,
      "step": 670
    },
    {
      "epoch": 1.0215962441314554,
      "grad_norm": 0.442874014377594,
      "learning_rate": 9.67177942230804e-05,
      "loss": 0.0312,
      "step": 680
    },
    {
      "epoch": 1.036619718309859,
      "grad_norm": 0.45542624592781067,
      "learning_rate": 9.652814366302568e-05,
      "loss": 0.0392,
      "step": 690
    },
    {
      "epoch": 1.051643192488263,
      "grad_norm": 0.16704720258712769,
      "learning_rate": 9.633336344625185e-05,
      "loss": 0.0354,
      "step": 700
    },
    {
      "epoch": 1.051643192488263,
      "eval_loss": 0.0386400930583477,
      "eval_runtime": 210.8244,
      "eval_samples_per_second": 5.616,
      "eval_steps_per_second": 5.616,
      "step": 700
    },
    {
      "epoch": 1.0666666666666667,
      "grad_norm": 0.2750037610530853,
      "learning_rate": 9.61334750469801e-05,
      "loss": 0.0282,
      "step": 710
    },
    {
      "epoch": 1.0816901408450703,
      "grad_norm": 0.5251227021217346,
      "learning_rate": 9.592850050260089e-05,
      "loss": 0.0349,
      "step": 720
    },
    {
      "epoch": 1.0967136150234742,
      "grad_norm": 0.13910022377967834,
      "learning_rate": 9.571846241124446e-05,
      "loss": 0.0291,
      "step": 730
    },
    {
      "epoch": 1.1117370892018779,
      "grad_norm": 0.17034609615802765,
      "learning_rate": 9.55033839292893e-05,
      "loss": 0.0257,
      "step": 740
    },
    {
      "epoch": 1.1267605633802817,
      "grad_norm": 0.4655674397945404,
      "learning_rate": 9.52832887688093e-05,
      "loss": 0.029,
      "step": 750
    },
    {
      "epoch": 1.1267605633802817,
      "eval_loss": 0.037642158567905426,
      "eval_runtime": 212.6123,
      "eval_samples_per_second": 5.569,
      "eval_steps_per_second": 5.569,
      "step": 750
    },
    {
      "epoch": 1.1417840375586854,
      "grad_norm": 0.33910053968429565,
      "learning_rate": 9.50582011949595e-05,
      "loss": 0.0329,
      "step": 760
    },
    {
      "epoch": 1.1568075117370893,
      "grad_norm": 0.1620519608259201,
      "learning_rate": 9.482814602330084e-05,
      "loss": 0.0328,
      "step": 770
    },
    {
      "epoch": 1.171830985915493,
      "grad_norm": 0.3181590735912323,
      "learning_rate": 9.459314861706435e-05,
      "loss": 0.0363,
      "step": 780
    },
    {
      "epoch": 1.1868544600938966,
      "grad_norm": 0.3142762780189514,
      "learning_rate": 9.435323488435488e-05,
      "loss": 0.0279,
      "step": 790
    },
    {
      "epoch": 1.2018779342723005,
      "grad_norm": 0.34480199217796326,
      "learning_rate": 9.410843127529473e-05,
      "loss": 0.0301,
      "step": 800
    },
    {
      "epoch": 1.2018779342723005,
      "eval_loss": 0.03519855812191963,
      "eval_runtime": 213.9318,
      "eval_samples_per_second": 5.534,
      "eval_steps_per_second": 5.534,
      "step": 800
    },
    {
      "epoch": 1.2169014084507042,
      "grad_norm": 0.3214387893676758,
      "learning_rate": 9.385876477910765e-05,
      "loss": 0.0317,
      "step": 810
    },
    {
      "epoch": 1.231924882629108,
      "grad_norm": 0.20507577061653137,
      "learning_rate": 9.360426292114314e-05,
      "loss": 0.0291,
      "step": 820
    },
    {
      "epoch": 1.2469483568075117,
      "grad_norm": 0.2585867941379547,
      "learning_rate": 9.334495375984212e-05,
      "loss": 0.0279,
      "step": 830
    },
    {
      "epoch": 1.2619718309859156,
      "grad_norm": 0.21939821541309357,
      "learning_rate": 9.30808658836432e-05,
      "loss": 0.0293,
      "step": 840
    },
    {
      "epoch": 1.2769953051643192,
      "grad_norm": 0.42434459924697876,
      "learning_rate": 9.281202840783108e-05,
      "loss": 0.0321,
      "step": 850
    },
    {
      "epoch": 1.2769953051643192,
      "eval_loss": 0.034056928008794785,
      "eval_runtime": 215.0317,
      "eval_samples_per_second": 5.506,
      "eval_steps_per_second": 5.506,
      "step": 850
    },
    {
      "epoch": 1.292018779342723,
      "grad_norm": 0.22999003529548645,
      "learning_rate": 9.253847097132655e-05,
      "loss": 0.0243,
      "step": 860
    },
    {
      "epoch": 1.3070422535211268,
      "grad_norm": 0.1908729076385498,
      "learning_rate": 9.226022373341882e-05,
      "loss": 0.0247,
      "step": 870
    },
    {
      "epoch": 1.3220657276995305,
      "grad_norm": 0.16463080048561096,
      "learning_rate": 9.19773173704406e-05,
      "loss": 0.0245,
      "step": 880
    },
    {
      "epoch": 1.3370892018779343,
      "grad_norm": 0.5442132949829102,
      "learning_rate": 9.168978307238594e-05,
      "loss": 0.032,
      "step": 890
    },
    {
      "epoch": 1.352112676056338,
      "grad_norm": 0.11218319833278656,
      "learning_rate": 9.13976525394717e-05,
      "loss": 0.0271,
      "step": 900
    },
    {
      "epoch": 1.352112676056338,
      "eval_loss": 0.03430046886205673,
      "eval_runtime": 215.5861,
      "eval_samples_per_second": 5.492,
      "eval_steps_per_second": 5.492,
      "step": 900
    },
    {
      "epoch": 1.3671361502347419,
      "grad_norm": 0.13387706875801086,
      "learning_rate": 9.110095797864263e-05,
      "loss": 0.0224,
      "step": 910
    },
    {
      "epoch": 1.3821596244131455,
      "grad_norm": 0.20326955616474152,
      "learning_rate": 9.079973210002051e-05,
      "loss": 0.0318,
      "step": 920
    },
    {
      "epoch": 1.3971830985915492,
      "grad_norm": 0.2666754424571991,
      "learning_rate": 9.049400811329807e-05,
      "loss": 0.0322,
      "step": 930
    },
    {
      "epoch": 1.412206572769953,
      "grad_norm": 0.29467517137527466,
      "learning_rate": 9.01838197240775e-05,
      "loss": 0.0305,
      "step": 940
    },
    {
      "epoch": 1.4272300469483568,
      "grad_norm": 0.27433130145072937,
      "learning_rate": 8.986920113015461e-05,
      "loss": 0.0351,
      "step": 950
    },
    {
      "epoch": 1.4272300469483568,
      "eval_loss": 0.03298981115221977,
      "eval_runtime": 216.0148,
      "eval_samples_per_second": 5.481,
      "eval_steps_per_second": 5.481,
      "step": 950
    },
    {
      "epoch": 1.4422535211267606,
      "grad_norm": 0.23651893436908722,
      "learning_rate": 8.955018701774846e-05,
      "loss": 0.0262,
      "step": 960
    },
    {
      "epoch": 1.4572769953051643,
      "grad_norm": 0.2778637111186981,
      "learning_rate": 8.922681255767731e-05,
      "loss": 0.0347,
      "step": 970
    },
    {
      "epoch": 1.4723004694835682,
      "grad_norm": 0.18652495741844177,
      "learning_rate": 8.889911340148112e-05,
      "loss": 0.0308,
      "step": 980
    },
    {
      "epoch": 1.4873239436619718,
      "grad_norm": 0.17595553398132324,
      "learning_rate": 8.856712567749095e-05,
      "loss": 0.0278,
      "step": 990
    },
    {
      "epoch": 1.5023474178403755,
      "grad_norm": 0.4016423523426056,
      "learning_rate": 8.82308859868459e-05,
      "loss": 0.0244,
      "step": 1000
    },
    {
      "epoch": 1.5023474178403755,
      "eval_loss": 0.033029552549123764,
      "eval_runtime": 217.6657,
      "eval_samples_per_second": 5.44,
      "eval_steps_per_second": 5.44,
      "step": 1000
    },
    {
      "epoch": 1.5173708920187794,
      "grad_norm": 0.2475142925977707,
      "learning_rate": 8.789043139945795e-05,
      "loss": 0.0298,
      "step": 1010
    },
    {
      "epoch": 1.532394366197183,
      "grad_norm": 0.2059515416622162,
      "learning_rate": 8.754579944992491e-05,
      "loss": 0.036,
      "step": 1020
    },
    {
      "epoch": 1.5474178403755867,
      "grad_norm": 0.16493824124336243,
      "learning_rate": 8.719702813339248e-05,
      "loss": 0.0288,
      "step": 1030
    },
    {
      "epoch": 1.5624413145539906,
      "grad_norm": 0.20951515436172485,
      "learning_rate": 8.684415590136518e-05,
      "loss": 0.0277,
      "step": 1040
    },
    {
      "epoch": 1.5774647887323945,
      "grad_norm": 0.0918218195438385,
      "learning_rate": 8.648722165746722e-05,
      "loss": 0.0277,
      "step": 1050
    },
    {
      "epoch": 1.5774647887323945,
      "eval_loss": 0.034053802490234375,
      "eval_runtime": 219.1262,
      "eval_samples_per_second": 5.403,
      "eval_steps_per_second": 5.403,
      "step": 1050
    },
    {
      "epoch": 1.5924882629107981,
      "grad_norm": 0.09690333157777786,
      "learning_rate": 8.61262647531534e-05,
      "loss": 0.0251,
      "step": 1060
    },
    {
      "epoch": 1.6075117370892018,
      "grad_norm": 0.19759806990623474,
      "learning_rate": 8.576132498337068e-05,
      "loss": 0.0362,
      "step": 1070
    },
    {
      "epoch": 1.6225352112676057,
      "grad_norm": 0.2693615257740021,
      "learning_rate": 8.539244258217088e-05,
      "loss": 0.0319,
      "step": 1080
    },
    {
      "epoch": 1.6375586854460094,
      "grad_norm": 0.20964917540550232,
      "learning_rate": 8.501965821827485e-05,
      "loss": 0.032,
      "step": 1090
    },
    {
      "epoch": 1.652582159624413,
      "grad_norm": 0.2406897395849228,
      "learning_rate": 8.464301299058892e-05,
      "loss": 0.0231,
      "step": 1100
    },
    {
      "epoch": 1.652582159624413,
      "eval_loss": 0.034014929085969925,
      "eval_runtime": 219.9896,
      "eval_samples_per_second": 5.382,
      "eval_steps_per_second": 5.382,
      "step": 1100
    },
    {
      "epoch": 1.667605633802817,
      "grad_norm": 0.25754356384277344,
      "learning_rate": 8.426254842367374e-05,
      "loss": 0.0258,
      "step": 1110
    },
    {
      "epoch": 1.6826291079812208,
      "grad_norm": 0.2006315290927887,
      "learning_rate": 8.387830646316623e-05,
      "loss": 0.0334,
      "step": 1120
    },
    {
      "epoch": 1.6976525821596244,
      "grad_norm": 0.24149750173091888,
      "learning_rate": 8.349032947115525e-05,
      "loss": 0.0249,
      "step": 1130
    },
    {
      "epoch": 1.712676056338028,
      "grad_norm": 0.30418938398361206,
      "learning_rate": 8.309866022151107e-05,
      "loss": 0.0382,
      "step": 1140
    },
    {
      "epoch": 1.727699530516432,
      "grad_norm": 0.18970516324043274,
      "learning_rate": 8.270334189516983e-05,
      "loss": 0.0261,
      "step": 1150
    },
    {
      "epoch": 1.727699530516432,
      "eval_loss": 0.03269151970744133,
      "eval_runtime": 220.3747,
      "eval_samples_per_second": 5.373,
      "eval_steps_per_second": 5.373,
      "step": 1150
    },
    {
      "epoch": 1.7427230046948357,
      "grad_norm": 0.11434226483106613,
      "learning_rate": 8.230441807537277e-05,
      "loss": 0.0393,
      "step": 1160
    },
    {
      "epoch": 1.7577464788732393,
      "grad_norm": 0.18536998331546783,
      "learning_rate": 8.190193274286122e-05,
      "loss": 0.026,
      "step": 1170
    },
    {
      "epoch": 1.7727699530516432,
      "grad_norm": 0.1809949427843094,
      "learning_rate": 8.149593027102789e-05,
      "loss": 0.0295,
      "step": 1180
    },
    {
      "epoch": 1.787793427230047,
      "grad_norm": 0.2668783664703369,
      "learning_rate": 8.108645542102469e-05,
      "loss": 0.0255,
      "step": 1190
    },
    {
      "epoch": 1.8028169014084507,
      "grad_norm": 0.19622312486171722,
      "learning_rate": 8.067355333682798e-05,
      "loss": 0.0297,
      "step": 1200
    },
    {
      "epoch": 1.8028169014084507,
      "eval_loss": 0.03482316806912422,
      "eval_runtime": 220.3204,
      "eval_samples_per_second": 5.374,
      "eval_steps_per_second": 5.374,
      "step": 1200
    },
    {
      "epoch": 1.8178403755868544,
      "grad_norm": 0.28984394669532776,
      "learning_rate": 8.025726954026138e-05,
      "loss": 0.0228,
      "step": 1210
    },
    {
      "epoch": 1.8328638497652583,
      "grad_norm": 0.2442556470632553,
      "learning_rate": 7.983764992597716e-05,
      "loss": 0.0272,
      "step": 1220
    },
    {
      "epoch": 1.847887323943662,
      "grad_norm": 0.33816081285476685,
      "learning_rate": 7.94147407563964e-05,
      "loss": 0.0296,
      "step": 1230
    },
    {
      "epoch": 1.8629107981220656,
      "grad_norm": 0.3621574938297272,
      "learning_rate": 7.89885886566086e-05,
      "loss": 0.0333,
      "step": 1240
    },
    {
      "epoch": 1.8779342723004695,
      "grad_norm": 0.27948886156082153,
      "learning_rate": 7.855924060923141e-05,
      "loss": 0.027,
      "step": 1250
    },
    {
      "epoch": 1.8779342723004695,
      "eval_loss": 0.03339933604001999,
      "eval_runtime": 220.8261,
      "eval_samples_per_second": 5.362,
      "eval_steps_per_second": 5.362,
      "step": 1250
    },
    {
      "epoch": 1.8929577464788734,
      "grad_norm": 0.19844475388526917,
      "learning_rate": 7.812674394923077e-05,
      "loss": 0.03,
      "step": 1260
    },
    {
      "epoch": 1.907981220657277,
      "grad_norm": 0.17690996825695038,
      "learning_rate": 7.769114635870231e-05,
      "loss": 0.0275,
      "step": 1270
    },
    {
      "epoch": 1.9230046948356807,
      "grad_norm": 0.26047441363334656,
      "learning_rate": 7.725249586161463e-05,
      "loss": 0.0294,
      "step": 1280
    },
    {
      "epoch": 1.9380281690140846,
      "grad_norm": 0.16973556578159332,
      "learning_rate": 7.68108408185145e-05,
      "loss": 0.031,
      "step": 1290
    },
    {
      "epoch": 1.9530516431924883,
      "grad_norm": 0.18663664162158966,
      "learning_rate": 7.636622992119536e-05,
      "loss": 0.0417,
      "step": 1300
    },
    {
      "epoch": 1.9530516431924883,
      "eval_loss": 0.03484388440847397,
      "eval_runtime": 221.5077,
      "eval_samples_per_second": 5.345,
      "eval_steps_per_second": 5.345,
      "step": 1300
    },
    {
      "epoch": 1.968075117370892,
      "grad_norm": 0.2905583083629608,
      "learning_rate": 7.591871218732902e-05,
      "loss": 0.0329,
      "step": 1310
    },
    {
      "epoch": 1.9830985915492958,
      "grad_norm": 0.18613983690738678,
      "learning_rate": 7.54683369550616e-05,
      "loss": 0.0364,
      "step": 1320
    },
    {
      "epoch": 1.9981220657276997,
      "grad_norm": 0.1276523619890213,
      "learning_rate": 7.501515387757404e-05,
      "loss": 0.0269,
      "step": 1330
    },
    {
      "epoch": 2.013145539906103,
      "grad_norm": 0.22376187145709991,
      "learning_rate": 7.455921291760796e-05,
      "loss": 0.0313,
      "step": 1340
    },
    {
      "epoch": 2.028169014084507,
      "grad_norm": 0.09134189784526825,
      "learning_rate": 7.410056434195725e-05,
      "loss": 0.0173,
      "step": 1350
    },
    {
      "epoch": 2.028169014084507,
      "eval_loss": 0.03278001397848129,
      "eval_runtime": 221.4183,
      "eval_samples_per_second": 5.347,
      "eval_steps_per_second": 5.347,
      "step": 1350
    },
    {
      "epoch": 2.043192488262911,
      "grad_norm": 0.13673311471939087,
      "learning_rate": 7.363925871592629e-05,
      "loss": 0.0205,
      "step": 1360
    },
    {
      "epoch": 2.0582159624413148,
      "grad_norm": 0.161691814661026,
      "learning_rate": 7.317534689775528e-05,
      "loss": 0.0201,
      "step": 1370
    },
    {
      "epoch": 2.073239436619718,
      "grad_norm": 0.1258237212896347,
      "learning_rate": 7.270888003301304e-05,
      "loss": 0.0159,
      "step": 1380
    },
    {
      "epoch": 2.088262910798122,
      "grad_norm": 0.29005277156829834,
      "learning_rate": 7.22399095489584e-05,
      "loss": 0.0222,
      "step": 1390
    },
    {
      "epoch": 2.103286384976526,
      "grad_norm": 0.11337249726057053,
      "learning_rate": 7.176848714887042e-05,
      "loss": 0.0207,
      "step": 1400
    },
    {
      "epoch": 2.103286384976526,
      "eval_loss": 0.03226166218519211,
      "eval_runtime": 221.8527,
      "eval_samples_per_second": 5.337,
      "eval_steps_per_second": 5.337,
      "step": 1400
    },
    {
      "epoch": 2.1183098591549294,
      "grad_norm": 0.16560769081115723,
      "learning_rate": 7.129466480634806e-05,
      "loss": 0.0225,
      "step": 1410
    },
    {
      "epoch": 2.1333333333333333,
      "grad_norm": 0.3505042493343353,
      "learning_rate": 7.081849475958042e-05,
      "loss": 0.0187,
      "step": 1420
    },
    {
      "epoch": 2.148356807511737,
      "grad_norm": 0.16660155355930328,
      "learning_rate": 7.034002950558723e-05,
      "loss": 0.0185,
      "step": 1430
    },
    {
      "epoch": 2.1633802816901406,
      "grad_norm": 0.13852401077747345,
      "learning_rate": 6.985932179443144e-05,
      "loss": 0.0192,
      "step": 1440
    },
    {
      "epoch": 2.1784037558685445,
      "grad_norm": 0.21416670083999634,
      "learning_rate": 6.937642462340342e-05,
      "loss": 0.0223,
      "step": 1450
    },
    {
      "epoch": 2.1784037558685445,
      "eval_loss": 0.03254189342260361,
      "eval_runtime": 222.3283,
      "eval_samples_per_second": 5.325,
      "eval_steps_per_second": 5.325,
      "step": 1450
    },
    {
      "epoch": 2.1934272300469484,
      "grad_norm": 0.1465296745300293,
      "learning_rate": 6.889139123117817e-05,
      "loss": 0.0191,
      "step": 1460
    },
    {
      "epoch": 2.2084507042253523,
      "grad_norm": 0.19770440459251404,
      "learning_rate": 6.840427509194575e-05,
      "loss": 0.0209,
      "step": 1470
    },
    {
      "epoch": 2.2234741784037557,
      "grad_norm": 0.22087684273719788,
      "learning_rate": 6.791512990951597e-05,
      "loss": 0.0149,
      "step": 1480
    },
    {
      "epoch": 2.2384976525821596,
      "grad_norm": 0.34373584389686584,
      "learning_rate": 6.74240096113975e-05,
      "loss": 0.0211,
      "step": 1490
    },
    {
      "epoch": 2.2535211267605635,
      "grad_norm": 0.12940266728401184,
      "learning_rate": 6.693096834285256e-05,
      "loss": 0.0107,
      "step": 1500
    },
    {
      "epoch": 2.2535211267605635,
      "eval_loss": 0.035872478038072586,
      "eval_runtime": 223.0953,
      "eval_samples_per_second": 5.307,
      "eval_steps_per_second": 5.307,
      "step": 1500
    },
    {
      "epoch": 2.2685446009389674,
      "grad_norm": 0.39470982551574707,
      "learning_rate": 6.643606046092732e-05,
      "loss": 0.0226,
      "step": 1510
    },
    {
      "epoch": 2.283568075117371,
      "grad_norm": 0.22834685444831848,
      "learning_rate": 6.593934052845929e-05,
      "loss": 0.0253,
      "step": 1520
    },
    {
      "epoch": 2.2985915492957747,
      "grad_norm": 0.41707947850227356,
      "learning_rate": 6.544086330806181e-05,
      "loss": 0.0205,
      "step": 1530
    },
    {
      "epoch": 2.3136150234741786,
      "grad_norm": 0.1707378327846527,
      "learning_rate": 6.494068375608646e-05,
      "loss": 0.0197,
      "step": 1540
    },
    {
      "epoch": 2.328638497652582,
      "grad_norm": 0.17165277898311615,
      "learning_rate": 6.443885701656432e-05,
      "loss": 0.0182,
      "step": 1550
    },
    {
      "epoch": 2.328638497652582,
      "eval_loss": 0.03318268060684204,
      "eval_runtime": 223.282,
      "eval_samples_per_second": 5.303,
      "eval_steps_per_second": 5.303,
      "step": 1550
    },
    {
      "epoch": 2.343661971830986,
      "grad_norm": 0.14184924960136414,
      "learning_rate": 6.393543841512632e-05,
      "loss": 0.02,
      "step": 1560
    },
    {
      "epoch": 2.35868544600939,
      "grad_norm": 0.31733861565589905,
      "learning_rate": 6.343048345290386e-05,
      "loss": 0.0144,
      "step": 1570
    },
    {
      "epoch": 2.3737089201877932,
      "grad_norm": 0.16232426464557648,
      "learning_rate": 6.292404780040961e-05,
      "loss": 0.023,
      "step": 1580
    },
    {
      "epoch": 2.388732394366197,
      "grad_norm": 0.18042533099651337,
      "learning_rate": 6.241618729140018e-05,
      "loss": 0.0157,
      "step": 1590
    },
    {
      "epoch": 2.403755868544601,
      "grad_norm": 0.4602644741535187,
      "learning_rate": 6.190695791672042e-05,
      "loss": 0.0187,
      "step": 1600
    },
    {
      "epoch": 2.403755868544601,
      "eval_loss": 0.032255951315164566,
      "eval_runtime": 222.5999,
      "eval_samples_per_second": 5.319,
      "eval_steps_per_second": 5.319,
      "step": 1600
    },
    {
      "epoch": 2.418779342723005,
      "grad_norm": 0.0981522649526596,
      "learning_rate": 6.139641581813052e-05,
      "loss": 0.0183,
      "step": 1610
    },
    {
      "epoch": 2.4338028169014083,
      "grad_norm": 0.1687208116054535,
      "learning_rate": 6.088461728211642e-05,
      "loss": 0.0177,
      "step": 1620
    },
    {
      "epoch": 2.448826291079812,
      "grad_norm": 0.19458502531051636,
      "learning_rate": 6.0371618733684474e-05,
      "loss": 0.0117,
      "step": 1630
    },
    {
      "epoch": 2.463849765258216,
      "grad_norm": 0.12449292838573456,
      "learning_rate": 5.9857476730140485e-05,
      "loss": 0.0175,
      "step": 1640
    },
    {
      "epoch": 2.4788732394366195,
      "grad_norm": 0.20214103162288666,
      "learning_rate": 5.9342247954854466e-05,
      "loss": 0.018,
      "step": 1650
    },
    {
      "epoch": 2.4788732394366195,
      "eval_loss": 0.03270378336310387,
      "eval_runtime": 221.3125,
      "eval_samples_per_second": 5.35,
      "eval_steps_per_second": 5.35,
      "step": 1650
    },
    {
      "epoch": 2.4938967136150234,
      "grad_norm": 0.5592941641807556,
      "learning_rate": 5.8825989211011335e-05,
      "loss": 0.022,
      "step": 1660
    },
    {
      "epoch": 2.5089201877934273,
      "grad_norm": 0.20930063724517822,
      "learning_rate": 5.830875741534852e-05,
      "loss": 0.0159,
      "step": 1670
    },
    {
      "epoch": 2.523943661971831,
      "grad_norm": 0.24804644286632538,
      "learning_rate": 5.7790609591880826e-05,
      "loss": 0.0239,
      "step": 1680
    },
    {
      "epoch": 2.5389671361502346,
      "grad_norm": 0.17634353041648865,
      "learning_rate": 5.727160286561386e-05,
      "loss": 0.0224,
      "step": 1690
    },
    {
      "epoch": 2.5539906103286385,
      "grad_norm": 0.23160183429718018,
      "learning_rate": 5.675179445624581e-05,
      "loss": 0.0205,
      "step": 1700
    },
    {
      "epoch": 2.5539906103286385,
      "eval_loss": 0.03496889770030975,
      "eval_runtime": 221.4535,
      "eval_samples_per_second": 5.346,
      "eval_steps_per_second": 5.346,
      "step": 1700
    },
    {
      "epoch": 2.5690140845070424,
      "grad_norm": 0.09332422912120819,
      "learning_rate": 5.62312416718593e-05,
      "loss": 0.0164,
      "step": 1710
    },
    {
      "epoch": 2.584037558685446,
      "grad_norm": 0.09970489144325256,
      "learning_rate": 5.5710001902603116e-05,
      "loss": 0.0257,
      "step": 1720
    },
    {
      "epoch": 2.5990610328638497,
      "grad_norm": 0.17599700391292572,
      "learning_rate": 5.5188132614365094e-05,
      "loss": 0.0198,
      "step": 1730
    },
    {
      "epoch": 2.6140845070422536,
      "grad_norm": 0.11823566257953644,
      "learning_rate": 5.4665691342436565e-05,
      "loss": 0.0152,
      "step": 1740
    },
    {
      "epoch": 2.629107981220657,
      "grad_norm": 0.31226423382759094,
      "learning_rate": 5.414273568516919e-05,
      "loss": 0.0182,
      "step": 1750
    },
    {
      "epoch": 2.629107981220657,
      "eval_loss": 0.032272376120090485,
      "eval_runtime": 221.7599,
      "eval_samples_per_second": 5.339,
      "eval_steps_per_second": 5.339,
      "step": 1750
    },
    {
      "epoch": 2.644131455399061,
      "grad_norm": 0.12536101043224335,
      "learning_rate": 5.361932329762481e-05,
      "loss": 0.0197,
      "step": 1760
    },
    {
      "epoch": 2.659154929577465,
      "grad_norm": 0.11700065433979034,
      "learning_rate": 5.309551188521914e-05,
      "loss": 0.0197,
      "step": 1770
    },
    {
      "epoch": 2.6741784037558687,
      "grad_norm": 0.30704525113105774,
      "learning_rate": 5.2571359197359704e-05,
      "loss": 0.0161,
      "step": 1780
    },
    {
      "epoch": 2.6892018779342726,
      "grad_norm": 0.06998223811388016,
      "learning_rate": 5.2046923021079175e-05,
      "loss": 0.0151,
      "step": 1790
    },
    {
      "epoch": 2.704225352112676,
      "grad_norm": 0.09223470091819763,
      "learning_rate": 5.1522261174664346e-05,
      "loss": 0.0202,
      "step": 1800
    },
    {
      "epoch": 2.704225352112676,
      "eval_loss": 0.032467469573020935,
      "eval_runtime": 221.8494,
      "eval_samples_per_second": 5.337,
      "eval_steps_per_second": 5.337,
      "step": 1800
    },
    {
      "epoch": 2.71924882629108,
      "grad_norm": 0.3536123037338257,
      "learning_rate": 5.0997431501281835e-05,
      "loss": 0.0262,
      "step": 1810
    },
    {
      "epoch": 2.7342723004694838,
      "grad_norm": 0.2920973300933838,
      "learning_rate": 5.0472491862600915e-05,
      "loss": 0.0202,
      "step": 1820
    },
    {
      "epoch": 2.749295774647887,
      "grad_norm": 0.23328785598278046,
      "learning_rate": 4.994750013241435e-05,
      "loss": 0.022,
      "step": 1830
    },
    {
      "epoch": 2.764319248826291,
      "grad_norm": 0.13965994119644165,
      "learning_rate": 4.9422514190257974e-05,
      "loss": 0.0201,
      "step": 1840
    },
    {
      "epoch": 2.779342723004695,
      "grad_norm": 0.12157478928565979,
      "learning_rate": 4.88975919150294e-05,
      "loss": 0.0218,
      "step": 1850
    },
    {
      "epoch": 2.779342723004695,
      "eval_loss": 0.03232384845614433,
      "eval_runtime": 221.306,
      "eval_samples_per_second": 5.35,
      "eval_steps_per_second": 5.35,
      "step": 1850
    },
    {
      "epoch": 2.7943661971830984,
      "grad_norm": 0.17367467284202576,
      "learning_rate": 4.83727911786071e-05,
      "loss": 0.022,
      "step": 1860
    },
    {
      "epoch": 2.8093896713615023,
      "grad_norm": 0.30343097448349,
      "learning_rate": 4.7848169839470145e-05,
      "loss": 0.0224,
      "step": 1870
    },
    {
      "epoch": 2.824413145539906,
      "grad_norm": 0.12888950109481812,
      "learning_rate": 4.7323785736319244e-05,
      "loss": 0.021,
      "step": 1880
    },
    {
      "epoch": 2.8394366197183096,
      "grad_norm": 0.15008972585201263,
      "learning_rate": 4.679969668170024e-05,
      "loss": 0.0232,
      "step": 1890
    },
    {
      "epoch": 2.8544600938967135,
      "grad_norm": 0.10453104227781296,
      "learning_rate": 4.627596045563031e-05,
      "loss": 0.0179,
      "step": 1900
    },
    {
      "epoch": 2.8544600938967135,
      "eval_loss": 0.03188331797719002,
      "eval_runtime": 221.3325,
      "eval_samples_per_second": 5.349,
      "eval_steps_per_second": 5.349,
      "step": 1900
    },
    {
      "epoch": 2.8694835680751174,
      "grad_norm": 0.20146887004375458,
      "learning_rate": 4.575263479922783e-05,
      "loss": 0.0147,
      "step": 1910
    },
    {
      "epoch": 2.8845070422535213,
      "grad_norm": 0.10789839178323746,
      "learning_rate": 4.522977740834651e-05,
      "loss": 0.0118,
      "step": 1920
    },
    {
      "epoch": 2.8995305164319247,
      "grad_norm": 0.1628926396369934,
      "learning_rate": 4.4707445927214456e-05,
      "loss": 0.0126,
      "step": 1930
    },
    {
      "epoch": 2.9145539906103286,
      "grad_norm": 0.14242199063301086,
      "learning_rate": 4.4185697942079115e-05,
      "loss": 0.0199,
      "step": 1940
    },
    {
      "epoch": 2.9295774647887325,
      "grad_norm": 0.15721061825752258,
      "learning_rate": 4.366459097485832e-05,
      "loss": 0.0213,
      "step": 1950
    },
    {
      "epoch": 2.9295774647887325,
      "eval_loss": 0.03299366310238838,
      "eval_runtime": 221.4895,
      "eval_samples_per_second": 5.346,
      "eval_steps_per_second": 5.346,
      "step": 1950
    },
    {
      "epoch": 2.9446009389671364,
      "grad_norm": 0.2670339047908783,
      "learning_rate": 4.314418247679866e-05,
      "loss": 0.0231,
      "step": 1960
    },
    {
      "epoch": 2.95962441314554,
      "grad_norm": 0.18777424097061157,
      "learning_rate": 4.26245298221416e-05,
      "loss": 0.0141,
      "step": 1970
    },
    {
      "epoch": 2.9746478873239437,
      "grad_norm": 0.08959437906742096,
      "learning_rate": 4.2105690301798014e-05,
      "loss": 0.0143,
      "step": 1980
    },
    {
      "epoch": 2.9896713615023476,
      "grad_norm": 0.23774972558021545,
      "learning_rate": 4.158772111703194e-05,
      "loss": 0.0167,
      "step": 1990
    },
    {
      "epoch": 3.004694835680751,
      "grad_norm": 0.025070570409297943,
      "learning_rate": 4.107067937315429e-05,
      "loss": 0.0104,
      "step": 2000
    },
    {
      "epoch": 3.004694835680751,
      "eval_loss": 0.03279737010598183,
      "eval_runtime": 222.0291,
      "eval_samples_per_second": 5.333,
      "eval_steps_per_second": 5.333,
      "step": 2000
    },
    {
      "epoch": 3.019718309859155,
      "grad_norm": 0.12388351559638977,
      "learning_rate": 4.055462207322698e-05,
      "loss": 0.01,
      "step": 2010
    },
    {
      "epoch": 3.034741784037559,
      "grad_norm": 0.024688435718417168,
      "learning_rate": 4.003960611177855e-05,
      "loss": 0.0101,
      "step": 2020
    },
    {
      "epoch": 3.0497652582159622,
      "grad_norm": 0.24434754252433777,
      "learning_rate": 3.952568826853152e-05,
      "loss": 0.0094,
      "step": 2030
    },
    {
      "epoch": 3.064788732394366,
      "grad_norm": 0.05700072646141052,
      "learning_rate": 3.901292520214256e-05,
      "loss": 0.0068,
      "step": 2040
    },
    {
      "epoch": 3.07981220657277,
      "grad_norm": 0.6166403889656067,
      "learning_rate": 3.850137344395598e-05,
      "loss": 0.0097,
      "step": 2050
    },
    {
      "epoch": 3.07981220657277,
      "eval_loss": 0.035948336124420166,
      "eval_runtime": 221.527,
      "eval_samples_per_second": 5.345,
      "eval_steps_per_second": 5.345,
      "step": 2050
    },
    {
      "epoch": 3.094835680751174,
      "grad_norm": 0.2868582308292389,
      "learning_rate": 3.799108939177118e-05,
      "loss": 0.0076,
      "step": 2060
    },
    {
      "epoch": 3.1098591549295773,
      "grad_norm": 0.201212540268898,
      "learning_rate": 3.7482129303624934e-05,
      "loss": 0.0148,
      "step": 2070
    },
    {
      "epoch": 3.124882629107981,
      "grad_norm": 0.512485921382904,
      "learning_rate": 3.697454929158901e-05,
      "loss": 0.0115,
      "step": 2080
    },
    {
      "epoch": 3.139906103286385,
      "grad_norm": 0.23441621661186218,
      "learning_rate": 3.6468405315583854e-05,
      "loss": 0.0147,
      "step": 2090
    },
    {
      "epoch": 3.1549295774647885,
      "grad_norm": 0.20293329656124115,
      "learning_rate": 3.59637531772092e-05,
      "loss": 0.0103,
      "step": 2100
    },
    {
      "epoch": 3.1549295774647885,
      "eval_loss": 0.0363353006541729,
      "eval_runtime": 221.0841,
      "eval_samples_per_second": 5.355,
      "eval_steps_per_second": 5.355,
      "step": 2100
    },
    {
      "epoch": 3.1699530516431924,
      "grad_norm": 0.2901355028152466,
      "learning_rate": 3.546064851359192e-05,
      "loss": 0.0082,
      "step": 2110
    },
    {
      "epoch": 3.1849765258215963,
      "grad_norm": 0.11046724766492844,
      "learning_rate": 3.495914679125212e-05,
      "loss": 0.0097,
      "step": 2120
    },
    {
      "epoch": 3.2,
      "grad_norm": 0.07721813023090363,
      "learning_rate": 3.445930329998819e-05,
      "loss": 0.0103,
      "step": 2130
    },
    {
      "epoch": 3.2150234741784036,
      "grad_norm": 0.26007962226867676,
      "learning_rate": 3.396117314678097e-05,
      "loss": 0.0142,
      "step": 2140
    },
    {
      "epoch": 3.2300469483568075,
      "grad_norm": 0.4068683087825775,
      "learning_rate": 3.3464811249718474e-05,
      "loss": 0.0131,
      "step": 2150
    },
    {
      "epoch": 3.2300469483568075,
      "eval_loss": 0.03587425872683525,
      "eval_runtime": 221.6748,
      "eval_samples_per_second": 5.341,
      "eval_steps_per_second": 5.341,
      "step": 2150
    },
    {
      "epoch": 3.2450704225352114,
      "grad_norm": 0.18533091247081757,
      "learning_rate": 3.297027233194114e-05,
      "loss": 0.0126,
      "step": 2160
    },
    {
      "epoch": 3.260093896713615,
      "grad_norm": 0.10798130184412003,
      "learning_rate": 3.2477610915608704e-05,
      "loss": 0.0081,
      "step": 2170
    },
    {
      "epoch": 3.2751173708920187,
      "grad_norm": 0.19456113874912262,
      "learning_rate": 3.1986881315889315e-05,
      "loss": 0.0091,
      "step": 2180
    },
    {
      "epoch": 3.2901408450704226,
      "grad_norm": 0.26297393441200256,
      "learning_rate": 3.149813763497124e-05,
      "loss": 0.0155,
      "step": 2190
    },
    {
      "epoch": 3.3051643192488265,
      "grad_norm": 0.26686403155326843,
      "learning_rate": 3.101143375609818e-05,
      "loss": 0.0149,
      "step": 2200
    },
    {
      "epoch": 3.3051643192488265,
      "eval_loss": 0.036220405250787735,
      "eval_runtime": 221.9816,
      "eval_samples_per_second": 5.334,
      "eval_steps_per_second": 5.334,
      "step": 2200
    },
    {
      "epoch": 3.32018779342723,
      "grad_norm": 0.3246915638446808,
      "learning_rate": 3.0526823337628915e-05,
      "loss": 0.0129,
      "step": 2210
    },
    {
      "epoch": 3.335211267605634,
      "grad_norm": 0.4109603464603424,
      "learning_rate": 3.004435980712129e-05,
      "loss": 0.0098,
      "step": 2220
    },
    {
      "epoch": 3.3502347417840377,
      "grad_norm": 0.4035777747631073,
      "learning_rate": 2.9564096355442116e-05,
      "loss": 0.0128,
      "step": 2230
    },
    {
      "epoch": 3.365258215962441,
      "grad_norm": 0.1403612494468689,
      "learning_rate": 2.9086085930902824e-05,
      "loss": 0.0137,
      "step": 2240
    },
    {
      "epoch": 3.380281690140845,
      "grad_norm": 0.21675720810890198,
      "learning_rate": 2.8610381233422058e-05,
      "loss": 0.0083,
      "step": 2250
    },
    {
      "epoch": 3.380281690140845,
      "eval_loss": 0.03651123493909836,
      "eval_runtime": 222.0068,
      "eval_samples_per_second": 5.333,
      "eval_steps_per_second": 5.333,
      "step": 2250
    },
    {
      "epoch": 3.395305164319249,
      "grad_norm": 0.30155134201049805,
      "learning_rate": 2.8137034708715592e-05,
      "loss": 0.0098,
      "step": 2260
    },
    {
      "epoch": 3.4103286384976528,
      "grad_norm": 0.05509026348590851,
      "learning_rate": 2.7666098542514273e-05,
      "loss": 0.0096,
      "step": 2270
    },
    {
      "epoch": 3.425352112676056,
      "grad_norm": 0.25339803099632263,
      "learning_rate": 2.719762465481055e-05,
      "loss": 0.0123,
      "step": 2280
    },
    {
      "epoch": 3.44037558685446,
      "grad_norm": 0.052862461656332016,
      "learning_rate": 2.6731664694134473e-05,
      "loss": 0.0118,
      "step": 2290
    },
    {
      "epoch": 3.455399061032864,
      "grad_norm": 0.16749408841133118,
      "learning_rate": 2.6268270031859476e-05,
      "loss": 0.0115,
      "step": 2300
    },
    {
      "epoch": 3.455399061032864,
      "eval_loss": 0.035897351801395416,
      "eval_runtime": 221.6066,
      "eval_samples_per_second": 5.343,
      "eval_steps_per_second": 5.343,
      "step": 2300
    },
    {
      "epoch": 3.4704225352112674,
      "grad_norm": 0.28338125348091125,
      "learning_rate": 2.580749175653877e-05,
      "loss": 0.0097,
      "step": 2310
    },
    {
      "epoch": 3.4854460093896713,
      "grad_norm": 0.36096012592315674,
      "learning_rate": 2.5349380668272905e-05,
      "loss": 0.0122,
      "step": 2320
    },
    {
      "epoch": 3.500469483568075,
      "grad_norm": 0.20076704025268555,
      "learning_rate": 2.489398727310908e-05,
      "loss": 0.0096,
      "step": 2330
    },
    {
      "epoch": 3.5154929577464786,
      "grad_norm": 0.28074732422828674,
      "learning_rate": 2.4441361777473066e-05,
      "loss": 0.0088,
      "step": 2340
    },
    {
      "epoch": 3.5305164319248825,
      "grad_norm": 0.5027658343315125,
      "learning_rate": 2.3991554082633912e-05,
      "loss": 0.0111,
      "step": 2350
    },
    {
      "epoch": 3.5305164319248825,
      "eval_loss": 0.038713712245225906,
      "eval_runtime": 221.2043,
      "eval_samples_per_second": 5.353,
      "eval_steps_per_second": 5.353,
      "step": 2350
    },
    {
      "epoch": 3.5455399061032864,
      "grad_norm": 0.11489449441432953,
      "learning_rate": 2.354461377920239e-05,
      "loss": 0.009,
      "step": 2360
    },
    {
      "epoch": 3.5605633802816903,
      "grad_norm": 0.42795881628990173,
      "learning_rate": 2.3100590141663807e-05,
      "loss": 0.0123,
      "step": 2370
    },
    {
      "epoch": 3.575586854460094,
      "grad_norm": 0.34739652276039124,
      "learning_rate": 2.265953212294551e-05,
      "loss": 0.0143,
      "step": 2380
    },
    {
      "epoch": 3.5906103286384976,
      "grad_norm": 0.5267793536186218,
      "learning_rate": 2.2221488349019903e-05,
      "loss": 0.015,
      "step": 2390
    },
    {
      "epoch": 3.6056338028169015,
      "grad_norm": 0.2632468342781067,
      "learning_rate": 2.1786507113543457e-05,
      "loss": 0.0094,
      "step": 2400
    },
    {
      "epoch": 3.6056338028169015,
      "eval_loss": 0.03762707859277725,
      "eval_runtime": 222.4043,
      "eval_samples_per_second": 5.324,
      "eval_steps_per_second": 5.324,
      "step": 2400
    },
    {
      "epoch": 3.6206572769953054,
      "grad_norm": 0.15667402744293213,
      "learning_rate": 2.1354636372532523e-05,
      "loss": 0.0149,
      "step": 2410
    },
    {
      "epoch": 3.635680751173709,
      "grad_norm": 0.22469666600227356,
      "learning_rate": 2.092592373907617e-05,
      "loss": 0.0106,
      "step": 2420
    },
    {
      "epoch": 3.6507042253521127,
      "grad_norm": 0.2074466347694397,
      "learning_rate": 2.0500416478086932e-05,
      "loss": 0.0079,
      "step": 2430
    },
    {
      "epoch": 3.6657276995305166,
      "grad_norm": 0.21211783587932587,
      "learning_rate": 2.0078161501089954e-05,
      "loss": 0.0106,
      "step": 2440
    },
    {
      "epoch": 3.68075117370892,
      "grad_norm": 0.13224627077579498,
      "learning_rate": 1.9659205361050982e-05,
      "loss": 0.0051,
      "step": 2450
    },
    {
      "epoch": 3.68075117370892,
      "eval_loss": 0.03760899230837822,
      "eval_runtime": 222.6155,
      "eval_samples_per_second": 5.319,
      "eval_steps_per_second": 5.319,
      "step": 2450
    },
    {
      "epoch": 3.695774647887324,
      "grad_norm": 0.07832111418247223,
      "learning_rate": 1.924359424724408e-05,
      "loss": 0.0143,
      "step": 2460
    },
    {
      "epoch": 3.710798122065728,
      "grad_norm": 0.34279316663742065,
      "learning_rate": 1.8831373980159296e-05,
      "loss": 0.0129,
      "step": 2470
    },
    {
      "epoch": 3.7258215962441312,
      "grad_norm": 0.13393940031528473,
      "learning_rate": 1.8422590006450947e-05,
      "loss": 0.0062,
      "step": 2480
    },
    {
      "epoch": 3.740845070422535,
      "grad_norm": 0.05350172892212868,
      "learning_rate": 1.801728739392731e-05,
      "loss": 0.0087,
      "step": 2490
    },
    {
      "epoch": 3.755868544600939,
      "grad_norm": 0.0817003846168518,
      "learning_rate": 1.7615510826581904e-05,
      "loss": 0.0053,
      "step": 2500
    },
    {
      "epoch": 3.755868544600939,
      "eval_loss": 0.03752273693680763,
      "eval_runtime": 222.372,
      "eval_samples_per_second": 5.324,
      "eval_steps_per_second": 5.324,
      "step": 2500
    },
    {
      "epoch": 3.770892018779343,
      "grad_norm": 0.21435320377349854,
      "learning_rate": 1.7217304599667146e-05,
      "loss": 0.0111,
      "step": 2510
    },
    {
      "epoch": 3.7859154929577463,
      "grad_norm": 0.15997354686260223,
      "learning_rate": 1.6822712614810893e-05,
      "loss": 0.0101,
      "step": 2520
    },
    {
      "epoch": 3.80093896713615,
      "grad_norm": 0.182434543967247,
      "learning_rate": 1.643177837517631e-05,
      "loss": 0.0093,
      "step": 2530
    },
    {
      "epoch": 3.815962441314554,
      "grad_norm": 0.22743846476078033,
      "learning_rate": 1.6044544980665767e-05,
      "loss": 0.0125,
      "step": 2540
    },
    {
      "epoch": 3.830985915492958,
      "grad_norm": 0.33443447947502136,
      "learning_rate": 1.5661055123169126e-05,
      "loss": 0.0078,
      "step": 2550
    },
    {
      "epoch": 3.830985915492958,
      "eval_loss": 0.037725865840911865,
      "eval_runtime": 221.8726,
      "eval_samples_per_second": 5.336,
      "eval_steps_per_second": 5.336,
      "step": 2550
    },
    {
      "epoch": 3.8460093896713614,
      "grad_norm": 0.5252001285552979,
      "learning_rate": 1.5281351081856974e-05,
      "loss": 0.0118,
      "step": 2560
    },
    {
      "epoch": 3.8610328638497653,
      "grad_norm": 0.17551793158054352,
      "learning_rate": 1.4905474718519491e-05,
      "loss": 0.007,
      "step": 2570
    },
    {
      "epoch": 3.876056338028169,
      "grad_norm": 0.1480369120836258,
      "learning_rate": 1.453346747295119e-05,
      "loss": 0.0141,
      "step": 2580
    },
    {
      "epoch": 3.8910798122065726,
      "grad_norm": 0.19130997359752655,
      "learning_rate": 1.4165370358382274e-05,
      "loss": 0.0093,
      "step": 2590
    },
    {
      "epoch": 3.9061032863849765,
      "grad_norm": 0.1769220381975174,
      "learning_rate": 1.3801223956956994e-05,
      "loss": 0.0105,
      "step": 2600
    },
    {
      "epoch": 3.9061032863849765,
      "eval_loss": 0.0372486487030983,
      "eval_runtime": 221.9339,
      "eval_samples_per_second": 5.335,
      "eval_steps_per_second": 5.335,
      "step": 2600
    },
    {
      "epoch": 3.9211267605633804,
      "grad_norm": 0.11678332090377808,
      "learning_rate": 1.344106841525946e-05,
      "loss": 0.0141,
      "step": 2610
    },
    {
      "epoch": 3.936150234741784,
      "grad_norm": 0.24193385243415833,
      "learning_rate": 1.3084943439887659e-05,
      "loss": 0.0085,
      "step": 2620
    },
    {
      "epoch": 3.9511737089201877,
      "grad_norm": 0.10747163742780685,
      "learning_rate": 1.273288829307579e-05,
      "loss": 0.0054,
      "step": 2630
    },
    {
      "epoch": 3.9661971830985916,
      "grad_norm": 0.28762826323509216,
      "learning_rate": 1.2384941788365622e-05,
      "loss": 0.0078,
      "step": 2640
    },
    {
      "epoch": 3.981220657276995,
      "grad_norm": 0.11588622629642487,
      "learning_rate": 1.2041142286327477e-05,
      "loss": 0.0105,
      "step": 2650
    },
    {
      "epoch": 3.981220657276995,
      "eval_loss": 0.03705061599612236,
      "eval_runtime": 222.758,
      "eval_samples_per_second": 5.315,
      "eval_steps_per_second": 5.315,
      "step": 2650
    },
    {
      "epoch": 3.996244131455399,
      "grad_norm": 0.2062222808599472,
      "learning_rate": 1.170152769033095e-05,
      "loss": 0.0085,
      "step": 2660
    },
    {
      "epoch": 4.011267605633803,
      "grad_norm": 0.15399198234081268,
      "learning_rate": 1.1366135442366127e-05,
      "loss": 0.0057,
      "step": 2670
    },
    {
      "epoch": 4.026291079812206,
      "grad_norm": 0.12908077239990234,
      "learning_rate": 1.103500251891571e-05,
      "loss": 0.0074,
      "step": 2680
    },
    {
      "epoch": 4.041314553990611,
      "grad_norm": 0.06553179025650024,
      "learning_rate": 1.0708165426878325e-05,
      "loss": 0.0049,
      "step": 2690
    },
    {
      "epoch": 4.056338028169014,
      "grad_norm": 0.12032724171876907,
      "learning_rate": 1.0385660199543812e-05,
      "loss": 0.0064,
      "step": 2700
    },
    {
      "epoch": 4.056338028169014,
      "eval_loss": 0.038154859095811844,
      "eval_runtime": 220.5195,
      "eval_samples_per_second": 5.369,
      "eval_steps_per_second": 5.369,
      "step": 2700
    },
    {
      "epoch": 4.0713615023474174,
      "grad_norm": 0.06930918991565704,
      "learning_rate": 1.0067522392620537e-05,
      "loss": 0.0052,
      "step": 2710
    },
    {
| "epoch": 4.086384976525822, | |
| "grad_norm": 0.10133420675992966, | |
| "learning_rate": 9.753787080315385e-06, | |
| "loss": 0.0075, | |
| "step": 2720 | |
| }, | |
| { | |
| "epoch": 4.101408450704225, | |
| "grad_norm": 0.01755693554878235, | |
| "learning_rate": 9.444488851467042e-06, | |
| "loss": 0.0059, | |
| "step": 2730 | |
| }, | |
| { | |
| "epoch": 4.1164319248826295, | |
| "grad_norm": 0.1588931679725647, | |
| "learning_rate": 9.139661805732435e-06, | |
| "loss": 0.0042, | |
| "step": 2740 | |
| }, | |
| { | |
| "epoch": 4.131455399061033, | |
| "grad_norm": 0.39881432056427, | |
| "learning_rate": 8.839339549827397e-06, | |
| "loss": 0.0048, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 4.131455399061033, | |
| "eval_loss": 0.03982308879494667, | |
| "eval_runtime": 221.9797, | |
| "eval_samples_per_second": 5.334, | |
| "eval_steps_per_second": 5.334, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 4.146478873239436, | |
| "grad_norm": 0.07863107323646545, | |
| "learning_rate": 8.543555193821634e-06, | |
| "loss": 0.0075, | |
| "step": 2760 | |
| }, | |
| { | |
| "epoch": 4.161502347417841, | |
| "grad_norm": 0.25240081548690796, | |
| "learning_rate": 8.252341347488251e-06, | |
| "loss": 0.0043, | |
| "step": 2770 | |
| }, | |
| { | |
| "epoch": 4.176525821596244, | |
| "grad_norm": 0.22023944556713104, | |
| "learning_rate": 7.965730116708681e-06, | |
| "loss": 0.008, | |
| "step": 2780 | |
| }, | |
| { | |
| "epoch": 4.191549295774648, | |
| "grad_norm": 0.010785081423819065, | |
| "learning_rate": 7.68375309993304e-06, | |
| "loss": 0.0031, | |
| "step": 2790 | |
| }, | |
| { | |
| "epoch": 4.206572769953052, | |
| "grad_norm": 0.11607765406370163, | |
| "learning_rate": 7.406441384696372e-06, | |
| "loss": 0.0065, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 4.206572769953052, | |
| "eval_loss": 0.04066622629761696, | |
| "eval_runtime": 221.9129, | |
| "eval_samples_per_second": 5.335, | |
| "eval_steps_per_second": 5.335, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 4.221596244131455, | |
| "grad_norm": 0.05429501459002495, | |
| "learning_rate": 7.133825544191464e-06, | |
| "loss": 0.0095, | |
| "step": 2810 | |
| }, | |
| { | |
| "epoch": 4.236619718309859, | |
| "grad_norm": 0.09971319139003754, | |
| "learning_rate": 6.865935633897996e-06, | |
| "loss": 0.004, | |
| "step": 2820 | |
| }, | |
| { | |
| "epoch": 4.251643192488263, | |
| "grad_norm": 0.09597835689783096, | |
| "learning_rate": 6.602801188269081e-06, | |
| "loss": 0.0064, | |
| "step": 2830 | |
| }, | |
| { | |
| "epoch": 4.266666666666667, | |
| "grad_norm": 0.1952216774225235, | |
| "learning_rate": 6.344451217475183e-06, | |
| "loss": 0.0047, | |
| "step": 2840 | |
| }, | |
| { | |
| "epoch": 4.28169014084507, | |
| "grad_norm": 0.0561475045979023, | |
| "learning_rate": 6.090914204205655e-06, | |
| "loss": 0.0031, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 4.28169014084507, | |
| "eval_loss": 0.04169252887368202, | |
| "eval_runtime": 222.1513, | |
| "eval_samples_per_second": 5.33, | |
| "eval_steps_per_second": 5.33, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 4.296713615023474, | |
| "grad_norm": 0.2749840021133423, | |
| "learning_rate": 5.842218100528679e-06, | |
| "loss": 0.0052, | |
| "step": 2860 | |
| }, | |
| { | |
| "epoch": 4.311737089201878, | |
| "grad_norm": 0.22904397547245026, | |
| "learning_rate": 5.598390324809555e-06, | |
| "loss": 0.0046, | |
| "step": 2870 | |
| }, | |
| { | |
| "epoch": 4.326760563380281, | |
| "grad_norm": 0.26261669397354126, | |
| "learning_rate": 5.359457758687841e-06, | |
| "loss": 0.0089, | |
| "step": 2880 | |
| }, | |
| { | |
| "epoch": 4.341784037558686, | |
| "grad_norm": 0.21348492801189423, | |
| "learning_rate": 5.125446744113743e-06, | |
| "loss": 0.0057, | |
| "step": 2890 | |
| }, | |
| { | |
| "epoch": 4.356807511737089, | |
| "grad_norm": 0.0984061136841774, | |
| "learning_rate": 4.896383080443934e-06, | |
| "loss": 0.0028, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 4.356807511737089, | |
| "eval_loss": 0.041960135102272034, | |
| "eval_runtime": 222.6261, | |
| "eval_samples_per_second": 5.318, | |
| "eval_steps_per_second": 5.318, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 4.371830985915493, | |
| "grad_norm": 0.2619991898536682, | |
| "learning_rate": 4.672292021597174e-06, | |
| "loss": 0.0056, | |
| "step": 2910 | |
| }, | |
| { | |
| "epoch": 4.386854460093897, | |
| "grad_norm": 0.16088174283504486, | |
| "learning_rate": 4.4531982732702145e-06, | |
| "loss": 0.0064, | |
| "step": 2920 | |
| }, | |
| { | |
| "epoch": 4.4018779342723, | |
| "grad_norm": 0.3044176995754242, | |
| "learning_rate": 4.239125990213883e-06, | |
| "loss": 0.0039, | |
| "step": 2930 | |
| }, | |
| { | |
| "epoch": 4.416901408450705, | |
| "grad_norm": 0.06394259631633759, | |
| "learning_rate": 4.030098773570174e-06, | |
| "loss": 0.0016, | |
| "step": 2940 | |
| }, | |
| { | |
| "epoch": 4.431924882629108, | |
| "grad_norm": 0.15494537353515625, | |
| "learning_rate": 3.826139668270234e-06, | |
| "loss": 0.0043, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 4.431924882629108, | |
| "eval_loss": 0.042107198387384415, | |
| "eval_runtime": 222.671, | |
| "eval_samples_per_second": 5.317, | |
| "eval_steps_per_second": 5.317, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 4.446948356807511, | |
| "grad_norm": 0.1741316169500351, | |
| "learning_rate": 3.6272711604936504e-06, | |
| "loss": 0.0049, | |
| "step": 2960 | |
| }, | |
| { | |
| "epoch": 4.461971830985916, | |
| "grad_norm": 0.08207106590270996, | |
| "learning_rate": 3.433515175189428e-06, | |
| "loss": 0.0053, | |
| "step": 2970 | |
| }, | |
| { | |
| "epoch": 4.476995305164319, | |
| "grad_norm": 0.09723722189664841, | |
| "learning_rate": 3.2448930736588e-06, | |
| "loss": 0.0077, | |
| "step": 2980 | |
| }, | |
| { | |
| "epoch": 4.492018779342723, | |
| "grad_norm": 0.03407386317849159, | |
| "learning_rate": 3.061425651200117e-06, | |
| "loss": 0.0038, | |
| "step": 2990 | |
| }, | |
| { | |
| "epoch": 4.507042253521127, | |
| "grad_norm": 0.21065545082092285, | |
| "learning_rate": 2.883133134816296e-06, | |
| "loss": 0.0048, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 4.507042253521127, | |
| "eval_loss": 0.04240112751722336, | |
| "eval_runtime": 222.0883, | |
| "eval_samples_per_second": 5.331, | |
| "eval_steps_per_second": 5.331, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 4.52206572769953, | |
| "grad_norm": 0.06989271938800812, | |
| "learning_rate": 2.7100351809847326e-06, | |
| "loss": 0.0046, | |
| "step": 3010 | |
| }, | |
| { | |
| "epoch": 4.537089201877935, | |
| "grad_norm": 0.07983122766017914, | |
| "learning_rate": 2.542150873490251e-06, | |
| "loss": 0.0028, | |
| "step": 3020 | |
| }, | |
| { | |
| "epoch": 4.552112676056338, | |
| "grad_norm": 0.27960672974586487, | |
| "learning_rate": 2.3794987213211383e-06, | |
| "loss": 0.0055, | |
| "step": 3030 | |
| }, | |
| { | |
| "epoch": 4.567136150234742, | |
| "grad_norm": 0.2628881633281708, | |
| "learning_rate": 2.222096656628547e-06, | |
| "loss": 0.0035, | |
| "step": 3040 | |
| }, | |
| { | |
| "epoch": 4.582159624413146, | |
| "grad_norm": 0.13972419500350952, | |
| "learning_rate": 2.0699620327495174e-06, | |
| "loss": 0.0038, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 4.582159624413146, | |
| "eval_loss": 0.04280637204647064, | |
| "eval_runtime": 222.2913, | |
| "eval_samples_per_second": 5.326, | |
| "eval_steps_per_second": 5.326, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 4.597183098591549, | |
| "grad_norm": 0.017372630536556244, | |
| "learning_rate": 1.9231116222937996e-06, | |
| "loss": 0.0025, | |
| "step": 3060 | |
| }, | |
| { | |
| "epoch": 4.612206572769953, | |
| "grad_norm": 0.14479027688503265, | |
| "learning_rate": 1.7815616152946523e-06, | |
| "loss": 0.0045, | |
| "step": 3070 | |
| }, | |
| { | |
| "epoch": 4.627230046948357, | |
| "grad_norm": 0.03770596161484718, | |
| "learning_rate": 1.6453276174240195e-06, | |
| "loss": 0.004, | |
| "step": 3080 | |
| }, | |
| { | |
| "epoch": 4.642253521126761, | |
| "grad_norm": 0.19395627081394196, | |
| "learning_rate": 1.5144246482719114e-06, | |
| "loss": 0.0043, | |
| "step": 3090 | |
| }, | |
| { | |
| "epoch": 4.657276995305164, | |
| "grad_norm": 0.05098709091544151, | |
| "learning_rate": 1.3888671396905805e-06, | |
| "loss": 0.0041, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 4.657276995305164, | |
| "eval_loss": 0.0430205836892128, | |
| "eval_runtime": 222.6399, | |
| "eval_samples_per_second": 5.318, | |
| "eval_steps_per_second": 5.318, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 4.672300469483568, | |
| "grad_norm": 0.3063965439796448, | |
| "learning_rate": 1.2686689342034431e-06, | |
| "loss": 0.0082, | |
| "step": 3110 | |
| }, | |
| { | |
| "epoch": 4.687323943661972, | |
| "grad_norm": 0.21486127376556396, | |
| "learning_rate": 1.1538432834789227e-06, | |
| "loss": 0.0054, | |
| "step": 3120 | |
| }, | |
| { | |
| "epoch": 4.702347417840375, | |
| "grad_norm": 0.3423399329185486, | |
| "learning_rate": 1.044402846869491e-06, | |
| "loss": 0.0049, | |
| "step": 3130 | |
| }, | |
| { | |
| "epoch": 4.71737089201878, | |
| "grad_norm": 0.15926554799079895, | |
| "learning_rate": 9.403596900160073e-07, | |
| "loss": 0.0034, | |
| "step": 3140 | |
| }, | |
| { | |
| "epoch": 4.732394366197183, | |
| "grad_norm": 0.24265912175178528, | |
| "learning_rate": 8.417252835174749e-07, | |
| "loss": 0.0066, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 4.732394366197183, | |
| "eval_loss": 0.043101657181978226, | |
| "eval_runtime": 223.1112, | |
| "eval_samples_per_second": 5.307, | |
| "eval_steps_per_second": 5.307, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 4.7474178403755865, | |
| "grad_norm": 0.13980786502361298, | |
| "learning_rate": 7.48510501666455e-07, | |
| "loss": 0.0073, | |
| "step": 3160 | |
| }, | |
| { | |
| "epoch": 4.762441314553991, | |
| "grad_norm": 0.14822153747081757, | |
| "learning_rate": 6.607256212501578e-07, | |
| "loss": 0.0067, | |
| "step": 3170 | |
| }, | |
| { | |
| "epoch": 4.777464788732394, | |
| "grad_norm": 0.2598550021648407, | |
| "learning_rate": 5.783803204174654e-07, | |
| "loss": 0.0054, | |
| "step": 3180 | |
| }, | |
| { | |
| "epoch": 4.792488262910798, | |
| "grad_norm": 0.145918071269989, | |
| "learning_rate": 5.014836776119358e-07, | |
| "loss": 0.0051, | |
| "step": 3190 | |
| }, | |
| { | |
| "epoch": 4.807511737089202, | |
| "grad_norm": 0.152046799659729, | |
| "learning_rate": 4.300441705708924e-07, | |
| "loss": 0.003, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 4.807511737089202, | |
| "eval_loss": 0.043016187846660614, | |
| "eval_runtime": 222.4587, | |
| "eval_samples_per_second": 5.322, | |
| "eval_steps_per_second": 5.322, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 4.822535211267605, | |
| "grad_norm": 0.1859809011220932, | |
| "learning_rate": 3.6406967539078796e-07, | |
| "loss": 0.0042, | |
| "step": 3210 | |
| }, | |
| { | |
| "epoch": 4.83755868544601, | |
| "grad_norm": 0.10558822751045227, | |
| "learning_rate": 3.0356746565887715e-07, | |
| "loss": 0.0056, | |
| "step": 3220 | |
| }, | |
| { | |
| "epoch": 4.852582159624413, | |
| "grad_norm": 0.3504423201084137, | |
| "learning_rate": 2.485442116513026e-07, | |
| "loss": 0.0048, | |
| "step": 3230 | |
| }, | |
| { | |
| "epoch": 4.867605633802817, | |
| "grad_norm": 0.21484680473804474, | |
| "learning_rate": 1.9900597959770507e-07, | |
| "loss": 0.0079, | |
| "step": 3240 | |
| }, | |
| { | |
| "epoch": 4.882629107981221, | |
| "grad_norm": 0.07226985692977905, | |
| "learning_rate": 1.5495823101245866e-07, | |
| "loss": 0.0031, | |
| "step": 3250 | |
| }, | |
| { | |
| "epoch": 4.882629107981221, | |
| "eval_loss": 0.042939841747283936, | |
| "eval_runtime": 222.1162, | |
| "eval_samples_per_second": 5.331, | |
| "eval_steps_per_second": 5.331, | |
| "step": 3250 | |
| }, | |
| { | |
| "epoch": 4.897652582159624, | |
| "grad_norm": 0.0429568886756897, | |
| "learning_rate": 1.164058220925135e-07, | |
| "loss": 0.0043, | |
| "step": 3260 | |
| }, | |
| { | |
| "epoch": 4.912676056338028, | |
| "grad_norm": 0.3218914568424225, | |
| "learning_rate": 8.335300318201844e-08, | |
| "loss": 0.0074, | |
| "step": 3270 | |
| }, | |
| { | |
| "epoch": 4.927699530516432, | |
| "grad_norm": 0.10779738426208496, | |
| "learning_rate": 5.5803418303745917e-08, | |
| "loss": 0.005, | |
| "step": 3280 | |
| }, | |
| { | |
| "epoch": 4.942723004694836, | |
| "grad_norm": 0.0708744004368782, | |
| "learning_rate": 3.3760104757313284e-08, | |
| "loss": 0.003, | |
| "step": 3290 | |
| }, | |
| { | |
| "epoch": 4.957746478873239, | |
| "grad_norm": 0.19295717775821686, | |
| "learning_rate": 1.7225492784345156e-08, | |
| "loss": 0.0046, | |
| "step": 3300 | |
| }, | |
| { | |
| "epoch": 4.957746478873239, | |
| "eval_loss": 0.042976681143045425, | |
| "eval_runtime": 222.2614, | |
| "eval_samples_per_second": 5.327, | |
| "eval_steps_per_second": 5.327, | |
| "step": 3300 | |
| }, | |
| { | |
| "epoch": 4.972769953051643, | |
| "grad_norm": 0.10675567388534546, | |
| "learning_rate": 6.201405300532148e-09, | |
| "loss": 0.0058, | |
| "step": 3310 | |
| }, | |
| { | |
| "epoch": 4.987793427230047, | |
| "grad_norm": 0.09640572220087051, | |
| "learning_rate": 6.890576946805282e-10, | |
| "loss": 0.0043, | |
| "step": 3320 | |
| }, | |
| { | |
| "epoch": 4.995305164319249, | |
| "step": 3325, | |
| "total_flos": 8.987524405535048e+17, | |
| "train_loss": 0.049058449685125426, | |
| "train_runtime": 45792.1032, | |
| "train_samples_per_second": 1.163, | |
| "train_steps_per_second": 0.073 | |
| } | |
| ], | |
| "logging_steps": 10, | |
| "max_steps": 3325, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 5, | |
| "save_steps": 50, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 8.987524405535048e+17, | |
| "train_batch_size": 1, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
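
The dump above is a standard Hugging Face `trainer_state.json`: `log_history` interleaves per-step training entries (keyed by `loss`) with periodic evaluation entries (keyed by `eval_loss`), and the top-level fields record the best checkpoint and run totals. As a minimal sketch of how one might consume it — not part of the original file, and assuming it is saved locally as `trainer_state.json` — the following summarizes the logged losses:

```python
# Minimal sketch: load a Hugging Face trainer_state.json and summarize it.
# The filename below is an assumption; point it at wherever this dump lives.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Per-step training entries carry a "loss" key; eval entries carry "eval_loss".
train_logs = [e for e in state["log_history"] if "loss" in e]
eval_logs = [e for e in state["log_history"] if "eval_loss" in e]

best_eval = min(eval_logs, key=lambda e: e["eval_loss"])
print(f"lowest logged eval_loss: {best_eval['eval_loss']:.5f} "
      f"at step {best_eval['step']}")
print(f"final train loss: {train_logs[-1]['loss']:.4f} "
      f"after {state['global_step']} of {state['max_steps']} steps")

# The Trainer also records its own best checkpoint at the top level.
print("checkpointed best:", state.get("best_metric"),
      "->", state.get("best_model_checkpoint"))
```

Note that the checkpointed `best_metric` can be lower than any `eval_loss` shown in this tail of the log, since the best checkpoint (here step 1900) was reached earlier in training than the entries excerpted above.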