{
  "best_metric": 0.9984836997725549,
  "best_model_checkpoint": "neunit-ks-531/checkpoint-742",
  "epoch": 4.986522911051213,
  "global_step": 925,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.05,
      "learning_rate": 3.225806451612903e-06,
      "loss": 1.09,
      "step": 10
    },
    {
      "epoch": 0.11,
      "learning_rate": 6.451612903225806e-06,
      "loss": 1.0284,
      "step": 20
    },
    {
      "epoch": 0.16,
      "learning_rate": 9.67741935483871e-06,
      "loss": 0.9034,
      "step": 30
    },
    {
      "epoch": 0.22,
      "learning_rate": 1.2903225806451613e-05,
      "loss": 0.714,
      "step": 40
    },
    {
      "epoch": 0.27,
      "learning_rate": 1.6129032258064517e-05,
      "loss": 0.5473,
      "step": 50
    },
    {
      "epoch": 0.32,
      "learning_rate": 1.935483870967742e-05,
      "loss": 0.4807,
      "step": 60
    },
    {
      "epoch": 0.38,
      "learning_rate": 2.258064516129032e-05,
      "loss": 0.3842,
      "step": 70
    },
    {
      "epoch": 0.43,
      "learning_rate": 2.5806451612903226e-05,
      "loss": 0.3108,
      "step": 80
    },
    {
      "epoch": 0.49,
      "learning_rate": 2.903225806451613e-05,
      "loss": 0.2356,
      "step": 90
    },
    {
      "epoch": 0.54,
      "learning_rate": 2.9747596153846152e-05,
      "loss": 0.1893,
      "step": 100
    },
    {
      "epoch": 0.59,
      "learning_rate": 2.938701923076923e-05,
      "loss": 0.1384,
      "step": 110
    },
    {
      "epoch": 0.65,
      "learning_rate": 2.9026442307692306e-05,
      "loss": 0.1191,
      "step": 120
    },
    {
      "epoch": 0.7,
      "learning_rate": 2.8665865384615385e-05,
      "loss": 0.0766,
      "step": 130
    },
    {
      "epoch": 0.75,
      "learning_rate": 2.8305288461538463e-05,
      "loss": 0.0824,
      "step": 140
    },
    {
      "epoch": 0.81,
      "learning_rate": 2.794471153846154e-05,
      "loss": 0.1141,
      "step": 150
    },
    {
      "epoch": 0.86,
      "learning_rate": 2.7584134615384617e-05,
      "loss": 0.0616,
      "step": 160
    },
    {
      "epoch": 0.92,
      "learning_rate": 2.7223557692307696e-05,
      "loss": 0.0573,
      "step": 170
    },
    {
      "epoch": 0.97,
      "learning_rate": 2.686298076923077e-05,
      "loss": 0.0484,
      "step": 180
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.9844579226686884,
      "eval_loss": 0.056594911962747574,
      "eval_runtime": 3.9857,
      "eval_samples_per_second": 661.869,
      "eval_steps_per_second": 20.825,
      "step": 185
    },
    {
      "epoch": 1.02,
      "learning_rate": 2.6502403846153846e-05,
      "loss": 0.0326,
      "step": 190
    },
    {
      "epoch": 1.08,
      "learning_rate": 2.614182692307692e-05,
      "loss": 0.0512,
      "step": 200
    },
    {
      "epoch": 1.13,
      "learning_rate": 2.578125e-05,
      "loss": 0.0518,
      "step": 210
    },
    {
      "epoch": 1.19,
      "learning_rate": 2.542067307692308e-05,
      "loss": 0.0316,
      "step": 220
    },
    {
      "epoch": 1.24,
      "learning_rate": 2.5060096153846154e-05,
      "loss": 0.0261,
      "step": 230
    },
    {
      "epoch": 1.29,
      "learning_rate": 2.4699519230769232e-05,
      "loss": 0.0229,
      "step": 240
    },
    {
      "epoch": 1.35,
      "learning_rate": 2.4338942307692307e-05,
      "loss": 0.0265,
      "step": 250
    },
    {
      "epoch": 1.4,
      "learning_rate": 2.3978365384615386e-05,
      "loss": 0.0228,
      "step": 260
    },
    {
      "epoch": 1.46,
      "learning_rate": 2.3617788461538465e-05,
      "loss": 0.0185,
      "step": 270
    },
    {
      "epoch": 1.51,
      "learning_rate": 2.3257211538461536e-05,
      "loss": 0.0156,
      "step": 280
    },
    {
      "epoch": 1.56,
      "learning_rate": 2.2896634615384615e-05,
      "loss": 0.0127,
      "step": 290
    },
    {
      "epoch": 1.62,
      "learning_rate": 2.2536057692307694e-05,
      "loss": 0.0226,
      "step": 300
    },
    {
      "epoch": 1.67,
      "learning_rate": 2.217548076923077e-05,
      "loss": 0.0138,
      "step": 310
    },
    {
      "epoch": 1.73,
      "learning_rate": 2.1814903846153847e-05,
      "loss": 0.0224,
      "step": 320
    },
    {
      "epoch": 1.78,
      "learning_rate": 2.1454326923076923e-05,
      "loss": 0.0154,
      "step": 330
    },
    {
      "epoch": 1.83,
      "learning_rate": 2.109375e-05,
      "loss": 0.017,
      "step": 340
    },
    {
      "epoch": 1.89,
      "learning_rate": 2.073317307692308e-05,
      "loss": 0.0128,
      "step": 350
    },
    {
      "epoch": 1.94,
      "learning_rate": 2.0372596153846155e-05,
      "loss": 0.0152,
      "step": 360
    },
    {
      "epoch": 1.99,
      "learning_rate": 2.0012019230769233e-05,
      "loss": 0.0086,
      "step": 370
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.9909021986353298,
      "eval_loss": 0.04098781570792198,
      "eval_runtime": 3.7805,
      "eval_samples_per_second": 697.785,
      "eval_steps_per_second": 21.955,
      "step": 371
    },
    {
      "epoch": 2.05,
      "learning_rate": 1.9651442307692305e-05,
      "loss": 0.0131,
      "step": 380
    },
    {
      "epoch": 2.1,
      "learning_rate": 1.9290865384615384e-05,
      "loss": 0.0108,
      "step": 390
    },
    {
      "epoch": 2.16,
      "learning_rate": 1.8930288461538462e-05,
      "loss": 0.0063,
      "step": 400
    },
    {
      "epoch": 2.21,
      "learning_rate": 1.8569711538461538e-05,
      "loss": 0.0103,
      "step": 410
    },
    {
      "epoch": 2.26,
      "learning_rate": 1.8209134615384616e-05,
      "loss": 0.0055,
      "step": 420
    },
    {
      "epoch": 2.32,
      "learning_rate": 1.7848557692307695e-05,
      "loss": 0.0083,
      "step": 430
    },
    {
      "epoch": 2.37,
      "learning_rate": 1.748798076923077e-05,
      "loss": 0.0112,
      "step": 440
    },
    {
      "epoch": 2.43,
      "learning_rate": 1.712740384615385e-05,
      "loss": 0.008,
      "step": 450
    },
    {
      "epoch": 2.48,
      "learning_rate": 1.6766826923076924e-05,
      "loss": 0.0065,
      "step": 460
    },
    {
      "epoch": 2.53,
      "learning_rate": 1.640625e-05,
      "loss": 0.0082,
      "step": 470
    },
    {
      "epoch": 2.59,
      "learning_rate": 1.6045673076923078e-05,
      "loss": 0.0097,
      "step": 480
    },
    {
      "epoch": 2.64,
      "learning_rate": 1.5685096153846153e-05,
      "loss": 0.0073,
      "step": 490
    },
    {
      "epoch": 2.7,
      "learning_rate": 1.532451923076923e-05,
      "loss": 0.0156,
      "step": 500
    },
    {
      "epoch": 2.75,
      "learning_rate": 1.4963942307692308e-05,
      "loss": 0.0049,
      "step": 510
    },
    {
      "epoch": 2.8,
      "learning_rate": 1.4603365384615385e-05,
      "loss": 0.0077,
      "step": 520
    },
    {
      "epoch": 2.86,
      "learning_rate": 1.4242788461538462e-05,
      "loss": 0.005,
      "step": 530
    },
    {
      "epoch": 2.91,
      "learning_rate": 1.3882211538461539e-05,
      "loss": 0.0045,
      "step": 540
    },
    {
      "epoch": 2.96,
      "learning_rate": 1.3521634615384616e-05,
      "loss": 0.007,
      "step": 550
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.9901440485216073,
      "eval_loss": 0.04609154537320137,
      "eval_runtime": 3.7832,
      "eval_samples_per_second": 697.296,
      "eval_steps_per_second": 21.939,
      "step": 556
    },
    {
      "epoch": 3.02,
      "learning_rate": 1.3161057692307693e-05,
      "loss": 0.0032,
      "step": 560
    },
    {
      "epoch": 3.07,
      "learning_rate": 1.280048076923077e-05,
      "loss": 0.007,
      "step": 570
    },
    {
      "epoch": 3.13,
      "learning_rate": 1.2439903846153846e-05,
      "loss": 0.0128,
      "step": 580
    },
    {
      "epoch": 3.18,
      "learning_rate": 1.2079326923076923e-05,
      "loss": 0.0105,
      "step": 590
    },
    {
      "epoch": 3.23,
      "learning_rate": 1.171875e-05,
      "loss": 0.0051,
      "step": 600
    },
    {
      "epoch": 3.29,
      "learning_rate": 1.1358173076923077e-05,
      "loss": 0.0133,
      "step": 610
    },
    {
      "epoch": 3.34,
      "learning_rate": 1.0997596153846154e-05,
      "loss": 0.0094,
      "step": 620
    },
    {
      "epoch": 3.4,
      "learning_rate": 1.0637019230769231e-05,
      "loss": 0.0061,
      "step": 630
    },
    {
      "epoch": 3.45,
      "learning_rate": 1.0276442307692308e-05,
      "loss": 0.0054,
      "step": 640
    },
    {
      "epoch": 3.5,
      "learning_rate": 9.915865384615385e-06,
      "loss": 0.0044,
      "step": 650
    },
    {
      "epoch": 3.56,
      "learning_rate": 9.555288461538462e-06,
      "loss": 0.0032,
      "step": 660
    },
    {
      "epoch": 3.61,
      "learning_rate": 9.19471153846154e-06,
      "loss": 0.0057,
      "step": 670
    },
    {
      "epoch": 3.67,
      "learning_rate": 8.834134615384615e-06,
      "loss": 0.0046,
      "step": 680
    },
    {
      "epoch": 3.72,
      "learning_rate": 8.473557692307692e-06,
      "loss": 0.0052,
      "step": 690
    },
    {
      "epoch": 3.77,
      "learning_rate": 8.112980769230769e-06,
      "loss": 0.0043,
      "step": 700
    },
    {
      "epoch": 3.83,
      "learning_rate": 7.752403846153848e-06,
      "loss": 0.0066,
      "step": 710
    },
    {
      "epoch": 3.88,
      "learning_rate": 7.391826923076924e-06,
      "loss": 0.0027,
      "step": 720
    },
    {
      "epoch": 3.94,
      "learning_rate": 7.03125e-06,
      "loss": 0.0027,
      "step": 730
    },
    {
      "epoch": 3.99,
      "learning_rate": 6.670673076923077e-06,
      "loss": 0.0044,
      "step": 740
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.9984836997725549,
      "eval_loss": 0.00841992162168026,
      "eval_runtime": 3.7804,
      "eval_samples_per_second": 697.802,
      "eval_steps_per_second": 21.955,
      "step": 742
    },
    {
      "epoch": 4.04,
      "learning_rate": 6.3100961538461544e-06,
      "loss": 0.0023,
      "step": 750
    },
    {
      "epoch": 4.1,
      "learning_rate": 5.9495192307692305e-06,
      "loss": 0.0065,
      "step": 760
    },
    {
      "epoch": 4.15,
      "learning_rate": 5.588942307692308e-06,
      "loss": 0.0025,
      "step": 770
    },
    {
      "epoch": 4.2,
      "learning_rate": 5.228365384615384e-06,
      "loss": 0.0026,
      "step": 780
    },
    {
      "epoch": 4.26,
      "learning_rate": 4.867788461538462e-06,
      "loss": 0.0025,
      "step": 790
    },
    {
      "epoch": 4.31,
      "learning_rate": 4.507211538461538e-06,
      "loss": 0.0023,
      "step": 800
    },
    {
      "epoch": 4.37,
      "learning_rate": 4.146634615384616e-06,
      "loss": 0.0022,
      "step": 810
    },
    {
      "epoch": 4.42,
      "learning_rate": 3.7860576923076922e-06,
      "loss": 0.0049,
      "step": 820
    },
    {
      "epoch": 4.47,
      "learning_rate": 3.425480769230769e-06,
      "loss": 0.0125,
      "step": 830
    },
    {
      "epoch": 4.53,
      "learning_rate": 3.064903846153846e-06,
      "loss": 0.0023,
      "step": 840
    },
    {
      "epoch": 4.58,
      "learning_rate": 2.704326923076923e-06,
      "loss": 0.0032,
      "step": 850
    },
    {
      "epoch": 4.64,
      "learning_rate": 2.3437500000000002e-06,
      "loss": 0.0025,
      "step": 860
    },
    {
      "epoch": 4.69,
      "learning_rate": 1.983173076923077e-06,
      "loss": 0.0041,
      "step": 870
    },
    {
      "epoch": 4.74,
      "learning_rate": 1.622596153846154e-06,
      "loss": 0.0025,
      "step": 880
    },
    {
      "epoch": 4.8,
      "learning_rate": 1.2620192307692309e-06,
      "loss": 0.0026,
      "step": 890
    },
    {
      "epoch": 4.85,
      "learning_rate": 9.014423076923078e-07,
      "loss": 0.0031,
      "step": 900
    },
    {
      "epoch": 4.91,
      "learning_rate": 5.408653846153846e-07,
      "loss": 0.0026,
      "step": 910
    },
    {
      "epoch": 4.96,
      "learning_rate": 1.8028846153846156e-07,
      "loss": 0.0032,
      "step": 920
    },
    {
      "epoch": 4.99,
      "eval_accuracy": 0.9984836997725549,
      "eval_loss": 0.008393567055463791,
      "eval_runtime": 3.7999,
      "eval_samples_per_second": 694.227,
      "eval_steps_per_second": 21.843,
      "step": 925
    },
    {
      "epoch": 4.99,
      "step": 925,
      "total_flos": 2.0985718924353213e+18,
      "train_loss": 0.07950978533641712,
      "train_runtime": 480.1024,
      "train_samples_per_second": 247.218,
      "train_steps_per_second": 1.927
    }
  ],
  "max_steps": 925,
  "num_train_epochs": 5,
  "total_flos": 2.0985718924353213e+18,
  "trial_name": null,
  "trial_params": null
}