{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 10.0,
  "global_step": 958,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 0.0004998655874501178,
      "loss": 0.1139,
      "step": 10
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.0004994624943343398,
      "loss": 0.0546,
      "step": 20
    },
    {
      "epoch": 0.06,
      "learning_rate": 0.0004987911540988544,
      "loss": 0.0536,
      "step": 30
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.0004978522886360845,
      "loss": 0.0525,
      "step": 40
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0004966469075084369,
      "loss": 0.0528,
      "step": 50
    },
    {
      "epoch": 0.13,
      "learning_rate": 0.0004951763068627193,
      "loss": 0.0515,
      "step": 60
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.0004934420680363928,
      "loss": 0.0498,
      "step": 70
    },
    {
      "epoch": 0.17,
      "learning_rate": 0.0004914460558571595,
      "loss": 0.05,
      "step": 80
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.0004891904166377121,
      "loss": 0.0548,
      "step": 90
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.0004866775758678035,
      "loss": 0.0493,
      "step": 100
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00048391023560611637,
      "loss": 0.0471,
      "step": 110
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00048089137157473837,
      "loss": 0.0507,
      "step": 120
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00047762422995936715,
      "loss": 0.0522,
      "step": 130
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00047411232391868566,
      "loss": 0.0546,
      "step": 140
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.0004703594298066606,
      "loss": 0.0505,
      "step": 150
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.0004663695831118284,
      "loss": 0.0535,
      "step": 160
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00046214707411793227,
      "loss": 0.0499,
      "step": 170
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.0004576964432905783,
      "loss": 0.0483,
      "step": 180
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00045302247639487117,
      "loss": 0.0496,
      "step": 190
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00044813019934927903,
      "loss": 0.0464,
      "step": 200
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.0004430248728212613,
      "loss": 0.0556,
      "step": 210
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00043771198657047085,
      "loss": 0.047,
      "step": 220
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.0004321972535456134,
      "loss": 0.0508,
      "step": 230
    },
    {
      "epoch": 0.5,
      "learning_rate": 0.0004264866037413112,
      "loss": 0.0518,
      "step": 240
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.0004205861778215777,
      "loss": 0.0504,
      "step": 250
    },
    {
      "epoch": 0.54,
      "learning_rate": 0.000414502320516759,
      "loss": 0.0476,
      "step": 260
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00040824157380104276,
      "loss": 0.0491,
      "step": 270
    },
    {
      "epoch": 0.58,
      "learning_rate": 0.00040181066985787084,
      "loss": 0.0508,
      "step": 280
    },
    {
      "epoch": 0.61,
      "learning_rate": 0.00039521652384081956,
      "loss": 0.0494,
      "step": 290
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.0003884662264377326,
      "loss": 0.0479,
      "step": 300
    },
    {
      "epoch": 0.65,
      "learning_rate": 0.0003815670362461011,
      "loss": 0.0522,
      "step": 310
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.00037452637196789146,
      "loss": 0.0508,
      "step": 320
    },
    {
      "epoch": 0.69,
      "learning_rate": 0.00036735180443221157,
      "loss": 0.0495,
      "step": 330
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.0003600510484543956,
      "loss": 0.0525,
      "step": 340
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.00035263195454026,
      "loss": 0.0512,
      "step": 350
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.0003451025004444512,
      "loss": 0.0523,
      "step": 360
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.0003374707825919631,
      "loss": 0.047,
      "step": 370
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.0003297450073720481,
      "loss": 0.0539,
      "step": 380
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.000321933482313883,
      "loss": 0.0531,
      "step": 390
    },
    {
      "epoch": 0.84,
      "learning_rate": 0.0003140446071534803,
      "loss": 0.0499,
      "step": 400
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.0003060868648014479,
      "loss": 0.0531,
      "step": 410
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.00029806881222131274,
      "loss": 0.053,
      "step": 420
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.00028999907122821374,
      "loss": 0.0479,
      "step": 430
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.00028188631921786104,
      "loss": 0.0525,
      "step": 440
    },
    {
      "epoch": 0.94,
      "learning_rate": 0.00027373927983572903,
      "loss": 0.0526,
      "step": 450
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.0002655667135965163,
      "loss": 0.0483,
      "step": 460
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.0002573774084639612,
      "loss": 0.052,
      "step": 470
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.00024918017040114084,
      "loss": 0.0481,
      "step": 480
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.00024098381390141524,
      "loss": 0.0462,
      "step": 490
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.00023279715251019938,
      "loss": 0.0437,
      "step": 500
    },
    {
      "epoch": 1.06,
      "learning_rate": 0.00022462898934775432,
      "loss": 0.05,
      "step": 510
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.0002164881076431881,
      "loss": 0.0466,
      "step": 520
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.00020838326128984626,
      "loss": 0.0479,
      "step": 530
    },
    {
      "epoch": 1.13,
      "learning_rate": 0.00020032316543224692,
      "loss": 0.0509,
      "step": 540
    },
    {
      "epoch": 1.15,
      "learning_rate": 0.00019231648709468226,
      "loss": 0.0469,
      "step": 550
    },
    {
      "epoch": 1.17,
      "learning_rate": 0.00018437183586156365,
      "loss": 0.0472,
      "step": 560
    },
    {
      "epoch": 1.19,
      "learning_rate": 0.00017649775461953248,
      "loss": 0.0495,
      "step": 570
    },
    {
      "epoch": 1.21,
      "learning_rate": 0.00016870271037129053,
      "loss": 0.0464,
      "step": 580
    },
    {
      "epoch": 1.23,
      "learning_rate": 0.0001609950851310287,
      "loss": 0.0494,
      "step": 590
    },
    {
      "epoch": 1.25,
      "learning_rate": 0.0001533831669112435,
      "loss": 0.0487,
      "step": 600
    },
    {
      "epoch": 1.27,
      "learning_rate": 0.00014587514081063424,
      "loss": 0.0471,
      "step": 610
    },
    {
      "epoch": 1.29,
      "learning_rate": 0.0001384790802126631,
      "loss": 0.0512,
      "step": 620
    },
    {
      "epoch": 1.32,
      "learning_rate": 0.00013120293810424256,
      "loss": 0.0445,
      "step": 630
    },
    {
      "epoch": 1.34,
      "learning_rate": 0.0001240545385238853,
      "loss": 0.0447,
      "step": 640
    },
    {
      "epoch": 1.36,
      "learning_rate": 0.00011704156814851283,
      "loss": 0.0481,
      "step": 650
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.00011017156802796823,
      "loss": 0.0474,
      "step": 660
    },
    {
      "epoch": 1.4,
      "learning_rate": 0.00010345192547612278,
      "loss": 0.0434,
      "step": 670
    },
    {
      "epoch": 1.42,
      "learning_rate": 9.688986612729387e-05,
      "loss": 0.0464,
      "step": 680
    },
    {
      "epoch": 1.44,
      "learning_rate": 9.049244616651797e-05,
      "loss": 0.0472,
      "step": 690
    },
    {
      "epoch": 1.46,
      "learning_rate": 8.426654474203194e-05,
      "loss": 0.046,
      "step": 700
    },
    {
      "epoch": 1.48,
      "learning_rate": 7.821885656812198e-05,
      "loss": 0.0465,
      "step": 710
    },
    {
      "epoch": 1.5,
      "learning_rate": 7.235588472629473e-05,
      "loss": 0.0434,
      "step": 720
    },
    {
      "epoch": 1.52,
      "learning_rate": 6.668393367251147e-05,
      "loss": 0.0465,
      "step": 730
    },
    {
      "epoch": 1.54,
      "learning_rate": 6.120910245800385e-05,
      "loss": 0.0457,
      "step": 740
    },
    {
      "epoch": 1.57,
      "learning_rate": 5.593727817096153e-05,
      "loss": 0.0498,
      "step": 750
    },
    {
      "epoch": 1.59,
      "learning_rate": 5.0874129606144015e-05,
      "loss": 0.0453,
      "step": 760
    },
    {
      "epoch": 1.61,
      "learning_rate": 4.6025101169223675e-05,
      "loss": 0.0481,
      "step": 770
    },
    {
      "epoch": 1.63,
      "learning_rate": 4.139540702241373e-05,
      "loss": 0.0467,
      "step": 780
    },
    {
      "epoch": 1.65,
      "learning_rate": 3.6990025477677825e-05,
      "loss": 0.0439,
      "step": 790
    },
    {
      "epoch": 1.67,
      "learning_rate": 3.281369364354897e-05,
      "loss": 0.0428,
      "step": 800
    },
    {
      "epoch": 1.69,
      "learning_rate": 2.8870902331314925e-05,
      "loss": 0.0464,
      "step": 810
    },
    {
      "epoch": 1.71,
      "learning_rate": 2.516589122604718e-05,
      "loss": 0.0509,
      "step": 820
    },
    {
      "epoch": 1.73,
      "learning_rate": 2.1702644327665803e-05,
      "loss": 0.0477,
      "step": 830
    },
    {
      "epoch": 1.75,
      "learning_rate": 1.8484885666942618e-05,
      "loss": 0.05,
      "step": 840
    },
    {
      "epoch": 1.77,
      "learning_rate": 1.551607530104951e-05,
      "loss": 0.046,
      "step": 850
    },
    {
      "epoch": 1.8,
      "learning_rate": 1.2799405592957697e-05,
      "loss": 0.0477,
      "step": 860
    },
    {
      "epoch": 1.82,
      "learning_rate": 1.0337797778688457e-05,
      "loss": 0.0479,
      "step": 870
    },
    {
      "epoch": 1.84,
      "learning_rate": 8.133898826106685e-06,
      "loss": 0.0502,
      "step": 880
    },
    {
      "epoch": 1.86,
      "learning_rate": 6.190078588635684e-06,
      "loss": 0.0495,
      "step": 890
    },
    {
      "epoch": 1.88,
      "learning_rate": 4.508427256952413e-06,
      "loss": 0.0496,
      "step": 900
    },
    {
      "epoch": 1.9,
      "learning_rate": 3.0907531114049427e-06,
      "loss": 0.0503,
      "step": 910
    },
    {
      "epoch": 1.92,
      "learning_rate": 1.9385805775677112e-06,
      "loss": 0.0505,
      "step": 920
    },
    {
      "epoch": 1.94,
      "learning_rate": 1.0531485870261537e-06,
      "loss": 0.0442,
      "step": 930
    },
    {
      "epoch": 1.96,
      "learning_rate": 4.3540924515300673e-07,
      "loss": 0.0456,
      "step": 940
    },
    {
      "epoch": 1.98,
      "learning_rate": 8.602680730912526e-08,
      "loss": 0.0447,
      "step": 950
    },
    {
      "epoch": 2.0,
      "step": 958,
      "total_flos": 2.3298774185688433e+18,
      "train_loss": 0.04974690033025682,
      "train_runtime": 16786.4105,
      "train_samples_per_second": 3.651,
      "train_steps_per_second": 0.057
    }
  ],
  "logging_steps": 10,
  "max_steps": 958,
  "num_train_epochs": 2,
  "save_steps": 150,
  "total_flos": 2.3298774185688433e+18,
  "trial_name": null,
  "trial_params": null
}