{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.956383822363204,
  "global_step": 25000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.00019825535289452815,
      "learning_rate": 4.999801744647106e-05,
      "loss": 1.8367067575454712,
      "step": 1
    },
    {
      "epoch": 0.09912767644726407,
      "learning_rate": 4.900872323552736e-05,
      "loss": 1.3704102541019538,
      "step": 500
    },
    {
      "epoch": 0.19825535289452814,
      "learning_rate": 4.8017446471054725e-05,
      "loss": 1.251982177734375,
      "step": 1000
    },
    {
      "epoch": 0.2973830293417922,
      "learning_rate": 4.702616970658208e-05,
      "loss": 1.198458984375,
      "step": 1500
    },
    {
      "epoch": 0.3965107057890563,
      "learning_rate": 4.6034892942109434e-05,
      "loss": 1.162800048828125,
      "step": 2000
    },
    {
      "epoch": 0.4956383822363204,
      "learning_rate": 4.50436161776368e-05,
      "loss": 1.13400244140625,
      "step": 2500
    },
    {
      "epoch": 0.5947660586835845,
      "learning_rate": 4.405233941316416e-05,
      "loss": 1.11087890625,
      "step": 3000
    },
    {
      "epoch": 0.6938937351308485,
      "learning_rate": 4.306106264869152e-05,
      "loss": 1.081130615234375,
      "step": 3500
    },
    {
      "epoch": 0.7930214115781126,
      "learning_rate": 4.206978588421887e-05,
      "loss": 1.06674853515625,
      "step": 4000
    },
    {
      "epoch": 0.8921490880253767,
      "learning_rate": 4.1078509119746235e-05,
      "loss": 1.0396920166015624,
      "step": 4500
    },
    {
      "epoch": 0.9912767644726408,
      "learning_rate": 4.0087232355273596e-05,
      "loss": 1.04483740234375,
      "step": 5000
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.019631266593933,
      "eval_runtime": 100.3363,
      "eval_samples_per_second": 48.337,
      "step": 5044
    },
    {
      "epoch": 1.0904044409199047,
      "learning_rate": 3.909595559080095e-05,
      "loss": 0.9977613525390625,
      "step": 5500
    },
    {
      "epoch": 1.189532117367169,
      "learning_rate": 3.810467882632831e-05,
      "loss": 1.0038666381835937,
      "step": 6000
    },
    {
      "epoch": 1.2886597938144329,
      "learning_rate": 3.7113402061855674e-05,
      "loss": 0.9831958618164063,
      "step": 6500
    },
    {
      "epoch": 1.387787470261697,
      "learning_rate": 3.612212529738303e-05,
      "loss": 0.9712546997070313,
      "step": 7000
    },
    {
      "epoch": 1.4869151467089612,
      "learning_rate": 3.513084853291039e-05,
      "loss": 0.9686679077148438,
      "step": 7500
    },
    {
      "epoch": 1.5860428231562254,
      "learning_rate": 3.413957176843775e-05,
      "loss": 0.9620776977539063,
      "step": 8000
    },
    {
      "epoch": 1.6851704996034893,
      "learning_rate": 3.3148295003965106e-05,
      "loss": 0.9629462280273438,
      "step": 8500
    },
    {
      "epoch": 1.7842981760507532,
      "learning_rate": 3.215701823949247e-05,
      "loss": 0.9611924438476562,
      "step": 9000
    },
    {
      "epoch": 1.8834258524980174,
      "learning_rate": 3.116574147501983e-05,
      "loss": 0.9391554565429687,
      "step": 9500
    },
    {
      "epoch": 1.9825535289452816,
      "learning_rate": 3.0174464710547183e-05,
      "loss": 0.914335693359375,
      "step": 10000
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.9528790712356567,
      "eval_runtime": 97.3788,
      "eval_samples_per_second": 49.806,
      "step": 10088
    },
    {
      "epoch": 2.0816812053925458,
      "learning_rate": 2.9183187946074548e-05,
      "loss": 0.9097555541992187,
      "step": 10500
    },
    {
      "epoch": 2.1808088818398095,
      "learning_rate": 2.8191911181601906e-05,
      "loss": 0.9029483642578126,
      "step": 11000
    },
    {
      "epoch": 2.2799365582870736,
      "learning_rate": 2.720063441712926e-05,
      "loss": 0.915561279296875,
      "step": 11500
    },
    {
      "epoch": 2.379064234734338,
      "learning_rate": 2.6209357652656625e-05,
      "loss": 0.8945721435546875,
      "step": 12000
    },
    {
      "epoch": 2.478191911181602,
      "learning_rate": 2.521808088818398e-05,
      "loss": 0.8839064331054688,
      "step": 12500
    },
    {
      "epoch": 2.5773195876288657,
      "learning_rate": 2.422680412371134e-05,
      "loss": 0.8842523193359375,
      "step": 13000
    },
    {
      "epoch": 2.6764472640761303,
      "learning_rate": 2.3235527359238703e-05,
      "loss": 0.9003340454101563,
      "step": 13500
    },
    {
      "epoch": 2.775574940523394,
      "learning_rate": 2.2244250594766057e-05,
      "loss": 0.9045031127929688,
      "step": 14000
    },
    {
      "epoch": 2.874702616970658,
      "learning_rate": 2.125297383029342e-05,
      "loss": 0.8826250610351563,
      "step": 14500
    },
    {
      "epoch": 2.9738302934179224,
      "learning_rate": 2.0261697065820777e-05,
      "loss": 0.8844521484375,
      "step": 15000
    },
    {
      "epoch": 3.0,
      "eval_loss": 0.9211767911911011,
      "eval_runtime": 97.3999,
      "eval_samples_per_second": 49.795,
      "step": 15132
    },
    {
      "epoch": 3.0729579698651865,
      "learning_rate": 1.9270420301348138e-05,
      "loss": 0.86255712890625,
      "step": 15500
    },
    {
      "epoch": 3.1720856463124503,
      "learning_rate": 1.8279143536875496e-05,
      "loss": 0.8561475219726562,
      "step": 16000
    },
    {
      "epoch": 3.2712133227597144,
      "learning_rate": 1.7287866772402854e-05,
      "loss": 0.8540064086914062,
      "step": 16500
    },
    {
      "epoch": 3.3703409992069786,
      "learning_rate": 1.6296590007930216e-05,
      "loss": 0.8526674194335937,
      "step": 17000
    },
    {
      "epoch": 3.4694686756542428,
      "learning_rate": 1.5305313243457574e-05,
      "loss": 0.8742741088867187,
      "step": 17500
    },
    {
      "epoch": 3.5685963521015065,
      "learning_rate": 1.4314036478984932e-05,
      "loss": 0.8503397827148438,
      "step": 18000
    },
    {
      "epoch": 3.6677240285487707,
      "learning_rate": 1.3322759714512293e-05,
      "loss": 0.8602479858398437,
      "step": 18500
    },
    {
      "epoch": 3.766851704996035,
      "learning_rate": 1.2331482950039653e-05,
      "loss": 0.8511823120117188,
      "step": 19000
    },
    {
      "epoch": 3.865979381443299,
      "learning_rate": 1.134020618556701e-05,
      "loss": 0.8461943969726563,
      "step": 19500
    },
    {
      "epoch": 3.965107057890563,
      "learning_rate": 1.0348929421094369e-05,
      "loss": 0.8488275756835938,
      "step": 20000
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.9062384963035583,
      "eval_runtime": 102.3438,
      "eval_samples_per_second": 47.389,
      "step": 20176
    },
    {
      "epoch": 4.064234734337827,
      "learning_rate": 9.357652656621729e-06,
      "loss": 0.8355828247070313,
      "step": 20500
    },
    {
      "epoch": 4.1633624107850915,
      "learning_rate": 8.366375892149088e-06,
      "loss": 0.818786865234375,
      "step": 21000
    },
    {
      "epoch": 4.262490087232355,
      "learning_rate": 7.375099127676448e-06,
      "loss": 0.8390009765625,
      "step": 21500
    },
    {
      "epoch": 4.361617763679619,
      "learning_rate": 6.383822363203806e-06,
      "loss": 0.8276206665039062,
      "step": 22000
    },
    {
      "epoch": 4.460745440126884,
      "learning_rate": 5.3925455987311665e-06,
      "loss": 0.8275084228515625,
      "step": 22500
    },
    {
      "epoch": 4.559873116574147,
      "learning_rate": 4.401268834258525e-06,
      "loss": 0.8400993041992187,
      "step": 23000
    },
    {
      "epoch": 4.659000793021412,
      "learning_rate": 3.4099920697858843e-06,
      "loss": 0.8316345825195313,
      "step": 23500
    },
    {
      "epoch": 4.758128469468676,
      "learning_rate": 2.4187153053132435e-06,
      "loss": 0.8403876953125,
      "step": 24000
    },
    {
      "epoch": 4.857256145915939,
      "learning_rate": 1.4274385408406028e-06,
      "loss": 0.8376466674804688,
      "step": 24500
    },
    {
      "epoch": 4.956383822363204,
      "learning_rate": 4.3616177636796196e-07,
      "loss": 0.8198423461914063,
      "step": 25000
    }
  ],
  "max_steps": 25220,
  "num_train_epochs": 5,
  "total_flos": 76452759802478592,
  "trial_name": null,
  "trial_params": null
}