{
  "best_metric": 1.8623046875,
  "best_model_checkpoint": "bert_12_layer_model_v2_complete_training/checkpoint-450000",
  "epoch": 5.0,
  "global_step": 457720,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 3.3715366784496464e-05,
      "loss": 7.3168,
      "step": 500
    },
    {
      "epoch": 0.01,
      "learning_rate": 3.748913176609214e-05,
      "loss": 6.6227,
      "step": 1000
    },
    {
      "epoch": 0.02,
      "learning_rate": 3.9686644919105525e-05,
      "loss": 6.5293,
      "step": 1500
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.12520067118919e-05,
      "loss": 6.4621,
      "step": 2000
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.246120561428155e-05,
      "loss": 6.423,
      "step": 2500
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.345314745008792e-05,
      "loss": 6.3848,
      "step": 3000
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.428842793791915e-05,
      "loss": 6.3631,
      "step": 3500
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.501488165769166e-05,
      "loss": 6.3323,
      "step": 4000
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.5653079262541536e-05,
      "loss": 6.3111,
      "step": 4500
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.622625682029237e-05,
      "loss": 6.3021,
      "step": 5000
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.6742676283733686e-05,
      "loss": 6.277,
      "step": 5500
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.7216022395887685e-05,
      "loss": 6.2674,
      "step": 6000
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.7649711804211924e-05,
      "loss": 6.2475,
      "step": 6500
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.805285726627034e-05,
      "loss": 6.2337,
      "step": 7000
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.842667223534965e-05,
      "loss": 6.2266,
      "step": 7500
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.8777756603491424e-05,
      "loss": 6.2155,
      "step": 8000
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.910622835154613e-05,
      "loss": 6.2127,
      "step": 8500
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.941716313408369e-05,
      "loss": 6.2033,
      "step": 9000
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.971010421672582e-05,
      "loss": 6.1884,
      "step": 9500
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.9989131766092136e-05,
      "loss": 6.1798,
      "step": 10000
    },
    {
      "epoch": 0.11,
      "eval_accuracy": 0.14852960767779255,
      "eval_loss": 6.171875,
      "eval_runtime": 1078.8913,
      "eval_samples_per_second": 285.808,
      "eval_steps_per_second": 4.467,
      "step": 10000
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.994673009916912e-05,
      "loss": 6.1762,
      "step": 10500
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.9890891628696506e-05,
      "loss": 6.1649,
      "step": 11000
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.983527651210578e-05,
      "loss": 6.1515,
      "step": 11500
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.9779438041633164e-05,
      "loss": 6.1443,
      "step": 12000
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.972382292504244e-05,
      "loss": 6.142,
      "step": 12500
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.966798445456982e-05,
      "loss": 6.1324,
      "step": 13000
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.96123693379791e-05,
      "loss": 6.1284,
      "step": 13500
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.955653086750648e-05,
      "loss": 6.1214,
      "step": 14000
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.9500915750915755e-05,
      "loss": 6.1157,
      "step": 14500
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.944507728044314e-05,
      "loss": 6.1087,
      "step": 15000
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.938946216385241e-05,
      "loss": 6.0982,
      "step": 15500
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.9333623693379796e-05,
      "loss": 6.0918,
      "step": 16000
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.9278008576789064e-05,
      "loss": 6.0853,
      "step": 16500
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.9222170106316454e-05,
      "loss": 6.0825,
      "step": 17000
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.916655498972572e-05,
      "loss": 6.075,
      "step": 17500
    },
    {
      "epoch": 0.2,
      "learning_rate": 4.9110716519253105e-05,
      "loss": 6.0639,
      "step": 18000
    },
    {
      "epoch": 0.2,
      "learning_rate": 4.905510140266238e-05,
      "loss": 6.0632,
      "step": 18500
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.8999262932189763e-05,
      "loss": 6.0566,
      "step": 19000
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.894364781559904e-05,
      "loss": 6.0534,
      "step": 19500
    },
    {
      "epoch": 0.22,
      "learning_rate": 4.888780934512642e-05,
      "loss": 6.0527,
      "step": 20000
    },
    {
      "epoch": 0.22,
      "eval_accuracy": 0.1501731560033556,
      "eval_loss": 6.046875,
      "eval_runtime": 1109.6875,
      "eval_samples_per_second": 277.876,
      "eval_steps_per_second": 4.343,
      "step": 20000
    },
    {
      "epoch": 0.22,
      "learning_rate": 4.8832194228535696e-05,
      "loss": 6.0424,
      "step": 20500
    },
    {
      "epoch": 0.23,
      "learning_rate": 4.877635575806307e-05,
      "loss": 6.0454,
      "step": 21000
    },
    {
      "epoch": 0.23,
      "learning_rate": 4.8720740641472354e-05,
      "loss": 6.038,
      "step": 21500
    },
    {
      "epoch": 0.24,
      "learning_rate": 4.866490217099973e-05,
      "loss": 6.0292,
      "step": 22000
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.8609287054409006e-05,
      "loss": 6.0285,
      "step": 22500
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.855344858393639e-05,
      "loss": 6.0209,
      "step": 23000
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.8497833467345664e-05,
      "loss": 6.0183,
      "step": 23500
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.844199499687305e-05,
      "loss": 6.0132,
      "step": 24000
    },
    {
      "epoch": 0.27,
      "learning_rate": 4.838637988028232e-05,
      "loss": 6.0076,
      "step": 24500
    },
    {
      "epoch": 0.27,
      "learning_rate": 4.8330541409809705e-05,
      "loss": 5.9644,
      "step": 25000
    },
    {
      "epoch": 0.28,
      "learning_rate": 4.827492629321898e-05,
      "loss": 5.8913,
      "step": 25500
    },
    {
      "epoch": 0.28,
      "learning_rate": 4.821908782274636e-05,
      "loss": 5.8423,
      "step": 26000
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.816347270615564e-05,
      "loss": 5.8085,
      "step": 26500
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.810763423568302e-05,
      "loss": 5.7797,
      "step": 27000
    },
    {
      "epoch": 0.3,
      "learning_rate": 4.805201911909229e-05,
      "loss": 5.7515,
      "step": 27500
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.799618064861968e-05,
      "loss": 5.7142,
      "step": 28000
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.794056553202895e-05,
      "loss": 5.6929,
      "step": 28500
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.788472706155634e-05,
      "loss": 5.6603,
      "step": 29000
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.7829111944965605e-05,
      "loss": 5.6395,
      "step": 29500
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.777327347449299e-05,
      "loss": 5.6176,
      "step": 30000
    },
    {
      "epoch": 0.33,
      "eval_accuracy": 0.1772248767905883,
      "eval_loss": 5.5703125,
      "eval_runtime": 1165.3244,
      "eval_samples_per_second": 264.61,
      "eval_steps_per_second": 4.135,
      "step": 30000
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.771765835790226e-05,
      "loss": 5.5758,
      "step": 30500
    },
    {
      "epoch": 0.34,
      "learning_rate": 4.7661819887429646e-05,
      "loss": 5.4849,
      "step": 31000
    },
    {
      "epoch": 0.34,
      "learning_rate": 4.760620477083892e-05,
      "loss": 5.348,
      "step": 31500
    },
    {
      "epoch": 0.35,
      "learning_rate": 4.75503663003663e-05,
      "loss": 5.2098,
      "step": 32000
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.749475118377558e-05,
      "loss": 5.1005,
      "step": 32500
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.7438912713302956e-05,
      "loss": 5.0023,
      "step": 33000
    },
    {
      "epoch": 0.37,
      "learning_rate": 4.738329759671224e-05,
      "loss": 4.9191,
      "step": 33500
    },
    {
      "epoch": 0.37,
      "learning_rate": 4.7327459126239614e-05,
      "loss": 4.8237,
      "step": 34000
    },
    {
      "epoch": 0.38,
      "learning_rate": 4.727184400964889e-05,
      "loss": 4.7396,
      "step": 34500
    },
    {
      "epoch": 0.38,
      "learning_rate": 4.721600553917627e-05,
      "loss": 4.6683,
      "step": 35000
    },
    {
      "epoch": 0.39,
      "learning_rate": 4.7160390422585546e-05,
      "loss": 4.5571,
      "step": 35500
    },
    {
      "epoch": 0.39,
      "learning_rate": 4.710455195211293e-05,
      "loss": 4.419,
      "step": 36000
    },
    {
      "epoch": 0.4,
      "learning_rate": 4.7048936835522204e-05,
      "loss": 4.2976,
      "step": 36500
    },
    {
      "epoch": 0.4,
      "learning_rate": 4.699309836504959e-05,
      "loss": 4.2061,
      "step": 37000
    },
    {
      "epoch": 0.41,
      "learning_rate": 4.6937483248458856e-05,
      "loss": 4.1266,
      "step": 37500
    },
    {
      "epoch": 0.42,
      "learning_rate": 4.6881644777986246e-05,
      "loss": 4.0604,
      "step": 38000
    },
    {
      "epoch": 0.42,
      "learning_rate": 4.6826029661395514e-05,
      "loss": 4.0129,
      "step": 38500
    },
    {
      "epoch": 0.43,
      "learning_rate": 4.6770191190922904e-05,
      "loss": 3.96,
      "step": 39000
    },
    {
      "epoch": 0.43,
      "learning_rate": 4.671457607433217e-05,
      "loss": 3.9183,
      "step": 39500
    },
    {
      "epoch": 0.44,
      "learning_rate": 4.665873760385956e-05,
      "loss": 3.8786,
      "step": 40000
    },
    {
      "epoch": 0.44,
      "eval_accuracy": 0.3851306885682685,
      "eval_loss": 3.744140625,
      "eval_runtime": 1141.9652,
      "eval_samples_per_second": 270.022,
      "eval_steps_per_second": 4.22,
      "step": 40000
    },
    {
      "epoch": 0.44,
      "learning_rate": 4.660312248726883e-05,
      "loss": 3.8396,
      "step": 40500
    },
    {
      "epoch": 0.45,
      "learning_rate": 4.654728401679622e-05,
      "loss": 3.8043,
      "step": 41000
    },
    {
      "epoch": 0.45,
      "learning_rate": 4.649166890020549e-05,
      "loss": 3.7731,
      "step": 41500
    },
    {
      "epoch": 0.46,
      "learning_rate": 4.643583042973287e-05,
      "loss": 3.7382,
      "step": 42000
    },
    {
      "epoch": 0.46,
      "learning_rate": 4.6380215313142146e-05,
      "loss": 3.7107,
      "step": 42500
    },
    {
      "epoch": 0.47,
      "learning_rate": 4.632437684266953e-05,
      "loss": 3.6783,
      "step": 43000
    },
    {
      "epoch": 0.48,
      "learning_rate": 4.6268761726078804e-05,
      "loss": 3.6592,
      "step": 43500
    },
    {
      "epoch": 0.48,
      "learning_rate": 4.621292325560618e-05,
      "loss": 3.6362,
      "step": 44000
    },
    {
      "epoch": 0.49,
      "learning_rate": 4.615730813901546e-05,
      "loss": 3.6159,
      "step": 44500
    },
    {
      "epoch": 0.49,
      "learning_rate": 4.610146966854284e-05,
      "loss": 3.5905,
      "step": 45000
    },
    {
      "epoch": 0.5,
      "learning_rate": 4.604585455195212e-05,
      "loss": 3.5683,
      "step": 45500
    },
    {
      "epoch": 0.5,
      "learning_rate": 4.5990016081479496e-05,
      "loss": 3.5432,
      "step": 46000
    },
    {
      "epoch": 0.51,
      "learning_rate": 4.593440096488877e-05,
      "loss": 3.527,
      "step": 46500
    },
    {
      "epoch": 0.51,
      "learning_rate": 4.5878562494416154e-05,
      "loss": 3.5073,
      "step": 47000
    },
    {
      "epoch": 0.52,
      "learning_rate": 4.582294737782543e-05,
      "loss": 3.4948,
      "step": 47500
    },
    {
      "epoch": 0.52,
      "learning_rate": 4.576710890735281e-05,
      "loss": 3.4776,
      "step": 48000
    },
    {
      "epoch": 0.53,
      "learning_rate": 4.571149379076208e-05,
      "loss": 3.4541,
      "step": 48500
    },
    {
      "epoch": 0.54,
      "learning_rate": 4.565565532028947e-05,
      "loss": 3.443,
      "step": 49000
    },
    {
      "epoch": 0.54,
      "learning_rate": 4.560004020369874e-05,
      "loss": 3.4296,
      "step": 49500
    },
    {
      "epoch": 0.55,
      "learning_rate": 4.554420173322613e-05,
      "loss": 3.4104,
      "step": 50000
    },
    {
      "epoch": 0.55,
      "eval_accuracy": 0.4326803997069381,
      "eval_loss": 3.310546875,
      "eval_runtime": 1153.5787,
      "eval_samples_per_second": 267.304,
      "eval_steps_per_second": 4.177,
      "step": 50000
    },
    {
      "epoch": 0.55,
      "learning_rate": 4.5488586616635397e-05,
      "loss": 3.3905,
      "step": 50500
    },
    {
      "epoch": 0.56,
      "learning_rate": 4.5432748146162787e-05,
      "loss": 3.3848,
      "step": 51000
    },
    {
      "epoch": 0.56,
      "learning_rate": 4.5377133029572055e-05,
      "loss": 3.3768,
      "step": 51500
    },
    {
      "epoch": 0.57,
      "learning_rate": 4.5321294559099445e-05,
      "loss": 3.3554,
      "step": 52000
    },
    {
      "epoch": 0.57,
      "learning_rate": 4.526567944250871e-05,
      "loss": 3.3403,
      "step": 52500
    },
    {
      "epoch": 0.58,
      "learning_rate": 4.5209840972036096e-05,
      "loss": 3.3207,
      "step": 53000
    },
    {
      "epoch": 0.58,
      "learning_rate": 4.515422585544537e-05,
      "loss": 3.3162,
      "step": 53500
    },
    {
      "epoch": 0.59,
      "learning_rate": 4.5098387384972754e-05,
      "loss": 3.2997,
      "step": 54000
    },
    {
      "epoch": 0.6,
      "learning_rate": 4.504277226838203e-05,
      "loss": 3.2948,
      "step": 54500
    },
    {
      "epoch": 0.6,
      "learning_rate": 4.498693379790941e-05,
      "loss": 3.2763,
      "step": 55000
    },
    {
      "epoch": 0.61,
      "learning_rate": 4.493131868131869e-05,
      "loss": 3.2682,
      "step": 55500
    },
    {
      "epoch": 0.61,
      "learning_rate": 4.487548021084606e-05,
      "loss": 3.2571,
      "step": 56000
    },
    {
      "epoch": 0.62,
      "learning_rate": 4.4819865094255345e-05,
      "loss": 3.243,
      "step": 56500
    },
    {
      "epoch": 0.62,
      "learning_rate": 4.476402662378272e-05,
      "loss": 3.2327,
      "step": 57000
    },
    {
      "epoch": 0.63,
      "learning_rate": 4.4708411507192e-05,
      "loss": 3.229,
      "step": 57500
    },
    {
      "epoch": 0.63,
      "learning_rate": 4.465257303671938e-05,
      "loss": 3.2023,
      "step": 58000
    },
    {
      "epoch": 0.64,
      "learning_rate": 4.4596957920128654e-05,
      "loss": 3.2021,
      "step": 58500
    },
    {
      "epoch": 0.64,
      "learning_rate": 4.454111944965604e-05,
      "loss": 3.1926,
      "step": 59000
    },
    {
      "epoch": 0.65,
      "learning_rate": 4.448550433306531e-05,
      "loss": 3.1862,
      "step": 59500
    },
    {
      "epoch": 0.66,
      "learning_rate": 4.4429665862592695e-05,
      "loss": 3.1802,
      "step": 60000
    },
    {
      "epoch": 0.66,
      "eval_accuracy": 0.4600636447202696,
      "eval_loss": 3.078125,
      "eval_runtime": 1125.3252,
      "eval_samples_per_second": 274.015,
      "eval_steps_per_second": 4.282,
      "step": 60000
    },
    {
      "epoch": 0.66,
      "learning_rate": 4.437405074600196e-05,
      "loss": 3.164,
      "step": 60500
    },
    {
      "epoch": 0.67,
      "learning_rate": 4.431821227552935e-05,
      "loss": 3.1534,
      "step": 61000
    },
    {
      "epoch": 0.67,
      "learning_rate": 4.426259715893862e-05,
      "loss": 3.1509,
      "step": 61500
    },
    {
      "epoch": 0.68,
      "learning_rate": 4.420675868846601e-05,
      "loss": 3.1336,
      "step": 62000
    },
    {
      "epoch": 0.68,
      "learning_rate": 4.415114357187528e-05,
      "loss": 3.1277,
      "step": 62500
    },
    {
      "epoch": 0.69,
      "learning_rate": 4.409530510140266e-05,
      "loss": 3.1124,
      "step": 63000
    },
    {
      "epoch": 0.69,
      "learning_rate": 4.403968998481194e-05,
      "loss": 3.1036,
      "step": 63500
    },
    {
      "epoch": 0.7,
      "learning_rate": 4.398385151433932e-05,
      "loss": 3.1021,
      "step": 64000
    },
    {
      "epoch": 0.7,
      "learning_rate": 4.3928236397748595e-05,
      "loss": 3.0982,
      "step": 64500
    },
    {
      "epoch": 0.71,
      "learning_rate": 4.387239792727598e-05,
      "loss": 3.0872,
      "step": 65000
    },
    {
      "epoch": 0.72,
      "learning_rate": 4.3816782810685253e-05,
      "loss": 3.0762,
      "step": 65500
    },
    {
      "epoch": 0.72,
      "learning_rate": 4.376094434021264e-05,
      "loss": 3.0731,
      "step": 66000
    },
    {
      "epoch": 0.73,
      "learning_rate": 4.370532922362191e-05,
      "loss": 3.0649,
      "step": 66500
    },
    {
      "epoch": 0.73,
      "learning_rate": 4.3649490753149295e-05,
      "loss": 3.0516,
      "step": 67000
    },
    {
      "epoch": 0.74,
      "learning_rate": 4.359387563655857e-05,
      "loss": 3.0461,
      "step": 67500
    },
    {
      "epoch": 0.74,
      "learning_rate": 4.3538037166085946e-05,
      "loss": 3.0406,
      "step": 68000
    },
    {
      "epoch": 0.75,
      "learning_rate": 4.348242204949523e-05,
      "loss": 3.0346,
      "step": 68500
    },
    {
      "epoch": 0.75,
      "learning_rate": 4.3426583579022604e-05,
      "loss": 3.0305,
      "step": 69000
    },
    {
      "epoch": 0.76,
      "learning_rate": 4.337096846243188e-05,
      "loss": 3.0183,
      "step": 69500
    },
    {
      "epoch": 0.76,
      "learning_rate": 4.331512999195926e-05,
      "loss": 3.0115,
      "step": 70000
    },
    {
      "epoch": 0.76,
      "eval_accuracy": 0.4804065862365683,
      "eval_loss": 2.9140625,
      "eval_runtime": 1148.0843,
      "eval_samples_per_second": 268.583,
      "eval_steps_per_second": 4.197,
      "step": 70000
    },
    {
      "epoch": 0.77,
      "learning_rate": 4.325951487536854e-05,
      "loss": 3.0062,
      "step": 70500
    },
    {
      "epoch": 0.78,
      "learning_rate": 4.320367640489592e-05,
      "loss": 2.9965,
      "step": 71000
    },
    {
      "epoch": 0.78,
      "learning_rate": 4.3148061288305195e-05,
      "loss": 2.996,
      "step": 71500
    },
    {
      "epoch": 0.79,
      "learning_rate": 4.309222281783258e-05,
      "loss": 2.9863,
      "step": 72000
    },
    {
      "epoch": 0.79,
      "learning_rate": 4.3036607701241846e-05,
      "loss": 2.9806,
      "step": 72500
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.2980769230769236e-05,
      "loss": 2.9721,
      "step": 73000
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.2925154114178504e-05,
      "loss": 2.9673,
      "step": 73500
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.286931564370589e-05,
      "loss": 2.9626,
      "step": 74000
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.281370052711516e-05,
      "loss": 2.9566,
      "step": 74500
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.2757862056642545e-05,
      "loss": 2.9434,
      "step": 75000
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.270224694005182e-05,
      "loss": 2.9452,
      "step": 75500
    },
    {
      "epoch": 0.83,
      "learning_rate": 4.2646408469579203e-05,
      "loss": 2.9393,
      "step": 76000
    },
    {
      "epoch": 0.84,
      "learning_rate": 4.259079335298848e-05,
      "loss": 2.9264,
      "step": 76500
    },
    {
      "epoch": 0.84,
      "learning_rate": 4.253495488251586e-05,
      "loss": 2.9295,
      "step": 77000
    },
    {
      "epoch": 0.85,
      "learning_rate": 4.2479339765925136e-05,
      "loss": 2.9244,
      "step": 77500
    },
    {
      "epoch": 0.85,
      "learning_rate": 4.242350129545252e-05,
      "loss": 2.9103,
      "step": 78000
    },
    {
      "epoch": 0.86,
      "learning_rate": 4.2367886178861794e-05,
      "loss": 2.909,
      "step": 78500
    },
    {
      "epoch": 0.86,
      "learning_rate": 4.231204770838918e-05,
      "loss": 2.9023,
      "step": 79000
    },
    {
      "epoch": 0.87,
      "learning_rate": 4.2256432591798446e-05,
      "loss": 2.897,
      "step": 79500
    },
    {
      "epoch": 0.87,
      "learning_rate": 4.220059412132583e-05,
      "loss": 2.8893,
      "step": 80000
    },
    {
      "epoch": 0.87,
      "eval_accuracy": 0.49558027459098025,
      "eval_loss": 2.79296875,
      "eval_runtime": 1161.4766,
      "eval_samples_per_second": 265.486,
      "eval_steps_per_second": 4.149,
      "step": 80000
    },
    {
      "epoch": 0.88,
      "learning_rate": 4.2144979004735104e-05,
      "loss": 2.8842,
      "step": 80500
    },
    {
      "epoch": 0.88,
      "learning_rate": 4.208914053426249e-05,
      "loss": 2.8806,
      "step": 81000
    },
    {
      "epoch": 0.89,
      "learning_rate": 4.203352541767176e-05,
      "loss": 2.878,
      "step": 81500
    },
    {
      "epoch": 0.9,
      "learning_rate": 4.1977686947199145e-05,
      "loss": 2.866,
      "step": 82000
    },
    {
      "epoch": 0.9,
      "learning_rate": 4.192207183060842e-05,
      "loss": 2.868,
      "step": 82500
    },
    {
      "epoch": 0.91,
      "learning_rate": 4.18662333601358e-05,
      "loss": 2.8598,
      "step": 83000
    },
    {
      "epoch": 0.91,
      "learning_rate": 4.181061824354508e-05,
      "loss": 2.8617,
      "step": 83500
    },
    {
      "epoch": 0.92,
      "learning_rate": 4.1754779773072454e-05,
      "loss": 2.8433,
      "step": 84000
    },
    {
      "epoch": 0.92,
      "learning_rate": 4.169916465648173e-05,
      "loss": 2.8513,
      "step": 84500
    },
    {
      "epoch": 0.93,
      "learning_rate": 4.164332618600911e-05,
      "loss": 2.8363,
      "step": 85000
    },
    {
      "epoch": 0.93,
      "learning_rate": 4.158771106941839e-05,
      "loss": 2.8388,
      "step": 85500
    },
    {
      "epoch": 0.94,
      "learning_rate": 4.153187259894577e-05,
      "loss": 2.8346,
      "step": 86000
    },
    {
      "epoch": 0.94,
      "learning_rate": 4.1476257482355045e-05,
      "loss": 2.8296,
      "step": 86500
    },
    {
      "epoch": 0.95,
      "learning_rate": 4.142041901188243e-05,
      "loss": 2.8238,
      "step": 87000
    },
    {
      "epoch": 0.96,
      "learning_rate": 4.13648038952917e-05,
      "loss": 2.8172,
      "step": 87500
    },
    {
      "epoch": 0.96,
      "learning_rate": 4.1308965424819086e-05,
      "loss": 2.8156,
      "step": 88000
    },
    {
      "epoch": 0.97,
      "learning_rate": 4.125335030822836e-05,
      "loss": 2.8063,
      "step": 88500
    },
    {
      "epoch": 0.97,
      "learning_rate": 4.1197511837755744e-05,
      "loss": 2.8078,
      "step": 89000
    },
    {
      "epoch": 0.98,
      "learning_rate": 4.114189672116502e-05,
      "loss": 2.7965,
      "step": 89500
    },
    {
      "epoch": 0.98,
      "learning_rate": 4.10860582506924e-05,
      "loss": 2.7983,
      "step": 90000
    },
    {
      "epoch": 0.98,
      "eval_accuracy": 0.5080531872849392,
      "eval_loss": 2.697265625,
      "eval_runtime": 1166.2832,
      "eval_samples_per_second": 264.392,
      "eval_steps_per_second": 4.132,
      "step": 90000
    },
    {
      "epoch": 0.99,
      "learning_rate": 4.103044313410167e-05,
      "loss": 2.7939,
      "step": 90500
    },
    {
      "epoch": 0.99,
      "learning_rate": 4.097460466362906e-05,
      "loss": 2.785,
      "step": 91000
    },
    {
      "epoch": 1.0,
      "learning_rate": 4.091898954703833e-05,
      "loss": 2.7767,
      "step": 91500
    },
    {
      "epoch": 1.0,
      "learning_rate": 4.086315107656571e-05,
      "loss": 2.7715,
      "step": 92000
    },
    {
      "epoch": 1.01,
      "learning_rate": 4.0807535959974986e-05,
      "loss": 2.7693,
      "step": 92500
    },
    {
      "epoch": 1.02,
      "learning_rate": 4.075169748950237e-05,
      "loss": 2.756,
      "step": 93000
    },
    {
      "epoch": 1.02,
      "learning_rate": 4.0696082372911645e-05,
      "loss": 2.7613,
      "step": 93500
    },
    {
      "epoch": 1.03,
      "learning_rate": 4.064024390243902e-05,
      "loss": 2.7611,
      "step": 94000
    },
    {
      "epoch": 1.03,
      "learning_rate": 4.05846287858483e-05,
      "loss": 2.7548,
      "step": 94500
    },
    {
      "epoch": 1.04,
      "learning_rate": 4.052879031537568e-05,
      "loss": 2.7462,
      "step": 95000
    },
    {
      "epoch": 1.04,
      "learning_rate": 4.047317519878496e-05,
      "loss": 2.7428,
      "step": 95500
    },
    {
      "epoch": 1.05,
      "learning_rate": 4.041733672831234e-05,
      "loss": 2.7396,
      "step": 96000
    },
    {
      "epoch": 1.05,
      "learning_rate": 4.036172161172161e-05,
      "loss": 2.7386,
      "step": 96500
    },
    {
      "epoch": 1.06,
      "learning_rate": 4.0305883141248995e-05,
      "loss": 2.7342,
      "step": 97000
    },
    {
      "epoch": 1.07,
      "learning_rate": 4.025026802465827e-05,
      "loss": 2.7322,
      "step": 97500
    },
    {
      "epoch": 1.07,
      "learning_rate": 4.019442955418565e-05,
      "loss": 2.7236,
      "step": 98000
    },
    {
      "epoch": 1.08,
      "learning_rate": 4.013881443759493e-05,
      "loss": 2.7217,
      "step": 98500
    },
    {
      "epoch": 1.08,
      "learning_rate": 4.008297596712231e-05,
      "loss": 2.709,
      "step": 99000
    },
    {
      "epoch": 1.09,
      "learning_rate": 4.0027360850531586e-05,
      "loss": 2.7157,
      "step": 99500
    },
    {
      "epoch": 1.09,
      "learning_rate": 3.997152238005897e-05,
      "loss": 2.7039,
      "step": 100000
    },
    {
      "epoch": 1.09,
      "eval_accuracy": 0.5214649672921134,
      "eval_loss": 2.6015625,
      "eval_runtime": 1419.9277,
      "eval_samples_per_second": 217.163,
      "eval_steps_per_second": 3.394,
      "step": 100000
    },
    {
      "epoch": 1.1,
      "learning_rate": 3.991590726346824e-05,
      "loss": 2.7006,
      "step": 100500
    },
    {
      "epoch": 1.1,
      "learning_rate": 3.986006879299563e-05,
      "loss": 2.6918,
      "step": 101000
    },
    {
      "epoch": 1.11,
      "learning_rate": 3.9804453676404895e-05,
      "loss": 2.6905,
      "step": 101500
    },
    {
      "epoch": 1.11,
      "learning_rate": 3.9748615205932285e-05,
      "loss": 2.6767,
      "step": 102000
    },
    {
      "epoch": 1.12,
      "learning_rate": 3.969300008934155e-05,
      "loss": 2.6684,
      "step": 102500
    },
    {
      "epoch": 1.13,
      "learning_rate": 3.963716161886894e-05,
      "loss": 2.6593,
      "step": 103000
    },
    {
      "epoch": 1.13,
      "learning_rate": 3.958154650227821e-05,
      "loss": 2.6453,
      "step": 103500
    },
    {
      "epoch": 1.14,
      "learning_rate": 3.9525708031805594e-05,
      "loss": 2.6396,
      "step": 104000
    },
    {
      "epoch": 1.14,
      "learning_rate": 3.947009291521487e-05,
      "loss": 2.6349,
      "step": 104500
    },
    {
      "epoch": 1.15,
      "learning_rate": 3.9414254444742246e-05,
      "loss": 2.6305,
      "step": 105000
    },
    {
      "epoch": 1.15,
      "learning_rate": 3.935863932815153e-05,
      "loss": 2.6234,
      "step": 105500
    },
    {
      "epoch": 1.16,
      "learning_rate": 3.9302800857678904e-05,
      "loss": 2.6105,
      "step": 106000
    },
    {
      "epoch": 1.16,
      "learning_rate": 3.9247185741088185e-05,
      "loss": 2.6056,
      "step": 106500
    },
    {
      "epoch": 1.17,
      "learning_rate": 3.919134727061556e-05,
      "loss": 2.6018,
      "step": 107000
    },
    {
      "epoch": 1.17,
      "learning_rate": 3.9135732154024843e-05,
      "loss": 2.5896,
      "step": 107500
    },
    {
      "epoch": 1.18,
      "learning_rate": 3.907989368355222e-05,
      "loss": 2.5921,
      "step": 108000
    },
    {
      "epoch": 1.19,
      "learning_rate": 3.9024278566961495e-05,
      "loss": 2.5853,
      "step": 108500
    },
    {
      "epoch": 1.19,
      "learning_rate": 3.896844009648888e-05,
      "loss": 2.5732,
      "step": 109000
    },
    {
      "epoch": 1.2,
      "learning_rate": 3.891282497989815e-05,
      "loss": 2.5678,
      "step": 109500
    },
    {
      "epoch": 1.2,
      "learning_rate": 3.8856986509425536e-05,
      "loss": 2.5658,
      "step": 110000
    },
    {
      "epoch": 1.2,
      "eval_accuracy": 0.5448239716997432,
      "eval_loss": 2.455078125,
      "eval_runtime": 1156.6399,
      "eval_samples_per_second": 266.596,
      "eval_steps_per_second": 4.166,
      "step": 110000
    },
    {
      "epoch": 1.21,
      "learning_rate": 3.8801371392834804e-05,
      "loss": 2.5615,
      "step": 110500
    },
    {
      "epoch": 1.21,
      "learning_rate": 3.8745532922362194e-05,
      "loss": 2.5516,
      "step": 111000
    },
    {
      "epoch": 1.22,
      "learning_rate": 3.868991780577146e-05,
      "loss": 2.5538,
      "step": 111500
    },
    {
      "epoch": 1.22,
      "learning_rate": 3.863407933529885e-05,
      "loss": 2.5485,
      "step": 112000
    },
    {
      "epoch": 1.23,
      "learning_rate": 3.857846421870812e-05,
      "loss": 2.5425,
      "step": 112500
    },
    {
      "epoch": 1.23,
      "learning_rate": 3.852262574823551e-05,
      "loss": 2.5413,
      "step": 113000
    },
    {
      "epoch": 1.24,
      "learning_rate": 3.846701063164478e-05,
      "loss": 2.533,
      "step": 113500
    },
    {
      "epoch": 1.25,
      "learning_rate": 3.841117216117217e-05,
      "loss": 2.5268,
      "step": 114000
    },
    {
      "epoch": 1.25,
      "learning_rate": 3.8355557044581436e-05,
      "loss": 2.5288,
      "step": 114500
    },
    {
      "epoch": 1.26,
      "learning_rate": 3.829971857410882e-05,
      "loss": 2.5193,
      "step": 115000
    },
    {
      "epoch": 1.26,
      "learning_rate": 3.8244103457518094e-05,
      "loss": 2.5148,
      "step": 115500
    },
    {
      "epoch": 1.27,
      "learning_rate": 3.818826498704548e-05,
      "loss": 2.5149,
      "step": 116000
    },
    {
      "epoch": 1.27,
      "learning_rate": 3.813264987045475e-05,
      "loss": 2.5089,
      "step": 116500
    },
    {
      "epoch": 1.28,
      "learning_rate": 3.807681139998213e-05,
      "loss": 2.5051,
      "step": 117000
    },
    {
      "epoch": 1.28,
      "learning_rate": 3.802119628339141e-05,
      "loss": 2.4994,
      "step": 117500
    },
    {
      "epoch": 1.29,
      "learning_rate": 3.7965357812918787e-05,
      "loss": 2.4975,
      "step": 118000
    },
    {
      "epoch": 1.29,
      "learning_rate": 3.790974269632807e-05,
      "loss": 2.4973,
      "step": 118500
    },
    {
      "epoch": 1.3,
      "learning_rate": 3.7853904225855445e-05,
      "loss": 2.49,
      "step": 119000
    },
    {
      "epoch": 1.31,
      "learning_rate": 3.7798289109264726e-05,
      "loss": 2.4893,
      "step": 119500
    },
    {
      "epoch": 1.31,
      "learning_rate": 3.77424506387921e-05,
      "loss": 2.4846,
      "step": 120000
    },
    {
      "epoch": 1.31,
      "eval_accuracy": 0.5575897132745044,
      "eval_loss": 2.373046875,
      "eval_runtime": 1158.688,
      "eval_samples_per_second": 266.125,
      "eval_steps_per_second": 4.159,
      "step": 120000
    },
    {
      "epoch": 1.32,
      "learning_rate": 3.768683552220138e-05,
      "loss": 2.4811,
      "step": 120500
    },
    {
      "epoch": 1.32,
      "learning_rate": 3.763099705172876e-05,
      "loss": 2.4738,
      "step": 121000
    },
    {
      "epoch": 1.33,
      "learning_rate": 3.757538193513803e-05,
      "loss": 2.4697,
      "step": 121500
    },
    {
      "epoch": 1.33,
      "learning_rate": 3.751954346466542e-05,
      "loss": 2.466,
      "step": 122000
    },
    {
      "epoch": 1.34,
      "learning_rate": 3.746392834807469e-05,
      "loss": 2.4581,
      "step": 122500
    },
    {
      "epoch": 1.34,
      "learning_rate": 3.740808987760208e-05,
      "loss": 2.4666,
      "step": 123000
    },
    {
      "epoch": 1.35,
      "learning_rate": 3.7352474761011345e-05,
      "loss": 2.4585,
      "step": 123500
    },
    {
      "epoch": 1.35,
      "learning_rate": 3.7296636290538735e-05,
      "loss": 2.4605,
      "step": 124000
    },
    {
      "epoch": 1.36,
      "learning_rate": 3.7241021173948e-05,
      "loss": 2.4512,
      "step": 124500
    },
    {
      "epoch": 1.37,
      "learning_rate": 3.718518270347539e-05,
      "loss": 2.4536,
      "step": 125000
    },
    {
      "epoch": 1.37,
      "learning_rate": 3.712956758688466e-05,
      "loss": 2.4465,
      "step": 125500
    },
    {
      "epoch": 1.38,
      "learning_rate": 3.7073729116412044e-05,
      "loss": 2.4468,
      "step": 126000
    },
    {
      "epoch": 1.38,
      "learning_rate": 3.701811399982132e-05,
      "loss": 2.4401,
      "step": 126500
    },
    {
      "epoch": 1.39,
      "learning_rate": 3.69622755293487e-05,
      "loss": 2.4435,
      "step": 127000
    },
    {
      "epoch": 1.39,
      "learning_rate": 3.690666041275798e-05,
      "loss": 2.4338,
      "step": 127500
    },
    {
      "epoch": 1.4,
      "learning_rate": 3.685082194228536e-05,
      "loss": 2.431,
      "step": 128000
    },
    {
      "epoch": 1.4,
      "learning_rate": 3.6795206825694635e-05,
      "loss": 2.4314,
      "step": 128500
    },
    {
      "epoch": 1.41,
      "learning_rate": 3.673936835522201e-05,
      "loss": 2.4304,
      "step": 129000
    },
    {
      "epoch": 1.41,
      "learning_rate": 3.668375323863129e-05,
      "loss": 2.4251,
      "step": 129500
    },
    {
      "epoch": 1.42,
      "learning_rate": 3.662791476815867e-05,
      "loss": 2.4284,
      "step": 130000
    },
    {
      "epoch": 1.42,
      "eval_accuracy": 0.5663108295591563,
      "eval_loss": 2.31640625,
      "eval_runtime": 1123.8513,
      "eval_samples_per_second": 274.374,
      "eval_steps_per_second": 4.288,
      "step": 130000
    },
    {
      "epoch": 1.43,
      "learning_rate": 3.657229965156795e-05,
      "loss": 2.4186,
      "step": 130500
    },
    {
      "epoch": 1.43,
      "learning_rate": 3.651646118109533e-05,
      "loss": 2.4175,
      "step": 131000
    },
    {
      "epoch": 1.44,
      "learning_rate": 3.64608460645046e-05,
      "loss": 2.419,
      "step": 131500
    },
    {
      "epoch": 1.44,
      "learning_rate": 3.6405007594031985e-05,
      "loss": 2.4102,
      "step": 132000
    },
    {
      "epoch": 1.45,
      "learning_rate": 3.634939247744126e-05,
      "loss": 2.4141,
      "step": 132500
    },
    {
      "epoch": 1.45,
      "learning_rate": 3.6293554006968643e-05,
      "loss": 2.4113,
      "step": 133000
    },
    {
      "epoch": 1.46,
      "learning_rate": 3.623793889037791e-05,
      "loss": 2.4101,
      "step": 133500
    },
    {
      "epoch": 1.46,
      "learning_rate": 3.61821004199053e-05,
      "loss": 2.4115,
      "step": 134000
    },
    {
      "epoch": 1.47,
      "learning_rate": 3.612648530331457e-05,
      "loss": 2.4031,
      "step": 134500
    },
    {
      "epoch": 1.47,
      "learning_rate": 3.607064683284196e-05,
      "loss": 2.4077,
      "step": 135000
    },
    {
      "epoch": 1.48,
      "learning_rate": 3.601503171625123e-05,
      "loss": 2.3965,
      "step": 135500
    },
    {
      "epoch": 1.49,
      "learning_rate": 3.595919324577861e-05,
      "loss": 2.3925,
      "step": 136000
    },
    {
      "epoch": 1.49,
      "learning_rate": 3.5903578129187886e-05,
      "loss": 2.3889,
      "step": 136500
    },
    {
      "epoch": 1.5,
      "learning_rate": 3.584773965871527e-05,
      "loss": 2.385,
      "step": 137000
    },
    {
      "epoch": 1.5,
      "learning_rate": 3.5792124542124544e-05,
      "loss": 2.3869,
      "step": 137500
    },
    {
      "epoch": 1.51,
      "learning_rate": 3.573628607165193e-05,
      "loss": 2.3868,
      "step": 138000
    },
    {
      "epoch": 1.51,
      "learning_rate": 3.56806709550612e-05,
      "loss": 2.3853,
      "step": 138500
    },
    {
      "epoch": 1.52,
      "learning_rate": 3.5624832484588585e-05,
      "loss": 2.3829,
      "step": 139000
    },
    {
      "epoch": 1.52,
      "learning_rate": 3.556921736799786e-05,
      "loss": 2.3826,
      "step": 139500
    },
    {
      "epoch": 1.53,
      "learning_rate": 3.551337889752524e-05,
      "loss": 2.3723,
      "step": 140000
    },
    {
      "epoch": 1.53,
      "eval_accuracy": 0.5726494894199156,
      "eval_loss": 2.2734375,
      "eval_runtime": 1199.6259,
      "eval_samples_per_second": 257.043,
      "eval_steps_per_second": 4.017,
      "step": 140000
    },
    {
      "epoch": 1.53,
      "learning_rate": 3.545776378093452e-05,
      "loss": 2.3741,
      "step": 140500
    },
    {
      "epoch": 1.54,
      "learning_rate": 3.5401925310461894e-05,
      "loss": 2.377,
      "step": 141000
    },
    {
      "epoch": 1.55,
      "learning_rate": 3.5346310193871176e-05,
      "loss": 2.3667,
      "step": 141500
    },
    {
      "epoch": 1.55,
      "learning_rate": 3.529047172339855e-05,
      "loss": 2.3684,
      "step": 142000
    },
    {
      "epoch": 1.56,
      "learning_rate": 3.523485660680783e-05,
      "loss": 2.3676,
      "step": 142500
    },
    {
      "epoch": 1.56,
      "learning_rate": 3.517901813633521e-05,
      "loss": 2.3665,
      "step": 143000
    },
    {
      "epoch": 1.57,
      "learning_rate": 3.5123403019744485e-05,
      "loss": 2.3671,
      "step": 143500
    },
    {
      "epoch": 1.57,
      "learning_rate": 3.506756454927187e-05,
      "loss": 2.3632,
      "step": 144000
    },
    {
      "epoch": 1.58,
      "learning_rate": 3.501194943268114e-05,
      "loss": 2.3629,
      "step": 144500
    },
    {
      "epoch": 1.58,
      "learning_rate": 3.4956110962208526e-05,
      "loss": 2.3608,
      "step": 145000
    },
    {
      "epoch": 1.59,
      "learning_rate": 3.4900495845617794e-05,
      "loss": 2.358,
      "step": 145500
    },
    {
      "epoch": 1.59,
      "learning_rate": 3.4844657375145184e-05,
      "loss": 2.3542,
      "step": 146000
    },
    {
      "epoch": 1.6,
      "learning_rate": 3.478904225855445e-05,
      "loss": 2.3491,
      "step": 146500
    },
    {
      "epoch": 1.61,
      "learning_rate": 3.4733203788081836e-05,
      "loss": 2.3547,
      "step": 147000
    },
    {
      "epoch": 1.61,
      "learning_rate": 3.467758867149111e-05,
      "loss": 2.342,
      "step": 147500
    },
    {
      "epoch": 1.62,
      "learning_rate": 3.4621750201018494e-05,
      "loss": 2.3466,
      "step": 148000
    },
    {
      "epoch": 1.62,
      "learning_rate": 3.456613508442777e-05,
      "loss": 2.3514,
      "step": 148500
    },
    {
      "epoch": 1.63,
      "learning_rate": 3.451029661395515e-05,
      "loss": 2.3442,
      "step": 149000
    },
    {
      "epoch": 1.63,
      "learning_rate": 3.4454681497364427e-05,
      "loss": 2.3381,
      "step": 149500
    },
    {
      "epoch": 1.64,
      "learning_rate": 3.439884302689181e-05,
      "loss": 2.3382,
      "step": 150000
    },
    {
      "epoch": 1.64,
      "eval_accuracy": 0.578659265524958,
      "eval_loss": 2.234375,
      "eval_runtime": 1153.4604,
      "eval_samples_per_second": 267.331,
      "eval_steps_per_second": 4.178,
      "step": 150000
    },
    {
      "epoch": 1.64,
      "learning_rate": 3.4343227910301085e-05,
      "loss": 2.3388,
      "step": 150500
    },
    {
      "epoch": 1.65,
      "learning_rate": 3.428738943982847e-05,
      "loss": 2.3338,
      "step": 151000
    },
    {
      "epoch": 1.65,
      "learning_rate": 3.423177432323774e-05,
      "loss": 2.3294,
      "step": 151500
    },
    {
      "epoch": 1.66,
      "learning_rate": 3.4175935852765126e-05,
      "loss": 2.3321,
      "step": 152000
    },
    {
      "epoch": 1.67,
      "learning_rate": 3.4120320736174394e-05,
      "loss": 2.3284,
      "step": 152500
    },
    {
      "epoch": 1.67,
      "learning_rate": 3.406448226570178e-05,
      "loss": 2.3289,
      "step": 153000
    },
    {
      "epoch": 1.68,
      "learning_rate": 3.400886714911105e-05,
      "loss": 2.3261,
      "step": 153500
    },
    {
      "epoch": 1.68,
      "learning_rate": 3.3953028678638435e-05,
      "loss": 2.3236,
      "step": 154000
    },
    {
      "epoch": 1.69,
      "learning_rate": 3.389741356204771e-05,
      "loss": 2.3273,
      "step": 154500
    },
    {
      "epoch": 1.69,
      "learning_rate": 3.384157509157509e-05,
      "loss": 2.3252,
      "step": 155000
    },
    {
      "epoch": 1.7,
      "learning_rate": 3.378595997498437e-05,
      "loss": 2.3183,
      "step": 155500
    },
    {
      "epoch": 1.7,
      "learning_rate": 3.373012150451175e-05,
      "loss": 2.3221,
      "step": 156000
    },
    {
      "epoch": 1.71,
      "learning_rate": 3.3674506387921026e-05,
      "loss": 2.3161,
      "step": 156500
    },
    {
      "epoch": 1.72,
      "learning_rate": 3.36186679174484e-05,
      "loss": 2.3116,
      "step": 157000
    },
    {
      "epoch": 1.72,
      "learning_rate": 3.356305280085768e-05,
      "loss": 2.3156,
      "step": 157500
    },
    {
      "epoch": 1.73,
      "learning_rate": 3.350721433038506e-05,
      "loss": 2.3084,
      "step": 158000
    },
    {
      "epoch": 1.73,
      "learning_rate": 3.3451599213794335e-05,
      "loss": 2.3133,
      "step": 158500
    },
    {
      "epoch": 1.74,
      "learning_rate": 3.339576074332172e-05,
      "loss": 2.3071,
      "step": 159000
    },
    {
      "epoch": 1.74,
      "learning_rate": 3.334014562673099e-05,
      "loss": 2.3062,
      "step": 159500
    },
    {
      "epoch": 1.75,
      "learning_rate": 3.3284307156258376e-05,
      "loss": 2.3084,
      "step": 160000
    },
    {
      "epoch": 1.75,
      "eval_accuracy": 0.5828903316963407,
      "eval_loss": 2.203125,
      "eval_runtime": 1108.8947,
      "eval_samples_per_second": 278.075,
      "eval_steps_per_second": 4.346,
      "step": 160000
    },
    {
      "epoch": 1.75,
      "learning_rate": 3.322869203966765e-05,
      "loss": 2.308,
      "step": 160500
    },
    {
      "epoch": 1.76,
      "learning_rate": 3.3172853569195035e-05,
      "loss": 2.3002,
      "step": 161000
    },
    {
      "epoch": 1.76,
      "learning_rate": 3.311723845260431e-05,
      "loss": 2.3019,
      "step": 161500
    },
    {
      "epoch": 1.77,
      "learning_rate": 3.306139998213169e-05,
      "loss": 2.3005,
      "step": 162000
    },
    {
      "epoch": 1.78,
      "learning_rate": 3.300578486554097e-05,
      "loss": 2.297,
      "step": 162500
    },
    {
      "epoch": 1.78,
      "learning_rate": 3.294994639506835e-05,
      "loss": 2.2935,
      "step": 163000
    },
    {
      "epoch": 1.79,
      "learning_rate": 3.289433127847762e-05,
      "loss": 2.2973,
      "step": 163500
    },
    {
      "epoch": 1.79,
      "learning_rate": 3.283849280800501e-05,
      "loss": 2.2976,
      "step": 164000
    },
    {
      "epoch": 1.8,
      "learning_rate": 3.278287769141428e-05,
      "loss": 2.2893,
      "step": 164500
    },
    {
      "epoch": 1.8,
      "learning_rate": 3.272703922094166e-05,
      "loss": 2.2884,
      "step": 165000
    },
    {
      "epoch": 1.81,
      "learning_rate": 3.2671424104350935e-05,
      "loss": 2.2901,
      "step": 165500
    },
    {
      "epoch": 1.81,
      "learning_rate": 3.261558563387832e-05,
      "loss": 2.2905,
      "step": 166000
    },
    {
      "epoch": 1.82,
      "learning_rate": 3.255997051728759e-05,
      "loss": 2.2869,
      "step": 166500
    },
    {
      "epoch": 1.82,
      "learning_rate": 3.2504132046814976e-05,
      "loss": 2.2831,
      "step": 167000
    },
    {
      "epoch": 1.83,
      "learning_rate": 3.244851693022425e-05,
      "loss": 2.2877,
      "step": 167500
    },
    {
      "epoch": 1.84,
      "learning_rate": 3.239267845975163e-05,
      "loss": 2.2818,
      "step": 168000
    },
    {
      "epoch": 1.84,
      "learning_rate": 3.233706334316091e-05,
      "loss": 2.2816,
      "step": 168500
    },
    {
      "epoch": 1.85,
      "learning_rate": 3.2281224872688285e-05,
      "loss": 2.2795,
      "step": 169000
    },
    {
      "epoch": 1.85,
      "learning_rate": 3.222560975609756e-05,
      "loss": 2.2815,
      "step": 169500
    },
    {
      "epoch": 1.86,
      "learning_rate": 3.216977128562494e-05,
      "loss": 2.2773,
      "step": 170000
    },
    {
      "epoch": 1.86,
      "eval_accuracy": 0.5871798298147288,
      "eval_loss": 2.17578125,
      "eval_runtime": 1172.3989,
      "eval_samples_per_second": 263.013,
      "eval_steps_per_second": 4.11,
      "step": 170000
    },
    {
      "epoch": 1.86,
      "learning_rate": 3.211415616903422e-05,
      "loss": 2.27,
      "step": 170500
    },
    {
      "epoch": 1.87,
      "learning_rate": 3.20583176985616e-05,
      "loss": 2.2723,
      "step": 171000
    },
    {
      "epoch": 1.87,
      "learning_rate": 3.2002702581970876e-05,
      "loss": 2.2735,
      "step": 171500
    },
    {
      "epoch": 1.88,
      "learning_rate": 3.194686411149826e-05,
      "loss": 2.2721,
      "step": 172000
    },
    {
      "epoch": 1.88,
      "learning_rate": 3.1891248994907534e-05,
      "loss": 2.2682,
      "step": 172500
    },
    {
      "epoch": 1.89,
      "learning_rate": 3.183541052443492e-05,
      "loss": 2.2682,
      "step": 173000
    },
    {
      "epoch": 1.9,
      "learning_rate": 3.1779795407844185e-05,
      "loss": 2.2682,
      "step": 173500
    },
    {
      "epoch": 1.9,
      "learning_rate": 3.1723956937371575e-05,
      "loss": 2.2679,
      "step": 174000
    },
    {
      "epoch": 1.91,
      "learning_rate": 3.1668341820780843e-05,
      "loss": 2.2609,
      "step": 174500
    },
    {
      "epoch": 1.91,
      "learning_rate": 3.1612503350308233e-05,
      "loss": 2.2611,
      "step": 175000
    },
    {
      "epoch": 1.92,
      "learning_rate": 3.15568882337175e-05,
      "loss": 2.2625,
      "step": 175500
    },
    {
      "epoch": 1.92,
      "learning_rate": 3.150104976324489e-05,
      "loss": 2.2589,
      "step": 176000
    },
    {
      "epoch": 1.93,
      "learning_rate": 3.144543464665416e-05,
      "loss": 2.2585,
      "step": 176500
    },
    {
      "epoch": 1.93,
      "learning_rate": 3.138959617618154e-05,
      "loss": 2.2527,
      "step": 177000
    },
    {
      "epoch": 1.94,
      "learning_rate": 3.133398105959082e-05,
      "loss": 2.2508,
      "step": 177500
    },
    {
      "epoch": 1.94,
      "learning_rate": 3.12781425891182e-05,
      "loss": 2.2526,
      "step": 178000
    },
    {
      "epoch": 1.95,
      "learning_rate": 3.1222527472527476e-05,
      "loss": 2.2454,
      "step": 178500
    },
    {
      "epoch": 1.96,
      "learning_rate": 3.116668900205485e-05,
      "loss": 2.2515,
      "step": 179000
    },
    {
      "epoch": 1.96,
      "learning_rate": 3.1111073885464134e-05,
      "loss": 2.2536,
      "step": 179500
    },
    {
      "epoch": 1.97,
      "learning_rate": 3.105523541499151e-05,
      "loss": 2.2492,
      "step": 180000
    },
    {
      "epoch": 1.97,
      "eval_accuracy": 0.5908707471657701,
      "eval_loss": 2.1484375,
      "eval_runtime": 1104.0527,
      "eval_samples_per_second": 279.295,
      "eval_steps_per_second": 4.365,
      "step": 180000
    },
    {
      "epoch": 1.97,
      "learning_rate": 3.099962029840079e-05,
      "loss": 2.2476,
      "step": 180500
    },
    {
      "epoch": 1.98,
      "learning_rate": 3.094378182792817e-05,
      "loss": 2.2394,
      "step": 181000
    },
    {
      "epoch": 1.98,
      "learning_rate": 3.088816671133744e-05,
      "loss": 2.2467,
      "step": 181500
    },
    {
      "epoch": 1.99,
      "learning_rate": 3.0832328240864826e-05,
      "loss": 2.2448,
      "step": 182000
    },
    {
      "epoch": 1.99,
      "learning_rate": 3.07767131242741e-05,
      "loss": 2.2393,
      "step": 182500
    },
    {
      "epoch": 2.0,
      "learning_rate": 3.0720874653801484e-05,
      "loss": 2.2405,
      "step": 183000
    },
    {
      "epoch": 2.0,
      "learning_rate": 3.066525953721076e-05,
      "loss": 2.2345,
      "step": 183500
    },
    {
      "epoch": 2.01,
      "learning_rate": 3.060942106673814e-05,
      "loss": 2.2393,
      "step": 184000
    },
    {
      "epoch": 2.02,
      "learning_rate": 3.055380595014741e-05,
      "loss": 2.2374,
      "step": 184500
    },
    {
      "epoch": 2.02,
      "learning_rate": 3.04979674796748e-05,
      "loss": 2.2343,
      "step": 185000
    },
    {
      "epoch": 2.03,
      "learning_rate": 3.044235236308407e-05,
      "loss": 2.2304,
      "step": 185500
    },
    {
      "epoch": 2.03,
      "learning_rate": 3.0386513892611458e-05,
      "loss": 2.2328,
      "step": 186000
    },
    {
      "epoch": 2.04,
      "learning_rate": 3.0330898776020726e-05,
      "loss": 2.2348,
      "step": 186500
    },
    {
      "epoch": 2.04,
      "learning_rate": 3.0275060305548113e-05,
      "loss": 2.2267,
      "step": 187000
    },
    {
      "epoch": 2.05,
      "learning_rate": 3.0219445188957384e-05,
      "loss": 2.2261,
      "step": 187500
    },
    {
      "epoch": 2.05,
      "learning_rate": 3.0163606718484767e-05,
      "loss": 2.2244,
      "step": 188000
    },
    {
      "epoch": 2.06,
      "learning_rate": 3.0107991601894042e-05,
      "loss": 2.2262,
      "step": 188500
    },
    {
      "epoch": 2.06,
      "learning_rate": 3.0052153131421422e-05,
      "loss": 2.2225,
      "step": 189000
    },
    {
      "epoch": 2.07,
      "learning_rate": 2.99965380148307e-05,
      "loss": 2.2229,
      "step": 189500
    },
    {
      "epoch": 2.08,
      "learning_rate": 2.994069954435808e-05,
      "loss": 2.2261,
      "step": 190000
    },
    {
      "epoch": 2.08,
      "eval_accuracy": 0.5943238319324563,
      "eval_loss": 2.123046875,
      "eval_runtime": 1195.159,
      "eval_samples_per_second": 258.004,
      "eval_steps_per_second": 4.032,
      "step": 190000
    },
    {
      "epoch": 2.08,
      "learning_rate": 2.988508442776736e-05,
      "loss": 2.22,
      "step": 190500
    },
    {
      "epoch": 2.09,
      "learning_rate": 2.9829245957294738e-05,
      "loss": 2.2136,
      "step": 191000
    },
    {
      "epoch": 2.09,
      "learning_rate": 2.9773630840704013e-05,
      "loss": 2.2215,
      "step": 191500
    },
    {
      "epoch": 2.1,
      "learning_rate": 2.9717792370231396e-05,
      "loss": 2.211,
      "step": 192000
    },
    {
      "epoch": 2.1,
      "learning_rate": 2.966217725364067e-05,
      "loss": 2.2163,
      "step": 192500
    },
    {
      "epoch": 2.11,
      "learning_rate": 2.9606338783168054e-05,
      "loss": 2.2117,
      "step": 193000
    },
    {
      "epoch": 2.11,
      "learning_rate": 2.955072366657733e-05,
      "loss": 2.2179,
      "step": 193500
    },
    {
      "epoch": 2.12,
      "learning_rate": 2.949488519610471e-05,
      "loss": 2.2176,
      "step": 194000
    },
    {
      "epoch": 2.12,
      "learning_rate": 2.943927007951398e-05,
      "loss": 2.2149,
      "step": 194500
    },
    {
      "epoch": 2.13,
      "learning_rate": 2.9383431609041367e-05,
      "loss": 2.2128,
      "step": 195000
    },
    {
      "epoch": 2.14,
      "learning_rate": 2.932781649245064e-05,
      "loss": 2.2059,
      "step": 195500
    },
    {
      "epoch": 2.14,
      "learning_rate": 2.9271978021978025e-05,
      "loss": 2.2123,
      "step": 196000
    },
    {
      "epoch": 2.15,
      "learning_rate": 2.9216362905387296e-05,
      "loss": 2.2066,
      "step": 196500
    },
    {
      "epoch": 2.15,
      "learning_rate": 2.9160524434914683e-05,
      "loss": 2.2072,
      "step": 197000
    },
    {
      "epoch": 2.16,
      "learning_rate": 2.9104909318323954e-05,
      "loss": 2.2063,
      "step": 197500
    },
    {
      "epoch": 2.16,
      "learning_rate": 2.904907084785134e-05,
      "loss": 2.2054,
      "step": 198000
    },
    {
      "epoch": 2.17,
      "learning_rate": 2.899345573126061e-05,
      "loss": 2.2032,
      "step": 198500
    },
    {
      "epoch": 2.17,
      "learning_rate": 2.8937617260787992e-05,
      "loss": 2.1995,
      "step": 199000
    },
    {
      "epoch": 2.18,
      "learning_rate": 2.8882002144197267e-05,
      "loss": 2.2018,
      "step": 199500
    },
    {
      "epoch": 2.18,
      "learning_rate": 2.882616367372465e-05,
      "loss": 2.1961,
      "step": 200000
    },
    {
      "epoch": 2.18,
      "eval_accuracy": 0.5975533594798018,
      "eval_loss": 2.1015625,
      "eval_runtime": 1253.116,
      "eval_samples_per_second": 246.071,
      "eval_steps_per_second": 3.846,
      "step": 200000
    },
    {
      "epoch": 2.19,
      "learning_rate": 2.8770548557133925e-05,
      "loss": 2.2031,
      "step": 200500
    },
    {
      "epoch": 2.2,
      "learning_rate": 2.8714710086661305e-05,
      "loss": 2.2029,
      "step": 201000
    },
    {
      "epoch": 2.2,
      "learning_rate": 2.8659094970070583e-05,
      "loss": 2.1957,
      "step": 201500
    },
    {
      "epoch": 2.21,
      "learning_rate": 2.8603256499597963e-05,
      "loss": 2.2022,
      "step": 202000
    },
    {
      "epoch": 2.21,
      "learning_rate": 2.854764138300724e-05,
      "loss": 2.1946,
      "step": 202500
    },
    {
      "epoch": 2.22,
      "learning_rate": 2.849180291253462e-05,
      "loss": 2.1959,
      "step": 203000
    },
    {
      "epoch": 2.22,
      "learning_rate": 2.8436187795943896e-05,
      "loss": 2.1911,
      "step": 203500
    },
    {
      "epoch": 2.23,
      "learning_rate": 2.838034932547128e-05,
      "loss": 2.1922,
      "step": 204000
    },
    {
      "epoch": 2.23,
      "learning_rate": 2.832473420888055e-05,
      "loss": 2.1917,
      "step": 204500
    },
    {
      "epoch": 2.24,
      "learning_rate": 2.8268895738407937e-05,
      "loss": 2.1853,
      "step": 205000
    },
    {
      "epoch": 2.24,
      "learning_rate": 2.8213280621817205e-05,
      "loss": 2.189,
      "step": 205500
    },
    {
      "epoch": 2.25,
      "learning_rate": 2.8157442151344592e-05,
      "loss": 2.1869,
      "step": 206000
    },
    {
      "epoch": 2.26,
      "learning_rate": 2.8101827034753863e-05,
      "loss": 2.182,
      "step": 206500
    },
    {
      "epoch": 2.26,
      "learning_rate": 2.804598856428125e-05,
      "loss": 2.189,
      "step": 207000
    },
    {
      "epoch": 2.27,
      "learning_rate": 2.799037344769052e-05,
      "loss": 2.1849,
      "step": 207500
    },
    {
      "epoch": 2.27,
      "learning_rate": 2.7934534977217908e-05,
      "loss": 2.1859,
      "step": 208000
    },
    {
      "epoch": 2.28,
      "learning_rate": 2.787891986062718e-05,
      "loss": 2.1852,
      "step": 208500
    },
    {
      "epoch": 2.28,
      "learning_rate": 2.782308139015456e-05,
      "loss": 2.1819,
      "step": 209000
    },
    {
      "epoch": 2.29,
      "learning_rate": 2.7767466273563837e-05,
      "loss": 2.1806,
      "step": 209500
    },
    {
      "epoch": 2.29,
      "learning_rate": 2.7711627803091217e-05,
      "loss": 2.1838,
      "step": 210000
    },
    {
      "epoch": 2.29,
      "eval_accuracy": 0.6004074262116587,
      "eval_loss": 2.08203125,
      "eval_runtime": 1303.2811,
      "eval_samples_per_second": 236.6,
      "eval_steps_per_second": 3.698,
      "step": 210000
    },
| { | |
| "epoch": 2.3, | |
| "learning_rate": 2.7656012686500492e-05, | |
| "loss": 2.181, | |
| "step": 210500 | |
| }, | |
| { | |
| "epoch": 2.3, | |
| "learning_rate": 2.7600174216027875e-05, | |
| "loss": 2.1811, | |
| "step": 211000 | |
| }, | |
| { | |
| "epoch": 2.31, | |
| "learning_rate": 2.754455909943715e-05, | |
| "loss": 2.1749, | |
| "step": 211500 | |
| }, | |
| { | |
| "epoch": 2.32, | |
| "learning_rate": 2.7488720628964533e-05, | |
| "loss": 2.1756, | |
| "step": 212000 | |
| }, | |
| { | |
| "epoch": 2.32, | |
| "learning_rate": 2.7433105512373808e-05, | |
| "loss": 2.1799, | |
| "step": 212500 | |
| }, | |
| { | |
| "epoch": 2.33, | |
| "learning_rate": 2.7377267041901188e-05, | |
| "loss": 2.1769, | |
| "step": 213000 | |
| }, | |
| { | |
| "epoch": 2.33, | |
| "learning_rate": 2.7321651925310466e-05, | |
| "loss": 2.1757, | |
| "step": 213500 | |
| }, | |
| { | |
| "epoch": 2.34, | |
| "learning_rate": 2.7265813454837846e-05, | |
| "loss": 2.1682, | |
| "step": 214000 | |
| }, | |
| { | |
| "epoch": 2.34, | |
| "learning_rate": 2.7210198338247124e-05, | |
| "loss": 2.1728, | |
| "step": 214500 | |
| }, | |
| { | |
| "epoch": 2.35, | |
| "learning_rate": 2.7154359867774504e-05, | |
| "loss": 2.168, | |
| "step": 215000 | |
| }, | |
| { | |
| "epoch": 2.35, | |
| "learning_rate": 2.7098744751183775e-05, | |
| "loss": 2.1687, | |
| "step": 215500 | |
| }, | |
| { | |
| "epoch": 2.36, | |
| "learning_rate": 2.7042906280711162e-05, | |
| "loss": 2.1653, | |
| "step": 216000 | |
| }, | |
| { | |
| "epoch": 2.36, | |
| "learning_rate": 2.6987291164120433e-05, | |
| "loss": 2.166, | |
| "step": 216500 | |
| }, | |
| { | |
| "epoch": 2.37, | |
| "learning_rate": 2.693145269364782e-05, | |
| "loss": 2.1648, | |
| "step": 217000 | |
| }, | |
| { | |
| "epoch": 2.38, | |
| "learning_rate": 2.6875837577057088e-05, | |
| "loss": 2.1605, | |
| "step": 217500 | |
| }, | |
| { | |
| "epoch": 2.38, | |
| "learning_rate": 2.6819999106584475e-05, | |
| "loss": 2.1664, | |
| "step": 218000 | |
| }, | |
| { | |
| "epoch": 2.39, | |
| "learning_rate": 2.6764383989993746e-05, | |
| "loss": 2.1656, | |
| "step": 218500 | |
| }, | |
| { | |
| "epoch": 2.39, | |
| "learning_rate": 2.6708545519521133e-05, | |
| "loss": 2.1603, | |
| "step": 219000 | |
| }, | |
| { | |
| "epoch": 2.4, | |
| "learning_rate": 2.6652930402930404e-05, | |
| "loss": 2.1596, | |
| "step": 219500 | |
| }, | |
| { | |
| "epoch": 2.4, | |
| "learning_rate": 2.6597091932457784e-05, | |
| "loss": 2.164, | |
| "step": 220000 | |
| }, | |
| { | |
| "epoch": 2.4, | |
| "eval_accuracy": 0.6030687006414185, | |
| "eval_loss": 2.064453125, | |
| "eval_runtime": 1207.9573, | |
| "eval_samples_per_second": 255.271, | |
| "eval_steps_per_second": 3.989, | |
| "step": 220000 | |
| }, | |
| { | |
| "epoch": 2.41, | |
| "learning_rate": 2.6541476815867062e-05, | |
| "loss": 2.1569, | |
| "step": 220500 | |
| }, | |
| { | |
| "epoch": 2.41, | |
| "learning_rate": 2.6485638345394442e-05, | |
| "loss": 2.1631, | |
| "step": 221000 | |
| }, | |
| { | |
| "epoch": 2.42, | |
| "learning_rate": 2.643002322880372e-05, | |
| "loss": 2.1595, | |
| "step": 221500 | |
| }, | |
| { | |
| "epoch": 2.43, | |
| "learning_rate": 2.63741847583311e-05, | |
| "loss": 2.1522, | |
| "step": 222000 | |
| }, | |
| { | |
| "epoch": 2.43, | |
| "learning_rate": 2.6318569641740375e-05, | |
| "loss": 2.1537, | |
| "step": 222500 | |
| }, | |
| { | |
| "epoch": 2.44, | |
| "learning_rate": 2.6262731171267758e-05, | |
| "loss": 2.1528, | |
| "step": 223000 | |
| }, | |
| { | |
| "epoch": 2.44, | |
| "learning_rate": 2.6207116054677033e-05, | |
| "loss": 2.156, | |
| "step": 223500 | |
| }, | |
| { | |
| "epoch": 2.45, | |
| "learning_rate": 2.6151277584204416e-05, | |
| "loss": 2.1547, | |
| "step": 224000 | |
| }, | |
| { | |
| "epoch": 2.45, | |
| "learning_rate": 2.609566246761369e-05, | |
| "loss": 2.1529, | |
| "step": 224500 | |
| }, | |
| { | |
| "epoch": 2.46, | |
| "learning_rate": 2.603982399714107e-05, | |
| "loss": 2.1536, | |
| "step": 225000 | |
| }, | |
| { | |
| "epoch": 2.46, | |
| "learning_rate": 2.5984208880550342e-05, | |
| "loss": 2.146, | |
| "step": 225500 | |
| }, | |
| { | |
| "epoch": 2.47, | |
| "learning_rate": 2.592837041007773e-05, | |
| "loss": 2.15, | |
| "step": 226000 | |
| }, | |
| { | |
| "epoch": 2.47, | |
| "learning_rate": 2.5872755293487e-05, | |
| "loss": 2.1504, | |
| "step": 226500 | |
| }, | |
| { | |
| "epoch": 2.48, | |
| "learning_rate": 2.5816916823014387e-05, | |
| "loss": 2.1475, | |
| "step": 227000 | |
| }, | |
| { | |
| "epoch": 2.49, | |
| "learning_rate": 2.5761301706423658e-05, | |
| "loss": 2.1517, | |
| "step": 227500 | |
| }, | |
| { | |
| "epoch": 2.49, | |
| "learning_rate": 2.5705463235951045e-05, | |
| "loss": 2.1447, | |
| "step": 228000 | |
| }, | |
| { | |
| "epoch": 2.5, | |
| "learning_rate": 2.5649848119360316e-05, | |
| "loss": 2.1413, | |
| "step": 228500 | |
| }, | |
| { | |
| "epoch": 2.5, | |
| "learning_rate": 2.5594009648887703e-05, | |
| "loss": 2.1433, | |
| "step": 229000 | |
| }, | |
| { | |
| "epoch": 2.51, | |
| "learning_rate": 2.553839453229697e-05, | |
| "loss": 2.1476, | |
| "step": 229500 | |
| }, | |
| { | |
| "epoch": 2.51, | |
| "learning_rate": 2.5482556061824354e-05, | |
| "loss": 2.1456, | |
| "step": 230000 | |
| }, | |
| { | |
| "epoch": 2.51, | |
| "eval_accuracy": 0.6052384630896759, | |
| "eval_loss": 2.046875, | |
| "eval_runtime": 1225.601, | |
| "eval_samples_per_second": 251.596, | |
| "eval_steps_per_second": 3.932, | |
| "step": 230000 | |
| }, | |
| { | |
| "epoch": 2.52, | |
| "learning_rate": 2.542694094523363e-05, | |
| "loss": 2.144, | |
| "step": 230500 | |
| }, | |
| { | |
| "epoch": 2.52, | |
| "learning_rate": 2.5371102474761012e-05, | |
| "loss": 2.1397, | |
| "step": 231000 | |
| }, | |
| { | |
| "epoch": 2.53, | |
| "learning_rate": 2.5315487358170287e-05, | |
| "loss": 2.1469, | |
| "step": 231500 | |
| }, | |
| { | |
| "epoch": 2.53, | |
| "learning_rate": 2.5259648887697667e-05, | |
| "loss": 2.1434, | |
| "step": 232000 | |
| }, | |
| { | |
| "epoch": 2.54, | |
| "learning_rate": 2.5204033771106945e-05, | |
| "loss": 2.1388, | |
| "step": 232500 | |
| }, | |
| { | |
| "epoch": 2.55, | |
| "learning_rate": 2.5148195300634325e-05, | |
| "loss": 2.1403, | |
| "step": 233000 | |
| }, | |
| { | |
| "epoch": 2.55, | |
| "learning_rate": 2.5092580184043603e-05, | |
| "loss": 2.1349, | |
| "step": 233500 | |
| }, | |
| { | |
| "epoch": 2.56, | |
| "learning_rate": 2.5036741713570983e-05, | |
| "loss": 2.1363, | |
| "step": 234000 | |
| }, | |
| { | |
| "epoch": 2.56, | |
| "learning_rate": 2.4981126596980258e-05, | |
| "loss": 2.143, | |
| "step": 234500 | |
| }, | |
| { | |
| "epoch": 2.57, | |
| "learning_rate": 2.492528812650764e-05, | |
| "loss": 2.1419, | |
| "step": 235000 | |
| }, | |
| { | |
| "epoch": 2.57, | |
| "learning_rate": 2.4869673009916912e-05, | |
| "loss": 2.1412, | |
| "step": 235500 | |
| }, | |
| { | |
| "epoch": 2.58, | |
| "learning_rate": 2.48138345394443e-05, | |
| "loss": 2.1327, | |
| "step": 236000 | |
| }, | |
| { | |
| "epoch": 2.58, | |
| "learning_rate": 2.475821942285357e-05, | |
| "loss": 2.1315, | |
| "step": 236500 | |
| }, | |
| { | |
| "epoch": 2.59, | |
| "learning_rate": 2.4702380952380953e-05, | |
| "loss": 2.1338, | |
| "step": 237000 | |
| }, | |
| { | |
| "epoch": 2.59, | |
| "learning_rate": 2.4646765835790228e-05, | |
| "loss": 2.1345, | |
| "step": 237500 | |
| }, | |
| { | |
| "epoch": 2.6, | |
| "learning_rate": 2.4590927365317608e-05, | |
| "loss": 2.1344, | |
| "step": 238000 | |
| }, | |
| { | |
| "epoch": 2.61, | |
| "learning_rate": 2.4535312248726883e-05, | |
| "loss": 2.1287, | |
| "step": 238500 | |
| }, | |
| { | |
| "epoch": 2.61, | |
| "learning_rate": 2.4479473778254266e-05, | |
| "loss": 2.1346, | |
| "step": 239000 | |
| }, | |
| { | |
| "epoch": 2.62, | |
| "learning_rate": 2.442385866166354e-05, | |
| "loss": 2.1285, | |
| "step": 239500 | |
| }, | |
| { | |
| "epoch": 2.62, | |
| "learning_rate": 2.4368020191190924e-05, | |
| "loss": 2.1308, | |
| "step": 240000 | |
| }, | |
| { | |
| "epoch": 2.62, | |
| "eval_accuracy": 0.6080040884730269, | |
| "eval_loss": 2.029296875, | |
| "eval_runtime": 1742.4401, | |
| "eval_samples_per_second": 176.968, | |
| "eval_steps_per_second": 2.766, | |
| "step": 240000 | |
| }, | |
| { | |
| "epoch": 2.63, | |
| "learning_rate": 2.43124050746002e-05, | |
| "loss": 2.13, | |
| "step": 240500 | |
| }, | |
| { | |
| "epoch": 2.63, | |
| "learning_rate": 2.4256566604127582e-05, | |
| "loss": 2.1307, | |
| "step": 241000 | |
| }, | |
| { | |
| "epoch": 2.64, | |
| "learning_rate": 2.4200951487536854e-05, | |
| "loss": 2.1279, | |
| "step": 241500 | |
| }, | |
| { | |
| "epoch": 2.64, | |
| "learning_rate": 2.414511301706424e-05, | |
| "loss": 2.1236, | |
| "step": 242000 | |
| }, | |
| { | |
| "epoch": 2.65, | |
| "learning_rate": 2.408949790047351e-05, | |
| "loss": 2.12, | |
| "step": 242500 | |
| }, | |
| { | |
| "epoch": 2.65, | |
| "learning_rate": 2.4033659430000895e-05, | |
| "loss": 2.1255, | |
| "step": 243000 | |
| }, | |
| { | |
| "epoch": 2.66, | |
| "learning_rate": 2.397804431341017e-05, | |
| "loss": 2.1237, | |
| "step": 243500 | |
| }, | |
| { | |
| "epoch": 2.67, | |
| "learning_rate": 2.392220584293755e-05, | |
| "loss": 2.1213, | |
| "step": 244000 | |
| }, | |
| { | |
| "epoch": 2.67, | |
| "learning_rate": 2.3866590726346824e-05, | |
| "loss": 2.1227, | |
| "step": 244500 | |
| }, | |
| { | |
| "epoch": 2.68, | |
| "learning_rate": 2.3810752255874208e-05, | |
| "loss": 2.1262, | |
| "step": 245000 | |
| }, | |
| { | |
| "epoch": 2.68, | |
| "learning_rate": 2.3755137139283482e-05, | |
| "loss": 2.1176, | |
| "step": 245500 | |
| }, | |
| { | |
| "epoch": 2.69, | |
| "learning_rate": 2.3699298668810866e-05, | |
| "loss": 2.1184, | |
| "step": 246000 | |
| }, | |
| { | |
| "epoch": 2.69, | |
| "learning_rate": 2.364368355222014e-05, | |
| "loss": 2.1175, | |
| "step": 246500 | |
| }, | |
| { | |
| "epoch": 2.7, | |
| "learning_rate": 2.3587845081747524e-05, | |
| "loss": 2.122, | |
| "step": 247000 | |
| }, | |
| { | |
| "epoch": 2.7, | |
| "learning_rate": 2.3532229965156795e-05, | |
| "loss": 2.1119, | |
| "step": 247500 | |
| }, | |
| { | |
| "epoch": 2.71, | |
| "learning_rate": 2.347639149468418e-05, | |
| "loss": 2.1136, | |
| "step": 248000 | |
| }, | |
| { | |
| "epoch": 2.71, | |
| "learning_rate": 2.3420776378093453e-05, | |
| "loss": 2.1187, | |
| "step": 248500 | |
| }, | |
| { | |
| "epoch": 2.72, | |
| "learning_rate": 2.3364937907620836e-05, | |
| "loss": 2.1187, | |
| "step": 249000 | |
| }, | |
| { | |
| "epoch": 2.73, | |
| "learning_rate": 2.3309322791030108e-05, | |
| "loss": 2.1161, | |
| "step": 249500 | |
| }, | |
| { | |
| "epoch": 2.73, | |
| "learning_rate": 2.325348432055749e-05, | |
| "loss": 2.1161, | |
| "step": 250000 | |
| }, | |
| { | |
| "epoch": 2.73, | |
| "eval_accuracy": 0.6101299952350318, | |
| "eval_loss": 2.013671875, | |
| "eval_runtime": 1209.7923, | |
| "eval_samples_per_second": 254.883, | |
| "eval_steps_per_second": 3.983, | |
| "step": 250000 | |
| }, | |
| { | |
| "epoch": 2.74, | |
| "learning_rate": 2.3197645850084877e-05, | |
| "loss": 2.1108, | |
| "step": 250500 | |
| }, | |
| { | |
| "epoch": 2.74, | |
| "learning_rate": 2.314203073349415e-05, | |
| "loss": 2.1159, | |
| "step": 251000 | |
| }, | |
| { | |
| "epoch": 2.75, | |
| "learning_rate": 2.3086192263021532e-05, | |
| "loss": 2.1105, | |
| "step": 251500 | |
| }, | |
| { | |
| "epoch": 2.75, | |
| "learning_rate": 2.3030577146430807e-05, | |
| "loss": 2.1081, | |
| "step": 252000 | |
| }, | |
| { | |
| "epoch": 2.76, | |
| "learning_rate": 2.297473867595819e-05, | |
| "loss": 2.1085, | |
| "step": 252500 | |
| }, | |
| { | |
| "epoch": 2.76, | |
| "learning_rate": 2.2919123559367465e-05, | |
| "loss": 2.1148, | |
| "step": 253000 | |
| }, | |
| { | |
| "epoch": 2.77, | |
| "learning_rate": 2.2863285088894845e-05, | |
| "loss": 2.1079, | |
| "step": 253500 | |
| }, | |
| { | |
| "epoch": 2.77, | |
| "learning_rate": 2.280766997230412e-05, | |
| "loss": 2.113, | |
| "step": 254000 | |
| }, | |
| { | |
| "epoch": 2.78, | |
| "learning_rate": 2.2751831501831503e-05, | |
| "loss": 2.1102, | |
| "step": 254500 | |
| }, | |
| { | |
| "epoch": 2.79, | |
| "learning_rate": 2.2696216385240778e-05, | |
| "loss": 2.1081, | |
| "step": 255000 | |
| }, | |
| { | |
| "epoch": 2.79, | |
| "learning_rate": 2.264037791476816e-05, | |
| "loss": 2.1121, | |
| "step": 255500 | |
| }, | |
| { | |
| "epoch": 2.8, | |
| "learning_rate": 2.2584762798177432e-05, | |
| "loss": 2.1069, | |
| "step": 256000 | |
| }, | |
| { | |
| "epoch": 2.8, | |
| "learning_rate": 2.252892432770482e-05, | |
| "loss": 2.105, | |
| "step": 256500 | |
| }, | |
| { | |
| "epoch": 2.81, | |
| "learning_rate": 2.247330921111409e-05, | |
| "loss": 2.1047, | |
| "step": 257000 | |
| }, | |
| { | |
| "epoch": 2.81, | |
| "learning_rate": 2.2417470740641474e-05, | |
| "loss": 2.1006, | |
| "step": 257500 | |
| }, | |
| { | |
| "epoch": 2.82, | |
| "learning_rate": 2.236185562405075e-05, | |
| "loss": 2.1086, | |
| "step": 258000 | |
| }, | |
| { | |
| "epoch": 2.82, | |
| "learning_rate": 2.2306017153578128e-05, | |
| "loss": 2.0998, | |
| "step": 258500 | |
| }, | |
| { | |
| "epoch": 2.83, | |
| "learning_rate": 2.2250402036987403e-05, | |
| "loss": 2.1076, | |
| "step": 259000 | |
| }, | |
| { | |
| "epoch": 2.83, | |
| "learning_rate": 2.2194563566514786e-05, | |
| "loss": 2.1039, | |
| "step": 259500 | |
| }, | |
| { | |
| "epoch": 2.84, | |
| "learning_rate": 2.213894844992406e-05, | |
| "loss": 2.1052, | |
| "step": 260000 | |
| }, | |
| { | |
| "epoch": 2.84, | |
| "eval_accuracy": 0.6120208554253012, | |
| "eval_loss": 2.001953125, | |
| "eval_runtime": 1054.4543, | |
| "eval_samples_per_second": 292.432, | |
| "eval_steps_per_second": 4.57, | |
| "step": 260000 | |
| }, | |
| { | |
| "epoch": 2.85, | |
| "learning_rate": 2.2083109979451444e-05, | |
| "loss": 2.098, | |
| "step": 260500 | |
| }, | |
| { | |
| "epoch": 2.85, | |
| "learning_rate": 2.202749486286072e-05, | |
| "loss": 2.0984, | |
| "step": 261000 | |
| }, | |
| { | |
| "epoch": 2.86, | |
| "learning_rate": 2.1971656392388102e-05, | |
| "loss": 2.1033, | |
| "step": 261500 | |
| }, | |
| { | |
| "epoch": 2.86, | |
| "learning_rate": 2.1916041275797374e-05, | |
| "loss": 2.0997, | |
| "step": 262000 | |
| }, | |
| { | |
| "epoch": 2.87, | |
| "learning_rate": 2.186020280532476e-05, | |
| "loss": 2.0943, | |
| "step": 262500 | |
| }, | |
| { | |
| "epoch": 2.87, | |
| "learning_rate": 2.1804587688734032e-05, | |
| "loss": 2.0861, | |
| "step": 263000 | |
| }, | |
| { | |
| "epoch": 2.88, | |
| "learning_rate": 2.1748749218261415e-05, | |
| "loss": 2.0976, | |
| "step": 263500 | |
| }, | |
| { | |
| "epoch": 2.88, | |
| "learning_rate": 2.1693134101670686e-05, | |
| "loss": 2.0934, | |
| "step": 264000 | |
| }, | |
| { | |
| "epoch": 2.89, | |
| "learning_rate": 2.163729563119807e-05, | |
| "loss": 2.0899, | |
| "step": 264500 | |
| }, | |
| { | |
| "epoch": 2.89, | |
| "learning_rate": 2.1581680514607344e-05, | |
| "loss": 2.0975, | |
| "step": 265000 | |
| }, | |
| { | |
| "epoch": 2.9, | |
| "learning_rate": 2.1525842044134728e-05, | |
| "loss": 2.0973, | |
| "step": 265500 | |
| }, | |
| { | |
| "epoch": 2.91, | |
| "learning_rate": 2.1470226927544002e-05, | |
| "loss": 2.0963, | |
| "step": 266000 | |
| }, | |
| { | |
| "epoch": 2.91, | |
| "learning_rate": 2.1414388457071386e-05, | |
| "loss": 2.0907, | |
| "step": 266500 | |
| }, | |
| { | |
| "epoch": 2.92, | |
| "learning_rate": 2.135877334048066e-05, | |
| "loss": 2.0895, | |
| "step": 267000 | |
| }, | |
| { | |
| "epoch": 2.92, | |
| "learning_rate": 2.1302934870008044e-05, | |
| "loss": 2.088, | |
| "step": 267500 | |
| }, | |
| { | |
| "epoch": 2.93, | |
| "learning_rate": 2.1247319753417315e-05, | |
| "loss": 2.087, | |
| "step": 268000 | |
| }, | |
| { | |
| "epoch": 2.93, | |
| "learning_rate": 2.11914812829447e-05, | |
| "loss": 2.0894, | |
| "step": 268500 | |
| }, | |
| { | |
| "epoch": 2.94, | |
| "learning_rate": 2.113586616635397e-05, | |
| "loss": 2.0888, | |
| "step": 269000 | |
| }, | |
| { | |
| "epoch": 2.94, | |
| "learning_rate": 2.1080027695881356e-05, | |
| "loss": 2.0838, | |
| "step": 269500 | |
| }, | |
| { | |
| "epoch": 2.95, | |
| "learning_rate": 2.1024412579290628e-05, | |
| "loss": 2.0856, | |
| "step": 270000 | |
| }, | |
| { | |
| "epoch": 2.95, | |
| "eval_accuracy": 0.6142498653012093, | |
| "eval_loss": 1.990234375, | |
| "eval_runtime": 1044.3766, | |
| "eval_samples_per_second": 295.254, | |
| "eval_steps_per_second": 4.614, | |
| "step": 270000 | |
| }, | |
| { | |
| "epoch": 2.95, | |
| "learning_rate": 2.096857410881801e-05, | |
| "loss": 2.0933, | |
| "step": 270500 | |
| }, | |
| { | |
| "epoch": 2.96, | |
| "learning_rate": 2.0912958992227286e-05, | |
| "loss": 2.0858, | |
| "step": 271000 | |
| }, | |
| { | |
| "epoch": 2.97, | |
| "learning_rate": 2.085712052175467e-05, | |
| "loss": 2.0825, | |
| "step": 271500 | |
| }, | |
| { | |
| "epoch": 2.97, | |
| "learning_rate": 2.0801505405163944e-05, | |
| "loss": 2.0779, | |
| "step": 272000 | |
| }, | |
| { | |
| "epoch": 2.98, | |
| "learning_rate": 2.0745666934691327e-05, | |
| "loss": 2.078, | |
| "step": 272500 | |
| }, | |
| { | |
| "epoch": 2.98, | |
| "learning_rate": 2.0690051818100602e-05, | |
| "loss": 2.0848, | |
| "step": 273000 | |
| }, | |
| { | |
| "epoch": 2.99, | |
| "learning_rate": 2.0634213347627982e-05, | |
| "loss": 2.0816, | |
| "step": 273500 | |
| }, | |
| { | |
| "epoch": 2.99, | |
| "learning_rate": 2.0578598231037257e-05, | |
| "loss": 2.0816, | |
| "step": 274000 | |
| }, | |
| { | |
| "epoch": 3.0, | |
| "learning_rate": 2.052275976056464e-05, | |
| "loss": 2.0815, | |
| "step": 274500 | |
| }, | |
| { | |
| "epoch": 3.0, | |
| "learning_rate": 2.046714464397391e-05, | |
| "loss": 2.0842, | |
| "step": 275000 | |
| }, | |
| { | |
| "epoch": 3.01, | |
| "learning_rate": 2.0411306173501298e-05, | |
| "loss": 2.0743, | |
| "step": 275500 | |
| }, | |
| { | |
| "epoch": 3.01, | |
| "learning_rate": 2.035569105691057e-05, | |
| "loss": 2.0809, | |
| "step": 276000 | |
| }, | |
| { | |
| "epoch": 3.02, | |
| "learning_rate": 2.0299852586437952e-05, | |
| "loss": 2.078, | |
| "step": 276500 | |
| }, | |
| { | |
| "epoch": 3.03, | |
| "learning_rate": 2.0244237469847227e-05, | |
| "loss": 2.0771, | |
| "step": 277000 | |
| }, | |
| { | |
| "epoch": 3.03, | |
| "learning_rate": 2.018839899937461e-05, | |
| "loss": 2.0744, | |
| "step": 277500 | |
| }, | |
| { | |
| "epoch": 3.04, | |
| "learning_rate": 2.0132783882783885e-05, | |
| "loss": 2.0766, | |
| "step": 278000 | |
| }, | |
| { | |
| "epoch": 3.04, | |
| "learning_rate": 2.007694541231127e-05, | |
| "loss": 2.074, | |
| "step": 278500 | |
| }, | |
| { | |
| "epoch": 3.05, | |
| "learning_rate": 2.0021330295720543e-05, | |
| "loss": 2.0783, | |
| "step": 279000 | |
| }, | |
| { | |
| "epoch": 3.05, | |
| "learning_rate": 1.9965491825247923e-05, | |
| "loss": 2.0759, | |
| "step": 279500 | |
| }, | |
| { | |
| "epoch": 3.06, | |
| "learning_rate": 1.9909876708657198e-05, | |
| "loss": 2.0743, | |
| "step": 280000 | |
| }, | |
| { | |
| "epoch": 3.06, | |
| "eval_accuracy": 0.6159319542506481, | |
| "eval_loss": 1.9775390625, | |
| "eval_runtime": 1055.3371, | |
| "eval_samples_per_second": 292.187, | |
| "eval_steps_per_second": 4.566, | |
| "step": 280000 | |
| }, | |
| { | |
| "epoch": 3.06, | |
| "learning_rate": 1.985403823818458e-05, | |
| "loss": 2.0781, | |
| "step": 280500 | |
| }, | |
| { | |
| "epoch": 3.07, | |
| "learning_rate": 1.9798423121593853e-05, | |
| "loss": 2.0757, | |
| "step": 281000 | |
| }, | |
| { | |
| "epoch": 3.08, | |
| "learning_rate": 1.974258465112124e-05, | |
| "loss": 2.0722, | |
| "step": 281500 | |
| }, | |
| { | |
| "epoch": 3.08, | |
| "learning_rate": 1.968696953453051e-05, | |
| "loss": 2.0736, | |
| "step": 282000 | |
| }, | |
| { | |
| "epoch": 3.09, | |
| "learning_rate": 1.9631131064057894e-05, | |
| "loss": 2.064, | |
| "step": 282500 | |
| }, | |
| { | |
| "epoch": 3.09, | |
| "learning_rate": 1.957551594746717e-05, | |
| "loss": 2.0652, | |
| "step": 283000 | |
| }, | |
| { | |
| "epoch": 3.1, | |
| "learning_rate": 1.9519677476994552e-05, | |
| "loss": 2.0628, | |
| "step": 283500 | |
| }, | |
| { | |
| "epoch": 3.1, | |
| "learning_rate": 1.9464062360403827e-05, | |
| "loss": 2.069, | |
| "step": 284000 | |
| }, | |
| { | |
| "epoch": 3.11, | |
| "learning_rate": 1.9408223889931207e-05, | |
| "loss": 2.0627, | |
| "step": 284500 | |
| }, | |
| { | |
| "epoch": 3.11, | |
| "learning_rate": 1.935260877334048e-05, | |
| "loss": 2.0675, | |
| "step": 285000 | |
| }, | |
| { | |
| "epoch": 3.12, | |
| "learning_rate": 1.9296770302867865e-05, | |
| "loss": 2.0697, | |
| "step": 285500 | |
| }, | |
| { | |
| "epoch": 3.12, | |
| "learning_rate": 1.924115518627714e-05, | |
| "loss": 2.0659, | |
| "step": 286000 | |
| }, | |
| { | |
| "epoch": 3.13, | |
| "learning_rate": 1.9185316715804523e-05, | |
| "loss": 2.0704, | |
| "step": 286500 | |
| }, | |
| { | |
| "epoch": 3.14, | |
| "learning_rate": 1.9129701599213794e-05, | |
| "loss": 2.0663, | |
| "step": 287000 | |
| }, | |
| { | |
| "epoch": 3.14, | |
| "learning_rate": 1.907386312874118e-05, | |
| "loss": 2.0673, | |
| "step": 287500 | |
| }, | |
| { | |
| "epoch": 3.15, | |
| "learning_rate": 1.9018248012150452e-05, | |
| "loss": 2.0637, | |
| "step": 288000 | |
| }, | |
| { | |
| "epoch": 3.15, | |
| "learning_rate": 1.8962409541677835e-05, | |
| "loss": 2.0562, | |
| "step": 288500 | |
| }, | |
| { | |
| "epoch": 3.16, | |
| "learning_rate": 1.890679442508711e-05, | |
| "loss": 2.0616, | |
| "step": 289000 | |
| }, | |
| { | |
| "epoch": 3.16, | |
| "learning_rate": 1.885095595461449e-05, | |
| "loss": 2.0647, | |
| "step": 289500 | |
| }, | |
| { | |
| "epoch": 3.17, | |
| "learning_rate": 1.8795340838023765e-05, | |
| "loss": 2.0598, | |
| "step": 290000 | |
| }, | |
| { | |
| "epoch": 3.17, | |
| "eval_accuracy": 0.6171213473795026, | |
| "eval_loss": 1.9677734375, | |
| "eval_runtime": 1057.101, | |
| "eval_samples_per_second": 291.7, | |
| "eval_steps_per_second": 4.559, | |
| "step": 290000 | |
| }, | |
| { | |
| "epoch": 3.17, | |
| "learning_rate": 1.8739502367551148e-05, | |
| "loss": 2.0678, | |
| "step": 290500 | |
| }, | |
| { | |
| "epoch": 3.18, | |
| "learning_rate": 1.8683887250960423e-05, | |
| "loss": 2.0628, | |
| "step": 291000 | |
| }, | |
| { | |
| "epoch": 3.18, | |
| "learning_rate": 1.8628048780487806e-05, | |
| "loss": 2.0625, | |
| "step": 291500 | |
| }, | |
| { | |
| "epoch": 3.19, | |
| "learning_rate": 1.857243366389708e-05, | |
| "loss": 2.0597, | |
| "step": 292000 | |
| }, | |
| { | |
| "epoch": 3.2, | |
| "learning_rate": 1.8516595193424464e-05, | |
| "loss": 2.054, | |
| "step": 292500 | |
| }, | |
| { | |
| "epoch": 3.2, | |
| "learning_rate": 1.8460980076833735e-05, | |
| "loss": 2.0521, | |
| "step": 293000 | |
| }, | |
| { | |
| "epoch": 3.21, | |
| "learning_rate": 1.8405141606361122e-05, | |
| "loss": 2.0619, | |
| "step": 293500 | |
| }, | |
| { | |
| "epoch": 3.21, | |
| "learning_rate": 1.8349526489770393e-05, | |
| "loss": 2.0588, | |
| "step": 294000 | |
| }, | |
| { | |
| "epoch": 3.22, | |
| "learning_rate": 1.8293688019297773e-05, | |
| "loss": 2.0538, | |
| "step": 294500 | |
| }, | |
| { | |
| "epoch": 3.22, | |
| "learning_rate": 1.823807290270705e-05, | |
| "loss": 2.0571, | |
| "step": 295000 | |
| }, | |
| { | |
| "epoch": 3.23, | |
| "learning_rate": 1.818223443223443e-05, | |
| "loss": 2.0547, | |
| "step": 295500 | |
| }, | |
| { | |
| "epoch": 3.23, | |
| "learning_rate": 1.8126619315643706e-05, | |
| "loss": 2.0563, | |
| "step": 296000 | |
| }, | |
| { | |
| "epoch": 3.24, | |
| "learning_rate": 1.807078084517109e-05, | |
| "loss": 2.0484, | |
| "step": 296500 | |
| }, | |
| { | |
| "epoch": 3.24, | |
| "learning_rate": 1.8015165728580364e-05, | |
| "loss": 2.0563, | |
| "step": 297000 | |
| }, | |
| { | |
| "epoch": 3.25, | |
| "learning_rate": 1.7959327258107747e-05, | |
| "loss": 2.0537, | |
| "step": 297500 | |
| }, | |
| { | |
| "epoch": 3.26, | |
| "learning_rate": 1.7903712141517022e-05, | |
| "loss": 2.0554, | |
| "step": 298000 | |
| }, | |
| { | |
| "epoch": 3.26, | |
| "learning_rate": 1.7847873671044405e-05, | |
| "loss": 2.0508, | |
| "step": 298500 | |
| }, | |
| { | |
| "epoch": 3.27, | |
| "learning_rate": 1.7792258554453677e-05, | |
| "loss": 2.0515, | |
| "step": 299000 | |
| }, | |
| { | |
| "epoch": 3.27, | |
| "learning_rate": 1.7736420083981063e-05, | |
| "loss": 2.0529, | |
| "step": 299500 | |
| }, | |
| { | |
| "epoch": 3.28, | |
| "learning_rate": 1.7680804967390335e-05, | |
| "loss": 2.0492, | |
| "step": 300000 | |
| }, | |
| { | |
| "epoch": 3.28, | |
| "eval_accuracy": 0.6189548216499007, | |
| "eval_loss": 1.9560546875, | |
| "eval_runtime": 1033.7042, | |
| "eval_samples_per_second": 298.302, | |
| "eval_steps_per_second": 4.662, | |
| "step": 300000 | |
| }, | |
| { | |
| "epoch": 3.28, | |
| "learning_rate": 1.7624966496917715e-05, | |
| "loss": 2.0521, | |
| "step": 300500 | |
| }, | |
| { | |
| "epoch": 3.29, | |
| "learning_rate": 1.756935138032699e-05, | |
| "loss": 2.049, | |
| "step": 301000 | |
| }, | |
| { | |
| "epoch": 3.29, | |
| "learning_rate": 1.7513512909854373e-05, | |
| "loss": 2.0561, | |
| "step": 301500 | |
| }, | |
| { | |
| "epoch": 3.3, | |
| "learning_rate": 1.7457897793263648e-05, | |
| "loss": 2.0503, | |
| "step": 302000 | |
| }, | |
| { | |
| "epoch": 3.3, | |
| "learning_rate": 1.740205932279103e-05, | |
| "loss": 2.0487, | |
| "step": 302500 | |
| }, | |
| { | |
| "epoch": 3.31, | |
| "learning_rate": 1.7346444206200306e-05, | |
| "loss": 2.0489, | |
| "step": 303000 | |
| }, | |
| { | |
| "epoch": 3.32, | |
| "learning_rate": 1.729060573572769e-05, | |
| "loss": 2.0521, | |
| "step": 303500 | |
| }, | |
| { | |
| "epoch": 3.32, | |
| "learning_rate": 1.7234990619136964e-05, | |
| "loss": 2.0519, | |
| "step": 304000 | |
| }, | |
| { | |
| "epoch": 3.33, | |
| "learning_rate": 1.7179152148664347e-05, | |
| "loss": 2.0493, | |
| "step": 304500 | |
| }, | |
| { | |
| "epoch": 3.33, | |
| "learning_rate": 1.7123537032073618e-05, | |
| "loss": 2.0487, | |
| "step": 305000 | |
| }, | |
| { | |
| "epoch": 3.34, | |
| "learning_rate": 1.7067698561601e-05, | |
| "loss": 2.0428, | |
| "step": 305500 | |
| }, | |
| { | |
| "epoch": 3.34, | |
| "learning_rate": 1.7012083445010273e-05, | |
| "loss": 2.0482, | |
| "step": 306000 | |
| }, | |
| { | |
| "epoch": 3.35, | |
| "learning_rate": 1.6956244974537656e-05, | |
| "loss": 2.0427, | |
| "step": 306500 | |
| }, | |
| { | |
| "epoch": 3.35, | |
| "learning_rate": 1.690062985794693e-05, | |
| "loss": 2.0488, | |
| "step": 307000 | |
| }, | |
| { | |
| "epoch": 3.36, | |
| "learning_rate": 1.6844791387474314e-05, | |
| "loss": 2.0423, | |
| "step": 307500 | |
| }, | |
| { | |
| "epoch": 3.36, | |
| "learning_rate": 1.678917627088359e-05, | |
| "loss": 2.0426, | |
| "step": 308000 | |
| }, | |
| { | |
| "epoch": 3.37, | |
| "learning_rate": 1.6733337800410972e-05, | |
| "loss": 2.0402, | |
| "step": 308500 | |
| }, | |
| { | |
| "epoch": 3.38, | |
| "learning_rate": 1.6677722683820247e-05, | |
| "loss": 2.0465, | |
| "step": 309000 | |
| }, | |
| { | |
| "epoch": 3.38, | |
| "learning_rate": 1.662188421334763e-05, | |
| "loss": 2.0397, | |
| "step": 309500 | |
| }, | |
| { | |
| "epoch": 3.39, | |
| "learning_rate": 1.6566269096756905e-05, | |
| "loss": 2.0395, | |
| "step": 310000 | |
| }, | |
| { | |
| "epoch": 3.39, | |
| "eval_accuracy": 0.6202979048164721, | |
| "eval_loss": 1.9453125, | |
| "eval_runtime": 1032.8224, | |
| "eval_samples_per_second": 298.557, | |
| "eval_steps_per_second": 4.666, | |
| "step": 310000 | |
| }, | |
| { | |
| "epoch": 3.39, | |
| "learning_rate": 1.6510430626284285e-05, | |
| "loss": 2.0398, | |
| "step": 310500 | |
| }, | |
| { | |
| "epoch": 3.4, | |
| "learning_rate": 1.6454815509693556e-05, | |
| "loss": 2.0433, | |
| "step": 311000 | |
| }, | |
| { | |
| "epoch": 3.4, | |
| "learning_rate": 1.6398977039220943e-05, | |
| "loss": 2.0405, | |
| "step": 311500 | |
| }, | |
| { | |
| "epoch": 3.41, | |
| "learning_rate": 1.6343361922630214e-05, | |
| "loss": 2.0379, | |
| "step": 312000 | |
| }, | |
| { | |
| "epoch": 3.41, | |
| "learning_rate": 1.6287523452157598e-05, | |
| "loss": 2.0427, | |
| "step": 312500 | |
| }, | |
| { | |
| "epoch": 3.42, | |
| "learning_rate": 1.6231908335566872e-05, | |
| "loss": 2.0372, | |
| "step": 313000 | |
| }, | |
| { | |
| "epoch": 3.42, | |
| "learning_rate": 1.6176069865094256e-05, | |
| "loss": 2.0357, | |
| "step": 313500 | |
| }, | |
| { | |
| "epoch": 3.43, | |
| "learning_rate": 1.612045474850353e-05, | |
| "loss": 2.0375, | |
| "step": 314000 | |
| }, | |
| { | |
| "epoch": 3.44, | |
| "learning_rate": 1.6064616278030914e-05, | |
| "loss": 2.0395, | |
| "step": 314500 | |
| }, | |
| { | |
| "epoch": 3.44, | |
| "learning_rate": 1.600900116144019e-05, | |
| "loss": 2.0443, | |
| "step": 315000 | |
| }, | |
| { | |
| "epoch": 3.45, | |
| "learning_rate": 1.5953162690967568e-05, | |
| "loss": 2.0344, | |
| "step": 315500 | |
| }, | |
| { | |
| "epoch": 3.45, | |
| "learning_rate": 1.5897547574376843e-05, | |
| "loss": 2.0369, | |
| "step": 316000 | |
| }, | |
| { | |
| "epoch": 3.46, | |
| "learning_rate": 1.5841709103904226e-05, | |
| "loss": 2.0364, | |
| "step": 316500 | |
| }, | |
| { | |
| "epoch": 3.46, | |
| "learning_rate": 1.5786093987313498e-05, | |
| "loss": 2.0337, | |
| "step": 317000 | |
| }, | |
| { | |
| "epoch": 3.47, | |
| "learning_rate": 1.5730255516840884e-05, | |
| "loss": 2.0381, | |
| "step": 317500 | |
| }, | |
| { | |
| "epoch": 3.47, | |
| "learning_rate": 1.5674640400250156e-05, | |
| "loss": 2.0333, | |
| "step": 318000 | |
| }, | |
| { | |
| "epoch": 3.48, | |
| "learning_rate": 1.561880192977754e-05, | |
| "loss": 2.0311, | |
| "step": 318500 | |
| }, | |
| { | |
| "epoch": 3.48, | |
| "learning_rate": 1.5563186813186814e-05, | |
| "loss": 2.0311, | |
| "step": 319000 | |
| }, | |
| { | |
| "epoch": 3.49, | |
| "learning_rate": 1.5507348342714197e-05, | |
| "loss": 2.0287, | |
| "step": 319500 | |
| }, | |
| { | |
| "epoch": 3.5, | |
| "learning_rate": 1.5451733226123472e-05, | |
| "loss": 2.0328, | |
| "step": 320000 | |
| }, | |
| { | |
| "epoch": 3.5, | |
| "eval_accuracy": 0.6217200609754877, | |
| "eval_loss": 1.9365234375, | |
| "eval_runtime": 1035.5409, | |
| "eval_samples_per_second": 297.773, | |
| "eval_steps_per_second": 4.654, | |
| "step": 320000 | |
| }, | |
| { | |
| "epoch": 3.5, | |
| "learning_rate": 1.539589475565085e-05, | |
| "loss": 2.0322, | |
| "step": 320500 | |
| }, | |
| { | |
| "epoch": 3.51, | |
| "learning_rate": 1.534027963906013e-05, | |
| "loss": 2.0326, | |
| "step": 321000 | |
| }, | |
| { | |
| "epoch": 3.51, | |
| "learning_rate": 1.528444116858751e-05, | |
| "loss": 2.033, | |
| "step": 321500 | |
| }, | |
| { | |
| "epoch": 3.52, | |
| "learning_rate": 1.5228826051996783e-05, | |
| "loss": 2.0315, | |
| "step": 322000 | |
| }, | |
| { | |
| "epoch": 3.52, | |
| "learning_rate": 1.5172987581524168e-05, | |
| "loss": 2.0346, | |
| "step": 322500 | |
| }, | |
| { | |
| "epoch": 3.53, | |
| "learning_rate": 1.511737246493344e-05, | |
| "loss": 2.0256, | |
| "step": 323000 | |
| }, | |
| { | |
| "epoch": 3.53, | |
| "learning_rate": 1.5061533994460824e-05, | |
| "loss": 2.0242, | |
| "step": 323500 | |
| }, | |
| { | |
| "epoch": 3.54, | |
| "learning_rate": 1.5005918877870099e-05, | |
| "loss": 2.028, | |
| "step": 324000 | |
| }, | |
| { | |
| "epoch": 3.54, | |
| "learning_rate": 1.4950080407397482e-05, | |
| "loss": 2.0291, | |
| "step": 324500 | |
| }, | |
| { | |
| "epoch": 3.55, | |
| "learning_rate": 1.4894465290806755e-05, | |
| "loss": 2.0266, | |
| "step": 325000 | |
| }, | |
| { | |
| "epoch": 3.56, | |
| "learning_rate": 1.483862682033414e-05, | |
| "loss": 2.027, | |
| "step": 325500 | |
| }, | |
| { | |
| "epoch": 3.56, | |
| "learning_rate": 1.4783011703743413e-05, | |
| "loss": 2.0302, | |
| "step": 326000 | |
| }, | |
| { | |
| "epoch": 3.57, | |
| "learning_rate": 1.4727173233270795e-05, | |
| "loss": 2.0297, | |
| "step": 326500 | |
| }, | |
| { | |
| "epoch": 3.57, | |
| "learning_rate": 1.4671558116680068e-05, | |
| "loss": 2.0317, | |
| "step": 327000 | |
| }, | |
| { | |
| "epoch": 3.58, | |
| "learning_rate": 1.4615719646207451e-05, | |
| "loss": 2.0252, | |
| "step": 327500 | |
| }, | |
| { | |
| "epoch": 3.58, | |
| "learning_rate": 1.4560104529616724e-05, | |
| "loss": 2.0277, | |
| "step": 328000 | |
| }, | |
| { | |
| "epoch": 3.59, | |
| "learning_rate": 1.4504266059144109e-05, | |
| "loss": 2.0303, | |
| "step": 328500 | |
| }, | |
| { | |
| "epoch": 3.59, | |
| "learning_rate": 1.4448650942553382e-05, | |
| "loss": 2.0247, | |
| "step": 329000 | |
| }, | |
| { | |
| "epoch": 3.6, | |
| "learning_rate": 1.4392812472080765e-05, | |
| "loss": 2.0228, | |
| "step": 329500 | |
| }, | |
| { | |
| "epoch": 3.6, | |
| "learning_rate": 1.433719735549004e-05, | |
| "loss": 2.0204, | |
| "step": 330000 | |
| }, | |
| { | |
| "epoch": 3.6, | |
| "eval_accuracy": 0.623003453753276, | |
| "eval_loss": 1.9287109375, | |
| "eval_runtime": 1034.59, | |
| "eval_samples_per_second": 298.047, | |
| "eval_steps_per_second": 4.658, | |
| "step": 330000 | |
| }, | |
| { | |
| "epoch": 3.61, | |
| "learning_rate": 1.4281358885017423e-05, | |
| "loss": 2.0244, | |
| "step": 330500 | |
| }, | |
| { | |
| "epoch": 3.62, | |
| "learning_rate": 1.4225743768426697e-05, | |
| "loss": 2.0269, | |
| "step": 331000 | |
| }, | |
| { | |
| "epoch": 3.62, | |
| "learning_rate": 1.4169905297954078e-05, | |
| "loss": 2.0196, | |
| "step": 331500 | |
| }, | |
| { | |
| "epoch": 3.63, | |
| "learning_rate": 1.4114290181363351e-05, | |
| "loss": 2.0203, | |
| "step": 332000 | |
| }, | |
| { | |
| "epoch": 3.63, | |
| "learning_rate": 1.4058451710890736e-05, | |
| "loss": 2.0221, | |
| "step": 332500 | |
| }, | |
| { | |
| "epoch": 3.64, | |
| "learning_rate": 1.400283659430001e-05, | |
| "loss": 2.0196, | |
| "step": 333000 | |
| }, | |
| { | |
| "epoch": 3.64, | |
| "learning_rate": 1.3946998123827392e-05, | |
| "loss": 2.029, | |
| "step": 333500 | |
| }, | |
| { | |
| "epoch": 3.65, | |
| "learning_rate": 1.3891383007236666e-05, | |
| "loss": 2.0204, | |
| "step": 334000 | |
| }, | |
| { | |
| "epoch": 3.65, | |
| "learning_rate": 1.383554453676405e-05, | |
| "loss": 2.0221, | |
| "step": 334500 | |
| }, | |
| { | |
| "epoch": 3.66, | |
| "learning_rate": 1.3779929420173324e-05, | |
| "loss": 2.0165, | |
| "step": 335000 | |
| }, | |
| { | |
| "epoch": 3.66, | |
| "learning_rate": 1.3724090949700707e-05, | |
| "loss": 2.0222, | |
| "step": 335500 | |
| }, | |
| { | |
| "epoch": 3.67, | |
| "learning_rate": 1.3668475833109982e-05, | |
| "loss": 2.0171, | |
| "step": 336000 | |
| }, | |
| { | |
| "epoch": 3.68, | |
| "learning_rate": 1.3612637362637362e-05, | |
| "loss": 2.0201, | |
| "step": 336500 | |
| }, | |
| { | |
| "epoch": 3.68, | |
| "learning_rate": 1.3557022246046636e-05, | |
| "loss": 2.0166, | |
| "step": 337000 | |
| }, | |
| { | |
| "epoch": 3.69, | |
| "learning_rate": 1.350118377557402e-05, | |
| "loss": 2.0137, | |
| "step": 337500 | |
| }, | |
| { | |
| "epoch": 3.69, | |
| "learning_rate": 1.3445568658983293e-05, | |
| "loss": 2.0142, | |
| "step": 338000 | |
| }, | |
| { | |
| "epoch": 3.7, | |
| "learning_rate": 1.3389730188510678e-05, | |
| "loss": 2.0155, | |
| "step": 338500 | |
| }, | |
| { | |
| "epoch": 3.7, | |
| "learning_rate": 1.333411507191995e-05, | |
| "loss": 2.0177, | |
| "step": 339000 | |
| }, | |
| { | |
| "epoch": 3.71, | |
| "learning_rate": 1.3278276601447334e-05, | |
| "loss": 2.0164, | |
| "step": 339500 | |
| }, | |
| { | |
| "epoch": 3.71, | |
| "learning_rate": 1.3222661484856607e-05, | |
| "loss": 2.0142, | |
| "step": 340000 | |
| }, | |
| { | |
| "epoch": 3.71, | |
| "eval_accuracy": 0.6242913468679321, | |
| "eval_loss": 1.919921875, | |
| "eval_runtime": 1063.0076, | |
| "eval_samples_per_second": 290.079, | |
| "eval_steps_per_second": 4.533, | |
| "step": 340000 | |
| }, | |
| { | |
| "epoch": 3.72, | |
| "learning_rate": 1.3166823014383992e-05, | |
| "loss": 2.0135, | |
| "step": 340500 | |
| }, | |
| { | |
| "epoch": 3.72, | |
| "learning_rate": 1.3111207897793265e-05, | |
| "loss": 2.018, | |
| "step": 341000 | |
| }, | |
| { | |
| "epoch": 3.73, | |
| "learning_rate": 1.3055369427320647e-05, | |
| "loss": 2.012, | |
| "step": 341500 | |
| }, | |
| { | |
| "epoch": 3.74, | |
| "learning_rate": 1.2999754310729923e-05, | |
| "loss": 2.0122, | |
| "step": 342000 | |
| }, | |
| { | |
| "epoch": 3.74, | |
| "learning_rate": 1.2943915840257303e-05, | |
| "loss": 2.0152, | |
| "step": 342500 | |
| }, | |
| { | |
| "epoch": 3.75, | |
| "learning_rate": 1.2888300723666578e-05, | |
| "loss": 2.0103, | |
| "step": 343000 | |
| }, | |
| { | |
| "epoch": 3.75, | |
| "learning_rate": 1.2832462253193961e-05, | |
| "loss": 2.0105, | |
| "step": 343500 | |
| }, | |
| { | |
| "epoch": 3.76, | |
| "learning_rate": 1.2776847136603234e-05, | |
| "loss": 2.0157, | |
| "step": 344000 | |
| }, | |
| { | |
| "epoch": 3.76, | |
| "learning_rate": 1.2721008666130619e-05, | |
| "loss": 2.0107, | |
| "step": 344500 | |
| }, | |
| { | |
| "epoch": 3.77, | |
| "learning_rate": 1.2665393549539892e-05, | |
| "loss": 2.0136, | |
| "step": 345000 | |
| }, | |
| { | |
| "epoch": 3.77, | |
| "learning_rate": 1.2609555079067275e-05, | |
| "loss": 2.008, | |
| "step": 345500 | |
| }, | |
| { | |
| "epoch": 3.78, | |
| "learning_rate": 1.2553939962476548e-05, | |
| "loss": 2.0046, | |
| "step": 346000 | |
| }, | |
| { | |
| "epoch": 3.79, | |
| "learning_rate": 1.2498101492003932e-05, | |
| "loss": 2.0123, | |
| "step": 346500 | |
| }, | |
| { | |
| "epoch": 3.79, | |
| "learning_rate": 1.2442486375413205e-05, | |
| "loss": 2.0093, | |
| "step": 347000 | |
| }, | |
| { | |
| "epoch": 3.8, | |
| "learning_rate": 1.238664790494059e-05, | |
| "loss": 2.0109, | |
| "step": 347500 | |
| }, | |
| { | |
| "epoch": 3.8, | |
| "learning_rate": 1.2331032788349863e-05, | |
| "loss": 2.0073, | |
| "step": 348000 | |
| }, | |
| { | |
| "epoch": 3.81, | |
| "learning_rate": 1.2275194317877244e-05, | |
| "loss": 2.0074, | |
| "step": 348500 | |
| }, | |
| { | |
| "epoch": 3.81, | |
| "learning_rate": 1.221957920128652e-05, | |
| "loss": 2.0117, | |
| "step": 349000 | |
| }, | |
| { | |
| "epoch": 3.82, | |
| "learning_rate": 1.2163740730813902e-05, | |
| "loss": 2.0087, | |
| "step": 349500 | |
| }, | |
| { | |
| "epoch": 3.82, | |
| "learning_rate": 1.2108125614223176e-05, | |
| "loss": 2.0021, | |
| "step": 350000 | |
| }, | |
| { | |
| "epoch": 3.82, | |
| "eval_accuracy": 0.6256724294842592, | |
| "eval_loss": 1.912109375, | |
| "eval_runtime": 1058.3425, | |
| "eval_samples_per_second": 291.357, | |
| "eval_steps_per_second": 4.553, | |
| "step": 350000 | |
| }, | |
| { | |
| "epoch": 3.83, | |
| "learning_rate": 1.2052287143750559e-05, | |
| "loss": 2.0188, | |
| "step": 350500 | |
| }, | |
| { | |
| "epoch": 3.83, | |
| "learning_rate": 1.1996672027159834e-05, | |
| "loss": 2.0063, | |
| "step": 351000 | |
| }, | |
| { | |
| "epoch": 3.84, | |
| "learning_rate": 1.1940833556687215e-05, | |
| "loss": 2.0098, | |
| "step": 351500 | |
| }, | |
| { | |
| "epoch": 3.85, | |
| "learning_rate": 1.188521844009649e-05, | |
| "loss": 2.003, | |
| "step": 352000 | |
| }, | |
| { | |
| "epoch": 3.85, | |
| "learning_rate": 1.1829379969623873e-05, | |
| "loss": 2.0034, | |
| "step": 352500 | |
| }, | |
| { | |
| "epoch": 3.86, | |
| "learning_rate": 1.1773764853033146e-05, | |
| "loss": 2.006, | |
| "step": 353000 | |
| }, | |
| { | |
| "epoch": 3.86, | |
| "learning_rate": 1.171792638256053e-05, | |
| "loss": 2.0067, | |
| "step": 353500 | |
| }, | |
| { | |
| "epoch": 3.87, | |
| "learning_rate": 1.1662311265969803e-05, | |
| "loss": 2.0051, | |
| "step": 354000 | |
| }, | |
| { | |
| "epoch": 3.87, | |
| "learning_rate": 1.1606472795497186e-05, | |
| "loss": 2.0044, | |
| "step": 354500 | |
| }, | |
| { | |
| "epoch": 3.88, | |
| "learning_rate": 1.155085767890646e-05, | |
| "loss": 2.0028, | |
| "step": 355000 | |
| }, | |
| { | |
| "epoch": 3.88, | |
| "learning_rate": 1.1495019208433844e-05, | |
| "loss": 2.0097, | |
| "step": 355500 | |
| }, | |
| { | |
| "epoch": 3.89, | |
| "learning_rate": 1.1439404091843117e-05, | |
| "loss": 2.0048, | |
| "step": 356000 | |
| }, | |
| { | |
| "epoch": 3.89, | |
| "learning_rate": 1.13835656213705e-05, | |
| "loss": 2.0042, | |
| "step": 356500 | |
| }, | |
| { | |
| "epoch": 3.9, | |
| "learning_rate": 1.1327950504779773e-05, | |
| "loss": 2.0022, | |
| "step": 357000 | |
| }, | |
| { | |
| "epoch": 3.91, | |
| "learning_rate": 1.1272112034307156e-05, | |
| "loss": 2.0073, | |
| "step": 357500 | |
| }, | |
| { | |
| "epoch": 3.91, | |
| "learning_rate": 1.1216496917716431e-05, | |
| "loss": 1.9993, | |
| "step": 358000 | |
| }, | |
| { | |
| "epoch": 3.92, | |
| "learning_rate": 1.1160658447243813e-05, | |
| "loss": 1.9994, | |
| "step": 358500 | |
| }, | |
| { | |
| "epoch": 3.92, | |
| "learning_rate": 1.1105043330653088e-05, | |
| "loss": 1.9986, | |
| "step": 359000 | |
| }, | |
| { | |
| "epoch": 3.93, | |
| "learning_rate": 1.104920486018047e-05, | |
| "loss": 2.0043, | |
| "step": 359500 | |
| }, | |
| { | |
| "epoch": 3.93, | |
| "learning_rate": 1.0993589743589744e-05, | |
| "loss": 2.006, | |
| "step": 360000 | |
| }, | |
| { | |
| "epoch": 3.93, | |
| "eval_accuracy": 0.6264209623539464, | |
| "eval_loss": 1.904296875, | |
| "eval_runtime": 1061.3214, | |
| "eval_samples_per_second": 290.54, | |
| "eval_steps_per_second": 4.541, | |
| "step": 360000 | |
| }, | |
| { | |
| "epoch": 3.94, | |
| "learning_rate": 1.0937751273117127e-05, | |
| "loss": 2.0034, | |
| "step": 360500 | |
| }, | |
| { | |
| "epoch": 3.94, | |
| "learning_rate": 1.0882136156526402e-05, | |
| "loss": 2.0003, | |
| "step": 361000 | |
| }, | |
| { | |
| "epoch": 3.95, | |
| "learning_rate": 1.0826297686053783e-05, | |
| "loss": 2.0065, | |
| "step": 361500 | |
| }, | |
| { | |
| "epoch": 3.95, | |
| "learning_rate": 1.0770682569463057e-05, | |
| "loss": 2.0013, | |
| "step": 362000 | |
| }, | |
| { | |
| "epoch": 3.96, | |
| "learning_rate": 1.0714844098990442e-05, | |
| "loss": 1.9985, | |
| "step": 362500 | |
| }, | |
| { | |
| "epoch": 3.97, | |
| "learning_rate": 1.0659228982399715e-05, | |
| "loss": 2.0011, | |
| "step": 363000 | |
| }, | |
| { | |
| "epoch": 3.97, | |
| "learning_rate": 1.0603390511927098e-05, | |
| "loss": 1.9928, | |
| "step": 363500 | |
| }, | |
| { | |
| "epoch": 3.98, | |
| "learning_rate": 1.0547775395336373e-05, | |
| "loss": 1.9994, | |
| "step": 364000 | |
| }, | |
| { | |
| "epoch": 3.98, | |
| "learning_rate": 1.0491936924863754e-05, | |
| "loss": 2.0, | |
| "step": 364500 | |
| }, | |
| { | |
| "epoch": 3.99, | |
| "learning_rate": 1.0436321808273027e-05, | |
| "loss": 1.9952, | |
| "step": 365000 | |
| }, | |
| { | |
| "epoch": 3.99, | |
| "learning_rate": 1.0380483337800412e-05, | |
| "loss": 2.0017, | |
| "step": 365500 | |
| }, | |
| { | |
| "epoch": 4.0, | |
| "learning_rate": 1.0324868221209685e-05, | |
| "loss": 1.9979, | |
| "step": 366000 | |
| }, | |
| { | |
| "epoch": 4.0, | |
| "learning_rate": 1.0269029750737069e-05, | |
| "loss": 1.9937, | |
| "step": 366500 | |
| }, | |
| { | |
| "epoch": 4.01, | |
| "learning_rate": 1.0213414634146342e-05, | |
| "loss": 1.993, | |
| "step": 367000 | |
| }, | |
| { | |
| "epoch": 4.01, | |
| "learning_rate": 1.0157576163673725e-05, | |
| "loss": 1.9951, | |
| "step": 367500 | |
| }, | |
| { | |
| "epoch": 4.02, | |
| "learning_rate": 1.0101961047082998e-05, | |
| "loss": 1.993, | |
| "step": 368000 | |
| }, | |
| { | |
| "epoch": 4.03, | |
| "learning_rate": 1.0046122576610383e-05, | |
| "loss": 1.9916, | |
| "step": 368500 | |
| }, | |
| { | |
| "epoch": 4.03, | |
| "learning_rate": 9.990507460019656e-06, | |
| "loss": 1.997, | |
| "step": 369000 | |
| }, | |
| { | |
| "epoch": 4.04, | |
| "learning_rate": 9.93466898954704e-06, | |
| "loss": 1.9905, | |
| "step": 369500 | |
| }, | |
| { | |
| "epoch": 4.04, | |
| "learning_rate": 9.879053872956312e-06, | |
| "loss": 1.9917, | |
| "step": 370000 | |
| }, | |
| { | |
| "epoch": 4.04, | |
| "eval_accuracy": 0.6274146236467327, | |
| "eval_loss": 1.8984375, | |
| "eval_runtime": 1044.7732, | |
| "eval_samples_per_second": 295.142, | |
| "eval_steps_per_second": 4.612, | |
| "step": 370000 | |
| }, | |
| { | |
| "epoch": 4.05, | |
| "learning_rate": 9.823215402483696e-06, | |
| "loss": 1.9881, | |
| "step": 370500 | |
| }, | |
| { | |
| "epoch": 4.05, | |
| "learning_rate": 9.767600285892969e-06, | |
| "loss": 1.9896, | |
| "step": 371000 | |
| }, | |
| { | |
| "epoch": 4.06, | |
| "learning_rate": 9.711761815420352e-06, | |
| "loss": 1.9941, | |
| "step": 371500 | |
| }, | |
| { | |
| "epoch": 4.06, | |
| "learning_rate": 9.656146698829627e-06, | |
| "loss": 1.991, | |
| "step": 372000 | |
| }, | |
| { | |
| "epoch": 4.07, | |
| "learning_rate": 9.60030822835701e-06, | |
| "loss": 1.9904, | |
| "step": 372500 | |
| }, | |
| { | |
| "epoch": 4.07, | |
| "learning_rate": 9.544693111766283e-06, | |
| "loss": 1.992, | |
| "step": 373000 | |
| }, | |
| { | |
| "epoch": 4.08, | |
| "learning_rate": 9.488854641293666e-06, | |
| "loss": 1.9917, | |
| "step": 373500 | |
| }, | |
| { | |
| "epoch": 4.09, | |
| "learning_rate": 9.43323952470294e-06, | |
| "loss": 1.9916, | |
| "step": 374000 | |
| }, | |
| { | |
| "epoch": 4.09, | |
| "learning_rate": 9.377401054230323e-06, | |
| "loss": 1.9854, | |
| "step": 374500 | |
| }, | |
| { | |
| "epoch": 4.1, | |
| "learning_rate": 9.321785937639596e-06, | |
| "loss": 1.9907, | |
| "step": 375000 | |
| }, | |
| { | |
| "epoch": 4.1, | |
| "learning_rate": 9.26594746716698e-06, | |
| "loss": 1.9885, | |
| "step": 375500 | |
| }, | |
| { | |
| "epoch": 4.11, | |
| "learning_rate": 9.210332350576254e-06, | |
| "loss": 1.9847, | |
| "step": 376000 | |
| }, | |
| { | |
| "epoch": 4.11, | |
| "learning_rate": 9.154493880103637e-06, | |
| "loss": 1.9888, | |
| "step": 376500 | |
| }, | |
| { | |
| "epoch": 4.12, | |
| "learning_rate": 9.09887876351291e-06, | |
| "loss": 1.988, | |
| "step": 377000 | |
| }, | |
| { | |
| "epoch": 4.12, | |
| "learning_rate": 9.043040293040293e-06, | |
| "loss": 1.9891, | |
| "step": 377500 | |
| }, | |
| { | |
| "epoch": 4.13, | |
| "learning_rate": 8.987425176449567e-06, | |
| "loss": 1.9868, | |
| "step": 378000 | |
| }, | |
| { | |
| "epoch": 4.13, | |
| "learning_rate": 8.931586705976951e-06, | |
| "loss": 1.9925, | |
| "step": 378500 | |
| }, | |
| { | |
| "epoch": 4.14, | |
| "learning_rate": 8.875971589386225e-06, | |
| "loss": 1.9871, | |
| "step": 379000 | |
| }, | |
| { | |
| "epoch": 4.15, | |
| "learning_rate": 8.820133118913606e-06, | |
| "loss": 1.986, | |
| "step": 379500 | |
| }, | |
| { | |
| "epoch": 4.15, | |
| "learning_rate": 8.764518002322881e-06, | |
| "loss": 1.9881, | |
| "step": 380000 | |
| }, | |
| { | |
| "epoch": 4.15, | |
| "eval_accuracy": 0.6284240406230363, | |
| "eval_loss": 1.8916015625, | |
| "eval_runtime": 1042.4014, | |
| "eval_samples_per_second": 295.813, | |
| "eval_steps_per_second": 4.623, | |
| "step": 380000 | |
| }, | |
| { | |
| "epoch": 4.16, | |
| "learning_rate": 8.708679531850264e-06, | |
| "loss": 1.9818, | |
| "step": 380500 | |
| }, | |
| { | |
| "epoch": 4.16, | |
| "learning_rate": 8.653064415259537e-06, | |
| "loss": 1.9863, | |
| "step": 381000 | |
| }, | |
| { | |
| "epoch": 4.17, | |
| "learning_rate": 8.597225944786922e-06, | |
| "loss": 1.9852, | |
| "step": 381500 | |
| }, | |
| { | |
| "epoch": 4.17, | |
| "learning_rate": 8.541610828196195e-06, | |
| "loss": 1.9832, | |
| "step": 382000 | |
| }, | |
| { | |
| "epoch": 4.18, | |
| "learning_rate": 8.485772357723577e-06, | |
| "loss": 1.9852, | |
| "step": 382500 | |
| }, | |
| { | |
| "epoch": 4.18, | |
| "learning_rate": 8.430157241132852e-06, | |
| "loss": 1.9855, | |
| "step": 383000 | |
| }, | |
| { | |
| "epoch": 4.19, | |
| "learning_rate": 8.374318770660235e-06, | |
| "loss": 1.9854, | |
| "step": 383500 | |
| }, | |
| { | |
| "epoch": 4.19, | |
| "learning_rate": 8.318703654069508e-06, | |
| "loss": 1.9792, | |
| "step": 384000 | |
| }, | |
| { | |
| "epoch": 4.2, | |
| "learning_rate": 8.262865183596891e-06, | |
| "loss": 1.9818, | |
| "step": 384500 | |
| }, | |
| { | |
| "epoch": 4.21, | |
| "learning_rate": 8.207250067006166e-06, | |
| "loss": 1.9831, | |
| "step": 385000 | |
| }, | |
| { | |
| "epoch": 4.21, | |
| "learning_rate": 8.151411596533547e-06, | |
| "loss": 1.9811, | |
| "step": 385500 | |
| }, | |
| { | |
| "epoch": 4.22, | |
| "learning_rate": 8.095796479942822e-06, | |
| "loss": 1.9786, | |
| "step": 386000 | |
| }, | |
| { | |
| "epoch": 4.22, | |
| "learning_rate": 8.039958009470205e-06, | |
| "loss": 1.9884, | |
| "step": 386500 | |
| }, | |
| { | |
| "epoch": 4.23, | |
| "learning_rate": 7.984342892879479e-06, | |
| "loss": 1.9838, | |
| "step": 387000 | |
| }, | |
| { | |
| "epoch": 4.23, | |
| "learning_rate": 7.928504422406862e-06, | |
| "loss": 1.9821, | |
| "step": 387500 | |
| }, | |
| { | |
| "epoch": 4.24, | |
| "learning_rate": 7.872889305816135e-06, | |
| "loss": 1.9871, | |
| "step": 388000 | |
| }, | |
| { | |
| "epoch": 4.24, | |
| "learning_rate": 7.817050835343518e-06, | |
| "loss": 1.9848, | |
| "step": 388500 | |
| }, | |
| { | |
| "epoch": 4.25, | |
| "learning_rate": 7.761435718752793e-06, | |
| "loss": 1.9854, | |
| "step": 389000 | |
| }, | |
| { | |
| "epoch": 4.25, | |
| "learning_rate": 7.705597248280176e-06, | |
| "loss": 1.9811, | |
| "step": 389500 | |
| }, | |
| { | |
| "epoch": 4.26, | |
| "learning_rate": 7.64998213168945e-06, | |
| "loss": 1.9843, | |
| "step": 390000 | |
| }, | |
| { | |
| "epoch": 4.26, | |
| "eval_accuracy": 0.6291420100522126, | |
| "eval_loss": 1.88671875, | |
| "eval_runtime": 1042.3765, | |
| "eval_samples_per_second": 295.82, | |
| "eval_steps_per_second": 4.623, | |
| "step": 390000 | |
| }, | |
| { | |
| "epoch": 4.27, | |
| "learning_rate": 7.594143661216832e-06, | |
| "loss": 1.9827, | |
| "step": 390500 | |
| }, | |
| { | |
| "epoch": 4.27, | |
| "learning_rate": 7.538528544626106e-06, | |
| "loss": 1.9825, | |
| "step": 391000 | |
| }, | |
| { | |
| "epoch": 4.28, | |
| "learning_rate": 7.48269007415349e-06, | |
| "loss": 1.9772, | |
| "step": 391500 | |
| }, | |
| { | |
| "epoch": 4.28, | |
| "learning_rate": 7.427074957562763e-06, | |
| "loss": 1.9788, | |
| "step": 392000 | |
| }, | |
| { | |
| "epoch": 4.29, | |
| "learning_rate": 7.371236487090145e-06, | |
| "loss": 1.9803, | |
| "step": 392500 | |
| }, | |
| { | |
| "epoch": 4.29, | |
| "learning_rate": 7.31562137049942e-06, | |
| "loss": 1.9762, | |
| "step": 393000 | |
| }, | |
| { | |
| "epoch": 4.3, | |
| "learning_rate": 7.259782900026802e-06, | |
| "loss": 1.9797, | |
| "step": 393500 | |
| }, | |
| { | |
| "epoch": 4.3, | |
| "learning_rate": 7.204167783436076e-06, | |
| "loss": 1.98, | |
| "step": 394000 | |
| }, | |
| { | |
| "epoch": 4.31, | |
| "learning_rate": 7.1483293129634604e-06, | |
| "loss": 1.9782, | |
| "step": 394500 | |
| }, | |
| { | |
| "epoch": 4.31, | |
| "learning_rate": 7.092714196372734e-06, | |
| "loss": 1.9711, | |
| "step": 395000 | |
| }, | |
| { | |
| "epoch": 4.32, | |
| "learning_rate": 7.036875725900116e-06, | |
| "loss": 1.9765, | |
| "step": 395500 | |
| }, | |
| { | |
| "epoch": 4.33, | |
| "learning_rate": 6.98126060930939e-06, | |
| "loss": 1.9789, | |
| "step": 396000 | |
| }, | |
| { | |
| "epoch": 4.33, | |
| "learning_rate": 6.925422138836773e-06, | |
| "loss": 1.9769, | |
| "step": 396500 | |
| }, | |
| { | |
| "epoch": 4.34, | |
| "learning_rate": 6.869807022246047e-06, | |
| "loss": 1.9765, | |
| "step": 397000 | |
| }, | |
| { | |
| "epoch": 4.34, | |
| "learning_rate": 6.8139685517734294e-06, | |
| "loss": 1.9808, | |
| "step": 397500 | |
| }, | |
| { | |
| "epoch": 4.35, | |
| "learning_rate": 6.758353435182704e-06, | |
| "loss": 1.9831, | |
| "step": 398000 | |
| }, | |
| { | |
| "epoch": 4.35, | |
| "learning_rate": 6.702514964710087e-06, | |
| "loss": 1.9783, | |
| "step": 398500 | |
| }, | |
| { | |
| "epoch": 4.36, | |
| "learning_rate": 6.646899848119361e-06, | |
| "loss": 1.9754, | |
| "step": 399000 | |
| }, | |
| { | |
| "epoch": 4.36, | |
| "learning_rate": 6.591061377646744e-06, | |
| "loss": 1.973, | |
| "step": 399500 | |
| }, | |
| { | |
| "epoch": 4.37, | |
| "learning_rate": 6.535446261056018e-06, | |
| "loss": 1.977, | |
| "step": 400000 | |
| }, | |
| { | |
| "epoch": 4.37, | |
| "eval_accuracy": 0.6300586017850129, | |
| "eval_loss": 1.880859375, | |
| "eval_runtime": 1042.6143, | |
| "eval_samples_per_second": 295.753, | |
| "eval_steps_per_second": 4.622, | |
| "step": 400000 | |
| }, | |
| { | |
| "epoch": 4.37, | |
| "learning_rate": 6.4796077905834e-06, | |
| "loss": 1.9774, | |
| "step": 400500 | |
| }, | |
| { | |
| "epoch": 4.38, | |
| "learning_rate": 6.423992673992673e-06, | |
| "loss": 1.9796, | |
| "step": 401000 | |
| }, | |
| { | |
| "epoch": 4.39, | |
| "learning_rate": 6.368154203520057e-06, | |
| "loss": 1.976, | |
| "step": 401500 | |
| }, | |
| { | |
| "epoch": 4.39, | |
| "learning_rate": 6.312539086929331e-06, | |
| "loss": 1.9758, | |
| "step": 402000 | |
| }, | |
| { | |
| "epoch": 4.4, | |
| "learning_rate": 6.2567006164567145e-06, | |
| "loss": 1.9818, | |
| "step": 402500 | |
| }, | |
| { | |
| "epoch": 4.4, | |
| "learning_rate": 6.201085499865988e-06, | |
| "loss": 1.9732, | |
| "step": 403000 | |
| }, | |
| { | |
| "epoch": 4.41, | |
| "learning_rate": 6.145247029393371e-06, | |
| "loss": 1.9763, | |
| "step": 403500 | |
| }, | |
| { | |
| "epoch": 4.41, | |
| "learning_rate": 6.089631912802645e-06, | |
| "loss": 1.9756, | |
| "step": 404000 | |
| }, | |
| { | |
| "epoch": 4.42, | |
| "learning_rate": 6.033793442330028e-06, | |
| "loss": 1.9711, | |
| "step": 404500 | |
| }, | |
| { | |
| "epoch": 4.42, | |
| "learning_rate": 5.978178325739302e-06, | |
| "loss": 1.9705, | |
| "step": 405000 | |
| }, | |
| { | |
| "epoch": 4.43, | |
| "learning_rate": 5.922339855266685e-06, | |
| "loss": 1.9762, | |
| "step": 405500 | |
| }, | |
| { | |
| "epoch": 4.44, | |
| "learning_rate": 5.866724738675958e-06, | |
| "loss": 1.9721, | |
| "step": 406000 | |
| }, | |
| { | |
| "epoch": 4.44, | |
| "learning_rate": 5.8108862682033416e-06, | |
| "loss": 1.9738, | |
| "step": 406500 | |
| }, | |
| { | |
| "epoch": 4.45, | |
| "learning_rate": 5.755271151612615e-06, | |
| "loss": 1.9709, | |
| "step": 407000 | |
| }, | |
| { | |
| "epoch": 4.45, | |
| "learning_rate": 5.699432681139998e-06, | |
| "loss": 1.967, | |
| "step": 407500 | |
| }, | |
| { | |
| "epoch": 4.46, | |
| "learning_rate": 5.643817564549273e-06, | |
| "loss": 1.9784, | |
| "step": 408000 | |
| }, | |
| { | |
| "epoch": 4.46, | |
| "learning_rate": 5.587979094076656e-06, | |
| "loss": 1.9764, | |
| "step": 408500 | |
| }, | |
| { | |
| "epoch": 4.47, | |
| "learning_rate": 5.532363977485929e-06, | |
| "loss": 1.9702, | |
| "step": 409000 | |
| }, | |
| { | |
| "epoch": 4.47, | |
| "learning_rate": 5.476525507013312e-06, | |
| "loss": 1.9732, | |
| "step": 409500 | |
| }, | |
| { | |
| "epoch": 4.48, | |
| "learning_rate": 5.4209103904225854e-06, | |
| "loss": 1.9697, | |
| "step": 410000 | |
| }, | |
| { | |
| "epoch": 4.48, | |
| "eval_accuracy": 0.6306101079076281, | |
| "eval_loss": 1.876953125, | |
| "eval_runtime": 1037.0911, | |
| "eval_samples_per_second": 297.328, | |
| "eval_steps_per_second": 4.647, | |
| "step": 410000 | |
| }, | |
| { | |
| "epoch": 4.48, | |
| "learning_rate": 5.365071919949969e-06, | |
| "loss": 1.976, | |
| "step": 410500 | |
| }, | |
| { | |
| "epoch": 4.49, | |
| "learning_rate": 5.309456803359243e-06, | |
| "loss": 1.9726, | |
| "step": 411000 | |
| }, | |
| { | |
| "epoch": 4.5, | |
| "learning_rate": 5.253618332886626e-06, | |
| "loss": 1.9745, | |
| "step": 411500 | |
| }, | |
| { | |
| "epoch": 4.5, | |
| "learning_rate": 5.198003216295899e-06, | |
| "loss": 1.9744, | |
| "step": 412000 | |
| }, | |
| { | |
| "epoch": 4.51, | |
| "learning_rate": 5.142164745823282e-06, | |
| "loss": 1.975, | |
| "step": 412500 | |
| }, | |
| { | |
| "epoch": 4.51, | |
| "learning_rate": 5.086549629232556e-06, | |
| "loss": 1.9705, | |
| "step": 413000 | |
| }, | |
| { | |
| "epoch": 4.52, | |
| "learning_rate": 5.030711158759939e-06, | |
| "loss": 1.9658, | |
| "step": 413500 | |
| }, | |
| { | |
| "epoch": 4.52, | |
| "learning_rate": 4.975096042169213e-06, | |
| "loss": 1.9701, | |
| "step": 414000 | |
| }, | |
| { | |
| "epoch": 4.53, | |
| "learning_rate": 4.9192575716965965e-06, | |
| "loss": 1.9623, | |
| "step": 414500 | |
| }, | |
| { | |
| "epoch": 4.53, | |
| "learning_rate": 4.86364245510587e-06, | |
| "loss": 1.9731, | |
| "step": 415000 | |
| }, | |
| { | |
| "epoch": 4.54, | |
| "learning_rate": 4.807803984633253e-06, | |
| "loss": 1.9727, | |
| "step": 415500 | |
| }, | |
| { | |
| "epoch": 4.54, | |
| "learning_rate": 4.752188868042527e-06, | |
| "loss": 1.9624, | |
| "step": 416000 | |
| }, | |
| { | |
| "epoch": 4.55, | |
| "learning_rate": 4.69635039756991e-06, | |
| "loss": 1.9669, | |
| "step": 416500 | |
| }, | |
| { | |
| "epoch": 4.56, | |
| "learning_rate": 4.640735280979184e-06, | |
| "loss": 1.9641, | |
| "step": 417000 | |
| }, | |
| { | |
| "epoch": 4.56, | |
| "learning_rate": 4.584896810506567e-06, | |
| "loss": 1.9664, | |
| "step": 417500 | |
| }, | |
| { | |
| "epoch": 4.57, | |
| "learning_rate": 4.52928169391584e-06, | |
| "loss": 1.9722, | |
| "step": 418000 | |
| }, | |
| { | |
| "epoch": 4.57, | |
| "learning_rate": 4.4734432234432235e-06, | |
| "loss": 1.9646, | |
| "step": 418500 | |
| }, | |
| { | |
| "epoch": 4.58, | |
| "learning_rate": 4.4178281068524975e-06, | |
| "loss": 1.971, | |
| "step": 419000 | |
| }, | |
| { | |
| "epoch": 4.58, | |
| "learning_rate": 4.361989636379881e-06, | |
| "loss": 1.9666, | |
| "step": 419500 | |
| }, | |
| { | |
| "epoch": 4.59, | |
| "learning_rate": 4.306374519789154e-06, | |
| "loss": 1.9655, | |
| "step": 420000 | |
| }, | |
| { | |
| "epoch": 4.59, | |
| "eval_accuracy": 0.6312552391745295, | |
| "eval_loss": 1.8740234375, | |
| "eval_runtime": 1117.5437, | |
| "eval_samples_per_second": 275.923, | |
| "eval_steps_per_second": 4.312, | |
| "step": 420000 | |
| }, | |
| { | |
| "epoch": 4.59, | |
| "learning_rate": 4.250536049316537e-06, | |
| "loss": 1.9681, | |
| "step": 420500 | |
| }, | |
| { | |
| "epoch": 4.6, | |
| "learning_rate": 4.194920932725811e-06, | |
| "loss": 1.969, | |
| "step": 421000 | |
| }, | |
| { | |
| "epoch": 4.6, | |
| "learning_rate": 4.139082462253194e-06, | |
| "loss": 1.9663, | |
| "step": 421500 | |
| }, | |
| { | |
| "epoch": 4.61, | |
| "learning_rate": 4.083467345662468e-06, | |
| "loss": 1.9684, | |
| "step": 422000 | |
| }, | |
| { | |
| "epoch": 4.62, | |
| "learning_rate": 4.0276288751898514e-06, | |
| "loss": 1.9675, | |
| "step": 422500 | |
| }, | |
| { | |
| "epoch": 4.62, | |
| "learning_rate": 3.972013758599125e-06, | |
| "loss": 1.9649, | |
| "step": 423000 | |
| }, | |
| { | |
| "epoch": 4.63, | |
| "learning_rate": 3.916175288126508e-06, | |
| "loss": 1.9665, | |
| "step": 423500 | |
| }, | |
| { | |
| "epoch": 4.63, | |
| "learning_rate": 3.860560171535781e-06, | |
| "loss": 1.9664, | |
| "step": 424000 | |
| }, | |
| { | |
| "epoch": 4.64, | |
| "learning_rate": 3.8047217010631645e-06, | |
| "loss": 1.9637, | |
| "step": 424500 | |
| }, | |
| { | |
| "epoch": 4.64, | |
| "learning_rate": 3.7491065844724385e-06, | |
| "loss": 1.963, | |
| "step": 425000 | |
| }, | |
| { | |
| "epoch": 4.65, | |
| "learning_rate": 3.6932681139998217e-06, | |
| "loss": 1.9623, | |
| "step": 425500 | |
| }, | |
| { | |
| "epoch": 4.65, | |
| "learning_rate": 3.6376529974090953e-06, | |
| "loss": 1.9651, | |
| "step": 426000 | |
| }, | |
| { | |
| "epoch": 4.66, | |
| "learning_rate": 3.5818145269364785e-06, | |
| "loss": 1.9647, | |
| "step": 426500 | |
| }, | |
| { | |
| "epoch": 4.66, | |
| "learning_rate": 3.526199410345752e-06, | |
| "loss": 1.9617, | |
| "step": 427000 | |
| }, | |
| { | |
| "epoch": 4.67, | |
| "learning_rate": 3.4703609398731352e-06, | |
| "loss": 1.9672, | |
| "step": 427500 | |
| }, | |
| { | |
| "epoch": 4.68, | |
| "learning_rate": 3.414745823282409e-06, | |
| "loss": 1.9645, | |
| "step": 428000 | |
| }, | |
| { | |
| "epoch": 4.68, | |
| "learning_rate": 3.358907352809792e-06, | |
| "loss": 1.9624, | |
| "step": 428500 | |
| }, | |
| { | |
| "epoch": 4.69, | |
| "learning_rate": 3.303292236219065e-06, | |
| "loss": 1.9681, | |
| "step": 429000 | |
| }, | |
| { | |
| "epoch": 4.69, | |
| "learning_rate": 3.2474537657464483e-06, | |
| "loss": 1.9648, | |
| "step": 429500 | |
| }, | |
| { | |
| "epoch": 4.7, | |
| "learning_rate": 3.1918386491557228e-06, | |
| "loss": 1.9649, | |
| "step": 430000 | |
| }, | |
| { | |
| "epoch": 4.7, | |
| "eval_accuracy": 0.6319568161801343, | |
| "eval_loss": 1.869140625, | |
| "eval_runtime": 1118.6445, | |
| "eval_samples_per_second": 275.651, | |
| "eval_steps_per_second": 4.308, | |
| "step": 430000 | |
| }, | |
| { | |
| "epoch": 4.7, | |
| "learning_rate": 3.136000178683106e-06, | |
| "loss": 1.9599, | |
| "step": 430500 | |
| }, | |
| { | |
| "epoch": 4.71, | |
| "learning_rate": 3.0803850620923795e-06, | |
| "loss": 1.9689, | |
| "step": 431000 | |
| }, | |
| { | |
| "epoch": 4.71, | |
| "learning_rate": 3.0245465916197627e-06, | |
| "loss": 1.9665, | |
| "step": 431500 | |
| }, | |
| { | |
| "epoch": 4.72, | |
| "learning_rate": 2.968931475029036e-06, | |
| "loss": 1.9623, | |
| "step": 432000 | |
| }, | |
| { | |
| "epoch": 4.72, | |
| "learning_rate": 2.913093004556419e-06, | |
| "loss": 1.9615, | |
| "step": 432500 | |
| }, | |
| { | |
| "epoch": 4.73, | |
| "learning_rate": 2.857477887965693e-06, | |
| "loss": 1.9636, | |
| "step": 433000 | |
| }, | |
| { | |
| "epoch": 4.74, | |
| "learning_rate": 2.8016394174930762e-06, | |
| "loss": 1.956, | |
| "step": 433500 | |
| }, | |
| { | |
| "epoch": 4.74, | |
| "learning_rate": 2.74602430090235e-06, | |
| "loss": 1.9613, | |
| "step": 434000 | |
| }, | |
| { | |
| "epoch": 4.75, | |
| "learning_rate": 2.690185830429733e-06, | |
| "loss": 1.9601, | |
| "step": 434500 | |
| }, | |
| { | |
| "epoch": 4.75, | |
| "learning_rate": 2.6345707138390066e-06, | |
| "loss": 1.9599, | |
| "step": 435000 | |
| }, | |
| { | |
| "epoch": 4.76, | |
| "learning_rate": 2.5787322433663898e-06, | |
| "loss": 1.9652, | |
| "step": 435500 | |
| }, | |
| { | |
| "epoch": 4.76, | |
| "learning_rate": 2.5231171267756633e-06, | |
| "loss": 1.9626, | |
| "step": 436000 | |
| }, | |
| { | |
| "epoch": 4.77, | |
| "learning_rate": 2.4672786563030465e-06, | |
| "loss": 1.9586, | |
| "step": 436500 | |
| }, | |
| { | |
| "epoch": 4.77, | |
| "learning_rate": 2.4116635397123205e-06, | |
| "loss": 1.9618, | |
| "step": 437000 | |
| }, | |
| { | |
| "epoch": 4.78, | |
| "learning_rate": 2.3558250692397037e-06, | |
| "loss": 1.9611, | |
| "step": 437500 | |
| }, | |
| { | |
| "epoch": 4.78, | |
| "learning_rate": 2.3002099526489773e-06, | |
| "loss": 1.9603, | |
| "step": 438000 | |
| }, | |
| { | |
| "epoch": 4.79, | |
| "learning_rate": 2.2443714821763605e-06, | |
| "loss": 1.9565, | |
| "step": 438500 | |
| }, | |
| { | |
| "epoch": 4.8, | |
| "learning_rate": 2.188756365585634e-06, | |
| "loss": 1.9609, | |
| "step": 439000 | |
| }, | |
| { | |
| "epoch": 4.8, | |
| "learning_rate": 2.1329178951130172e-06, | |
| "loss": 1.9569, | |
| "step": 439500 | |
| }, | |
| { | |
| "epoch": 4.81, | |
| "learning_rate": 2.077302778522291e-06, | |
| "loss": 1.9622, | |
| "step": 440000 | |
| }, | |
| { | |
| "epoch": 4.81, | |
| "eval_accuracy": 0.6323656721212422, | |
| "eval_loss": 1.8662109375, | |
| "eval_runtime": 1118.8914, | |
| "eval_samples_per_second": 275.591, | |
| "eval_steps_per_second": 4.307, | |
| "step": 440000 | |
| }, | |
| { | |
| "epoch": 4.81, | |
| "learning_rate": 2.021464308049674e-06, | |
| "loss": 1.9538, | |
| "step": 440500 | |
| }, | |
| { | |
| "epoch": 4.82, | |
| "learning_rate": 1.965849191458948e-06, | |
| "loss": 1.9593, | |
| "step": 441000 | |
| }, | |
| { | |
| "epoch": 4.82, | |
| "learning_rate": 1.910010720986331e-06, | |
| "loss": 1.9642, | |
| "step": 441500 | |
| }, | |
| { | |
| "epoch": 4.83, | |
| "learning_rate": 1.8543956043956045e-06, | |
| "loss": 1.9562, | |
| "step": 442000 | |
| }, | |
| { | |
| "epoch": 4.83, | |
| "learning_rate": 1.7985571339229877e-06, | |
| "loss": 1.9582, | |
| "step": 442500 | |
| }, | |
| { | |
| "epoch": 4.84, | |
| "learning_rate": 1.742942017332261e-06, | |
| "loss": 1.9576, | |
| "step": 443000 | |
| }, | |
| { | |
| "epoch": 4.84, | |
| "learning_rate": 1.6871035468596447e-06, | |
| "loss": 1.9566, | |
| "step": 443500 | |
| }, | |
| { | |
| "epoch": 4.85, | |
| "learning_rate": 1.6314884302689183e-06, | |
| "loss": 1.96, | |
| "step": 444000 | |
| }, | |
| { | |
| "epoch": 4.86, | |
| "learning_rate": 1.5756499597963012e-06, | |
| "loss": 1.963, | |
| "step": 444500 | |
| }, | |
| { | |
| "epoch": 4.86, | |
| "learning_rate": 1.520034843205575e-06, | |
| "loss": 1.959, | |
| "step": 445000 | |
| }, | |
| { | |
| "epoch": 4.87, | |
| "learning_rate": 1.4641963727329582e-06, | |
| "loss": 1.9565, | |
| "step": 445500 | |
| }, | |
| { | |
| "epoch": 4.87, | |
| "learning_rate": 1.4085812561422318e-06, | |
| "loss": 1.9556, | |
| "step": 446000 | |
| }, | |
| { | |
| "epoch": 4.88, | |
| "learning_rate": 1.352742785669615e-06, | |
| "loss": 1.9551, | |
| "step": 446500 | |
| }, | |
| { | |
| "epoch": 4.88, | |
| "learning_rate": 1.2971276690788888e-06, | |
| "loss": 1.958, | |
| "step": 447000 | |
| }, | |
| { | |
| "epoch": 4.89, | |
| "learning_rate": 1.241289198606272e-06, | |
| "loss": 1.9601, | |
| "step": 447500 | |
| }, | |
| { | |
| "epoch": 4.89, | |
| "learning_rate": 1.1856740820155455e-06, | |
| "loss": 1.9605, | |
| "step": 448000 | |
| }, | |
| { | |
| "epoch": 4.9, | |
| "learning_rate": 1.1298356115429287e-06, | |
| "loss": 1.9554, | |
| "step": 448500 | |
| }, | |
| { | |
| "epoch": 4.9, | |
| "learning_rate": 1.0742204949522025e-06, | |
| "loss": 1.9547, | |
| "step": 449000 | |
| }, | |
| { | |
| "epoch": 4.91, | |
| "learning_rate": 1.0183820244795855e-06, | |
| "loss": 1.9543, | |
| "step": 449500 | |
| }, | |
| { | |
| "epoch": 4.92, | |
| "learning_rate": 9.62766907888859e-07, | |
| "loss": 1.9539, | |
| "step": 450000 | |
| }, | |
| { | |
| "epoch": 4.92, | |
| "eval_accuracy": 0.6327838084914335, | |
| "eval_loss": 1.8623046875, | |
| "eval_runtime": 1037.1505, | |
| "eval_samples_per_second": 297.311, | |
| "eval_steps_per_second": 4.646, | |
| "step": 450000 | |
| }, | |
| { | |
| "epoch": 4.92, | |
| "learning_rate": 9.069284374162424e-07, | |
| "loss": 1.9567, | |
| "step": 450500 | |
| }, | |
| { | |
| "epoch": 4.93, | |
| "learning_rate": 8.513133208255159e-07, | |
| "loss": 1.9581, | |
| "step": 451000 | |
| }, | |
| { | |
| "epoch": 4.93, | |
| "learning_rate": 7.954748503528992e-07, | |
| "loss": 1.9539, | |
| "step": 451500 | |
| }, | |
| { | |
| "epoch": 4.94, | |
| "learning_rate": 7.398597337621729e-07, | |
| "loss": 1.958, | |
| "step": 452000 | |
| }, | |
| { | |
| "epoch": 4.94, | |
| "learning_rate": 6.84021263289556e-07, | |
| "loss": 1.9592, | |
| "step": 452500 | |
| }, | |
| { | |
| "epoch": 4.95, | |
| "learning_rate": 6.284061466988297e-07, | |
| "loss": 1.9561, | |
| "step": 453000 | |
| }, | |
| { | |
| "epoch": 4.95, | |
| "learning_rate": 5.725676762262128e-07, | |
| "loss": 1.9556, | |
| "step": 453500 | |
| }, | |
| { | |
| "epoch": 4.96, | |
| "learning_rate": 5.169525596354864e-07, | |
| "loss": 1.9576, | |
| "step": 454000 | |
| }, | |
| { | |
| "epoch": 4.96, | |
| "learning_rate": 4.611140891628697e-07, | |
| "loss": 1.9512, | |
| "step": 454500 | |
| }, | |
| { | |
| "epoch": 4.97, | |
| "learning_rate": 4.054989725721433e-07, | |
| "loss": 1.9578, | |
| "step": 455000 | |
| }, | |
| { | |
| "epoch": 4.98, | |
| "learning_rate": 3.496605020995265e-07, | |
| "loss": 1.9538, | |
| "step": 455500 | |
| }, | |
| { | |
| "epoch": 4.98, | |
| "learning_rate": 2.9404538550880015e-07, | |
| "loss": 1.9564, | |
| "step": 456000 | |
| }, | |
| { | |
| "epoch": 4.99, | |
| "learning_rate": 2.3820691503618333e-07, | |
| "loss": 1.9589, | |
| "step": 456500 | |
| }, | |
| { | |
| "epoch": 4.99, | |
| "learning_rate": 1.82591798445457e-07, | |
| "loss": 1.9533, | |
| "step": 457000 | |
| }, | |
| { | |
| "epoch": 5.0, | |
| "learning_rate": 1.2675332797284018e-07, | |
| "loss": 1.9602, | |
| "step": 457500 | |
| }, | |
| { | |
| "epoch": 5.0, | |
| "step": 457720, | |
| "total_flos": 7.298372203161059e+18, | |
| "train_loss": 2.572186891088575, | |
| "train_runtime": 307115.8978, | |
| "train_samples_per_second": 95.384, | |
| "train_steps_per_second": 1.49 | |
| } | |
| ], | |
| "max_steps": 457720, | |
| "num_train_epochs": 5, | |
| "total_flos": 7.298372203161059e+18, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |