{
  "best_metric": 1.71484375,
  "best_model_checkpoint": "bert_12_layer_model_v1_complete_training/checkpoint-450000",
  "epoch": 5.0,
  "global_step": 457720,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 3.3715366784496464e-05,
      "loss": 7.3181,
      "step": 500
    },
    {
      "epoch": 0.01,
      "learning_rate": 3.748913176609214e-05,
      "loss": 6.6228,
      "step": 1000
    },
    {
      "epoch": 0.02,
      "learning_rate": 3.9686644919105525e-05,
      "loss": 6.5274,
      "step": 1500
    },
    {
      "epoch": 0.02,
      "learning_rate": 4.12520067118919e-05,
      "loss": 6.4586,
      "step": 2000
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.246120561428155e-05,
      "loss": 6.4182,
      "step": 2500
    },
    {
      "epoch": 0.03,
      "learning_rate": 4.345314745008792e-05,
      "loss": 6.3793,
      "step": 3000
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.428842793791915e-05,
      "loss": 6.3585,
      "step": 3500
    },
    {
      "epoch": 0.04,
      "learning_rate": 4.501488165769166e-05,
      "loss": 6.3289,
      "step": 4000
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.5653079262541536e-05,
      "loss": 6.3082,
      "step": 4500
    },
    {
      "epoch": 0.05,
      "learning_rate": 4.622625682029237e-05,
      "loss": 6.2991,
      "step": 5000
    },
    {
      "epoch": 0.06,
      "learning_rate": 4.6742676283733686e-05,
      "loss": 6.2733,
      "step": 5500
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.7216022395887685e-05,
      "loss": 6.2641,
      "step": 6000
    },
    {
      "epoch": 0.07,
      "learning_rate": 4.7649711804211924e-05,
      "loss": 6.2454,
      "step": 6500
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.805285726627034e-05,
      "loss": 6.2317,
      "step": 7000
    },
    {
      "epoch": 0.08,
      "learning_rate": 4.842667223534965e-05,
      "loss": 6.225,
      "step": 7500
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.8777756603491424e-05,
      "loss": 6.2133,
      "step": 8000
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.910622835154613e-05,
      "loss": 6.2112,
      "step": 8500
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.941716313408369e-05,
      "loss": 6.2017,
      "step": 9000
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.971010421672582e-05,
      "loss": 6.1863,
      "step": 9500
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.9989131766092136e-05,
      "loss": 6.1779,
      "step": 10000
    },
    {
      "epoch": 0.11,
      "eval_accuracy": 0.1486846615077511,
      "eval_loss": 6.171875,
      "eval_runtime": 1132.2269,
      "eval_samples_per_second": 272.345,
      "eval_steps_per_second": 4.256,
      "step": 10000
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.994673009916912e-05,
      "loss": 6.1746,
      "step": 10500
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.9890891628696506e-05,
      "loss": 6.1631,
      "step": 11000
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.983527651210578e-05,
      "loss": 6.1478,
      "step": 11500
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.9779438041633164e-05,
      "loss": 6.122,
      "step": 12000
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.972382292504244e-05,
      "loss": 6.0631,
      "step": 12500
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.966798445456982e-05,
      "loss": 5.9931,
      "step": 13000
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.96123693379791e-05,
      "loss": 5.9445,
      "step": 13500
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.955653086750648e-05,
      "loss": 5.9015,
      "step": 14000
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.9500915750915755e-05,
      "loss": 5.8657,
      "step": 14500
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.944507728044314e-05,
      "loss": 5.8324,
      "step": 15000
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.938946216385241e-05,
      "loss": 5.7922,
      "step": 15500
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.9333623693379796e-05,
      "loss": 5.7422,
      "step": 16000
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.9278008576789064e-05,
      "loss": 5.656,
      "step": 16500
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.9222170106316454e-05,
      "loss": 5.4694,
      "step": 17000
    },
    {
      "epoch": 0.19,
      "learning_rate": 4.916655498972572e-05,
      "loss": 5.2826,
      "step": 17500
    },
    {
      "epoch": 0.2,
      "learning_rate": 4.9110716519253105e-05,
      "loss": 5.1224,
      "step": 18000
    },
    {
      "epoch": 0.2,
      "learning_rate": 4.905510140266238e-05,
      "loss": 4.9978,
      "step": 18500
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.8999262932189763e-05,
      "loss": 4.8805,
      "step": 19000
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.894364781559904e-05,
      "loss": 4.7842,
      "step": 19500
    },
    {
      "epoch": 0.22,
      "learning_rate": 4.888780934512642e-05,
      "loss": 4.6914,
      "step": 20000
    },
    {
      "epoch": 0.22,
      "eval_accuracy": 0.31781246345401765,
      "eval_loss": 4.50390625,
      "eval_runtime": 1170.7642,
      "eval_samples_per_second": 263.38,
      "eval_steps_per_second": 4.116,
      "step": 20000
    },
    {
      "epoch": 0.22,
      "learning_rate": 4.8832194228535696e-05,
      "loss": 4.5744,
      "step": 20500
    },
    {
      "epoch": 0.23,
      "learning_rate": 4.877635575806307e-05,
      "loss": 4.429,
      "step": 21000
    },
    {
      "epoch": 0.23,
      "learning_rate": 4.8720740641472354e-05,
      "loss": 4.2966,
      "step": 21500
    },
    {
      "epoch": 0.24,
      "learning_rate": 4.866490217099973e-05,
      "loss": 4.1822,
      "step": 22000
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.8609287054409006e-05,
      "loss": 4.0959,
      "step": 22500
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.855344858393639e-05,
      "loss": 4.0111,
      "step": 23000
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.8497833467345664e-05,
      "loss": 3.9325,
      "step": 23500
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.844199499687305e-05,
      "loss": 3.8522,
      "step": 24000
    },
    {
      "epoch": 0.27,
      "learning_rate": 4.838637988028232e-05,
      "loss": 3.7721,
      "step": 24500
    },
    {
      "epoch": 0.27,
      "learning_rate": 4.8330541409809705e-05,
      "loss": 3.6958,
      "step": 25000
    },
    {
      "epoch": 0.28,
      "learning_rate": 4.827492629321898e-05,
      "loss": 3.6262,
      "step": 25500
    },
    {
      "epoch": 0.28,
      "learning_rate": 4.821908782274636e-05,
      "loss": 3.5627,
      "step": 26000
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.816347270615564e-05,
      "loss": 3.5099,
      "step": 26500
    },
    {
      "epoch": 0.29,
      "learning_rate": 4.810763423568302e-05,
      "loss": 3.4574,
      "step": 27000
    },
    {
      "epoch": 0.3,
      "learning_rate": 4.805201911909229e-05,
      "loss": 3.4123,
      "step": 27500
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.799618064861968e-05,
      "loss": 3.3626,
      "step": 28000
    },
    {
      "epoch": 0.31,
      "learning_rate": 4.794056553202895e-05,
      "loss": 3.3279,
      "step": 28500
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.788472706155634e-05,
      "loss": 3.2847,
      "step": 29000
    },
    {
      "epoch": 0.32,
      "learning_rate": 4.7829111944965605e-05,
      "loss": 3.2545,
      "step": 29500
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.777327347449299e-05,
      "loss": 3.2325,
      "step": 30000
    },
    {
      "epoch": 0.33,
      "eval_accuracy": 0.4772067036025076,
      "eval_loss": 3.09765625,
      "eval_runtime": 1211.4769,
      "eval_samples_per_second": 254.529,
      "eval_steps_per_second": 3.978,
      "step": 30000
    },
    {
      "epoch": 0.33,
      "learning_rate": 4.771765835790226e-05,
      "loss": 3.1891,
      "step": 30500
    },
    {
      "epoch": 0.34,
      "learning_rate": 4.7661819887429646e-05,
      "loss": 3.1672,
      "step": 31000
    },
    {
      "epoch": 0.34,
      "learning_rate": 4.760620477083892e-05,
      "loss": 3.1438,
      "step": 31500
    },
    {
      "epoch": 0.35,
      "learning_rate": 4.75503663003663e-05,
      "loss": 3.1156,
      "step": 32000
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.749475118377558e-05,
      "loss": 3.0926,
      "step": 32500
    },
    {
      "epoch": 0.36,
      "learning_rate": 4.7438912713302956e-05,
      "loss": 3.0655,
      "step": 33000
    },
    {
      "epoch": 0.37,
      "learning_rate": 4.738329759671224e-05,
      "loss": 3.0478,
      "step": 33500
    },
    {
      "epoch": 0.37,
      "learning_rate": 4.7327459126239614e-05,
      "loss": 3.0258,
      "step": 34000
    },
    {
      "epoch": 0.38,
      "learning_rate": 4.727184400964889e-05,
      "loss": 2.9978,
      "step": 34500
    },
    {
      "epoch": 0.38,
      "learning_rate": 4.721600553917627e-05,
      "loss": 2.9823,
      "step": 35000
    },
    {
      "epoch": 0.39,
      "learning_rate": 4.7160390422585546e-05,
      "loss": 2.967,
      "step": 35500
    },
    {
      "epoch": 0.39,
      "learning_rate": 4.710455195211293e-05,
      "loss": 2.945,
      "step": 36000
    },
    {
      "epoch": 0.4,
      "learning_rate": 4.7048936835522204e-05,
      "loss": 2.9286,
      "step": 36500
    },
    {
      "epoch": 0.4,
      "learning_rate": 4.699309836504959e-05,
      "loss": 2.9146,
      "step": 37000
    },
    {
      "epoch": 0.41,
      "learning_rate": 4.6937483248458856e-05,
      "loss": 2.8956,
      "step": 37500
    },
    {
      "epoch": 0.42,
      "learning_rate": 4.6881644777986246e-05,
      "loss": 2.8819,
      "step": 38000
    },
    {
      "epoch": 0.42,
      "learning_rate": 4.6826029661395514e-05,
      "loss": 2.8697,
      "step": 38500
    },
    {
      "epoch": 0.43,
      "learning_rate": 4.6770191190922904e-05,
      "loss": 2.8571,
      "step": 39000
    },
    {
      "epoch": 0.43,
      "learning_rate": 4.671457607433217e-05,
      "loss": 2.8425,
      "step": 39500
    },
    {
      "epoch": 0.44,
      "learning_rate": 4.665873760385956e-05,
      "loss": 2.831,
      "step": 40000
    },
    {
      "epoch": 0.44,
      "eval_accuracy": 0.5223783031240715,
      "eval_loss": 2.7265625,
      "eval_runtime": 1229.432,
      "eval_samples_per_second": 250.812,
      "eval_steps_per_second": 3.92,
      "step": 40000
    },
    {
      "epoch": 0.44,
      "learning_rate": 4.660312248726883e-05,
      "loss": 2.8148,
      "step": 40500
    },
    {
      "epoch": 0.45,
      "learning_rate": 4.654728401679622e-05,
      "loss": 2.8,
      "step": 41000
    },
    {
      "epoch": 0.45,
      "learning_rate": 4.649166890020549e-05,
      "loss": 2.7909,
      "step": 41500
    },
    {
      "epoch": 0.46,
      "learning_rate": 4.643583042973287e-05,
      "loss": 2.7752,
      "step": 42000
    },
    {
      "epoch": 0.46,
      "learning_rate": 4.6380215313142146e-05,
      "loss": 2.7642,
      "step": 42500
    },
    {
      "epoch": 0.47,
      "learning_rate": 4.632437684266953e-05,
      "loss": 2.7494,
      "step": 43000
    },
    {
      "epoch": 0.48,
      "learning_rate": 4.6268761726078804e-05,
      "loss": 2.7433,
      "step": 43500
    },
    {
      "epoch": 0.48,
      "learning_rate": 4.621292325560618e-05,
      "loss": 2.7307,
      "step": 44000
    },
    {
      "epoch": 0.49,
      "learning_rate": 4.615730813901546e-05,
      "loss": 2.7294,
      "step": 44500
    },
    {
      "epoch": 0.49,
      "learning_rate": 4.610146966854284e-05,
      "loss": 2.7154,
      "step": 45000
    },
    {
      "epoch": 0.5,
      "learning_rate": 4.604585455195212e-05,
      "loss": 2.7007,
      "step": 45500
    },
    {
      "epoch": 0.5,
      "learning_rate": 4.5990016081479496e-05,
      "loss": 2.6921,
      "step": 46000
    },
    {
      "epoch": 0.51,
      "learning_rate": 4.593440096488877e-05,
      "loss": 2.6842,
      "step": 46500
    },
    {
      "epoch": 0.51,
      "learning_rate": 4.5878562494416154e-05,
      "loss": 2.6767,
      "step": 47000
    },
    {
      "epoch": 0.52,
      "learning_rate": 4.582294737782543e-05,
      "loss": 2.6696,
      "step": 47500
    },
    {
      "epoch": 0.52,
      "learning_rate": 4.576710890735281e-05,
      "loss": 2.6598,
      "step": 48000
    },
    {
      "epoch": 0.53,
      "learning_rate": 4.571149379076208e-05,
      "loss": 2.6462,
      "step": 48500
    },
    {
      "epoch": 0.54,
      "learning_rate": 4.565565532028947e-05,
      "loss": 2.6451,
      "step": 49000
    },
    {
      "epoch": 0.54,
      "learning_rate": 4.560004020369874e-05,
      "loss": 2.6378,
      "step": 49500
    },
    {
      "epoch": 0.55,
      "learning_rate": 4.554420173322613e-05,
      "loss": 2.6262,
      "step": 50000
    },
    {
      "epoch": 0.55,
      "eval_accuracy": 0.545525046728465,
      "eval_loss": 2.537109375,
      "eval_runtime": 1265.0645,
      "eval_samples_per_second": 243.747,
      "eval_steps_per_second": 3.809,
      "step": 50000
    },
    {
      "epoch": 0.55,
      "learning_rate": 4.5488586616635397e-05,
      "loss": 2.616,
      "step": 50500
    },
    {
      "epoch": 0.56,
      "learning_rate": 4.5432748146162787e-05,
      "loss": 2.6109,
      "step": 51000
    },
    {
      "epoch": 0.56,
      "learning_rate": 4.5377133029572055e-05,
      "loss": 2.6142,
      "step": 51500
    },
    {
      "epoch": 0.57,
      "learning_rate": 4.5321294559099445e-05,
      "loss": 2.5987,
      "step": 52000
    },
    {
      "epoch": 0.57,
      "learning_rate": 4.526567944250871e-05,
      "loss": 2.5894,
      "step": 52500
    },
    {
      "epoch": 0.58,
      "learning_rate": 4.5209840972036096e-05,
      "loss": 2.5782,
      "step": 53000
    },
    {
      "epoch": 0.58,
      "learning_rate": 4.515422585544537e-05,
      "loss": 2.5747,
      "step": 53500
    },
    {
      "epoch": 0.59,
      "learning_rate": 4.5098387384972754e-05,
      "loss": 2.5671,
      "step": 54000
    },
    {
      "epoch": 0.6,
      "learning_rate": 4.504277226838203e-05,
      "loss": 2.565,
      "step": 54500
    },
    {
      "epoch": 0.6,
      "learning_rate": 4.498693379790941e-05,
      "loss": 2.5533,
      "step": 55000
    },
    {
      "epoch": 0.61,
      "learning_rate": 4.493131868131869e-05,
      "loss": 2.5541,
      "step": 55500
    },
    {
      "epoch": 0.61,
      "learning_rate": 4.487548021084606e-05,
      "loss": 2.5446,
      "step": 56000
    },
    {
      "epoch": 0.62,
      "learning_rate": 4.4819865094255345e-05,
      "loss": 2.5328,
      "step": 56500
    },
    {
      "epoch": 0.62,
      "learning_rate": 4.476402662378272e-05,
      "loss": 2.5298,
      "step": 57000
    },
    {
      "epoch": 0.63,
      "learning_rate": 4.4708411507192e-05,
      "loss": 2.5278,
      "step": 57500
    },
    {
      "epoch": 0.63,
      "learning_rate": 4.465257303671938e-05,
      "loss": 2.5095,
      "step": 58000
    },
    {
      "epoch": 0.64,
      "learning_rate": 4.4596957920128654e-05,
      "loss": 2.5112,
      "step": 58500
    },
    {
      "epoch": 0.64,
      "learning_rate": 4.454111944965604e-05,
      "loss": 2.5037,
      "step": 59000
    },
    {
      "epoch": 0.65,
      "learning_rate": 4.448550433306531e-05,
      "loss": 2.5049,
      "step": 59500
    },
    {
      "epoch": 0.66,
      "learning_rate": 4.4429665862592695e-05,
      "loss": 2.5006,
      "step": 60000
    },
    {
      "epoch": 0.66,
      "eval_accuracy": 0.561378824934529,
      "eval_loss": 2.4140625,
      "eval_runtime": 1237.7775,
      "eval_samples_per_second": 249.121,
      "eval_steps_per_second": 3.893,
      "step": 60000
    },
    {
      "epoch": 0.66,
      "learning_rate": 4.437405074600196e-05,
      "loss": 2.4905,
      "step": 60500
    },
    {
      "epoch": 0.67,
      "learning_rate": 4.431821227552935e-05,
      "loss": 2.4854,
      "step": 61000
    },
    {
      "epoch": 0.67,
      "learning_rate": 4.426259715893862e-05,
      "loss": 2.4858,
      "step": 61500
    },
    {
      "epoch": 0.68,
      "learning_rate": 4.420675868846601e-05,
      "loss": 2.472,
      "step": 62000
    },
    {
      "epoch": 0.68,
      "learning_rate": 4.415114357187528e-05,
      "loss": 2.4715,
      "step": 62500
    },
    {
      "epoch": 0.69,
      "learning_rate": 4.409530510140266e-05,
      "loss": 2.4633,
      "step": 63000
    },
    {
      "epoch": 0.69,
      "learning_rate": 4.403968998481194e-05,
      "loss": 2.4549,
      "step": 63500
    },
    {
      "epoch": 0.7,
      "learning_rate": 4.398385151433932e-05,
      "loss": 2.4558,
      "step": 64000
    },
    {
      "epoch": 0.7,
      "learning_rate": 4.3928236397748595e-05,
      "loss": 2.4529,
      "step": 64500
    },
    {
      "epoch": 0.71,
      "learning_rate": 4.387239792727598e-05,
      "loss": 2.4473,
      "step": 65000
    },
    {
      "epoch": 0.72,
      "learning_rate": 4.3816782810685253e-05,
      "loss": 2.4402,
      "step": 65500
    },
    {
      "epoch": 0.72,
      "learning_rate": 4.376094434021264e-05,
      "loss": 2.441,
      "step": 66000
    },
    {
      "epoch": 0.73,
      "learning_rate": 4.370532922362191e-05,
      "loss": 2.4376,
      "step": 66500
    },
    {
      "epoch": 0.73,
      "learning_rate": 4.3649490753149295e-05,
      "loss": 2.4264,
      "step": 67000
    },
    {
      "epoch": 0.74,
      "learning_rate": 4.359387563655857e-05,
      "loss": 2.4239,
      "step": 67500
    },
    {
      "epoch": 0.74,
      "learning_rate": 4.3538037166085946e-05,
      "loss": 2.4179,
      "step": 68000
    },
    {
      "epoch": 0.75,
      "learning_rate": 4.348242204949523e-05,
      "loss": 2.4189,
      "step": 68500
    },
    {
      "epoch": 0.75,
      "learning_rate": 4.3426583579022604e-05,
      "loss": 2.416,
      "step": 69000
    },
    {
      "epoch": 0.76,
      "learning_rate": 4.337096846243188e-05,
      "loss": 2.4088,
      "step": 69500
    },
    {
      "epoch": 0.76,
      "learning_rate": 4.331512999195926e-05,
      "loss": 2.4062,
      "step": 70000
    },
    {
      "epoch": 0.76,
      "eval_accuracy": 0.5734350887860105,
      "eval_loss": 2.32421875,
      "eval_runtime": 1141.7265,
      "eval_samples_per_second": 270.079,
      "eval_steps_per_second": 4.221,
      "step": 70000
    },
    {
      "epoch": 0.77,
      "learning_rate": 4.325951487536854e-05,
      "loss": 2.4003,
      "step": 70500
    },
    {
      "epoch": 0.78,
      "learning_rate": 4.320367640489592e-05,
      "loss": 2.3966,
      "step": 71000
    },
    {
      "epoch": 0.78,
      "learning_rate": 4.3148061288305195e-05,
      "loss": 2.3965,
      "step": 71500
    },
    {
      "epoch": 0.79,
      "learning_rate": 4.309222281783258e-05,
      "loss": 2.389,
      "step": 72000
    },
    {
      "epoch": 0.79,
      "learning_rate": 4.3036607701241846e-05,
      "loss": 2.3833,
      "step": 72500
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.2980769230769236e-05,
      "loss": 2.3841,
      "step": 73000
    },
    {
      "epoch": 0.8,
      "learning_rate": 4.2925154114178504e-05,
      "loss": 2.3791,
      "step": 73500
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.286931564370589e-05,
      "loss": 2.378,
      "step": 74000
    },
    {
      "epoch": 0.81,
      "learning_rate": 4.281370052711516e-05,
      "loss": 2.3719,
      "step": 74500
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.2757862056642545e-05,
      "loss": 2.3649,
      "step": 75000
    },
    {
      "epoch": 0.82,
      "learning_rate": 4.270224694005182e-05,
      "loss": 2.3667,
      "step": 75500
    },
    {
      "epoch": 0.83,
      "learning_rate": 4.2646408469579203e-05,
      "loss": 2.3602,
      "step": 76000
    },
    {
      "epoch": 0.84,
      "learning_rate": 4.259079335298848e-05,
      "loss": 2.3529,
      "step": 76500
    },
    {
      "epoch": 0.84,
      "learning_rate": 4.253495488251586e-05,
      "loss": 2.3573,
      "step": 77000
    },
    {
      "epoch": 0.85,
      "learning_rate": 4.2479339765925136e-05,
      "loss": 2.3537,
      "step": 77500
    },
    {
      "epoch": 0.85,
      "learning_rate": 4.242350129545252e-05,
      "loss": 2.3471,
      "step": 78000
    },
    {
      "epoch": 0.86,
      "learning_rate": 4.2367886178861794e-05,
      "loss": 2.3454,
      "step": 78500
    },
    {
      "epoch": 0.86,
      "learning_rate": 4.231204770838918e-05,
      "loss": 2.3423,
      "step": 79000
    },
    {
      "epoch": 0.87,
      "learning_rate": 4.2256432591798446e-05,
      "loss": 2.3349,
      "step": 79500
    },
    {
      "epoch": 0.87,
      "learning_rate": 4.220059412132583e-05,
      "loss": 2.3338,
      "step": 80000
    },
    {
      "epoch": 0.87,
      "eval_accuracy": 0.5822560488428162,
      "eval_loss": 2.25390625,
      "eval_runtime": 1241.5767,
      "eval_samples_per_second": 248.358,
      "eval_steps_per_second": 3.881,
      "step": 80000
    },
    {
      "epoch": 0.88,
      "learning_rate": 4.2144979004735104e-05,
      "loss": 2.3318,
      "step": 80500
    },
    {
      "epoch": 0.88,
      "learning_rate": 4.208914053426249e-05,
      "loss": 2.3315,
      "step": 81000
    },
    {
      "epoch": 0.89,
      "learning_rate": 4.203352541767176e-05,
      "loss": 2.3286,
      "step": 81500
    },
    {
      "epoch": 0.9,
      "learning_rate": 4.1977686947199145e-05,
      "loss": 2.3203,
      "step": 82000
    },
    {
      "epoch": 0.9,
      "learning_rate": 4.192207183060842e-05,
      "loss": 2.3236,
      "step": 82500
    },
    {
      "epoch": 0.91,
      "learning_rate": 4.18662333601358e-05,
      "loss": 2.3147,
      "step": 83000
    },
    {
      "epoch": 0.91,
      "learning_rate": 4.181061824354508e-05,
      "loss": 2.3194,
      "step": 83500
    },
    {
      "epoch": 0.92,
      "learning_rate": 4.1754779773072454e-05,
      "loss": 2.3037,
      "step": 84000
    },
    {
      "epoch": 0.92,
      "learning_rate": 4.169916465648173e-05,
      "loss": 2.3121,
      "step": 84500
    },
    {
      "epoch": 0.93,
      "learning_rate": 4.164332618600911e-05,
      "loss": 2.302,
      "step": 85000
    },
    {
      "epoch": 0.93,
      "learning_rate": 4.158771106941839e-05,
      "loss": 2.3057,
      "step": 85500
    },
    {
      "epoch": 0.94,
      "learning_rate": 4.153187259894577e-05,
      "loss": 2.2998,
      "step": 86000
    },
    {
      "epoch": 0.94,
      "learning_rate": 4.1476257482355045e-05,
      "loss": 2.3006,
      "step": 86500
    },
    {
      "epoch": 0.95,
      "learning_rate": 4.142041901188243e-05,
      "loss": 2.2962,
      "step": 87000
    },
    {
      "epoch": 0.96,
      "learning_rate": 4.13648038952917e-05,
      "loss": 2.2941,
      "step": 87500
    },
    {
      "epoch": 0.96,
      "learning_rate": 4.1308965424819086e-05,
      "loss": 2.2915,
      "step": 88000
    },
    {
      "epoch": 0.97,
      "learning_rate": 4.125335030822836e-05,
      "loss": 2.2857,
      "step": 88500
    },
    {
      "epoch": 0.97,
      "learning_rate": 4.1197511837755744e-05,
      "loss": 2.2876,
      "step": 89000
    },
    {
      "epoch": 0.98,
      "learning_rate": 4.114189672116502e-05,
      "loss": 2.2776,
      "step": 89500
    },
    {
      "epoch": 0.98,
      "learning_rate": 4.10860582506924e-05,
      "loss": 2.2838,
      "step": 90000
    },
    {
      "epoch": 0.98,
      "eval_accuracy": 0.5894123096001136,
      "eval_loss": 2.201171875,
      "eval_runtime": 1183.2684,
      "eval_samples_per_second": 260.597,
      "eval_steps_per_second": 4.073,
      "step": 90000
    },
    {
      "epoch": 0.99,
      "learning_rate": 4.103044313410167e-05,
      "loss": 2.2785,
      "step": 90500
    },
    {
      "epoch": 0.99,
      "learning_rate": 4.097460466362906e-05,
      "loss": 2.2731,
      "step": 91000
    },
    {
      "epoch": 1.0,
      "learning_rate": 4.091898954703833e-05,
      "loss": 2.2686,
      "step": 91500
    },
    {
      "epoch": 1.0,
      "learning_rate": 4.086315107656571e-05,
      "loss": 2.2635,
      "step": 92000
    },
    {
      "epoch": 1.01,
      "learning_rate": 4.0807535959974986e-05,
      "loss": 2.2623,
      "step": 92500
    },
    {
      "epoch": 1.02,
      "learning_rate": 4.075169748950237e-05,
      "loss": 2.2522,
      "step": 93000
    },
    {
      "epoch": 1.02,
      "learning_rate": 4.0696082372911645e-05,
      "loss": 2.2606,
      "step": 93500
    },
    {
      "epoch": 1.03,
      "learning_rate": 4.064024390243902e-05,
      "loss": 2.2587,
      "step": 94000
    },
    {
      "epoch": 1.03,
      "learning_rate": 4.05846287858483e-05,
      "loss": 2.258,
      "step": 94500
    },
    {
      "epoch": 1.04,
      "learning_rate": 4.052879031537568e-05,
      "loss": 2.2499,
      "step": 95000
    },
    {
      "epoch": 1.04,
      "learning_rate": 4.047317519878496e-05,
      "loss": 2.2513,
      "step": 95500
    },
    {
      "epoch": 1.05,
      "learning_rate": 4.041733672831234e-05,
      "loss": 2.2464,
      "step": 96000
    },
    {
      "epoch": 1.05,
      "learning_rate": 4.036172161172161e-05,
      "loss": 2.2443,
      "step": 96500
    },
    {
      "epoch": 1.06,
      "learning_rate": 4.0305883141248995e-05,
      "loss": 2.2454,
      "step": 97000
    },
    {
      "epoch": 1.07,
      "learning_rate": 4.025026802465827e-05,
      "loss": 2.2452,
      "step": 97500
    },
    {
      "epoch": 1.07,
      "learning_rate": 4.019442955418565e-05,
      "loss": 2.2395,
      "step": 98000
    },
    {
      "epoch": 1.08,
      "learning_rate": 4.013881443759493e-05,
      "loss": 2.2376,
      "step": 98500
    },
    {
      "epoch": 1.08,
      "learning_rate": 4.008297596712231e-05,
      "loss": 2.2295,
      "step": 99000
    },
    {
      "epoch": 1.09,
      "learning_rate": 4.0027360850531586e-05,
      "loss": 2.2414,
      "step": 99500
    },
    {
      "epoch": 1.09,
      "learning_rate": 3.997152238005897e-05,
      "loss": 2.231,
      "step": 100000
    },
    {
      "epoch": 1.09,
      "eval_accuracy": 0.5959185956012876,
      "eval_loss": 2.150390625,
      "eval_runtime": 1176.5848,
      "eval_samples_per_second": 262.077,
      "eval_steps_per_second": 4.096,
      "step": 100000
    },
    {
      "epoch": 1.1,
      "learning_rate": 3.991590726346824e-05,
      "loss": 2.2314,
      "step": 100500
    },
    {
      "epoch": 1.1,
      "learning_rate": 3.986006879299563e-05,
      "loss": 2.2268,
      "step": 101000
    },
    {
      "epoch": 1.11,
      "learning_rate": 3.9804453676404895e-05,
      "loss": 2.231,
      "step": 101500
    },
    {
      "epoch": 1.11,
      "learning_rate": 3.9748615205932285e-05,
      "loss": 2.2243,
      "step": 102000
    },
    {
      "epoch": 1.12,
      "learning_rate": 3.969300008934155e-05,
      "loss": 2.2207,
      "step": 102500
    },
    {
      "epoch": 1.13,
      "learning_rate": 3.963716161886894e-05,
      "loss": 2.2192,
      "step": 103000
    },
    {
      "epoch": 1.13,
      "learning_rate": 3.958154650227821e-05,
      "loss": 2.2114,
      "step": 103500
    },
    {
      "epoch": 1.14,
      "learning_rate": 3.9525708031805594e-05,
      "loss": 2.2099,
      "step": 104000
    },
    {
      "epoch": 1.14,
      "learning_rate": 3.947009291521487e-05,
      "loss": 2.2115,
      "step": 104500
    },
    {
      "epoch": 1.15,
      "learning_rate": 3.9414254444742246e-05,
      "loss": 2.2105,
      "step": 105000
    },
    {
      "epoch": 1.15,
      "learning_rate": 3.935863932815153e-05,
      "loss": 2.2079,
      "step": 105500
    },
    {
      "epoch": 1.16,
      "learning_rate": 3.9302800857678904e-05,
      "loss": 2.2013,
      "step": 106000
    },
    {
      "epoch": 1.16,
      "learning_rate": 3.9247185741088185e-05,
      "loss": 2.2021,
      "step": 106500
    },
    {
      "epoch": 1.17,
      "learning_rate": 3.919134727061556e-05,
      "loss": 2.2035,
      "step": 107000
    },
    {
      "epoch": 1.17,
      "learning_rate": 3.9135732154024843e-05,
      "loss": 2.1966,
      "step": 107500
    },
    {
      "epoch": 1.18,
      "learning_rate": 3.907989368355222e-05,
      "loss": 2.2014,
      "step": 108000
    },
    {
      "epoch": 1.19,
      "learning_rate": 3.9024278566961495e-05,
      "loss": 2.1986,
      "step": 108500
    },
    {
      "epoch": 1.19,
      "learning_rate": 3.896844009648888e-05,
      "loss": 2.1917,
      "step": 109000
    },
    {
      "epoch": 1.2,
      "learning_rate": 3.891282497989815e-05,
      "loss": 2.1919,
      "step": 109500
    },
    {
      "epoch": 1.2,
      "learning_rate": 3.8856986509425536e-05,
      "loss": 2.1903,
      "step": 110000
    },
    {
      "epoch": 1.2,
      "eval_accuracy": 0.6009023831034535,
      "eval_loss": 2.11328125,
      "eval_runtime": 1234.1136,
      "eval_samples_per_second": 249.86,
      "eval_steps_per_second": 3.905,
      "step": 110000
    },
    {
      "epoch": 1.21,
      "learning_rate": 3.8801371392834804e-05,
      "loss": 2.189,
      "step": 110500
    },
    {
      "epoch": 1.21,
      "learning_rate": 3.8745532922362194e-05,
      "loss": 2.1858,
      "step": 111000
    },
    {
      "epoch": 1.22,
      "learning_rate": 3.868991780577146e-05,
      "loss": 2.1867,
      "step": 111500
    },
    {
      "epoch": 1.22,
      "learning_rate": 3.863407933529885e-05,
      "loss": 2.1871,
      "step": 112000
    },
    {
      "epoch": 1.23,
      "learning_rate": 3.857846421870812e-05,
      "loss": 2.1823,
      "step": 112500
    },
    {
      "epoch": 1.23,
      "learning_rate": 3.852262574823551e-05,
      "loss": 2.1827,
      "step": 113000
    },
    {
      "epoch": 1.24,
      "learning_rate": 3.846701063164478e-05,
      "loss": 2.1797,
      "step": 113500
    },
    {
      "epoch": 1.25,
      "learning_rate": 3.841117216117217e-05,
      "loss": 2.1761,
      "step": 114000
    },
    {
      "epoch": 1.25,
      "learning_rate": 3.8355557044581436e-05,
      "loss": 2.1804,
      "step": 114500
    },
    {
      "epoch": 1.26,
      "learning_rate": 3.829971857410882e-05,
      "loss": 2.1727,
      "step": 115000
    },
    {
      "epoch": 1.26,
      "learning_rate": 3.8244103457518094e-05,
      "loss": 2.1697,
      "step": 115500
    },
    {
      "epoch": 1.27,
      "learning_rate": 3.818826498704548e-05,
      "loss": 2.1735,
      "step": 116000
    },
    {
      "epoch": 1.27,
      "learning_rate": 3.813264987045475e-05,
      "loss": 2.1701,
      "step": 116500
    },
    {
      "epoch": 1.28,
      "learning_rate": 3.807681139998213e-05,
      "loss": 2.1679,
      "step": 117000
    },
    {
      "epoch": 1.28,
      "learning_rate": 3.802119628339141e-05,
      "loss": 2.1651,
      "step": 117500
    },
    {
      "epoch": 1.29,
      "learning_rate": 3.7965357812918787e-05,
      "loss": 2.1638,
      "step": 118000
    },
    {
      "epoch": 1.29,
      "learning_rate": 3.790974269632807e-05,
      "loss": 2.1653,
      "step": 118500
    },
    {
      "epoch": 1.3,
      "learning_rate": 3.7853904225855445e-05,
      "loss": 2.1604,
      "step": 119000
    },
    {
      "epoch": 1.31,
      "learning_rate": 3.7798289109264726e-05,
      "loss": 2.1616,
      "step": 119500
    },
    {
      "epoch": 1.31,
      "learning_rate": 3.77424506387921e-05,
      "loss": 2.1594,
      "step": 120000
    },
    {
      "epoch": 1.31,
      "eval_accuracy": 0.6053601315304664,
      "eval_loss": 2.080078125,
      "eval_runtime": 1170.3916,
      "eval_samples_per_second": 263.464,
      "eval_steps_per_second": 4.117,
      "step": 120000
    },
    {
      "epoch": 1.32,
      "learning_rate": 3.768683552220138e-05,
      "loss": 2.1564,
      "step": 120500
    },
    {
      "epoch": 1.32,
      "learning_rate": 3.763099705172876e-05,
      "loss": 2.1504,
      "step": 121000
    },
    {
      "epoch": 1.33,
      "learning_rate": 3.757538193513803e-05,
      "loss": 2.1511,
      "step": 121500
    },
    {
      "epoch": 1.33,
      "learning_rate": 3.751954346466542e-05,
      "loss": 2.1462,
      "step": 122000
    },
    {
      "epoch": 1.34,
      "learning_rate": 3.746392834807469e-05,
      "loss": 2.1409,
      "step": 122500
    },
    {
      "epoch": 1.34,
      "learning_rate": 3.740808987760208e-05,
      "loss": 2.1509,
      "step": 123000
    },
    {
      "epoch": 1.35,
      "learning_rate": 3.7352474761011345e-05,
      "loss": 2.1453,
      "step": 123500
    },
    {
      "epoch": 1.35,
      "learning_rate": 3.7296636290538735e-05,
      "loss": 2.1465,
      "step": 124000
    },
    {
      "epoch": 1.36,
      "learning_rate": 3.7241021173948e-05,
      "loss": 2.141,
      "step": 124500
    },
    {
      "epoch": 1.37,
      "learning_rate": 3.718518270347539e-05,
      "loss": 2.144,
      "step": 125000
    },
    {
      "epoch": 1.37,
      "learning_rate": 3.712956758688466e-05,
      "loss": 2.1372,
      "step": 125500
    },
    {
      "epoch": 1.38,
      "learning_rate": 3.7073729116412044e-05,
      "loss": 2.14,
      "step": 126000
    },
    {
      "epoch": 1.38,
      "learning_rate": 3.701811399982132e-05,
      "loss": 2.1332,
      "step": 126500
    },
    {
      "epoch": 1.39,
      "learning_rate": 3.69622755293487e-05,
      "loss": 2.1398,
      "step": 127000
    },
    {
      "epoch": 1.39,
      "learning_rate": 3.690666041275798e-05,
      "loss": 2.13,
      "step": 127500
    },
    {
      "epoch": 1.4,
      "learning_rate": 3.685082194228536e-05,
      "loss": 2.1305,
      "step": 128000
    },
    {
      "epoch": 1.4,
      "learning_rate": 3.6795206825694635e-05,
      "loss": 2.1296,
      "step": 128500
    },
    {
      "epoch": 1.41,
      "learning_rate": 3.673936835522201e-05,
      "loss": 2.1309,
      "step": 129000
    },
    {
      "epoch": 1.41,
      "learning_rate": 3.668375323863129e-05,
      "loss": 2.1274,
      "step": 129500
    },
    {
      "epoch": 1.42,
      "learning_rate": 3.662791476815867e-05,
      "loss": 2.1307,
      "step": 130000
    },
    {
      "epoch": 1.42,
      "eval_accuracy": 0.609508441276654,
      "eval_loss": 2.048828125,
      "eval_runtime": 1327.7196,
      "eval_samples_per_second": 232.245,
      "eval_steps_per_second": 3.63,
      "step": 130000
    },
    {
      "epoch": 1.43,
      "learning_rate": 3.657229965156795e-05,
      "loss": 2.123,
      "step": 130500
    },
    {
      "epoch": 1.43,
      "learning_rate": 3.651646118109533e-05,
      "loss": 2.1239,
      "step": 131000
    },
    {
      "epoch": 1.44,
      "learning_rate": 3.64608460645046e-05,
      "loss": 2.1248,
      "step": 131500
    },
    {
      "epoch": 1.44,
      "learning_rate": 3.6405007594031985e-05,
      "loss": 2.1189,
      "step": 132000
    },
    {
      "epoch": 1.45,
      "learning_rate": 3.634939247744126e-05,
      "loss": 2.1214,
      "step": 132500
    },
    {
      "epoch": 1.45,
      "learning_rate": 3.6293554006968643e-05,
      "loss": 2.1213,
      "step": 133000
    },
    {
      "epoch": 1.46,
      "learning_rate": 3.623793889037791e-05,
      "loss": 2.1205,
      "step": 133500
    },
    {
      "epoch": 1.46,
      "learning_rate": 3.61821004199053e-05,
      "loss": 2.1236,
      "step": 134000
    },
    {
      "epoch": 1.47,
      "learning_rate": 3.612648530331457e-05,
      "loss": 2.1142,
      "step": 134500
    },
    {
      "epoch": 1.47,
      "learning_rate": 3.607064683284196e-05,
      "loss": 2.1223,
      "step": 135000
    },
    {
      "epoch": 1.48,
      "learning_rate": 3.601503171625123e-05,
      "loss": 2.11,
      "step": 135500
    },
    {
      "epoch": 1.49,
      "learning_rate": 3.595919324577861e-05,
      "loss": 2.1085,
      "step": 136000
    },
    {
      "epoch": 1.49,
      "learning_rate": 3.5903578129187886e-05,
      "loss": 2.1053,
      "step": 136500
    },
    {
      "epoch": 1.5,
      "learning_rate": 3.584773965871527e-05,
      "loss": 2.1034,
      "step": 137000
    },
    {
      "epoch": 1.5,
      "learning_rate": 3.5792124542124544e-05,
      "loss": 2.1058,
      "step": 137500
    },
    {
      "epoch": 1.51,
      "learning_rate": 3.573628607165193e-05,
      "loss": 2.1057,
      "step": 138000
    },
    {
      "epoch": 1.51,
      "learning_rate": 3.56806709550612e-05,
      "loss": 2.1048,
      "step": 138500
    },
    {
      "epoch": 1.52,
      "learning_rate": 3.5624832484588585e-05,
      "loss": 2.1043,
      "step": 139000
    },
    {
      "epoch": 1.52,
      "learning_rate": 3.556921736799786e-05,
      "loss": 2.1041,
      "step": 139500
    },
    {
      "epoch": 1.53,
      "learning_rate": 3.551337889752524e-05,
      "loss": 2.0948,
      "step": 140000
    },
    {
      "epoch": 1.53,
      "eval_accuracy": 0.6133498725939791,
      "eval_loss": 2.0234375,
      "eval_runtime": 1220.9619,
      "eval_samples_per_second": 252.552,
      "eval_steps_per_second": 3.947,
      "step": 140000
    },
    {
      "epoch": 1.53,
      "learning_rate": 3.545776378093452e-05,
      "loss": 2.0964,
      "step": 140500
    },
    {
      "epoch": 1.54,
      "learning_rate": 3.5401925310461894e-05,
      "loss": 2.1022,
      "step": 141000
    },
    {
      "epoch": 1.55,
      "learning_rate": 3.5346310193871176e-05,
      "loss": 2.0929,
      "step": 141500
    },
    {
      "epoch": 1.55,
      "learning_rate": 3.529047172339855e-05,
      "loss": 2.0938,
      "step": 142000
    },
    {
      "epoch": 1.56,
      "learning_rate": 3.523485660680783e-05,
      "loss": 2.0947,
      "step": 142500
    },
    {
      "epoch": 1.56,
      "learning_rate": 3.517901813633521e-05,
      "loss": 2.0924,
      "step": 143000
    },
    {
      "epoch": 1.57,
      "learning_rate": 3.5123403019744485e-05,
      "loss": 2.096,
      "step": 143500
    },
    {
      "epoch": 1.57,
      "learning_rate": 3.506756454927187e-05,
      "loss": 2.0918,
      "step": 144000
    },
    {
      "epoch": 1.58,
      "learning_rate": 3.501194943268114e-05,
      "loss": 2.0927,
      "step": 144500
    },
    {
      "epoch": 1.58,
      "learning_rate": 3.4956110962208526e-05,
      "loss": 2.0903,
      "step": 145000
    },
    {
      "epoch": 1.59,
      "learning_rate": 3.4900495845617794e-05,
      "loss": 2.0883,
      "step": 145500
    },
    {
      "epoch": 1.59,
      "learning_rate": 3.4844657375145184e-05,
      "loss": 2.0866,
      "step": 146000
    },
    {
      "epoch": 1.6,
      "learning_rate": 3.478904225855445e-05,
      "loss": 2.0836,
      "step": 146500
    },
    {
      "epoch": 1.61,
      "learning_rate": 3.4733203788081836e-05,
      "loss": 2.0867,
      "step": 147000
    },
    {
      "epoch": 1.61,
      "learning_rate": 3.467758867149111e-05,
      "loss": 2.0763,
      "step": 147500
    },
    {
      "epoch": 1.62,
      "learning_rate": 3.4621750201018494e-05,
      "loss": 2.0821,
      "step": 148000
    },
    {
      "epoch": 1.62,
      "learning_rate": 3.456613508442777e-05,
      "loss": 2.0852,
      "step": 148500
    },
    {
      "epoch": 1.63,
      "learning_rate": 3.451029661395515e-05,
      "loss": 2.081,
      "step": 149000
    },
    {
      "epoch": 1.63,
      "learning_rate": 3.4454681497364427e-05,
      "loss": 2.0729,
      "step": 149500
    },
    {
      "epoch": 1.64,
      "learning_rate": 3.439884302689181e-05,
      "loss": 2.0748,
      "step": 150000
    },
    {
      "epoch": 1.64,
      "eval_accuracy": 0.6168509909854293,
      "eval_loss": 1.998046875,
      "eval_runtime": 1143.5239,
      "eval_samples_per_second": 269.654,
      "eval_steps_per_second": 4.214,
      "step": 150000
    },
    {
      "epoch": 1.64,
      "learning_rate": 3.4343227910301085e-05,
      "loss": 2.0759,
      "step": 150500
    },
    {
      "epoch": 1.65,
      "learning_rate": 3.428738943982847e-05,
      "loss": 2.0725,
      "step": 151000
    },
    {
      "epoch": 1.65,
      "learning_rate": 3.423177432323774e-05,
      "loss": 2.0669,
      "step": 151500
    },
    {
      "epoch": 1.66,
      "learning_rate": 3.4175935852765126e-05,
      "loss": 2.0715,
      "step": 152000
    },
    {
      "epoch": 1.67,
      "learning_rate": 3.4120320736174394e-05,
      "loss": 2.0689,
      "step": 152500
    },
    {
      "epoch": 1.67,
      "learning_rate": 3.406448226570178e-05,
      "loss": 2.0695,
      "step": 153000
    },
    {
      "epoch": 1.68,
      "learning_rate": 3.400886714911105e-05,
      "loss": 2.0675,
      "step": 153500
    },
    {
      "epoch": 1.68,
      "learning_rate": 3.3953028678638435e-05,
      "loss": 2.0664,
      "step": 154000
    },
    {
      "epoch": 1.69,
      "learning_rate": 3.389741356204771e-05,
      "loss": 2.0692,
      "step": 154500
    },
    {
      "epoch": 1.69,
      "learning_rate": 3.384157509157509e-05,
      "loss": 2.0679,
      "step": 155000
    },
    {
      "epoch": 1.7,
      "learning_rate": 3.378595997498437e-05,
      "loss": 2.0619,
      "step": 155500
    },
    {
      "epoch": 1.7,
      "learning_rate": 3.373012150451175e-05,
      "loss": 2.0656,
      "step": 156000
    },
    {
      "epoch": 1.71,
      "learning_rate": 3.3674506387921026e-05,
      "loss": 2.0608,
      "step": 156500
    },
    {
      "epoch": 1.72,
      "learning_rate": 3.36186679174484e-05,
      "loss": 2.056,
      "step": 157000
    },
    {
      "epoch": 1.72,
      "learning_rate": 3.356305280085768e-05,
      "loss": 2.0616,
      "step": 157500
    },
    {
      "epoch": 1.73,
      "learning_rate": 3.350721433038506e-05,
      "loss": 2.0541,
      "step": 158000
    },
    {
      "epoch": 1.73,
      "learning_rate": 3.3451599213794335e-05,
      "loss": 2.0608,
      "step": 158500
    },
    {
      "epoch": 1.74,
      "learning_rate": 3.339576074332172e-05,
      "loss": 2.0552,
      "step": 159000
    },
    {
      "epoch": 1.74,
      "learning_rate": 3.334014562673099e-05,
      "loss": 2.0551,
      "step": 159500
    },
    {
      "epoch": 1.75,
      "learning_rate": 3.3284307156258376e-05,
      "loss": 2.0572,
      "step": 160000
    },
    {
      "epoch": 1.75,
      "eval_accuracy": 0.6195138509619146,
      "eval_loss": 1.9755859375,
      "eval_runtime": 1218.1327,
      "eval_samples_per_second": 253.138,
      "eval_steps_per_second": 3.956,
      "step": 160000
    },
    {
      "epoch": 1.75,
      "learning_rate": 3.322869203966765e-05,
      "loss": 2.0571,
      "step": 160500
    },
    {
      "epoch": 1.76,
      "learning_rate": 3.3172853569195035e-05,
      "loss": 2.0501,
      "step": 161000
    },
    {
      "epoch": 1.76,
      "learning_rate": 3.311723845260431e-05,
      "loss": 2.0531,
      "step": 161500
    },
    {
      "epoch": 1.77,
      "learning_rate": 3.306139998213169e-05,
      "loss": 2.0503,
      "step": 162000
    },
    {
      "epoch": 1.78,
      "learning_rate": 3.300578486554097e-05,
      "loss": 2.0486,
      "step": 162500
    },
    {
      "epoch": 1.78,
      "learning_rate": 3.294994639506835e-05,
      "loss": 2.0453,
      "step": 163000
    },
    {
      "epoch": 1.79,
      "learning_rate": 3.289433127847762e-05,
      "loss": 2.0495,
      "step": 163500
    },
    {
      "epoch": 1.79,
      "learning_rate": 3.283849280800501e-05,
      "loss": 2.0512,
      "step": 164000
    },
    {
      "epoch": 1.8,
      "learning_rate": 3.278287769141428e-05,
      "loss": 2.0421,
      "step": 164500
    },
    {
      "epoch": 1.8,
      "learning_rate": 3.272703922094166e-05,
      "loss": 2.0428,
      "step": 165000
    },
    {
      "epoch": 1.81,
      "learning_rate": 3.2671424104350935e-05,
      "loss": 2.0441,
      "step": 165500
    },
    {
      "epoch": 1.81,
      "learning_rate": 3.261558563387832e-05,
      "loss": 2.0437,
      "step": 166000
    },
    {
      "epoch": 1.82,
      "learning_rate": 3.255997051728759e-05,
      "loss": 2.0416,
      "step": 166500
    },
    {
      "epoch": 1.82,
      "learning_rate": 3.2504132046814976e-05,
      "loss": 2.0395,
      "step": 167000
    },
    {
      "epoch": 1.83,
      "learning_rate": 3.244851693022425e-05,
      "loss": 2.0431,
      "step": 167500
    },
    {
      "epoch": 1.84,
      "learning_rate": 3.239267845975163e-05,
      "loss": 2.0385,
      "step": 168000
    },
    {
      "epoch": 1.84,
      "learning_rate": 3.233706334316091e-05,
      "loss": 2.0385,
      "step": 168500
    },
    {
      "epoch": 1.85,
      "learning_rate": 3.2281224872688285e-05,
      "loss": 2.0369,
      "step": 169000
    },
    {
      "epoch": 1.85,
      "learning_rate": 3.222560975609756e-05,
      "loss": 2.0395,
      "step": 169500
    },
    {
      "epoch": 1.86,
      "learning_rate": 3.216977128562494e-05,
      "loss": 2.0359,
      "step": 170000
    },
    {
      "epoch": 1.86,
      "eval_accuracy": 0.6225490703978083,
      "eval_loss": 1.955078125,
      "eval_runtime": 1231.0639,
      "eval_samples_per_second": 250.479,
      "eval_steps_per_second": 3.915,
      "step": 170000
    },
    {
      "epoch": 1.86,
      "learning_rate": 3.211415616903422e-05,
      "loss": 2.0289,
      "step": 170500
    },
    {
      "epoch": 1.87,
      "learning_rate": 3.20583176985616e-05,
      "loss": 2.0304,
      "step": 171000
    },
    {
      "epoch": 1.87,
      "learning_rate": 3.2002702581970876e-05,
      "loss": 2.0313,
      "step": 171500
    },
    {
      "epoch": 1.88,
      "learning_rate": 3.194686411149826e-05,
      "loss": 2.0323,
      "step": 172000
    },
    {
      "epoch": 1.88,
      "learning_rate": 3.1891248994907534e-05,
      "loss": 2.0289,
      "step": 172500
    },
    {
      "epoch": 1.89,
      "learning_rate": 3.183541052443492e-05,
      "loss": 2.0291,
      "step": 173000
    },
    {
      "epoch": 1.9,
      "learning_rate": 3.1779795407844185e-05,
      "loss": 2.0292,
      "step": 173500
    },
    {
      "epoch": 1.9,
      "learning_rate": 3.1723956937371575e-05,
      "loss": 2.0294,
      "step": 174000
    },
    {
      "epoch": 1.91,
      "learning_rate": 3.1668341820780843e-05,
      "loss": 2.0227,
      "step": 174500
    },
    {
      "epoch": 1.91,
      "learning_rate": 3.1612503350308233e-05,
      "loss": 2.0217,
      "step": 175000
    },
    {
      "epoch": 1.92,
      "learning_rate": 3.15568882337175e-05,
      "loss": 2.0263,
      "step": 175500
    },
    {
      "epoch": 1.92,
      "learning_rate": 3.150104976324489e-05,
      "loss": 2.0237,
      "step": 176000
    },
    {
      "epoch": 1.93,
      "learning_rate": 3.144543464665416e-05,
      "loss": 2.0225,
      "step": 176500
    },
    {
      "epoch": 1.93,
      "learning_rate": 3.138959617618154e-05,
      "loss": 2.0178,
      "step": 177000
    },
    {
      "epoch": 1.94,
      "learning_rate": 3.133398105959082e-05,
      "loss": 2.0148,
      "step": 177500
    },
    {
      "epoch": 1.94,
      "learning_rate": 3.12781425891182e-05,
      "loss": 2.0175,
      "step": 178000
    },
    {
      "epoch": 1.95,
      "learning_rate": 3.1222527472527476e-05,
      "loss": 2.0107,
      "step": 178500
    },
    {
      "epoch": 1.96,
      "learning_rate": 3.116668900205485e-05,
      "loss": 2.0172,
      "step": 179000
    },
    {
      "epoch": 1.96,
      "learning_rate": 3.1111073885464134e-05,
      "loss": 2.0201,
      "step": 179500
    },
    {
      "epoch": 1.97,
      "learning_rate": 3.105523541499151e-05,
      "loss": 2.0148,
      "step": 180000
    },
    {
      "epoch": 1.97,
      "eval_accuracy": 0.6250524947954239,
      "eval_loss": 1.9384765625,
      "eval_runtime": 1280.5256,
      "eval_samples_per_second": 240.804,
      "eval_steps_per_second": 3.763,
      "step": 180000
    },
    {
      "epoch": 1.97,
      "learning_rate": 3.099962029840079e-05,
      "loss": 2.0144,
      "step": 180500
    },
    {
      "epoch": 1.98,
      "learning_rate": 3.094378182792817e-05,
      "loss": 2.0074,
      "step": 181000
    },
    {
      "epoch": 1.98,
      "learning_rate": 3.088816671133744e-05,
      "loss": 2.0143,
      "step": 181500
    },
    {
      "epoch": 1.99,
      "learning_rate": 3.0832328240864826e-05,
      "loss": 2.0134,
      "step": 182000
    },
    {
      "epoch": 1.99,
      "learning_rate": 3.07767131242741e-05,
      "loss": 2.0085,
      "step": 182500
    },
    {
      "epoch": 2.0,
      "learning_rate": 3.0720874653801484e-05,
      "loss": 2.0101,
      "step": 183000
    },
    {
      "epoch": 2.0,
      "learning_rate": 3.066525953721076e-05,
      "loss": 2.004,
      "step": 183500
    },
    {
      "epoch": 2.01,
      "learning_rate": 3.060942106673814e-05,
      "loss": 2.0083,
      "step": 184000
    },
    {
      "epoch": 2.02,
      "learning_rate": 3.055380595014741e-05,
      "loss": 2.0067,
      "step": 184500
    },
    {
      "epoch": 2.02,
      "learning_rate": 3.04979674796748e-05,
      "loss": 2.0051,
      "step": 185000
    },
    {
      "epoch": 2.03,
      "learning_rate": 3.044235236308407e-05,
      "loss": 2.0021,
      "step": 185500
    },
    {
      "epoch": 2.03,
      "learning_rate": 3.0386513892611458e-05,
      "loss": 2.0043,
      "step": 186000
    },
    {
      "epoch": 2.04,
      "learning_rate": 3.0330898776020726e-05,
      "loss": 2.0063,
      "step": 186500
    },
    {
      "epoch": 2.04,
      "learning_rate": 3.0275060305548113e-05,
      "loss": 1.9993,
      "step": 187000
    },
    {
      "epoch": 2.05,
      "learning_rate": 3.0219445188957384e-05,
      "loss": 1.9985,
      "step": 187500
    },
    {
      "epoch": 2.05,
      "learning_rate": 3.0163606718484767e-05,
      "loss": 1.998,
      "step": 188000
    },
    {
      "epoch": 2.06,
      "learning_rate": 3.0107991601894042e-05,
      "loss": 1.9991,
      "step": 188500
    },
    {
      "epoch": 2.06,
      "learning_rate": 3.0052153131421422e-05,
      "loss": 1.9955,
      "step": 189000
    },
    {
      "epoch": 2.07,
      "learning_rate": 2.99965380148307e-05,
      "loss": 1.9956,
      "step": 189500
    },
    {
      "epoch": 2.08,
      "learning_rate": 2.994069954435808e-05,
      "loss": 1.9994,
      "step": 190000
    },
    {
      "epoch": 2.08,
      "eval_accuracy": 0.6274383613077523,
      "eval_loss": 1.921875,
      "eval_runtime": 1238.0942,
      "eval_samples_per_second": 249.057,
      "eval_steps_per_second": 3.892,
      "step": 190000
    },
    {
      "epoch": 2.08,
      "learning_rate": 2.988508442776736e-05,
      "loss": 1.9938,
      "step": 190500
    },
    {
      "epoch": 2.09,
      "learning_rate": 2.9829245957294738e-05,
      "loss": 1.9881,
      "step": 191000
    },
    {
      "epoch": 2.09,
      "learning_rate": 2.9773630840704013e-05,
      "loss": 1.9959,
      "step": 191500
    },
    {
      "epoch": 2.1,
      "learning_rate": 2.9717792370231396e-05,
      "loss": 1.9872,
      "step": 192000
    },
    {
      "epoch": 2.1,
      "learning_rate": 2.966217725364067e-05,
      "loss": 1.9908,
      "step": 192500
    },
    {
      "epoch": 2.11,
      "learning_rate": 2.9606338783168054e-05,
      "loss": 1.9894,
      "step": 193000
    },
    {
      "epoch": 2.11,
      "learning_rate": 2.955072366657733e-05,
      "loss": 1.9939,
      "step": 193500
    },
    {
      "epoch": 2.12,
      "learning_rate": 2.949488519610471e-05,
      "loss": 1.995,
      "step": 194000
    },
    {
      "epoch": 2.12,
      "learning_rate": 2.943927007951398e-05,
      "loss": 1.9915,
      "step": 194500
    },
    {
      "epoch": 2.13,
      "learning_rate": 2.9383431609041367e-05,
      "loss": 1.9917,
      "step": 195000
    },
    {
      "epoch": 2.14,
      "learning_rate": 2.932781649245064e-05,
      "loss": 1.9832,
      "step": 195500
    },
    {
      "epoch": 2.14,
      "learning_rate": 2.9271978021978025e-05,
      "loss": 1.9897,
      "step": 196000
    },
    {
      "epoch": 2.15,
      "learning_rate": 2.9216362905387296e-05,
      "loss": 1.9838,
      "step": 196500
    },
    {
      "epoch": 2.15,
      "learning_rate": 2.9160524434914683e-05,
      "loss": 1.986,
      "step": 197000
    },
    {
      "epoch": 2.16,
      "learning_rate": 2.9104909318323954e-05,
      "loss": 1.9864,
      "step": 197500
    },
    {
      "epoch": 2.16,
      "learning_rate": 2.904907084785134e-05,
      "loss": 1.9837,
      "step": 198000
    },
    {
      "epoch": 2.17,
      "learning_rate": 2.899345573126061e-05,
      "loss": 1.9817,
      "step": 198500
    },
    {
      "epoch": 2.17,
      "learning_rate": 2.8937617260787992e-05,
      "loss": 1.9793,
      "step": 199000
    },
    {
      "epoch": 2.18,
      "learning_rate": 2.8882002144197267e-05,
      "loss": 1.9817,
      "step": 199500
    },
    {
      "epoch": 2.18,
      "learning_rate": 2.882616367372465e-05,
      "loss": 1.9769,
      "step": 200000
    },
    {
      "epoch": 2.18,
      "eval_accuracy": 0.629700250907056,
      "eval_loss": 1.904296875,
      "eval_runtime": 1276.1988,
      "eval_samples_per_second": 241.621,
      "eval_steps_per_second": 3.776,
      "step": 200000
    },
    {
      "epoch": 2.19,
      "learning_rate": 2.8770548557133925e-05,
      "loss": 1.9835,
      "step": 200500
    },
    {
      "epoch": 2.2,
      "learning_rate": 2.8714710086661305e-05,
      "loss": 1.9843,
      "step": 201000
    },
    {
      "epoch": 2.2,
      "learning_rate": 2.8659094970070583e-05,
      "loss": 1.9766,
      "step": 201500
    },
    {
      "epoch": 2.21,
      "learning_rate": 2.8603256499597963e-05,
      "loss": 1.9834,
      "step": 202000
    },
    {
      "epoch": 2.21,
      "learning_rate": 2.854764138300724e-05,
      "loss": 1.9767,
      "step": 202500
    },
    {
      "epoch": 2.22,
      "learning_rate": 2.849180291253462e-05,
      "loss": 1.9767,
      "step": 203000
    },
    {
      "epoch": 2.22,
      "learning_rate": 2.8436187795943896e-05,
      "loss": 1.975,
      "step": 203500
    },
    {
      "epoch": 2.23,
      "learning_rate": 2.838034932547128e-05,
      "loss": 1.9758,
      "step": 204000
    },
    {
      "epoch": 2.23,
      "learning_rate": 2.832473420888055e-05,
      "loss": 1.9763,
      "step": 204500
    },
    {
      "epoch": 2.24,
      "learning_rate": 2.8268895738407937e-05,
      "loss": 1.969,
      "step": 205000
    },
    {
      "epoch": 2.24,
      "learning_rate": 2.8213280621817205e-05,
      "loss": 1.9721,
      "step": 205500
    },
    {
      "epoch": 2.25,
      "learning_rate": 2.8157442151344592e-05,
      "loss": 1.9696,
      "step": 206000
    },
    {
      "epoch": 2.26,
      "learning_rate": 2.8101827034753863e-05,
      "loss": 1.9675,
      "step": 206500
    },
    {
      "epoch": 2.26,
      "learning_rate": 2.804598856428125e-05,
      "loss": 1.9738,
      "step": 207000
    },
    {
      "epoch": 2.27,
      "learning_rate": 2.799037344769052e-05,
      "loss": 1.9686,
      "step": 207500
    },
    {
      "epoch": 2.27,
      "learning_rate": 2.7934534977217908e-05,
      "loss": 1.9699,
      "step": 208000
    },
    {
      "epoch": 2.28,
      "learning_rate": 2.787891986062718e-05,
      "loss": 1.9712,
      "step": 208500
    },
    {
      "epoch": 2.28,
      "learning_rate": 2.782308139015456e-05,
      "loss": 1.9679,
      "step": 209000
    },
    {
      "epoch": 2.29,
      "learning_rate": 2.7767466273563837e-05,
      "loss": 1.9679,
      "step": 209500
    },
    {
      "epoch": 2.29,
      "learning_rate": 2.7711627803091217e-05,
      "loss": 1.9705,
      "step": 210000
    },
    {
      "epoch": 2.29,
      "eval_accuracy": 0.6316547044841081,
      "eval_loss": 1.8916015625,
      "eval_runtime": 1400.444,
      "eval_samples_per_second": 220.184,
      "eval_steps_per_second": 3.441,
      "step": 210000
    },
    {
      "epoch": 2.3,
      "learning_rate": 2.7656012686500492e-05,
      "loss": 1.9671,
      "step": 210500
    },
    {
      "epoch": 2.3,
      "learning_rate": 2.7600174216027875e-05,
      "loss": 1.9677,
      "step": 211000
    },
    {
      "epoch": 2.31,
      "learning_rate": 2.754455909943715e-05,
      "loss": 1.9629,
      "step": 211500
    },
    {
      "epoch": 2.32,
      "learning_rate": 2.7488720628964533e-05,
      "loss": 1.9631,
      "step": 212000
    },
    {
      "epoch": 2.32,
      "learning_rate": 2.7433105512373808e-05,
      "loss": 1.967,
      "step": 212500
    },
    {
      "epoch": 2.33,
      "learning_rate": 2.7377267041901188e-05,
      "loss": 1.9629,
      "step": 213000
    },
    {
      "epoch": 2.33,
      "learning_rate": 2.7321651925310466e-05,
      "loss": 1.9636,
      "step": 213500
    },
    {
      "epoch": 2.34,
      "learning_rate": 2.7265813454837846e-05,
      "loss": 1.9572,
      "step": 214000
    },
    {
      "epoch": 2.34,
      "learning_rate": 2.7210198338247124e-05,
      "loss": 1.9611,
      "step": 214500
    },
    {
      "epoch": 2.35,
      "learning_rate": 2.7154359867774504e-05,
      "loss": 1.9565,
      "step": 215000
    },
    {
      "epoch": 2.35,
      "learning_rate": 2.7098744751183775e-05,
      "loss": 1.9583,
      "step": 215500
    },
    {
      "epoch": 2.36,
      "learning_rate": 2.7042906280711162e-05,
      "loss": 1.9562,
      "step": 216000
    },
    {
      "epoch": 2.36,
      "learning_rate": 2.6987291164120433e-05,
      "loss": 1.9561,
      "step": 216500
    },
    {
      "epoch": 2.37,
      "learning_rate": 2.693145269364782e-05,
      "loss": 1.9557,
      "step": 217000
    },
    {
      "epoch": 2.38,
| "learning_rate": 2.6875837577057088e-05, | |
| "loss": 1.9502, | |
| "step": 217500 | |
| }, | |
| { | |
| "epoch": 2.38, | |
| "learning_rate": 2.6819999106584475e-05, | |
| "loss": 1.9551, | |
| "step": 218000 | |
| }, | |
| { | |
| "epoch": 2.39, | |
| "learning_rate": 2.6764383989993746e-05, | |
| "loss": 1.9553, | |
| "step": 218500 | |
| }, | |
| { | |
| "epoch": 2.39, | |
| "learning_rate": 2.6708545519521133e-05, | |
| "loss": 1.9523, | |
| "step": 219000 | |
| }, | |
| { | |
| "epoch": 2.4, | |
| "learning_rate": 2.6652930402930404e-05, | |
| "loss": 1.9503, | |
| "step": 219500 | |
| }, | |
| { | |
| "epoch": 2.4, | |
| "learning_rate": 2.6597091932457784e-05, | |
| "loss": 1.9557, | |
| "step": 220000 | |
| }, | |
| { | |
| "epoch": 2.4, | |
| "eval_accuracy": 0.6337785581735893, | |
| "eval_loss": 1.8779296875, | |
| "eval_runtime": 1159.2961, | |
| "eval_samples_per_second": 265.986, | |
| "eval_steps_per_second": 4.157, | |
| "step": 220000 | |
| }, | |
| { | |
| "epoch": 2.41, | |
| "learning_rate": 2.6541476815867062e-05, | |
| "loss": 1.9482, | |
| "step": 220500 | |
| }, | |
| { | |
| "epoch": 2.41, | |
| "learning_rate": 2.6485638345394442e-05, | |
| "loss": 1.9543, | |
| "step": 221000 | |
| }, | |
| { | |
| "epoch": 2.42, | |
| "learning_rate": 2.643002322880372e-05, | |
| "loss": 1.9518, | |
| "step": 221500 | |
| }, | |
| { | |
| "epoch": 2.43, | |
| "learning_rate": 2.63741847583311e-05, | |
| "loss": 1.9449, | |
| "step": 222000 | |
| }, | |
| { | |
| "epoch": 2.43, | |
| "learning_rate": 2.6318569641740375e-05, | |
| "loss": 1.9454, | |
| "step": 222500 | |
| }, | |
| { | |
| "epoch": 2.44, | |
| "learning_rate": 2.6262731171267758e-05, | |
| "loss": 1.9453, | |
| "step": 223000 | |
| }, | |
| { | |
| "epoch": 2.44, | |
| "learning_rate": 2.6207116054677033e-05, | |
| "loss": 1.9497, | |
| "step": 223500 | |
| }, | |
| { | |
| "epoch": 2.45, | |
| "learning_rate": 2.6151277584204416e-05, | |
| "loss": 1.9492, | |
| "step": 224000 | |
| }, | |
| { | |
| "epoch": 2.45, | |
| "learning_rate": 2.609566246761369e-05, | |
| "loss": 1.9471, | |
| "step": 224500 | |
| }, | |
| { | |
| "epoch": 2.46, | |
| "learning_rate": 2.603982399714107e-05, | |
| "loss": 1.9475, | |
| "step": 225000 | |
| }, | |
| { | |
| "epoch": 2.46, | |
| "learning_rate": 2.5984208880550342e-05, | |
| "loss": 1.9405, | |
| "step": 225500 | |
| }, | |
| { | |
| "epoch": 2.47, | |
| "learning_rate": 2.592837041007773e-05, | |
| "loss": 1.9433, | |
| "step": 226000 | |
| }, | |
| { | |
| "epoch": 2.47, | |
| "learning_rate": 2.5872755293487e-05, | |
| "loss": 1.9449, | |
| "step": 226500 | |
| }, | |
| { | |
| "epoch": 2.48, | |
| "learning_rate": 2.5816916823014387e-05, | |
| "loss": 1.9424, | |
| "step": 227000 | |
| }, | |
| { | |
| "epoch": 2.49, | |
| "learning_rate": 2.5761301706423658e-05, | |
| "loss": 1.9457, | |
| "step": 227500 | |
| }, | |
| { | |
| "epoch": 2.49, | |
| "learning_rate": 2.5705463235951045e-05, | |
| "loss": 1.9398, | |
| "step": 228000 | |
| }, | |
| { | |
| "epoch": 2.5, | |
| "learning_rate": 2.5649848119360316e-05, | |
| "loss": 1.9355, | |
| "step": 228500 | |
| }, | |
| { | |
| "epoch": 2.5, | |
| "learning_rate": 2.5594009648887703e-05, | |
| "loss": 1.9387, | |
| "step": 229000 | |
| }, | |
| { | |
| "epoch": 2.51, | |
| "learning_rate": 2.553839453229697e-05, | |
| "loss": 1.9432, | |
| "step": 229500 | |
| }, | |
| { | |
| "epoch": 2.51, | |
| "learning_rate": 2.5482556061824354e-05, | |
| "loss": 1.9407, | |
| "step": 230000 | |
| }, | |
| { | |
| "epoch": 2.51, | |
| "eval_accuracy": 0.6353719982942662, | |
| "eval_loss": 1.8642578125, | |
| "eval_runtime": 1486.9925, | |
| "eval_samples_per_second": 207.369, | |
| "eval_steps_per_second": 3.241, | |
| "step": 230000 | |
| }, | |
| { | |
| "epoch": 2.52, | |
| "learning_rate": 2.542694094523363e-05, | |
| "loss": 1.9397, | |
| "step": 230500 | |
| }, | |
| { | |
| "epoch": 2.52, | |
| "learning_rate": 2.5371102474761012e-05, | |
| "loss": 1.9367, | |
| "step": 231000 | |
| }, | |
| { | |
| "epoch": 2.53, | |
| "learning_rate": 2.5315487358170287e-05, | |
| "loss": 1.9436, | |
| "step": 231500 | |
| }, | |
| { | |
| "epoch": 2.53, | |
| "learning_rate": 2.5259648887697667e-05, | |
| "loss": 1.9399, | |
| "step": 232000 | |
| }, | |
| { | |
| "epoch": 2.54, | |
| "learning_rate": 2.5204033771106945e-05, | |
| "loss": 1.9365, | |
| "step": 232500 | |
| }, | |
| { | |
| "epoch": 2.55, | |
| "learning_rate": 2.5148195300634325e-05, | |
| "loss": 1.9371, | |
| "step": 233000 | |
| }, | |
| { | |
| "epoch": 2.55, | |
| "learning_rate": 2.5092580184043603e-05, | |
| "loss": 1.9315, | |
| "step": 233500 | |
| }, | |
| { | |
| "epoch": 2.56, | |
| "learning_rate": 2.5036741713570983e-05, | |
| "loss": 1.9344, | |
| "step": 234000 | |
| }, | |
| { | |
| "epoch": 2.56, | |
| "learning_rate": 2.4981126596980258e-05, | |
| "loss": 1.9403, | |
| "step": 234500 | |
| }, | |
| { | |
| "epoch": 2.57, | |
| "learning_rate": 2.492528812650764e-05, | |
| "loss": 1.9395, | |
| "step": 235000 | |
| }, | |
| { | |
| "epoch": 2.57, | |
| "learning_rate": 2.4869673009916912e-05, | |
| "loss": 1.938, | |
| "step": 235500 | |
| }, | |
| { | |
| "epoch": 2.58, | |
| "learning_rate": 2.48138345394443e-05, | |
| "loss": 1.9306, | |
| "step": 236000 | |
| }, | |
| { | |
| "epoch": 2.58, | |
| "learning_rate": 2.475821942285357e-05, | |
| "loss": 1.93, | |
| "step": 236500 | |
| }, | |
| { | |
| "epoch": 2.59, | |
| "learning_rate": 2.4702380952380953e-05, | |
| "loss": 1.9341, | |
| "step": 237000 | |
| }, | |
| { | |
| "epoch": 2.59, | |
| "learning_rate": 2.4646765835790228e-05, | |
| "loss": 1.9329, | |
| "step": 237500 | |
| }, | |
| { | |
| "epoch": 2.6, | |
| "learning_rate": 2.4590927365317608e-05, | |
| "loss": 1.9327, | |
| "step": 238000 | |
| }, | |
| { | |
| "epoch": 2.61, | |
| "learning_rate": 2.4535312248726883e-05, | |
| "loss": 1.9281, | |
| "step": 238500 | |
| }, | |
| { | |
| "epoch": 2.61, | |
| "learning_rate": 2.4479473778254266e-05, | |
| "loss": 1.9326, | |
| "step": 239000 | |
| }, | |
| { | |
| "epoch": 2.62, | |
| "learning_rate": 2.442385866166354e-05, | |
| "loss": 1.927, | |
| "step": 239500 | |
| }, | |
| { | |
| "epoch": 2.62, | |
| "learning_rate": 2.4368020191190924e-05, | |
| "loss": 1.9307, | |
| "step": 240000 | |
| }, | |
| { | |
| "epoch": 2.62, | |
| "eval_accuracy": 0.6371893215095429, | |
| "eval_loss": 1.8525390625, | |
| "eval_runtime": 1314.8093, | |
| "eval_samples_per_second": 234.525, | |
| "eval_steps_per_second": 3.665, | |
| "step": 240000 | |
| }, | |
| { | |
| "epoch": 2.63, | |
| "learning_rate": 2.43124050746002e-05, | |
| "loss": 1.9308, | |
| "step": 240500 | |
| }, | |
| { | |
| "epoch": 2.63, | |
| "learning_rate": 2.4256566604127582e-05, | |
| "loss": 1.9299, | |
| "step": 241000 | |
| }, | |
| { | |
| "epoch": 2.64, | |
| "learning_rate": 2.4200951487536854e-05, | |
| "loss": 1.9282, | |
| "step": 241500 | |
| }, | |
| { | |
| "epoch": 2.64, | |
| "learning_rate": 2.414511301706424e-05, | |
| "loss": 1.9239, | |
| "step": 242000 | |
| }, | |
| { | |
| "epoch": 2.65, | |
| "learning_rate": 2.408949790047351e-05, | |
| "loss": 1.9211, | |
| "step": 242500 | |
| }, | |
| { | |
| "epoch": 2.65, | |
| "learning_rate": 2.4033659430000895e-05, | |
| "loss": 1.9273, | |
| "step": 243000 | |
| }, | |
| { | |
| "epoch": 2.66, | |
| "learning_rate": 2.397804431341017e-05, | |
| "loss": 1.9244, | |
| "step": 243500 | |
| }, | |
| { | |
| "epoch": 2.67, | |
| "learning_rate": 2.392220584293755e-05, | |
| "loss": 1.9234, | |
| "step": 244000 | |
| }, | |
| { | |
| "epoch": 2.67, | |
| "learning_rate": 2.3866590726346824e-05, | |
| "loss": 1.9232, | |
| "step": 244500 | |
| }, | |
| { | |
| "epoch": 2.68, | |
| "learning_rate": 2.3810752255874208e-05, | |
| "loss": 1.9286, | |
| "step": 245000 | |
| }, | |
| { | |
| "epoch": 2.68, | |
| "learning_rate": 2.3755137139283482e-05, | |
| "loss": 1.9197, | |
| "step": 245500 | |
| }, | |
| { | |
| "epoch": 2.69, | |
| "learning_rate": 2.3699298668810866e-05, | |
| "loss": 1.9198, | |
| "step": 246000 | |
| }, | |
| { | |
| "epoch": 2.69, | |
| "learning_rate": 2.364368355222014e-05, | |
| "loss": 1.9207, | |
| "step": 246500 | |
| }, | |
| { | |
| "epoch": 2.7, | |
| "learning_rate": 2.3587845081747524e-05, | |
| "loss": 1.9256, | |
| "step": 247000 | |
| }, | |
| { | |
| "epoch": 2.7, | |
| "learning_rate": 2.3532229965156795e-05, | |
| "loss": 1.9135, | |
| "step": 247500 | |
| }, | |
| { | |
| "epoch": 2.71, | |
| "learning_rate": 2.347639149468418e-05, | |
| "loss": 1.9166, | |
| "step": 248000 | |
| }, | |
| { | |
| "epoch": 2.71, | |
| "learning_rate": 2.3420776378093453e-05, | |
| "loss": 1.9219, | |
| "step": 248500 | |
| }, | |
| { | |
| "epoch": 2.72, | |
| "learning_rate": 2.3364937907620836e-05, | |
| "loss": 1.9193, | |
| "step": 249000 | |
| }, | |
| { | |
| "epoch": 2.73, | |
| "learning_rate": 2.3309322791030108e-05, | |
| "loss": 1.9187, | |
| "step": 249500 | |
| }, | |
| { | |
| "epoch": 2.73, | |
| "learning_rate": 2.325348432055749e-05, | |
| "loss": 1.9186, | |
| "step": 250000 | |
| }, | |
| { | |
| "epoch": 2.73, | |
| "eval_accuracy": 0.6388078472748526, | |
| "eval_loss": 1.8408203125, | |
| "eval_runtime": 1109.3427, | |
| "eval_samples_per_second": 277.963, | |
| "eval_steps_per_second": 4.344, | |
| "step": 250000 | |
| }, | |
| { | |
| "epoch": 2.74, | |
| "learning_rate": 2.3197645850084877e-05, | |
| "loss": 1.9138, | |
| "step": 250500 | |
| }, | |
| { | |
| "epoch": 2.74, | |
| "learning_rate": 2.314203073349415e-05, | |
| "loss": 1.9199, | |
| "step": 251000 | |
| }, | |
| { | |
| "epoch": 2.75, | |
| "learning_rate": 2.3086192263021532e-05, | |
| "loss": 1.9143, | |
| "step": 251500 | |
| }, | |
| { | |
| "epoch": 2.75, | |
| "learning_rate": 2.3030577146430807e-05, | |
| "loss": 1.911, | |
| "step": 252000 | |
| }, | |
| { | |
| "epoch": 2.76, | |
| "learning_rate": 2.297473867595819e-05, | |
| "loss": 1.9132, | |
| "step": 252500 | |
| }, | |
| { | |
| "epoch": 2.76, | |
| "learning_rate": 2.2919123559367465e-05, | |
| "loss": 1.9185, | |
| "step": 253000 | |
| }, | |
| { | |
| "epoch": 2.77, | |
| "learning_rate": 2.2863285088894845e-05, | |
| "loss": 1.9123, | |
| "step": 253500 | |
| }, | |
| { | |
| "epoch": 2.77, | |
| "learning_rate": 2.280766997230412e-05, | |
| "loss": 1.9163, | |
| "step": 254000 | |
| }, | |
| { | |
| "epoch": 2.78, | |
| "learning_rate": 2.2751831501831503e-05, | |
| "loss": 1.9149, | |
| "step": 254500 | |
| }, | |
| { | |
| "epoch": 2.79, | |
| "learning_rate": 2.2696216385240778e-05, | |
| "loss": 1.9139, | |
| "step": 255000 | |
| }, | |
| { | |
| "epoch": 2.79, | |
| "learning_rate": 2.264037791476816e-05, | |
| "loss": 1.9168, | |
| "step": 255500 | |
| }, | |
| { | |
| "epoch": 2.8, | |
| "learning_rate": 2.2584762798177432e-05, | |
| "loss": 1.9118, | |
| "step": 256000 | |
| }, | |
| { | |
| "epoch": 2.8, | |
| "learning_rate": 2.252892432770482e-05, | |
| "loss": 1.9113, | |
| "step": 256500 | |
| }, | |
| { | |
| "epoch": 2.81, | |
| "learning_rate": 2.247330921111409e-05, | |
| "loss": 1.9096, | |
| "step": 257000 | |
| }, | |
| { | |
| "epoch": 2.81, | |
| "learning_rate": 2.2417470740641474e-05, | |
| "loss": 1.9077, | |
| "step": 257500 | |
| }, | |
| { | |
| "epoch": 2.82, | |
| "learning_rate": 2.236185562405075e-05, | |
| "loss": 1.9144, | |
| "step": 258000 | |
| }, | |
| { | |
| "epoch": 2.82, | |
| "learning_rate": 2.2306017153578128e-05, | |
| "loss": 1.9067, | |
| "step": 258500 | |
| }, | |
| { | |
| "epoch": 2.83, | |
| "learning_rate": 2.2250402036987403e-05, | |
| "loss": 1.9135, | |
| "step": 259000 | |
| }, | |
| { | |
| "epoch": 2.83, | |
| "learning_rate": 2.2194563566514786e-05, | |
| "loss": 1.9104, | |
| "step": 259500 | |
| }, | |
| { | |
| "epoch": 2.84, | |
| "learning_rate": 2.213894844992406e-05, | |
| "loss": 1.9114, | |
| "step": 260000 | |
| }, | |
| { | |
| "epoch": 2.84, | |
| "eval_accuracy": 0.6401361780429158, | |
| "eval_loss": 1.83203125, | |
| "eval_runtime": 1109.6609, | |
| "eval_samples_per_second": 277.883, | |
| "eval_steps_per_second": 4.343, | |
| "step": 260000 | |
| }, | |
| { | |
| "epoch": 2.85, | |
| "learning_rate": 2.2083109979451444e-05, | |
| "loss": 1.905, | |
| "step": 260500 | |
| }, | |
| { | |
| "epoch": 2.85, | |
| "learning_rate": 2.202749486286072e-05, | |
| "loss": 1.9056, | |
| "step": 261000 | |
| }, | |
| { | |
| "epoch": 2.86, | |
| "learning_rate": 2.1971656392388102e-05, | |
| "loss": 1.9107, | |
| "step": 261500 | |
| }, | |
| { | |
| "epoch": 2.86, | |
| "learning_rate": 2.1916041275797374e-05, | |
| "loss": 1.9076, | |
| "step": 262000 | |
| }, | |
| { | |
| "epoch": 2.87, | |
| "learning_rate": 2.186020280532476e-05, | |
| "loss": 1.9032, | |
| "step": 262500 | |
| }, | |
| { | |
| "epoch": 2.87, | |
| "learning_rate": 2.1804587688734032e-05, | |
| "loss": 1.8933, | |
| "step": 263000 | |
| }, | |
| { | |
| "epoch": 2.88, | |
| "learning_rate": 2.1748749218261415e-05, | |
| "loss": 1.9051, | |
| "step": 263500 | |
| }, | |
| { | |
| "epoch": 2.88, | |
| "learning_rate": 2.1693134101670686e-05, | |
| "loss": 1.902, | |
| "step": 264000 | |
| }, | |
| { | |
| "epoch": 2.89, | |
| "learning_rate": 2.163729563119807e-05, | |
| "loss": 1.898, | |
| "step": 264500 | |
| }, | |
| { | |
| "epoch": 2.89, | |
| "learning_rate": 2.1581680514607344e-05, | |
| "loss": 1.9062, | |
| "step": 265000 | |
| }, | |
| { | |
| "epoch": 2.9, | |
| "learning_rate": 2.1525842044134728e-05, | |
| "loss": 1.9064, | |
| "step": 265500 | |
| }, | |
| { | |
| "epoch": 2.91, | |
| "learning_rate": 2.1470226927544002e-05, | |
| "loss": 1.9038, | |
| "step": 266000 | |
| }, | |
| { | |
| "epoch": 2.91, | |
| "learning_rate": 2.1414388457071386e-05, | |
| "loss": 1.8987, | |
| "step": 266500 | |
| }, | |
| { | |
| "epoch": 2.92, | |
| "learning_rate": 2.135877334048066e-05, | |
| "loss": 1.8978, | |
| "step": 267000 | |
| }, | |
| { | |
| "epoch": 2.92, | |
| "learning_rate": 2.1302934870008044e-05, | |
| "loss": 1.8959, | |
| "step": 267500 | |
| }, | |
| { | |
| "epoch": 2.93, | |
| "learning_rate": 2.1247319753417315e-05, | |
| "loss": 1.8968, | |
| "step": 268000 | |
| }, | |
| { | |
| "epoch": 2.93, | |
| "learning_rate": 2.11914812829447e-05, | |
| "loss": 1.8985, | |
| "step": 268500 | |
| }, | |
| { | |
| "epoch": 2.94, | |
| "learning_rate": 2.113586616635397e-05, | |
| "loss": 1.899, | |
| "step": 269000 | |
| }, | |
| { | |
| "epoch": 2.94, | |
| "learning_rate": 2.1080027695881356e-05, | |
| "loss": 1.8932, | |
| "step": 269500 | |
| }, | |
| { | |
| "epoch": 2.95, | |
| "learning_rate": 2.1024412579290628e-05, | |
| "loss": 1.896, | |
| "step": 270000 | |
| }, | |
| { | |
| "epoch": 2.95, | |
| "eval_accuracy": 0.6418558502033883, | |
| "eval_loss": 1.8212890625, | |
| "eval_runtime": 1100.3529, | |
| "eval_samples_per_second": 280.234, | |
| "eval_steps_per_second": 4.38, | |
| "step": 270000 | |
| }, | |
| { | |
| "epoch": 2.95, | |
| "learning_rate": 2.096857410881801e-05, | |
| "loss": 1.901, | |
| "step": 270500 | |
| }, | |
| { | |
| "epoch": 2.96, | |
| "learning_rate": 2.0912958992227286e-05, | |
| "loss": 1.8951, | |
| "step": 271000 | |
| }, | |
| { | |
| "epoch": 2.97, | |
| "learning_rate": 2.085712052175467e-05, | |
| "loss": 1.8921, | |
| "step": 271500 | |
| }, | |
| { | |
| "epoch": 2.97, | |
| "learning_rate": 2.0801505405163944e-05, | |
| "loss": 1.8882, | |
| "step": 272000 | |
| }, | |
| { | |
| "epoch": 2.98, | |
| "learning_rate": 2.0745666934691327e-05, | |
| "loss": 1.89, | |
| "step": 272500 | |
| }, | |
| { | |
| "epoch": 2.98, | |
| "learning_rate": 2.0690051818100602e-05, | |
| "loss": 1.8966, | |
| "step": 273000 | |
| }, | |
| { | |
| "epoch": 2.99, | |
| "learning_rate": 2.0634213347627982e-05, | |
| "loss": 1.8921, | |
| "step": 273500 | |
| }, | |
| { | |
| "epoch": 2.99, | |
| "learning_rate": 2.0578598231037257e-05, | |
| "loss": 1.893, | |
| "step": 274000 | |
| }, | |
| { | |
| "epoch": 3.0, | |
| "learning_rate": 2.052275976056464e-05, | |
| "loss": 1.892, | |
| "step": 274500 | |
| }, | |
| { | |
| "epoch": 3.0, | |
| "learning_rate": 2.046714464397391e-05, | |
| "loss": 1.8935, | |
| "step": 275000 | |
| }, | |
| { | |
| "epoch": 3.01, | |
| "learning_rate": 2.0411306173501298e-05, | |
| "loss": 1.886, | |
| "step": 275500 | |
| }, | |
| { | |
| "epoch": 3.01, | |
| "learning_rate": 2.035569105691057e-05, | |
| "loss": 1.8924, | |
| "step": 276000 | |
| }, | |
| { | |
| "epoch": 3.02, | |
| "learning_rate": 2.0299852586437952e-05, | |
| "loss": 1.8896, | |
| "step": 276500 | |
| }, | |
| { | |
| "epoch": 3.03, | |
| "learning_rate": 2.0244237469847227e-05, | |
| "loss": 1.887, | |
| "step": 277000 | |
| }, | |
| { | |
| "epoch": 3.03, | |
| "learning_rate": 2.018839899937461e-05, | |
| "loss": 1.8863, | |
| "step": 277500 | |
| }, | |
| { | |
| "epoch": 3.04, | |
| "learning_rate": 2.0132783882783885e-05, | |
| "loss": 1.8886, | |
| "step": 278000 | |
| }, | |
| { | |
| "epoch": 3.04, | |
| "learning_rate": 2.007694541231127e-05, | |
| "loss": 1.8867, | |
| "step": 278500 | |
| }, | |
| { | |
| "epoch": 3.05, | |
| "learning_rate": 2.0021330295720543e-05, | |
| "loss": 1.8913, | |
| "step": 279000 | |
| }, | |
| { | |
| "epoch": 3.05, | |
| "learning_rate": 1.9965491825247923e-05, | |
| "loss": 1.8878, | |
| "step": 279500 | |
| }, | |
| { | |
| "epoch": 3.06, | |
| "learning_rate": 1.9909876708657198e-05, | |
| "loss": 1.8857, | |
| "step": 280000 | |
| }, | |
| { | |
| "epoch": 3.06, | |
| "eval_accuracy": 0.6433153812116338, | |
| "eval_loss": 1.8115234375, | |
| "eval_runtime": 1118.8118, | |
| "eval_samples_per_second": 275.61, | |
| "eval_steps_per_second": 4.307, | |
| "step": 280000 | |
| }, | |
| { | |
| "epoch": 3.06, | |
| "learning_rate": 1.985403823818458e-05, | |
| "loss": 1.8914, | |
| "step": 280500 | |
| }, | |
| { | |
| "epoch": 3.07, | |
| "learning_rate": 1.9798423121593853e-05, | |
| "loss": 1.8881, | |
| "step": 281000 | |
| }, | |
| { | |
| "epoch": 3.08, | |
| "learning_rate": 1.974258465112124e-05, | |
| "loss": 1.8848, | |
| "step": 281500 | |
| }, | |
| { | |
| "epoch": 3.08, | |
| "learning_rate": 1.968696953453051e-05, | |
| "loss": 1.8866, | |
| "step": 282000 | |
| }, | |
| { | |
| "epoch": 3.09, | |
| "learning_rate": 1.9631131064057894e-05, | |
| "loss": 1.8775, | |
| "step": 282500 | |
| }, | |
| { | |
| "epoch": 3.09, | |
| "learning_rate": 1.957551594746717e-05, | |
| "loss": 1.879, | |
| "step": 283000 | |
| }, | |
| { | |
| "epoch": 3.1, | |
| "learning_rate": 1.9519677476994552e-05, | |
| "loss": 1.8776, | |
| "step": 283500 | |
| }, | |
| { | |
| "epoch": 3.1, | |
| "learning_rate": 1.9464062360403827e-05, | |
| "loss": 1.8828, | |
| "step": 284000 | |
| }, | |
| { | |
| "epoch": 3.11, | |
| "learning_rate": 1.9408223889931207e-05, | |
| "loss": 1.8768, | |
| "step": 284500 | |
| }, | |
| { | |
| "epoch": 3.11, | |
| "learning_rate": 1.935260877334048e-05, | |
| "loss": 1.8815, | |
| "step": 285000 | |
| }, | |
| { | |
| "epoch": 3.12, | |
| "learning_rate": 1.9296770302867865e-05, | |
| "loss": 1.884, | |
| "step": 285500 | |
| }, | |
| { | |
| "epoch": 3.12, | |
| "learning_rate": 1.924115518627714e-05, | |
| "loss": 1.8801, | |
| "step": 286000 | |
| }, | |
| { | |
| "epoch": 3.13, | |
| "learning_rate": 1.9185316715804523e-05, | |
| "loss": 1.8835, | |
| "step": 286500 | |
| }, | |
| { | |
| "epoch": 3.14, | |
| "learning_rate": 1.9129701599213794e-05, | |
| "loss": 1.8803, | |
| "step": 287000 | |
| }, | |
| { | |
| "epoch": 3.14, | |
| "learning_rate": 1.907386312874118e-05, | |
| "loss": 1.8807, | |
| "step": 287500 | |
| }, | |
| { | |
| "epoch": 3.15, | |
| "learning_rate": 1.9018248012150452e-05, | |
| "loss": 1.8786, | |
| "step": 288000 | |
| }, | |
| { | |
| "epoch": 3.15, | |
| "learning_rate": 1.8962409541677835e-05, | |
| "loss": 1.8704, | |
| "step": 288500 | |
| }, | |
| { | |
| "epoch": 3.16, | |
| "learning_rate": 1.890679442508711e-05, | |
| "loss": 1.876, | |
| "step": 289000 | |
| }, | |
| { | |
| "epoch": 3.16, | |
| "learning_rate": 1.885095595461449e-05, | |
| "loss": 1.8795, | |
| "step": 289500 | |
| }, | |
| { | |
| "epoch": 3.17, | |
| "learning_rate": 1.8795340838023765e-05, | |
| "loss": 1.8752, | |
| "step": 290000 | |
| }, | |
| { | |
| "epoch": 3.17, | |
| "eval_accuracy": 0.6442635719569795, | |
| "eval_loss": 1.8037109375, | |
| "eval_runtime": 1091.3905, | |
| "eval_samples_per_second": 282.535, | |
| "eval_steps_per_second": 4.415, | |
| "step": 290000 | |
| }, | |
| { | |
| "epoch": 3.17, | |
| "learning_rate": 1.8739502367551148e-05, | |
| "loss": 1.8821, | |
| "step": 290500 | |
| }, | |
| { | |
| "epoch": 3.18, | |
| "learning_rate": 1.8683887250960423e-05, | |
| "loss": 1.8784, | |
| "step": 291000 | |
| }, | |
| { | |
| "epoch": 3.18, | |
| "learning_rate": 1.8628048780487806e-05, | |
| "loss": 1.8773, | |
| "step": 291500 | |
| }, | |
| { | |
| "epoch": 3.19, | |
| "learning_rate": 1.857243366389708e-05, | |
| "loss": 1.8748, | |
| "step": 292000 | |
| }, | |
| { | |
| "epoch": 3.2, | |
| "learning_rate": 1.8516595193424464e-05, | |
| "loss": 1.8694, | |
| "step": 292500 | |
| }, | |
| { | |
| "epoch": 3.2, | |
| "learning_rate": 1.8460980076833735e-05, | |
| "loss": 1.8677, | |
| "step": 293000 | |
| }, | |
| { | |
| "epoch": 3.21, | |
| "learning_rate": 1.8405141606361122e-05, | |
| "loss": 1.8778, | |
| "step": 293500 | |
| }, | |
| { | |
| "epoch": 3.21, | |
| "learning_rate": 1.8349526489770393e-05, | |
| "loss": 1.8749, | |
| "step": 294000 | |
| }, | |
| { | |
| "epoch": 3.22, | |
| "learning_rate": 1.8293688019297773e-05, | |
| "loss": 1.8698, | |
| "step": 294500 | |
| }, | |
| { | |
| "epoch": 3.22, | |
| "learning_rate": 1.823807290270705e-05, | |
| "loss": 1.8729, | |
| "step": 295000 | |
| }, | |
| { | |
| "epoch": 3.23, | |
| "learning_rate": 1.818223443223443e-05, | |
| "loss": 1.8699, | |
| "step": 295500 | |
| }, | |
| { | |
| "epoch": 3.23, | |
| "learning_rate": 1.8126619315643706e-05, | |
| "loss": 1.872, | |
| "step": 296000 | |
| }, | |
| { | |
| "epoch": 3.24, | |
| "learning_rate": 1.807078084517109e-05, | |
| "loss": 1.8648, | |
| "step": 296500 | |
| }, | |
| { | |
| "epoch": 3.24, | |
| "learning_rate": 1.8015165728580364e-05, | |
| "loss": 1.8712, | |
| "step": 297000 | |
| }, | |
| { | |
| "epoch": 3.25, | |
| "learning_rate": 1.7959327258107747e-05, | |
| "loss": 1.8692, | |
| "step": 297500 | |
| }, | |
| { | |
| "epoch": 3.26, | |
| "learning_rate": 1.7903712141517022e-05, | |
| "loss": 1.8731, | |
| "step": 298000 | |
| }, | |
| { | |
| "epoch": 3.26, | |
| "learning_rate": 1.7847873671044405e-05, | |
| "loss": 1.8671, | |
| "step": 298500 | |
| }, | |
| { | |
| "epoch": 3.27, | |
| "learning_rate": 1.7792258554453677e-05, | |
| "loss": 1.8672, | |
| "step": 299000 | |
| }, | |
| { | |
| "epoch": 3.27, | |
| "learning_rate": 1.7736420083981063e-05, | |
| "loss": 1.8684, | |
| "step": 299500 | |
| }, | |
| { | |
| "epoch": 3.28, | |
| "learning_rate": 1.7680804967390335e-05, | |
| "loss": 1.8662, | |
| "step": 300000 | |
| }, | |
| { | |
| "epoch": 3.28, | |
| "eval_accuracy": 0.6457191174875389, | |
| "eval_loss": 1.794921875, | |
| "eval_runtime": 1092.3358, | |
| "eval_samples_per_second": 282.29, | |
| "eval_steps_per_second": 4.412, | |
| "step": 300000 | |
| }, | |
| { | |
| "epoch": 3.28, | |
| "learning_rate": 1.7624966496917715e-05, | |
| "loss": 1.8683, | |
| "step": 300500 | |
| }, | |
| { | |
| "epoch": 3.29, | |
| "learning_rate": 1.756935138032699e-05, | |
| "loss": 1.8664, | |
| "step": 301000 | |
| }, | |
| { | |
| "epoch": 3.29, | |
| "learning_rate": 1.7513512909854373e-05, | |
| "loss": 1.8733, | |
| "step": 301500 | |
| }, | |
| { | |
| "epoch": 3.3, | |
| "learning_rate": 1.7457897793263648e-05, | |
| "loss": 1.867, | |
| "step": 302000 | |
| }, | |
| { | |
| "epoch": 3.3, | |
| "learning_rate": 1.740205932279103e-05, | |
| "loss": 1.8667, | |
| "step": 302500 | |
| }, | |
| { | |
| "epoch": 3.31, | |
| "learning_rate": 1.7346444206200306e-05, | |
| "loss": 1.8672, | |
| "step": 303000 | |
| }, | |
| { | |
| "epoch": 3.32, | |
| "learning_rate": 1.729060573572769e-05, | |
| "loss": 1.87, | |
| "step": 303500 | |
| }, | |
| { | |
| "epoch": 3.32, | |
| "learning_rate": 1.7234990619136964e-05, | |
| "loss": 1.8707, | |
| "step": 304000 | |
| }, | |
| { | |
| "epoch": 3.33, | |
| "learning_rate": 1.7179152148664347e-05, | |
| "loss": 1.8679, | |
| "step": 304500 | |
| }, | |
| { | |
| "epoch": 3.33, | |
| "learning_rate": 1.7123537032073618e-05, | |
| "loss": 1.8665, | |
| "step": 305000 | |
| }, | |
| { | |
| "epoch": 3.34, | |
| "learning_rate": 1.7067698561601e-05, | |
| "loss": 1.8603, | |
| "step": 305500 | |
| }, | |
| { | |
| "epoch": 3.34, | |
| "learning_rate": 1.7012083445010273e-05, | |
| "loss": 1.8664, | |
| "step": 306000 | |
| }, | |
| { | |
| "epoch": 3.35, | |
| "learning_rate": 1.6956244974537656e-05, | |
| "loss": 1.8612, | |
| "step": 306500 | |
| }, | |
| { | |
| "epoch": 3.35, | |
| "learning_rate": 1.690062985794693e-05, | |
| "loss": 1.8667, | |
| "step": 307000 | |
| }, | |
| { | |
| "epoch": 3.36, | |
| "learning_rate": 1.6844791387474314e-05, | |
| "loss": 1.8608, | |
| "step": 307500 | |
| }, | |
| { | |
| "epoch": 3.36, | |
| "learning_rate": 1.678917627088359e-05, | |
| "loss": 1.8606, | |
| "step": 308000 | |
| }, | |
| { | |
| "epoch": 3.37, | |
| "learning_rate": 1.6733337800410972e-05, | |
| "loss": 1.8592, | |
| "step": 308500 | |
| }, | |
| { | |
| "epoch": 3.38, | |
| "learning_rate": 1.6677722683820247e-05, | |
| "loss": 1.8641, | |
| "step": 309000 | |
| }, | |
| { | |
| "epoch": 3.38, | |
| "learning_rate": 1.662188421334763e-05, | |
| "loss": 1.8584, | |
| "step": 309500 | |
| }, | |
| { | |
| "epoch": 3.39, | |
| "learning_rate": 1.6566269096756905e-05, | |
| "loss": 1.8575, | |
| "step": 310000 | |
| }, | |
| { | |
| "epoch": 3.39, | |
| "eval_accuracy": 0.6469689531293227, | |
| "eval_loss": 1.787109375, | |
| "eval_runtime": 1090.9668, | |
| "eval_samples_per_second": 282.645, | |
| "eval_steps_per_second": 4.417, | |
| "step": 310000 | |
| }, | |
| { | |
| "epoch": 3.39, | |
| "learning_rate": 1.6510430626284285e-05, | |
| "loss": 1.8563, | |
| "step": 310500 | |
| }, | |
| { | |
| "epoch": 3.4, | |
| "learning_rate": 1.6454815509693556e-05, | |
| "loss": 1.8623, | |
| "step": 311000 | |
| }, | |
| { | |
| "epoch": 3.4, | |
| "learning_rate": 1.6398977039220943e-05, | |
| "loss": 1.8601, | |
| "step": 311500 | |
| }, | |
| { | |
| "epoch": 3.41, | |
| "learning_rate": 1.6343361922630214e-05, | |
| "loss": 1.8568, | |
| "step": 312000 | |
| }, | |
| { | |
| "epoch": 3.41, | |
| "learning_rate": 1.6287523452157598e-05, | |
| "loss": 1.8626, | |
| "step": 312500 | |
| }, | |
| { | |
| "epoch": 3.42, | |
| "learning_rate": 1.6231908335566872e-05, | |
| "loss": 1.8553, | |
| "step": 313000 | |
| }, | |
| { | |
| "epoch": 3.42, | |
| "learning_rate": 1.6176069865094256e-05, | |
| "loss": 1.8546, | |
| "step": 313500 | |
| }, | |
| { | |
| "epoch": 3.43, | |
| "learning_rate": 1.612045474850353e-05, | |
| "loss": 1.8576, | |
| "step": 314000 | |
| }, | |
| { | |
| "epoch": 3.44, | |
| "learning_rate": 1.6064616278030914e-05, | |
| "loss": 1.8591, | |
| "step": 314500 | |
| }, | |
| { | |
| "epoch": 3.44, | |
| "learning_rate": 1.600900116144019e-05, | |
| "loss": 1.8626, | |
| "step": 315000 | |
| }, | |
| { | |
| "epoch": 3.45, | |
| "learning_rate": 1.5953162690967568e-05, | |
| "loss": 1.8535, | |
| "step": 315500 | |
| }, | |
| { | |
| "epoch": 3.45, | |
| "learning_rate": 1.5897547574376843e-05, | |
| "loss": 1.856, | |
| "step": 316000 | |
| }, | |
| { | |
| "epoch": 3.46, | |
| "learning_rate": 1.5841709103904226e-05, | |
| "loss": 1.8562, | |
| "step": 316500 | |
| }, | |
| { | |
| "epoch": 3.46, | |
| "learning_rate": 1.5786093987313498e-05, | |
| "loss": 1.8535, | |
| "step": 317000 | |
| }, | |
| { | |
| "epoch": 3.47, | |
| "learning_rate": 1.5730255516840884e-05, | |
| "loss": 1.858, | |
| "step": 317500 | |
| }, | |
| { | |
| "epoch": 3.47, | |
| "learning_rate": 1.5674640400250156e-05, | |
| "loss": 1.8552, | |
| "step": 318000 | |
| }, | |
| { | |
| "epoch": 3.48, | |
| "learning_rate": 1.561880192977754e-05, | |
| "loss": 1.852, | |
| "step": 318500 | |
| }, | |
| { | |
| "epoch": 3.48, | |
| "learning_rate": 1.5563186813186814e-05, | |
| "loss": 1.851, | |
| "step": 319000 | |
| }, | |
| { | |
| "epoch": 3.49, | |
| "learning_rate": 1.5507348342714197e-05, | |
| "loss": 1.849, | |
| "step": 319500 | |
| }, | |
| { | |
| "epoch": 3.5, | |
| "learning_rate": 1.5451733226123472e-05, | |
| "loss": 1.8538, | |
| "step": 320000 | |
| }, | |
| { | |
| "epoch": 3.5, | |
| "eval_accuracy": 0.6478183167003876, | |
| "eval_loss": 1.779296875, | |
| "eval_runtime": 1093.454, | |
| "eval_samples_per_second": 282.002, | |
| "eval_steps_per_second": 4.407, | |
| "step": 320000 | |
| }, | |
| { | |
| "epoch": 3.5, | |
| "learning_rate": 1.539589475565085e-05, | |
| "loss": 1.8531, | |
| "step": 320500 | |
| }, | |
| { | |
| "epoch": 3.51, | |
| "learning_rate": 1.534027963906013e-05, | |
| "loss": 1.8541, | |
| "step": 321000 | |
| }, | |
| { | |
| "epoch": 3.51, | |
| "learning_rate": 1.528444116858751e-05, | |
| "loss": 1.8532, | |
| "step": 321500 | |
| }, | |
| { | |
| "epoch": 3.52, | |
| "learning_rate": 1.5228826051996783e-05, | |
| "loss": 1.8527, | |
| "step": 322000 | |
| }, | |
| { | |
| "epoch": 3.52, | |
| "learning_rate": 1.5172987581524168e-05, | |
| "loss": 1.855, | |
| "step": 322500 | |
| }, | |
| { | |
| "epoch": 3.53, | |
| "learning_rate": 1.511737246493344e-05, | |
| "loss": 1.8474, | |
| "step": 323000 | |
| }, | |
| { | |
| "epoch": 3.53, | |
| "learning_rate": 1.5061533994460824e-05, | |
| "loss": 1.8451, | |
| "step": 323500 | |
| }, | |
| { | |
| "epoch": 3.54, | |
| "learning_rate": 1.5005918877870099e-05, | |
| "loss": 1.8498, | |
| "step": 324000 | |
| }, | |
| { | |
| "epoch": 3.54, | |
| "learning_rate": 1.4950080407397482e-05, | |
| "loss": 1.8517, | |
| "step": 324500 | |
| }, | |
| { | |
| "epoch": 3.55, | |
| "learning_rate": 1.4894465290806755e-05, | |
| "loss": 1.8478, | |
| "step": 325000 | |
| }, | |
| { | |
| "epoch": 3.56, | |
| "learning_rate": 1.483862682033414e-05, | |
| "loss": 1.8477, | |
| "step": 325500 | |
| }, | |
| { | |
| "epoch": 3.56, | |
| "learning_rate": 1.4783011703743413e-05, | |
| "loss": 1.851, | |
| "step": 326000 | |
| }, | |
| { | |
| "epoch": 3.57, | |
| "learning_rate": 1.4727173233270795e-05, | |
| "loss": 1.8511, | |
| "step": 326500 | |
| }, | |
| { | |
| "epoch": 3.57, | |
| "learning_rate": 1.4671558116680068e-05, | |
| "loss": 1.8536, | |
| "step": 327000 | |
| }, | |
| { | |
| "epoch": 3.58, | |
| "learning_rate": 1.4615719646207451e-05, | |
| "loss": 1.847, | |
| "step": 327500 | |
| }, | |
| { | |
| "epoch": 3.58, | |
| "learning_rate": 1.4560104529616724e-05, | |
| "loss": 1.8487, | |
| "step": 328000 | |
| }, | |
| { | |
| "epoch": 3.59, | |
| "learning_rate": 1.4504266059144109e-05, | |
| "loss": 1.8507, | |
| "step": 328500 | |
| }, | |
| { | |
| "epoch": 3.59, | |
| "learning_rate": 1.4448650942553382e-05, | |
| "loss": 1.8466, | |
| "step": 329000 | |
| }, | |
| { | |
| "epoch": 3.6, | |
| "learning_rate": 1.4392812472080765e-05, | |
| "loss": 1.8448, | |
| "step": 329500 | |
| }, | |
| { | |
| "epoch": 3.6, | |
| "learning_rate": 1.433719735549004e-05, | |
| "loss": 1.8426, | |
| "step": 330000 | |
| }, | |
| { | |
| "epoch": 3.6, | |
| "eval_accuracy": 0.6488575128878505, | |
| "eval_loss": 1.7734375, | |
| "eval_runtime": 1118.1827, | |
| "eval_samples_per_second": 275.765, | |
| "eval_steps_per_second": 4.31, | |
| "step": 330000 | |
| }, | |
| { | |
| "epoch": 3.61, | |
| "learning_rate": 1.4281358885017423e-05, | |
| "loss": 1.8477, | |
| "step": 330500 | |
| }, | |
| { | |
| "epoch": 3.62, | |
| "learning_rate": 1.4225743768426697e-05, | |
| "loss": 1.8491, | |
| "step": 331000 | |
| }, | |
| { | |
| "epoch": 3.62, | |
| "learning_rate": 1.4169905297954078e-05, | |
| "loss": 1.8402, | |
| "step": 331500 | |
| }, | |
| { | |
| "epoch": 3.63, | |
| "learning_rate": 1.4114290181363351e-05, | |
| "loss": 1.843, | |
| "step": 332000 | |
| }, | |
| { | |
| "epoch": 3.63, | |
| "learning_rate": 1.4058451710890736e-05, | |
| "loss": 1.8444, | |
| "step": 332500 | |
| }, | |
| { | |
| "epoch": 3.64, | |
| "learning_rate": 1.400283659430001e-05, | |
| "loss": 1.8424, | |
| "step": 333000 | |
| }, | |
| { | |
| "epoch": 3.64, | |
| "learning_rate": 1.3946998123827392e-05, | |
| "loss": 1.8499, | |
| "step": 333500 | |
| }, | |
| { | |
| "epoch": 3.65, | |
| "learning_rate": 1.3891383007236666e-05, | |
| "loss": 1.8437, | |
| "step": 334000 | |
| }, | |
| { | |
| "epoch": 3.65, | |
| "learning_rate": 1.383554453676405e-05, | |
| "loss": 1.8441, | |
| "step": 334500 | |
| }, | |
| { | |
| "epoch": 3.66, | |
| "learning_rate": 1.3779929420173324e-05, | |
| "loss": 1.8397, | |
| "step": 335000 | |
| }, | |
| { | |
| "epoch": 3.66, | |
| "learning_rate": 1.3724090949700707e-05, | |
| "loss": 1.8444, | |
| "step": 335500 | |
| }, | |
| { | |
| "epoch": 3.67, | |
| "learning_rate": 1.3668475833109982e-05, | |
| "loss": 1.8394, | |
| "step": 336000 | |
| }, | |
| { | |
| "epoch": 3.68, | |
| "learning_rate": 1.3612637362637362e-05, | |
| "loss": 1.8431, | |
| "step": 336500 | |
| }, | |
| { | |
| "epoch": 3.68, | |
| "learning_rate": 1.3557022246046636e-05, | |
| "loss": 1.8396, | |
| "step": 337000 | |
| }, | |
| { | |
| "epoch": 3.69, | |
| "learning_rate": 1.350118377557402e-05, | |
| "loss": 1.8363, | |
| "step": 337500 | |
| }, | |
| { | |
| "epoch": 3.69, | |
| "learning_rate": 1.3445568658983293e-05, | |
| "loss": 1.8372, | |
| "step": 338000 | |
| }, | |
| { | |
| "epoch": 3.7, | |
| "learning_rate": 1.3389730188510678e-05, | |
| "loss": 1.8385, | |
| "step": 338500 | |
| }, | |
| { | |
| "epoch": 3.7, | |
| "learning_rate": 1.333411507191995e-05, | |
| "loss": 1.8399, | |
| "step": 339000 | |
| }, | |
| { | |
| "epoch": 3.71, | |
| "learning_rate": 1.3278276601447334e-05, | |
| "loss": 1.8401, | |
| "step": 339500 | |
| }, | |
| { | |
| "epoch": 3.71, | |
| "learning_rate": 1.3222661484856607e-05, | |
| "loss": 1.8389, | |
| "step": 340000 | |
| }, | |
| { | |
| "epoch": 3.71, | |
| "eval_accuracy": 0.650145600351507, | |
| "eval_loss": 1.7646484375, | |
| "eval_runtime": 1118.7806, | |
| "eval_samples_per_second": 275.618, | |
| "eval_steps_per_second": 4.307, | |
| "step": 340000 | |
| }, | |
| { | |
| "epoch": 3.72, | |
| "learning_rate": 1.3166823014383992e-05, | |
| "loss": 1.8357, | |
| "step": 340500 | |
| }, | |
| { | |
| "epoch": 3.72, | |
| "learning_rate": 1.3111207897793265e-05, | |
| "loss": 1.8402, | |
| "step": 341000 | |
| }, | |
| { | |
| "epoch": 3.73, | |
| "learning_rate": 1.3055369427320647e-05, | |
| "loss": 1.8349, | |
| "step": 341500 | |
| }, | |
| { | |
| "epoch": 3.74, | |
| "learning_rate": 1.2999754310729923e-05, | |
| "loss": 1.8359, | |
| "step": 342000 | |
| }, | |
| { | |
| "epoch": 3.74, | |
| "learning_rate": 1.2943915840257303e-05, | |
| "loss": 1.839, | |
| "step": 342500 | |
| }, | |
| { | |
| "epoch": 3.75, | |
| "learning_rate": 1.2888300723666578e-05, | |
| "loss": 1.8331, | |
| "step": 343000 | |
| }, | |
| { | |
| "epoch": 3.75, | |
| "learning_rate": 1.2832462253193961e-05, | |
| "loss": 1.8348, | |
| "step": 343500 | |
| }, | |
| { | |
| "epoch": 3.76, | |
| "learning_rate": 1.2776847136603234e-05, | |
| "loss": 1.8394, | |
| "step": 344000 | |
| }, | |
| { | |
| "epoch": 3.76, | |
| "learning_rate": 1.2721008666130619e-05, | |
| "loss": 1.8347, | |
| "step": 344500 | |
| }, | |
| { | |
| "epoch": 3.77, | |
| "learning_rate": 1.2665393549539892e-05, | |
| "loss": 1.8368, | |
| "step": 345000 | |
| }, | |
| { | |
| "epoch": 3.77, | |
| "learning_rate": 1.2609555079067275e-05, | |
| "loss": 1.8331, | |
| "step": 345500 | |
| }, | |
| { | |
| "epoch": 3.78, | |
| "learning_rate": 1.2553939962476548e-05, | |
| "loss": 1.8302, | |
| "step": 346000 | |
| }, | |
| { | |
| "epoch": 3.79, | |
| "learning_rate": 1.2498101492003932e-05, | |
| "loss": 1.836, | |
| "step": 346500 | |
| }, | |
| { | |
| "epoch": 3.79, | |
| "learning_rate": 1.2442486375413205e-05, | |
| "loss": 1.835, | |
| "step": 347000 | |
| }, | |
| { | |
| "epoch": 3.8, | |
| "learning_rate": 1.238664790494059e-05, | |
| "loss": 1.8351, | |
| "step": 347500 | |
| }, | |
| { | |
| "epoch": 3.8, | |
| "learning_rate": 1.2331032788349863e-05, | |
| "loss": 1.8323, | |
| "step": 348000 | |
| }, | |
| { | |
| "epoch": 3.81, | |
| "learning_rate": 1.2275194317877244e-05, | |
| "loss": 1.831, | |
| "step": 348500 | |
| }, | |
| { | |
| "epoch": 3.81, | |
| "learning_rate": 1.221957920128652e-05, | |
| "loss": 1.8349, | |
| "step": 349000 | |
| }, | |
| { | |
| "epoch": 3.82, | |
| "learning_rate": 1.2163740730813902e-05, | |
| "loss": 1.8326, | |
| "step": 349500 | |
| }, | |
| { | |
| "epoch": 3.82, | |
| "learning_rate": 1.2108125614223176e-05, | |
| "loss": 1.8278, | |
| "step": 350000 | |
| }, | |
| { | |
| "epoch": 3.82, | |
| "eval_accuracy": 0.651100408667841, | |
| "eval_loss": 1.759765625, | |
| "eval_runtime": 1097.038, | |
| "eval_samples_per_second": 281.081, | |
| "eval_steps_per_second": 4.393, | |
| "step": 350000 | |
| }, | |
| { | |
| "epoch": 3.83, | |
| "learning_rate": 1.2052287143750559e-05, | |
| "loss": 1.8426, | |
| "step": 350500 | |
| }, | |
| { | |
| "epoch": 3.83, | |
| "learning_rate": 1.1996672027159834e-05, | |
| "loss": 1.8314, | |
| "step": 351000 | |
| }, | |
| { | |
| "epoch": 3.84, | |
| "learning_rate": 1.1940833556687215e-05, | |
| "loss": 1.8348, | |
| "step": 351500 | |
| }, | |
| { | |
| "epoch": 3.85, | |
| "learning_rate": 1.188521844009649e-05, | |
| "loss": 1.827, | |
| "step": 352000 | |
| }, | |
| { | |
| "epoch": 3.85, | |
| "learning_rate": 1.1829379969623873e-05, | |
| "loss": 1.828, | |
| "step": 352500 | |
| }, | |
| { | |
| "epoch": 3.86, | |
| "learning_rate": 1.1773764853033146e-05, | |
| "loss": 1.8306, | |
| "step": 353000 | |
| }, | |
| { | |
| "epoch": 3.86, | |
| "learning_rate": 1.171792638256053e-05, | |
| "loss": 1.8321, | |
| "step": 353500 | |
| }, | |
| { | |
| "epoch": 3.87, | |
| "learning_rate": 1.1662311265969803e-05, | |
| "loss": 1.8293, | |
| "step": 354000 | |
| }, | |
| { | |
| "epoch": 3.87, | |
| "learning_rate": 1.1606472795497186e-05, | |
| "loss": 1.8292, | |
| "step": 354500 | |
| }, | |
| { | |
| "epoch": 3.88, | |
| "learning_rate": 1.155085767890646e-05, | |
| "loss": 1.8267, | |
| "step": 355000 | |
| }, | |
| { | |
| "epoch": 3.88, | |
| "learning_rate": 1.1495019208433844e-05, | |
| "loss": 1.8349, | |
| "step": 355500 | |
| }, | |
| { | |
| "epoch": 3.89, | |
| "learning_rate": 1.1439404091843117e-05, | |
| "loss": 1.8303, | |
| "step": 356000 | |
| }, | |
| { | |
| "epoch": 3.89, | |
| "learning_rate": 1.13835656213705e-05, | |
| "loss": 1.8295, | |
| "step": 356500 | |
| }, | |
| { | |
| "epoch": 3.9, | |
| "learning_rate": 1.1327950504779773e-05, | |
| "loss": 1.8285, | |
| "step": 357000 | |
| }, | |
| { | |
| "epoch": 3.91, | |
| "learning_rate": 1.1272112034307156e-05, | |
| "loss": 1.8326, | |
| "step": 357500 | |
| }, | |
| { | |
| "epoch": 3.91, | |
| "learning_rate": 1.1216496917716431e-05, | |
| "loss": 1.8239, | |
| "step": 358000 | |
| }, | |
| { | |
| "epoch": 3.92, | |
| "learning_rate": 1.1160658447243813e-05, | |
| "loss": 1.8253, | |
| "step": 358500 | |
| }, | |
| { | |
| "epoch": 3.92, | |
| "learning_rate": 1.1105043330653088e-05, | |
| "loss": 1.824, | |
| "step": 359000 | |
| }, | |
| { | |
| "epoch": 3.93, | |
| "learning_rate": 1.104920486018047e-05, | |
| "loss": 1.831, | |
| "step": 359500 | |
| }, | |
| { | |
| "epoch": 3.93, | |
| "learning_rate": 1.0993589743589744e-05, | |
| "loss": 1.8319, | |
| "step": 360000 | |
| }, | |
| { | |
| "epoch": 3.93, | |
| "eval_accuracy": 0.6520117131015779, | |
| "eval_loss": 1.7529296875, | |
| "eval_runtime": 1095.3232, | |
| "eval_samples_per_second": 281.521, | |
| "eval_steps_per_second": 4.4, | |
| "step": 360000 | |
| }, | |
| { | |
| "epoch": 3.94, | |
| "learning_rate": 1.0937751273117127e-05, | |
| "loss": 1.8286, | |
| "step": 360500 | |
| }, | |
| { | |
| "epoch": 3.94, | |
| "learning_rate": 1.0882136156526402e-05, | |
| "loss": 1.8253, | |
| "step": 361000 | |
| }, | |
| { | |
| "epoch": 3.95, | |
| "learning_rate": 1.0826297686053783e-05, | |
| "loss": 1.8312, | |
| "step": 361500 | |
| }, | |
| { | |
| "epoch": 3.95, | |
| "learning_rate": 1.0770682569463057e-05, | |
| "loss": 1.8271, | |
| "step": 362000 | |
| }, | |
| { | |
| "epoch": 3.96, | |
| "learning_rate": 1.0714844098990442e-05, | |
| "loss": 1.8258, | |
| "step": 362500 | |
| }, | |
| { | |
| "epoch": 3.97, | |
| "learning_rate": 1.0659228982399715e-05, | |
| "loss": 1.8274, | |
| "step": 363000 | |
| }, | |
| { | |
| "epoch": 3.97, | |
| "learning_rate": 1.0603390511927098e-05, | |
| "loss": 1.8201, | |
| "step": 363500 | |
| }, | |
| { | |
| "epoch": 3.98, | |
| "learning_rate": 1.0547775395336373e-05, | |
| "loss": 1.8269, | |
| "step": 364000 | |
| }, | |
| { | |
| "epoch": 3.98, | |
| "learning_rate": 1.0491936924863754e-05, | |
| "loss": 1.826, | |
| "step": 364500 | |
| }, | |
| { | |
| "epoch": 3.99, | |
| "learning_rate": 1.0436321808273027e-05, | |
| "loss": 1.8206, | |
| "step": 365000 | |
| }, | |
| { | |
| "epoch": 3.99, | |
| "learning_rate": 1.0380483337800412e-05, | |
| "loss": 1.8279, | |
| "step": 365500 | |
| }, | |
| { | |
| "epoch": 4.0, | |
| "learning_rate": 1.0324868221209685e-05, | |
| "loss": 1.8242, | |
| "step": 366000 | |
| }, | |
| { | |
| "epoch": 4.0, | |
| "learning_rate": 1.0269029750737069e-05, | |
| "loss": 1.8204, | |
| "step": 366500 | |
| }, | |
| { | |
| "epoch": 4.01, | |
| "learning_rate": 1.0213414634146342e-05, | |
| "loss": 1.8198, | |
| "step": 367000 | |
| }, | |
| { | |
| "epoch": 4.01, | |
| "learning_rate": 1.0157576163673725e-05, | |
| "loss": 1.8219, | |
| "step": 367500 | |
| }, | |
| { | |
| "epoch": 4.02, | |
| "learning_rate": 1.0101961047082998e-05, | |
| "loss": 1.8195, | |
| "step": 368000 | |
| }, | |
| { | |
| "epoch": 4.03, | |
| "learning_rate": 1.0046122576610383e-05, | |
| "loss": 1.8181, | |
| "step": 368500 | |
| }, | |
| { | |
| "epoch": 4.03, | |
| "learning_rate": 9.990507460019656e-06, | |
| "loss": 1.8245, | |
| "step": 369000 | |
| }, | |
| { | |
| "epoch": 4.04, | |
| "learning_rate": 9.93466898954704e-06, | |
| "loss": 1.8179, | |
| "step": 369500 | |
| }, | |
| { | |
| "epoch": 4.04, | |
| "learning_rate": 9.879053872956312e-06, | |
| "loss": 1.8203, | |
| "step": 370000 | |
| }, | |
| { | |
| "epoch": 4.04, | |
| "eval_accuracy": 0.6527260129714422, | |
| "eval_loss": 1.7470703125, | |
| "eval_runtime": 1100.1141, | |
| "eval_samples_per_second": 280.295, | |
| "eval_steps_per_second": 4.38, | |
| "step": 370000 | |
| }, | |
| { | |
| "epoch": 4.05, | |
| "learning_rate": 9.823215402483696e-06, | |
| "loss": 1.8153, | |
| "step": 370500 | |
| }, | |
| { | |
| "epoch": 4.05, | |
| "learning_rate": 9.767600285892969e-06, | |
| "loss": 1.8176, | |
| "step": 371000 | |
| }, | |
| { | |
| "epoch": 4.06, | |
| "learning_rate": 9.711761815420352e-06, | |
| "loss": 1.8204, | |
| "step": 371500 | |
| }, | |
| { | |
| "epoch": 4.06, | |
| "learning_rate": 9.656146698829627e-06, | |
| "loss": 1.8176, | |
| "step": 372000 | |
| }, | |
| { | |
| "epoch": 4.07, | |
| "learning_rate": 9.60030822835701e-06, | |
| "loss": 1.8161, | |
| "step": 372500 | |
| }, | |
| { | |
| "epoch": 4.07, | |
| "learning_rate": 9.544693111766283e-06, | |
| "loss": 1.8186, | |
| "step": 373000 | |
| }, | |
| { | |
| "epoch": 4.08, | |
| "learning_rate": 9.488854641293666e-06, | |
| "loss": 1.8183, | |
| "step": 373500 | |
| }, | |
| { | |
| "epoch": 4.09, | |
| "learning_rate": 9.43323952470294e-06, | |
| "loss": 1.8181, | |
| "step": 374000 | |
| }, | |
| { | |
| "epoch": 4.09, | |
| "learning_rate": 9.377401054230323e-06, | |
| "loss": 1.8131, | |
| "step": 374500 | |
| }, | |
| { | |
| "epoch": 4.1, | |
| "learning_rate": 9.321785937639596e-06, | |
| "loss": 1.8174, | |
| "step": 375000 | |
| }, | |
| { | |
| "epoch": 4.1, | |
| "learning_rate": 9.26594746716698e-06, | |
| "loss": 1.8157, | |
| "step": 375500 | |
| }, | |
| { | |
| "epoch": 4.11, | |
| "learning_rate": 9.210332350576254e-06, | |
| "loss": 1.8124, | |
| "step": 376000 | |
| }, | |
| { | |
| "epoch": 4.11, | |
| "learning_rate": 9.154493880103637e-06, | |
| "loss": 1.8173, | |
| "step": 376500 | |
| }, | |
| { | |
| "epoch": 4.12, | |
| "learning_rate": 9.09887876351291e-06, | |
| "loss": 1.8158, | |
| "step": 377000 | |
| }, | |
| { | |
| "epoch": 4.12, | |
| "learning_rate": 9.043040293040293e-06, | |
| "loss": 1.8163, | |
| "step": 377500 | |
| }, | |
| { | |
| "epoch": 4.13, | |
| "learning_rate": 8.987425176449567e-06, | |
| "loss": 1.8144, | |
| "step": 378000 | |
| }, | |
| { | |
| "epoch": 4.13, | |
| "learning_rate": 8.931586705976951e-06, | |
| "loss": 1.8201, | |
| "step": 378500 | |
| }, | |
| { | |
| "epoch": 4.14, | |
| "learning_rate": 8.875971589386225e-06, | |
| "loss": 1.8151, | |
| "step": 379000 | |
| }, | |
| { | |
| "epoch": 4.15, | |
| "learning_rate": 8.820133118913606e-06, | |
| "loss": 1.8133, | |
| "step": 379500 | |
| }, | |
| { | |
| "epoch": 4.15, | |
| "learning_rate": 8.764518002322881e-06, | |
| "loss": 1.8162, | |
| "step": 380000 | |
| }, | |
| { | |
| "epoch": 4.15, | |
| "eval_accuracy": 0.6536149151745975, | |
| "eval_loss": 1.7412109375, | |
| "eval_runtime": 1101.5449, | |
| "eval_samples_per_second": 279.93, | |
| "eval_steps_per_second": 4.375, | |
| "step": 380000 | |
| }, | |
| { | |
| "epoch": 4.16, | |
| "learning_rate": 8.708679531850264e-06, | |
| "loss": 1.8092, | |
| "step": 380500 | |
| }, | |
| { | |
| "epoch": 4.16, | |
| "learning_rate": 8.653064415259537e-06, | |
| "loss": 1.8136, | |
| "step": 381000 | |
| }, | |
| { | |
| "epoch": 4.17, | |
| "learning_rate": 8.597225944786922e-06, | |
| "loss": 1.8141, | |
| "step": 381500 | |
| }, | |
| { | |
| "epoch": 4.17, | |
| "learning_rate": 8.541610828196195e-06, | |
| "loss": 1.8116, | |
| "step": 382000 | |
| }, | |
| { | |
| "epoch": 4.18, | |
| "learning_rate": 8.485772357723577e-06, | |
| "loss": 1.8128, | |
| "step": 382500 | |
| }, | |
| { | |
| "epoch": 4.18, | |
| "learning_rate": 8.430157241132852e-06, | |
| "loss": 1.8134, | |
| "step": 383000 | |
| }, | |
| { | |
| "epoch": 4.19, | |
| "learning_rate": 8.374318770660235e-06, | |
| "loss": 1.8128, | |
| "step": 383500 | |
| }, | |
| { | |
| "epoch": 4.19, | |
| "learning_rate": 8.318703654069508e-06, | |
| "loss": 1.8073, | |
| "step": 384000 | |
| }, | |
| { | |
| "epoch": 4.2, | |
| "learning_rate": 8.262865183596891e-06, | |
| "loss": 1.8102, | |
| "step": 384500 | |
| }, | |
| { | |
| "epoch": 4.21, | |
| "learning_rate": 8.207250067006166e-06, | |
| "loss": 1.8114, | |
| "step": 385000 | |
| }, | |
| { | |
| "epoch": 4.21, | |
| "learning_rate": 8.151411596533547e-06, | |
| "loss": 1.8091, | |
| "step": 385500 | |
| }, | |
| { | |
| "epoch": 4.22, | |
| "learning_rate": 8.095796479942822e-06, | |
| "loss": 1.8082, | |
| "step": 386000 | |
| }, | |
| { | |
| "epoch": 4.22, | |
| "learning_rate": 8.039958009470205e-06, | |
| "loss": 1.8159, | |
| "step": 386500 | |
| }, | |
| { | |
| "epoch": 4.23, | |
| "learning_rate": 7.984342892879479e-06, | |
| "loss": 1.8124, | |
| "step": 387000 | |
| }, | |
| { | |
| "epoch": 4.23, | |
| "learning_rate": 7.928504422406862e-06, | |
| "loss": 1.8109, | |
| "step": 387500 | |
| }, | |
| { | |
| "epoch": 4.24, | |
| "learning_rate": 7.872889305816135e-06, | |
| "loss": 1.8148, | |
| "step": 388000 | |
| }, | |
| { | |
| "epoch": 4.24, | |
| "learning_rate": 7.817050835343518e-06, | |
| "loss": 1.813, | |
| "step": 388500 | |
| }, | |
| { | |
| "epoch": 4.25, | |
| "learning_rate": 7.761435718752793e-06, | |
| "loss": 1.8152, | |
| "step": 389000 | |
| }, | |
| { | |
| "epoch": 4.25, | |
| "learning_rate": 7.705597248280176e-06, | |
| "loss": 1.8099, | |
| "step": 389500 | |
| }, | |
| { | |
| "epoch": 4.26, | |
| "learning_rate": 7.64998213168945e-06, | |
| "loss": 1.8113, | |
| "step": 390000 | |
| }, | |
| { | |
| "epoch": 4.26, | |
| "eval_accuracy": 0.6542793430067383, | |
| "eval_loss": 1.7373046875, | |
| "eval_runtime": 1096.3677, | |
| "eval_samples_per_second": 281.252, | |
| "eval_steps_per_second": 4.395, | |
| "step": 390000 | |
| }, | |
| { | |
| "epoch": 4.27, | |
| "learning_rate": 7.594143661216832e-06, | |
| "loss": 1.8111, | |
| "step": 390500 | |
| }, | |
| { | |
| "epoch": 4.27, | |
| "learning_rate": 7.538528544626106e-06, | |
| "loss": 1.8098, | |
| "step": 391000 | |
| }, | |
| { | |
| "epoch": 4.28, | |
| "learning_rate": 7.48269007415349e-06, | |
| "loss": 1.807, | |
| "step": 391500 | |
| }, | |
| { | |
| "epoch": 4.28, | |
| "learning_rate": 7.427074957562763e-06, | |
| "loss": 1.8063, | |
| "step": 392000 | |
| }, | |
| { | |
| "epoch": 4.29, | |
| "learning_rate": 7.371236487090145e-06, | |
| "loss": 1.8079, | |
| "step": 392500 | |
| }, | |
| { | |
| "epoch": 4.29, | |
| "learning_rate": 7.31562137049942e-06, | |
| "loss": 1.8064, | |
| "step": 393000 | |
| }, | |
| { | |
| "epoch": 4.3, | |
| "learning_rate": 7.259782900026802e-06, | |
| "loss": 1.8082, | |
| "step": 393500 | |
| }, | |
| { | |
| "epoch": 4.3, | |
| "learning_rate": 7.204167783436076e-06, | |
| "loss": 1.8075, | |
| "step": 394000 | |
| }, | |
| { | |
| "epoch": 4.31, | |
| "learning_rate": 7.1483293129634604e-06, | |
| "loss": 1.8078, | |
| "step": 394500 | |
| }, | |
| { | |
| "epoch": 4.31, | |
| "learning_rate": 7.092714196372734e-06, | |
| "loss": 1.8005, | |
| "step": 395000 | |
| }, | |
| { | |
| "epoch": 4.32, | |
| "learning_rate": 7.036875725900116e-06, | |
| "loss": 1.8048, | |
| "step": 395500 | |
| }, | |
| { | |
| "epoch": 4.33, | |
| "learning_rate": 6.98126060930939e-06, | |
| "loss": 1.8076, | |
| "step": 396000 | |
| }, | |
| { | |
| "epoch": 4.33, | |
| "learning_rate": 6.925422138836773e-06, | |
| "loss": 1.8055, | |
| "step": 396500 | |
| }, | |
| { | |
| "epoch": 4.34, | |
| "learning_rate": 6.869807022246047e-06, | |
| "loss": 1.805, | |
| "step": 397000 | |
| }, | |
| { | |
| "epoch": 4.34, | |
| "learning_rate": 6.8139685517734294e-06, | |
| "loss": 1.8104, | |
| "step": 397500 | |
| }, | |
| { | |
| "epoch": 4.35, | |
| "learning_rate": 6.758353435182704e-06, | |
| "loss": 1.812, | |
| "step": 398000 | |
| }, | |
| { | |
| "epoch": 4.35, | |
| "learning_rate": 6.702514964710087e-06, | |
| "loss": 1.8072, | |
| "step": 398500 | |
| }, | |
| { | |
| "epoch": 4.36, | |
| "learning_rate": 6.646899848119361e-06, | |
| "loss": 1.8032, | |
| "step": 399000 | |
| }, | |
| { | |
| "epoch": 4.36, | |
| "learning_rate": 6.591061377646744e-06, | |
| "loss": 1.8022, | |
| "step": 399500 | |
| }, | |
| { | |
| "epoch": 4.37, | |
| "learning_rate": 6.535446261056018e-06, | |
| "loss": 1.8055, | |
| "step": 400000 | |
| }, | |
| { | |
| "epoch": 4.37, | |
| "eval_accuracy": 0.655123594491739, | |
| "eval_loss": 1.732421875, | |
| "eval_runtime": 1175.7931, | |
| "eval_samples_per_second": 262.254, | |
| "eval_steps_per_second": 4.099, | |
| "step": 400000 | |
| }, | |
| { | |
| "epoch": 4.37, | |
| "learning_rate": 6.4796077905834e-06, | |
| "loss": 1.806, | |
| "step": 400500 | |
| }, | |
| { | |
| "epoch": 4.38, | |
| "learning_rate": 6.423992673992673e-06, | |
| "loss": 1.8084, | |
| "step": 401000 | |
| }, | |
| { | |
| "epoch": 4.39, | |
| "learning_rate": 6.368154203520057e-06, | |
| "loss": 1.8053, | |
| "step": 401500 | |
| }, | |
| { | |
| "epoch": 4.39, | |
| "learning_rate": 6.312539086929331e-06, | |
| "loss": 1.8048, | |
| "step": 402000 | |
| }, | |
| { | |
| "epoch": 4.4, | |
| "learning_rate": 6.2567006164567145e-06, | |
| "loss": 1.8099, | |
| "step": 402500 | |
| }, | |
| { | |
| "epoch": 4.4, | |
| "learning_rate": 6.201085499865988e-06, | |
| "loss": 1.8034, | |
| "step": 403000 | |
| }, | |
| { | |
| "epoch": 4.41, | |
| "learning_rate": 6.145247029393371e-06, | |
| "loss": 1.8049, | |
| "step": 403500 | |
| }, | |
| { | |
| "epoch": 4.41, | |
| "learning_rate": 6.089631912802645e-06, | |
| "loss": 1.8046, | |
| "step": 404000 | |
| }, | |
| { | |
| "epoch": 4.42, | |
| "learning_rate": 6.033793442330028e-06, | |
| "loss": 1.8008, | |
| "step": 404500 | |
| }, | |
| { | |
| "epoch": 4.42, | |
| "learning_rate": 5.978178325739302e-06, | |
| "loss": 1.8016, | |
| "step": 405000 | |
| }, | |
| { | |
| "epoch": 4.43, | |
| "learning_rate": 5.922339855266685e-06, | |
| "loss": 1.8048, | |
| "step": 405500 | |
| }, | |
| { | |
| "epoch": 4.44, | |
| "learning_rate": 5.866724738675958e-06, | |
| "loss": 1.8014, | |
| "step": 406000 | |
| }, | |
| { | |
| "epoch": 4.44, | |
| "learning_rate": 5.8108862682033416e-06, | |
| "loss": 1.8031, | |
| "step": 406500 | |
| }, | |
| { | |
| "epoch": 4.45, | |
| "learning_rate": 5.755271151612615e-06, | |
| "loss": 1.8013, | |
| "step": 407000 | |
| }, | |
| { | |
| "epoch": 4.45, | |
| "learning_rate": 5.699432681139998e-06, | |
| "loss": 1.7957, | |
| "step": 407500 | |
| }, | |
| { | |
| "epoch": 4.46, | |
| "learning_rate": 5.643817564549273e-06, | |
| "loss": 1.8082, | |
| "step": 408000 | |
| }, | |
| { | |
| "epoch": 4.46, | |
| "learning_rate": 5.587979094076656e-06, | |
| "loss": 1.8064, | |
| "step": 408500 | |
| }, | |
| { | |
| "epoch": 4.47, | |
| "learning_rate": 5.532363977485929e-06, | |
| "loss": 1.7994, | |
| "step": 409000 | |
| }, | |
| { | |
| "epoch": 4.47, | |
| "learning_rate": 5.476525507013312e-06, | |
| "loss": 1.8027, | |
| "step": 409500 | |
| }, | |
| { | |
| "epoch": 4.48, | |
| "learning_rate": 5.4209103904225854e-06, | |
| "loss": 1.7991, | |
| "step": 410000 | |
| }, | |
| { | |
| "epoch": 4.48, | |
| "eval_accuracy": 0.6556022179984572, | |
| "eval_loss": 1.728515625, | |
| "eval_runtime": 1174.9994, | |
| "eval_samples_per_second": 262.431, | |
| "eval_steps_per_second": 4.101, | |
| "step": 410000 | |
| }, | |
| { | |
| "epoch": 4.48, | |
| "learning_rate": 5.365071919949969e-06, | |
| "loss": 1.8046, | |
| "step": 410500 | |
| }, | |
| { | |
| "epoch": 4.49, | |
| "learning_rate": 5.309456803359243e-06, | |
| "loss": 1.8017, | |
| "step": 411000 | |
| }, | |
| { | |
| "epoch": 4.5, | |
| "learning_rate": 5.253618332886626e-06, | |
| "loss": 1.8038, | |
| "step": 411500 | |
| }, | |
| { | |
| "epoch": 4.5, | |
| "learning_rate": 5.198003216295899e-06, | |
| "loss": 1.8046, | |
| "step": 412000 | |
| }, | |
| { | |
| "epoch": 4.51, | |
| "learning_rate": 5.142164745823282e-06, | |
| "loss": 1.8046, | |
| "step": 412500 | |
| }, | |
| { | |
| "epoch": 4.51, | |
| "learning_rate": 5.086549629232556e-06, | |
| "loss": 1.7988, | |
| "step": 413000 | |
| }, | |
| { | |
| "epoch": 4.52, | |
| "learning_rate": 5.030711158759939e-06, | |
| "loss": 1.7947, | |
| "step": 413500 | |
| }, | |
| { | |
| "epoch": 4.52, | |
| "learning_rate": 4.975096042169213e-06, | |
| "loss": 1.7999, | |
| "step": 414000 | |
| }, | |
| { | |
| "epoch": 4.53, | |
| "learning_rate": 4.9192575716965965e-06, | |
| "loss": 1.7909, | |
| "step": 414500 | |
| }, | |
| { | |
| "epoch": 4.53, | |
| "learning_rate": 4.86364245510587e-06, | |
| "loss": 1.8029, | |
| "step": 415000 | |
| }, | |
| { | |
| "epoch": 4.54, | |
| "learning_rate": 4.807803984633253e-06, | |
| "loss": 1.8017, | |
| "step": 415500 | |
| }, | |
| { | |
| "epoch": 4.54, | |
| "learning_rate": 4.752188868042527e-06, | |
| "loss": 1.7924, | |
| "step": 416000 | |
| }, | |
| { | |
| "epoch": 4.55, | |
| "learning_rate": 4.69635039756991e-06, | |
| "loss": 1.797, | |
| "step": 416500 | |
| }, | |
| { | |
| "epoch": 4.56, | |
| "learning_rate": 4.640735280979184e-06, | |
| "loss": 1.7941, | |
| "step": 417000 | |
| }, | |
| { | |
| "epoch": 4.56, | |
| "learning_rate": 4.584896810506567e-06, | |
| "loss": 1.7972, | |
| "step": 417500 | |
| }, | |
| { | |
| "epoch": 4.57, | |
| "learning_rate": 4.52928169391584e-06, | |
| "loss": 1.8002, | |
| "step": 418000 | |
| }, | |
| { | |
| "epoch": 4.57, | |
| "learning_rate": 4.4734432234432235e-06, | |
| "loss": 1.796, | |
| "step": 418500 | |
| }, | |
| { | |
| "epoch": 4.58, | |
| "learning_rate": 4.4178281068524975e-06, | |
| "loss": 1.8012, | |
| "step": 419000 | |
| }, | |
| { | |
| "epoch": 4.58, | |
| "learning_rate": 4.361989636379881e-06, | |
| "loss": 1.7975, | |
| "step": 419500 | |
| }, | |
| { | |
| "epoch": 4.59, | |
| "learning_rate": 4.306374519789154e-06, | |
| "loss": 1.7965, | |
| "step": 420000 | |
| }, | |
| { | |
| "epoch": 4.59, | |
| "eval_accuracy": 0.6561621453610571, | |
| "eval_loss": 1.724609375, | |
| "eval_runtime": 1175.4978, | |
| "eval_samples_per_second": 262.32, | |
| "eval_steps_per_second": 4.1, | |
| "step": 420000 | |
| }, | |
| { | |
| "epoch": 4.59, | |
| "learning_rate": 4.250536049316537e-06, | |
| "loss": 1.7974, | |
| "step": 420500 | |
| }, | |
| { | |
| "epoch": 4.6, | |
| "learning_rate": 4.194920932725811e-06, | |
| "loss": 1.7986, | |
| "step": 421000 | |
| }, | |
| { | |
| "epoch": 4.6, | |
| "learning_rate": 4.139082462253194e-06, | |
| "loss": 1.7967, | |
| "step": 421500 | |
| }, | |
| { | |
| "epoch": 4.61, | |
| "learning_rate": 4.083467345662468e-06, | |
| "loss": 1.7991, | |
| "step": 422000 | |
| }, | |
| { | |
| "epoch": 4.62, | |
| "learning_rate": 4.0276288751898514e-06, | |
| "loss": 1.7968, | |
| "step": 422500 | |
| }, | |
| { | |
| "epoch": 4.62, | |
| "learning_rate": 3.972013758599125e-06, | |
| "loss": 1.7939, | |
| "step": 423000 | |
| }, | |
| { | |
| "epoch": 4.63, | |
| "learning_rate": 3.916175288126508e-06, | |
| "loss": 1.7957, | |
| "step": 423500 | |
| }, | |
| { | |
| "epoch": 4.63, | |
| "learning_rate": 3.860560171535781e-06, | |
| "loss": 1.7962, | |
| "step": 424000 | |
| }, | |
| { | |
| "epoch": 4.64, | |
| "learning_rate": 3.8047217010631645e-06, | |
| "loss": 1.7939, | |
| "step": 424500 | |
| }, | |
| { | |
| "epoch": 4.64, | |
| "learning_rate": 3.7491065844724385e-06, | |
| "loss": 1.7937, | |
| "step": 425000 | |
| }, | |
| { | |
| "epoch": 4.65, | |
| "learning_rate": 3.6932681139998217e-06, | |
| "loss": 1.7939, | |
| "step": 425500 | |
| }, | |
| { | |
| "epoch": 4.65, | |
| "learning_rate": 3.6376529974090953e-06, | |
| "loss": 1.7954, | |
| "step": 426000 | |
| }, | |
| { | |
| "epoch": 4.66, | |
| "learning_rate": 3.5818145269364785e-06, | |
| "loss": 1.7946, | |
| "step": 426500 | |
| }, | |
| { | |
| "epoch": 4.66, | |
| "learning_rate": 3.526199410345752e-06, | |
| "loss": 1.7933, | |
| "step": 427000 | |
| }, | |
| { | |
| "epoch": 4.67, | |
| "learning_rate": 3.4703609398731352e-06, | |
| "loss": 1.7967, | |
| "step": 427500 | |
| }, | |
| { | |
| "epoch": 4.68, | |
| "learning_rate": 3.414745823282409e-06, | |
| "loss": 1.7946, | |
| "step": 428000 | |
| }, | |
| { | |
| "epoch": 4.68, | |
| "learning_rate": 3.358907352809792e-06, | |
| "loss": 1.7916, | |
| "step": 428500 | |
| }, | |
| { | |
| "epoch": 4.69, | |
| "learning_rate": 3.303292236219065e-06, | |
| "loss": 1.7982, | |
| "step": 429000 | |
| }, | |
| { | |
| "epoch": 4.69, | |
| "learning_rate": 3.2474537657464483e-06, | |
| "loss": 1.7951, | |
| "step": 429500 | |
| }, | |
| { | |
| "epoch": 4.7, | |
| "learning_rate": 3.1918386491557228e-06, | |
| "loss": 1.7938, | |
| "step": 430000 | |
| }, | |
| { | |
| "epoch": 4.7, | |
| "eval_accuracy": 0.6567150458487134, | |
| "eval_loss": 1.720703125, | |
| "eval_runtime": 1096.463, | |
| "eval_samples_per_second": 281.228, | |
| "eval_steps_per_second": 4.395, | |
| "step": 430000 | |
| }, | |
| { | |
| "epoch": 4.7, | |
| "learning_rate": 3.136000178683106e-06, | |
| "loss": 1.7908, | |
| "step": 430500 | |
| }, | |
| { | |
| "epoch": 4.71, | |
| "learning_rate": 3.0803850620923795e-06, | |
| "loss": 1.798, | |
| "step": 431000 | |
| }, | |
| { | |
| "epoch": 4.71, | |
| "learning_rate": 3.0245465916197627e-06, | |
| "loss": 1.796, | |
| "step": 431500 | |
| }, | |
| { | |
| "epoch": 4.72, | |
| "learning_rate": 2.968931475029036e-06, | |
| "loss": 1.7931, | |
| "step": 432000 | |
| }, | |
| { | |
| "epoch": 4.72, | |
| "learning_rate": 2.913093004556419e-06, | |
| "loss": 1.7915, | |
| "step": 432500 | |
| }, | |
| { | |
| "epoch": 4.73, | |
| "learning_rate": 2.857477887965693e-06, | |
| "loss": 1.7932, | |
| "step": 433000 | |
| }, | |
| { | |
| "epoch": 4.74, | |
| "learning_rate": 2.8016394174930762e-06, | |
| "loss": 1.7867, | |
| "step": 433500 | |
| }, | |
| { | |
| "epoch": 4.74, | |
| "learning_rate": 2.74602430090235e-06, | |
| "loss": 1.7906, | |
| "step": 434000 | |
| }, | |
| { | |
| "epoch": 4.75, | |
| "learning_rate": 2.690185830429733e-06, | |
| "loss": 1.7915, | |
| "step": 434500 | |
| }, | |
| { | |
| "epoch": 4.75, | |
| "learning_rate": 2.6345707138390066e-06, | |
| "loss": 1.7906, | |
| "step": 435000 | |
| }, | |
| { | |
| "epoch": 4.76, | |
| "learning_rate": 2.5787322433663898e-06, | |
| "loss": 1.7956, | |
| "step": 435500 | |
| }, | |
| { | |
| "epoch": 4.76, | |
| "learning_rate": 2.5231171267756633e-06, | |
| "loss": 1.793, | |
| "step": 436000 | |
| }, | |
| { | |
| "epoch": 4.77, | |
| "learning_rate": 2.4672786563030465e-06, | |
| "loss": 1.7897, | |
| "step": 436500 | |
| }, | |
| { | |
| "epoch": 4.77, | |
| "learning_rate": 2.4116635397123205e-06, | |
| "loss": 1.7912, | |
| "step": 437000 | |
| }, | |
| { | |
| "epoch": 4.78, | |
| "learning_rate": 2.3558250692397037e-06, | |
| "loss": 1.7917, | |
| "step": 437500 | |
| }, | |
| { | |
| "epoch": 4.78, | |
| "learning_rate": 2.3002099526489773e-06, | |
| "loss": 1.7924, | |
| "step": 438000 | |
| }, | |
| { | |
| "epoch": 4.79, | |
| "learning_rate": 2.2443714821763605e-06, | |
| "loss": 1.7868, | |
| "step": 438500 | |
| }, | |
| { | |
| "epoch": 4.8, | |
| "learning_rate": 2.188756365585634e-06, | |
| "loss": 1.7911, | |
| "step": 439000 | |
| }, | |
| { | |
| "epoch": 4.8, | |
| "learning_rate": 2.1329178951130172e-06, | |
| "loss": 1.7874, | |
| "step": 439500 | |
| }, | |
| { | |
| "epoch": 4.81, | |
| "learning_rate": 2.077302778522291e-06, | |
| "loss": 1.793, | |
| "step": 440000 | |
| }, | |
| { | |
| "epoch": 4.81, | |
| "eval_accuracy": 0.6571231862326748, | |
| "eval_loss": 1.7177734375, | |
| "eval_runtime": 1080.1698, | |
| "eval_samples_per_second": 285.47, | |
| "eval_steps_per_second": 4.461, | |
| "step": 440000 | |
| }, | |
| { | |
| "epoch": 4.81, | |
| "learning_rate": 2.021464308049674e-06, | |
| "loss": 1.784, | |
| "step": 440500 | |
| }, | |
| { | |
| "epoch": 4.82, | |
| "learning_rate": 1.965849191458948e-06, | |
| "loss": 1.7901, | |
| "step": 441000 | |
| }, | |
| { | |
| "epoch": 4.82, | |
| "learning_rate": 1.910010720986331e-06, | |
| "loss": 1.7949, | |
| "step": 441500 | |
| }, | |
| { | |
| "epoch": 4.83, | |
| "learning_rate": 1.8543956043956045e-06, | |
| "loss": 1.7868, | |
| "step": 442000 | |
| }, | |
| { | |
| "epoch": 4.83, | |
| "learning_rate": 1.7985571339229877e-06, | |
| "loss": 1.789, | |
| "step": 442500 | |
| }, | |
| { | |
| "epoch": 4.84, | |
| "learning_rate": 1.742942017332261e-06, | |
| "loss": 1.7878, | |
| "step": 443000 | |
| }, | |
| { | |
| "epoch": 4.84, | |
| "learning_rate": 1.6871035468596447e-06, | |
| "loss": 1.7864, | |
| "step": 443500 | |
| }, | |
| { | |
| "epoch": 4.85, | |
| "learning_rate": 1.6314884302689183e-06, | |
| "loss": 1.7892, | |
| "step": 444000 | |
| }, | |
| { | |
| "epoch": 4.86, | |
| "learning_rate": 1.5756499597963012e-06, | |
| "loss": 1.794, | |
| "step": 444500 | |
| }, | |
| { | |
| "epoch": 4.86, | |
| "learning_rate": 1.520034843205575e-06, | |
| "loss": 1.7899, | |
| "step": 445000 | |
| }, | |
| { | |
| "epoch": 4.87, | |
| "learning_rate": 1.4641963727329582e-06, | |
| "loss": 1.788, | |
| "step": 445500 | |
| }, | |
| { | |
| "epoch": 4.87, | |
| "learning_rate": 1.4085812561422318e-06, | |
| "loss": 1.7869, | |
| "step": 446000 | |
| }, | |
| { | |
| "epoch": 4.88, | |
| "learning_rate": 1.352742785669615e-06, | |
| "loss": 1.7861, | |
| "step": 446500 | |
| }, | |
| { | |
| "epoch": 4.88, | |
| "learning_rate": 1.2971276690788888e-06, | |
| "loss": 1.7895, | |
| "step": 447000 | |
| }, | |
| { | |
| "epoch": 4.89, | |
| "learning_rate": 1.241289198606272e-06, | |
| "loss": 1.791, | |
| "step": 447500 | |
| }, | |
| { | |
| "epoch": 4.89, | |
| "learning_rate": 1.1856740820155455e-06, | |
| "loss": 1.7922, | |
| "step": 448000 | |
| }, | |
| { | |
| "epoch": 4.9, | |
| "learning_rate": 1.1298356115429287e-06, | |
| "loss": 1.7853, | |
| "step": 448500 | |
| }, | |
| { | |
| "epoch": 4.9, | |
| "learning_rate": 1.0742204949522025e-06, | |
| "loss": 1.7862, | |
| "step": 449000 | |
| }, | |
| { | |
| "epoch": 4.91, | |
| "learning_rate": 1.0183820244795855e-06, | |
| "loss": 1.7849, | |
| "step": 449500 | |
| }, | |
| { | |
| "epoch": 4.92, | |
| "learning_rate": 9.62766907888859e-07, | |
| "loss": 1.7848, | |
| "step": 450000 | |
| }, | |
| { | |
| "epoch": 4.92, | |
| "eval_accuracy": 0.6576050004678714, | |
| "eval_loss": 1.71484375, | |
| "eval_runtime": 1081.0209, | |
| "eval_samples_per_second": 285.245, | |
| "eval_steps_per_second": 4.458, | |
| "step": 450000 | |
| }, | |
| { | |
| "epoch": 4.92, | |
| "learning_rate": 9.069284374162424e-07, | |
| "loss": 1.787, | |
| "step": 450500 | |
| }, | |
| { | |
| "epoch": 4.93, | |
| "learning_rate": 8.513133208255159e-07, | |
| "loss": 1.7882, | |
| "step": 451000 | |
| }, | |
| { | |
| "epoch": 4.93, | |
| "learning_rate": 7.954748503528992e-07, | |
| "loss": 1.7849, | |
| "step": 451500 | |
| }, | |
| { | |
| "epoch": 4.94, | |
| "learning_rate": 7.398597337621729e-07, | |
| "loss": 1.7889, | |
| "step": 452000 | |
| }, | |
| { | |
| "epoch": 4.94, | |
| "learning_rate": 6.84021263289556e-07, | |
| "loss": 1.79, | |
| "step": 452500 | |
| }, | |
| { | |
| "epoch": 4.95, | |
| "learning_rate": 6.284061466988297e-07, | |
| "loss": 1.7851, | |
| "step": 453000 | |
| }, | |
| { | |
| "epoch": 4.95, | |
| "learning_rate": 5.725676762262128e-07, | |
| "loss": 1.7859, | |
| "step": 453500 | |
| }, | |
| { | |
| "epoch": 4.96, | |
| "learning_rate": 5.169525596354864e-07, | |
| "loss": 1.7877, | |
| "step": 454000 | |
| }, | |
| { | |
| "epoch": 4.96, | |
| "learning_rate": 4.611140891628697e-07, | |
| "loss": 1.7828, | |
| "step": 454500 | |
| }, | |
| { | |
| "epoch": 4.97, | |
| "learning_rate": 4.054989725721433e-07, | |
| "loss": 1.7882, | |
| "step": 455000 | |
| }, | |
| { | |
| "epoch": 4.98, | |
| "learning_rate": 3.496605020995265e-07, | |
| "loss": 1.7847, | |
| "step": 455500 | |
| }, | |
| { | |
| "epoch": 4.98, | |
| "learning_rate": 2.9404538550880015e-07, | |
| "loss": 1.7876, | |
| "step": 456000 | |
| }, | |
| { | |
| "epoch": 4.99, | |
| "learning_rate": 2.3820691503618333e-07, | |
| "loss": 1.7887, | |
| "step": 456500 | |
| }, | |
| { | |
| "epoch": 4.99, | |
| "learning_rate": 1.82591798445457e-07, | |
| "loss": 1.7844, | |
| "step": 457000 | |
| }, | |
| { | |
| "epoch": 5.0, | |
| "learning_rate": 1.2675332797284018e-07, | |
| "loss": 1.7896, | |
| "step": 457500 | |
| }, | |
| { | |
| "epoch": 5.0, | |
| "step": 457720, | |
| "total_flos": 7.401345611745722e+18, | |
| "train_loss": 2.2305684480447927, | |
| "train_runtime": 322220.8599, | |
| "train_samples_per_second": 90.912, | |
| "train_steps_per_second": 1.421 | |
| } | |
| ], | |
| "max_steps": 457720, | |
| "num_train_epochs": 5, | |
| "total_flos": 7.401345611745722e+18, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
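
A minimal sketch (not part of the state file itself) of how the `log_history` above can be parsed back into loss curves using only the standard library. The file name `trainer_state.json` is an assumption; point it at wherever this state file lives on disk.

```python
# Sketch: recover train/eval loss series from a Hugging Face trainer_state.json.
# Assumes the JSON above is saved as "trainer_state.json" (hypothetical path).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Training-loss entries carry a "loss" key; evaluation entries carry "eval_loss".
# The final summary record uses "train_loss", so neither filter picks it up.
history = state["log_history"]
train = [(e["step"], e["loss"]) for e in history if "loss" in e]
evals = [(e["step"], e["eval_loss"]) for e in history if "eval_loss" in e]

print(f"{len(train)} train points, {len(evals)} eval points")

# The minimum of the eval series should agree with the recorded best checkpoint
# (best_metric 1.71484375 at step 450000, i.e. checkpoint-450000).
best_step, best_loss = min(evals, key=lambda p: p[1])
print(f"best eval_loss {best_loss} at step {best_step}")
```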