{
  "best_metric": 0.045277200639247894,
  "best_model_checkpoint": "./vit-base-mri/checkpoint-1500",
  "epoch": 4.0,
  "global_step": 6760,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 2.977810650887574e-05,
      "loss": 0.099,
      "step": 50
    },
    {
      "epoch": 0.06,
      "learning_rate": 2.9556213017751482e-05,
      "loss": 0.0913,
      "step": 100
    },
    {
      "epoch": 0.09,
      "learning_rate": 2.933431952662722e-05,
      "loss": 0.0751,
      "step": 150
    },
    {
      "epoch": 0.12,
      "learning_rate": 2.911242603550296e-05,
      "loss": 0.0663,
      "step": 200
    },
    {
      "epoch": 0.15,
      "learning_rate": 2.88905325443787e-05,
      "loss": 0.0773,
      "step": 250
    },
    {
      "epoch": 0.18,
      "learning_rate": 2.8668639053254437e-05,
      "loss": 0.0628,
      "step": 300
    },
    {
      "epoch": 0.21,
      "learning_rate": 2.844674556213018e-05,
      "loss": 0.0516,
      "step": 350
    },
    {
      "epoch": 0.24,
      "learning_rate": 2.8224852071005918e-05,
      "loss": 0.0537,
      "step": 400
    },
    {
      "epoch": 0.27,
      "learning_rate": 2.800295857988166e-05,
      "loss": 0.0498,
      "step": 450
    },
    {
      "epoch": 0.3,
      "learning_rate": 2.7781065088757396e-05,
      "loss": 0.04,
      "step": 500
    },
    {
      "epoch": 0.3,
      "eval_accuracy": 0.9689694936576161,
      "eval_loss": 0.08280089497566223,
      "eval_runtime": 109.6629,
      "eval_samples_per_second": 86.985,
      "eval_steps_per_second": 10.879,
      "step": 500
    },
    {
      "epoch": 0.33,
      "learning_rate": 2.7559171597633135e-05,
      "loss": 0.0462,
      "step": 550
    },
    {
      "epoch": 0.36,
      "learning_rate": 2.7337278106508877e-05,
      "loss": 0.0382,
      "step": 600
    },
    {
      "epoch": 0.38,
      "learning_rate": 2.7115384615384616e-05,
      "loss": 0.0351,
      "step": 650
    },
    {
      "epoch": 0.41,
      "learning_rate": 2.6893491124260358e-05,
      "loss": 0.0274,
      "step": 700
    },
    {
      "epoch": 0.44,
      "learning_rate": 2.6671597633136097e-05,
      "loss": 0.029,
      "step": 750
    },
    {
      "epoch": 0.47,
      "learning_rate": 2.6449704142011832e-05,
      "loss": 0.0599,
      "step": 800
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.6227810650887575e-05,
      "loss": 0.0621,
      "step": 850
    },
    {
      "epoch": 0.53,
      "learning_rate": 2.6005917159763313e-05,
      "loss": 0.0807,
      "step": 900
    },
    {
      "epoch": 0.56,
      "learning_rate": 2.5784023668639056e-05,
      "loss": 0.0535,
      "step": 950
    },
    {
      "epoch": 0.59,
      "learning_rate": 2.5562130177514795e-05,
      "loss": 0.0765,
      "step": 1000
    },
    {
      "epoch": 0.59,
      "eval_accuracy": 0.9750497955760562,
      "eval_loss": 0.0623435452580452,
      "eval_runtime": 109.6354,
      "eval_samples_per_second": 87.007,
      "eval_steps_per_second": 10.882,
      "step": 1000
    },
    {
      "epoch": 0.62,
      "learning_rate": 2.5340236686390533e-05,
      "loss": 0.0574,
      "step": 1050
    },
    {
      "epoch": 0.65,
      "learning_rate": 2.5118343195266272e-05,
      "loss": 0.0608,
      "step": 1100
    },
    {
      "epoch": 0.68,
      "learning_rate": 2.489644970414201e-05,
      "loss": 0.0546,
      "step": 1150
    },
    {
      "epoch": 0.71,
      "learning_rate": 2.467455621301775e-05,
      "loss": 0.0509,
      "step": 1200
    },
    {
      "epoch": 0.74,
      "learning_rate": 2.4452662721893492e-05,
      "loss": 0.0509,
      "step": 1250
    },
    {
      "epoch": 0.77,
      "learning_rate": 2.423076923076923e-05,
      "loss": 0.0471,
      "step": 1300
    },
    {
      "epoch": 0.8,
      "learning_rate": 2.4008875739644973e-05,
      "loss": 0.0326,
      "step": 1350
    },
    {
      "epoch": 0.83,
      "learning_rate": 2.3786982248520712e-05,
      "loss": 0.0568,
      "step": 1400
    },
    {
      "epoch": 0.86,
      "learning_rate": 2.3565088757396448e-05,
      "loss": 0.0289,
      "step": 1450
    },
    {
      "epoch": 0.89,
      "learning_rate": 2.334319526627219e-05,
      "loss": 0.0479,
      "step": 1500
    },
    {
      "epoch": 0.89,
      "eval_accuracy": 0.9827025893699549,
      "eval_loss": 0.045277200639247894,
      "eval_runtime": 110.2271,
      "eval_samples_per_second": 86.54,
      "eval_steps_per_second": 10.823,
      "step": 1500
    },
    {
      "epoch": 0.92,
      "learning_rate": 2.312130177514793e-05,
      "loss": 0.055,
      "step": 1550
    },
    {
      "epoch": 0.95,
      "learning_rate": 2.289940828402367e-05,
      "loss": 0.0402,
      "step": 1600
    },
    {
      "epoch": 0.98,
      "learning_rate": 2.267751479289941e-05,
      "loss": 0.0546,
      "step": 1650
    },
    {
      "epoch": 1.01,
      "learning_rate": 2.245562130177515e-05,
      "loss": 0.0377,
      "step": 1700
    },
    {
      "epoch": 1.04,
      "learning_rate": 2.2238165680473372e-05,
      "loss": 0.0164,
      "step": 1750
    },
    {
      "epoch": 1.07,
      "learning_rate": 2.2016272189349114e-05,
      "loss": 0.0081,
      "step": 1800
    },
    {
      "epoch": 1.09,
      "learning_rate": 2.1794378698224853e-05,
      "loss": 0.0097,
      "step": 1850
    },
    {
      "epoch": 1.12,
      "learning_rate": 2.1572485207100592e-05,
      "loss": 0.0139,
      "step": 1900
    },
    {
      "epoch": 1.15,
      "learning_rate": 2.135059171597633e-05,
      "loss": 0.0075,
      "step": 1950
    },
    {
      "epoch": 1.18,
      "learning_rate": 2.112869822485207e-05,
      "loss": 0.0199,
      "step": 2000
    },
    {
      "epoch": 1.18,
      "eval_accuracy": 0.985742740329175,
      "eval_loss": 0.05243392661213875,
      "eval_runtime": 109.2241,
      "eval_samples_per_second": 87.334,
      "eval_steps_per_second": 10.923,
      "step": 2000
    },
    {
      "epoch": 1.21,
      "learning_rate": 2.0906804733727812e-05,
      "loss": 0.0079,
      "step": 2050
    },
    {
      "epoch": 1.24,
      "learning_rate": 2.068491124260355e-05,
      "loss": 0.0145,
      "step": 2100
    },
    {
      "epoch": 1.27,
      "learning_rate": 2.0463017751479293e-05,
      "loss": 0.0166,
      "step": 2150
    },
    {
      "epoch": 1.3,
      "learning_rate": 2.0241124260355032e-05,
      "loss": 0.0101,
      "step": 2200
    },
    {
      "epoch": 1.33,
      "learning_rate": 2.0019230769230767e-05,
      "loss": 0.0079,
      "step": 2250
    },
    {
      "epoch": 1.36,
      "learning_rate": 1.979733727810651e-05,
      "loss": 0.0075,
      "step": 2300
    },
    {
      "epoch": 1.39,
      "learning_rate": 1.9575443786982248e-05,
      "loss": 0.0176,
      "step": 2350
    },
    {
      "epoch": 1.42,
      "learning_rate": 1.935355029585799e-05,
      "loss": 0.0107,
      "step": 2400
    },
    {
      "epoch": 1.45,
      "learning_rate": 1.913165680473373e-05,
      "loss": 0.0213,
      "step": 2450
    },
    {
      "epoch": 1.48,
      "learning_rate": 1.8909763313609468e-05,
      "loss": 0.0114,
      "step": 2500
    },
    {
      "epoch": 1.48,
      "eval_accuracy": 0.9860572387042666,
      "eval_loss": 0.048441529273986816,
      "eval_runtime": 108.7938,
      "eval_samples_per_second": 87.68,
      "eval_steps_per_second": 10.966,
      "step": 2500
    },
    {
      "epoch": 1.51,
      "learning_rate": 1.8687869822485207e-05,
      "loss": 0.0158,
      "step": 2550
    },
    {
      "epoch": 1.54,
      "learning_rate": 1.8465976331360946e-05,
      "loss": 0.0105,
      "step": 2600
    },
    {
      "epoch": 1.57,
      "learning_rate": 1.8244082840236688e-05,
      "loss": 0.0078,
      "step": 2650
    },
    {
      "epoch": 1.6,
      "learning_rate": 1.8022189349112427e-05,
      "loss": 0.0131,
      "step": 2700
    },
    {
      "epoch": 1.63,
      "learning_rate": 1.7800295857988166e-05,
      "loss": 0.0063,
      "step": 2750
    },
    {
      "epoch": 1.66,
      "learning_rate": 1.7578402366863908e-05,
      "loss": 0.0064,
      "step": 2800
    },
    {
      "epoch": 1.69,
      "learning_rate": 1.7356508875739643e-05,
      "loss": 0.0077,
      "step": 2850
    },
    {
      "epoch": 1.72,
      "learning_rate": 1.7134615384615386e-05,
      "loss": 0.0063,
      "step": 2900
    },
    {
      "epoch": 1.75,
      "learning_rate": 1.6912721893491125e-05,
      "loss": 0.0098,
      "step": 2950
    },
    {
      "epoch": 1.78,
      "learning_rate": 1.6690828402366863e-05,
      "loss": 0.008,
      "step": 3000
    },
    {
      "epoch": 1.78,
      "eval_accuracy": 0.9852185763706888,
      "eval_loss": 0.056569721549749374,
      "eval_runtime": 108.3934,
      "eval_samples_per_second": 88.004,
      "eval_steps_per_second": 11.006,
      "step": 3000
    },
    {
      "epoch": 1.8,
      "learning_rate": 1.6468934911242606e-05,
      "loss": 0.0133,
      "step": 3050
    },
    {
      "epoch": 1.83,
      "learning_rate": 1.6247041420118344e-05,
      "loss": 0.0134,
      "step": 3100
    },
    {
      "epoch": 1.86,
      "learning_rate": 1.6025147928994083e-05,
      "loss": 0.0067,
      "step": 3150
    },
    {
      "epoch": 1.89,
      "learning_rate": 1.5803254437869822e-05,
      "loss": 0.009,
      "step": 3200
    },
    {
      "epoch": 1.92,
      "learning_rate": 1.558136094674556e-05,
      "loss": 0.0132,
      "step": 3250
    },
    {
      "epoch": 1.95,
      "learning_rate": 1.5359467455621303e-05,
      "loss": 0.0131,
      "step": 3300
    },
    {
      "epoch": 1.98,
      "learning_rate": 1.5137573964497042e-05,
      "loss": 0.0071,
      "step": 3350
    },
    {
      "epoch": 2.01,
      "learning_rate": 1.4915680473372781e-05,
      "loss": 0.0115,
      "step": 3400
    },
    {
      "epoch": 2.04,
      "learning_rate": 1.4693786982248521e-05,
      "loss": 0.0017,
      "step": 3450
    },
    {
      "epoch": 2.07,
      "learning_rate": 1.4471893491124262e-05,
      "loss": 0.0051,
      "step": 3500
    },
    {
      "epoch": 2.07,
      "eval_accuracy": 0.9874200649963308,
      "eval_loss": 0.051312096416950226,
      "eval_runtime": 108.8914,
      "eval_samples_per_second": 87.601,
      "eval_steps_per_second": 10.956,
      "step": 3500
    },
    {
      "epoch": 2.1,
      "learning_rate": 1.4249999999999999e-05,
      "loss": 0.0023,
      "step": 3550
    },
    {
      "epoch": 2.13,
      "learning_rate": 1.402810650887574e-05,
      "loss": 0.0037,
      "step": 3600
    },
    {
      "epoch": 2.16,
      "learning_rate": 1.380621301775148e-05,
      "loss": 0.001,
      "step": 3650
    },
    {
      "epoch": 2.19,
      "learning_rate": 1.3584319526627219e-05,
      "loss": 0.0037,
      "step": 3700
    },
    {
      "epoch": 2.22,
      "learning_rate": 1.336242603550296e-05,
      "loss": 0.0026,
      "step": 3750
    },
    {
      "epoch": 2.25,
      "learning_rate": 1.3140532544378698e-05,
      "loss": 0.0012,
      "step": 3800
    },
    {
      "epoch": 2.28,
      "learning_rate": 1.2918639053254437e-05,
      "loss": 0.0022,
      "step": 3850
    },
    {
      "epoch": 2.31,
      "learning_rate": 1.2696745562130178e-05,
      "loss": 0.0006,
      "step": 3900
    },
    {
      "epoch": 2.34,
      "learning_rate": 1.2474852071005918e-05,
      "loss": 0.0005,
      "step": 3950
    },
    {
      "epoch": 2.37,
      "learning_rate": 1.2252958579881657e-05,
      "loss": 0.0008,
      "step": 4000
    },
    {
      "epoch": 2.37,
      "eval_accuracy": 0.9874200649963308,
      "eval_loss": 0.061718400567770004,
      "eval_runtime": 108.6858,
      "eval_samples_per_second": 87.767,
      "eval_steps_per_second": 10.977,
      "step": 4000
    },
    {
      "epoch": 2.4,
      "learning_rate": 1.2031065088757396e-05,
      "loss": 0.005,
      "step": 4050
    },
    {
      "epoch": 2.43,
      "learning_rate": 1.1809171597633137e-05,
      "loss": 0.0005,
      "step": 4100
    },
    {
      "epoch": 2.46,
      "learning_rate": 1.1587278106508877e-05,
      "loss": 0.0019,
      "step": 4150
    },
    {
      "epoch": 2.49,
      "learning_rate": 1.1365384615384616e-05,
      "loss": 0.0015,
      "step": 4200
    },
    {
      "epoch": 2.51,
      "learning_rate": 1.1143491124260355e-05,
      "loss": 0.0044,
      "step": 4250
    },
    {
      "epoch": 2.54,
      "learning_rate": 1.0921597633136095e-05,
      "loss": 0.0005,
      "step": 4300
    },
    {
      "epoch": 2.57,
      "learning_rate": 1.0699704142011834e-05,
      "loss": 0.0036,
      "step": 4350
    },
    {
      "epoch": 2.6,
      "learning_rate": 1.0477810650887575e-05,
      "loss": 0.0009,
      "step": 4400
    },
    {
      "epoch": 2.63,
      "learning_rate": 1.0255917159763315e-05,
      "loss": 0.0004,
      "step": 4450
    },
    {
      "epoch": 2.66,
      "learning_rate": 1.0034023668639052e-05,
      "loss": 0.0021,
      "step": 4500
    },
    {
      "epoch": 2.66,
      "eval_accuracy": 0.9870007338295419,
      "eval_loss": 0.06636156141757965,
      "eval_runtime": 108.716,
      "eval_samples_per_second": 87.742,
      "eval_steps_per_second": 10.974,
      "step": 4500
    },
    {
      "epoch": 2.69,
      "learning_rate": 9.812130177514793e-06,
      "loss": 0.0022,
      "step": 4550
    },
    {
      "epoch": 2.72,
      "learning_rate": 9.590236686390533e-06,
      "loss": 0.0043,
      "step": 4600
    },
    {
      "epoch": 2.75,
      "learning_rate": 9.368343195266272e-06,
      "loss": 0.0047,
      "step": 4650
    },
    {
      "epoch": 2.78,
      "learning_rate": 9.146449704142013e-06,
      "loss": 0.0008,
      "step": 4700
    },
    {
      "epoch": 2.81,
      "learning_rate": 8.924556213017752e-06,
      "loss": 0.0008,
      "step": 4750
    },
    {
      "epoch": 2.84,
      "learning_rate": 8.70266272189349e-06,
      "loss": 0.0022,
      "step": 4800
    },
    {
      "epoch": 2.87,
      "learning_rate": 8.480769230769231e-06,
      "loss": 0.0015,
      "step": 4850
    },
    {
      "epoch": 2.9,
      "learning_rate": 8.258875739644972e-06,
      "loss": 0.0004,
      "step": 4900
    },
    {
      "epoch": 2.93,
      "learning_rate": 8.036982248520709e-06,
      "loss": 0.0071,
      "step": 4950
    },
    {
      "epoch": 2.96,
      "learning_rate": 7.81508875739645e-06,
      "loss": 0.0005,
      "step": 5000
    },
    {
      "epoch": 2.96,
      "eval_accuracy": 0.9872103994129363,
      "eval_loss": 0.06394314020872116,
      "eval_runtime": 108.7628,
      "eval_samples_per_second": 87.705,
      "eval_steps_per_second": 10.969,
      "step": 5000
    },
    {
      "epoch": 2.99,
      "learning_rate": 7.59319526627219e-06,
      "loss": 0.0006,
      "step": 5050
    },
    {
      "epoch": 3.02,
      "learning_rate": 7.3713017751479295e-06,
      "loss": 0.0004,
      "step": 5100
    },
    {
      "epoch": 3.05,
      "learning_rate": 7.149408284023668e-06,
      "loss": 0.0006,
      "step": 5150
    },
    {
      "epoch": 3.08,
      "learning_rate": 6.9319526627218935e-06,
      "loss": 0.0106,
      "step": 5200
    },
    {
      "epoch": 3.11,
      "learning_rate": 6.710059171597633e-06,
      "loss": 0.0029,
      "step": 5250
    },
    {
      "epoch": 3.14,
      "learning_rate": 6.488165680473374e-06,
      "loss": 0.001,
      "step": 5300
    },
    {
      "epoch": 3.17,
      "learning_rate": 6.2662721893491125e-06,
      "loss": 0.0097,
      "step": 5350
    },
    {
      "epoch": 3.2,
      "learning_rate": 6.044378698224852e-06,
      "loss": 0.0037,
      "step": 5400
    },
    {
      "epoch": 3.22,
      "learning_rate": 5.822485207100592e-06,
      "loss": 0.0016,
      "step": 5450
    },
    {
      "epoch": 3.25,
      "learning_rate": 5.600591715976332e-06,
      "loss": 0.001,
      "step": 5500
    },
    {
      "epoch": 3.25,
      "eval_accuracy": 0.987944228954817,
      "eval_loss": 0.06436183303594589,
      "eval_runtime": 108.9405,
      "eval_samples_per_second": 87.562,
      "eval_steps_per_second": 10.951,
      "step": 5500
    },
    {
      "epoch": 3.28,
      "learning_rate": 5.3786982248520704e-06,
      "loss": 0.0006,
      "step": 5550
    },
    {
      "epoch": 3.31,
      "learning_rate": 5.156804733727811e-06,
      "loss": 0.0004,
      "step": 5600
    },
    {
      "epoch": 3.34,
      "learning_rate": 4.934911242603551e-06,
      "loss": 0.0002,
      "step": 5650
    },
    {
      "epoch": 3.37,
      "learning_rate": 4.7130177514792895e-06,
      "loss": 0.0003,
      "step": 5700
    },
    {
      "epoch": 3.4,
      "learning_rate": 4.49112426035503e-06,
      "loss": 0.0021,
      "step": 5750
    },
    {
      "epoch": 3.43,
      "learning_rate": 4.269230769230769e-06,
      "loss": 0.0003,
      "step": 5800
    },
    {
      "epoch": 3.46,
      "learning_rate": 4.047337278106509e-06,
      "loss": 0.0005,
      "step": 5850
    },
    {
      "epoch": 3.49,
      "learning_rate": 3.825443786982248e-06,
      "loss": 0.0003,
      "step": 5900
    },
    {
      "epoch": 3.52,
      "learning_rate": 3.6035502958579884e-06,
      "loss": 0.0003,
      "step": 5950
    },
    {
      "epoch": 3.55,
      "learning_rate": 3.3816568047337276e-06,
      "loss": 0.0004,
      "step": 6000
    },
    {
      "epoch": 3.55,
      "eval_accuracy": 0.9875248977880281,
      "eval_loss": 0.06724812090396881,
      "eval_runtime": 108.7744,
      "eval_samples_per_second": 87.695,
      "eval_steps_per_second": 10.968,
      "step": 6000
    },
    {
      "epoch": 3.58,
      "learning_rate": 3.1597633136094673e-06,
      "loss": 0.0018,
      "step": 6050
    },
    {
      "epoch": 3.61,
      "learning_rate": 2.9378698224852074e-06,
      "loss": 0.0003,
      "step": 6100
    },
    {
      "epoch": 3.64,
      "learning_rate": 2.7159763313609467e-06,
      "loss": 0.0016,
      "step": 6150
    },
    {
      "epoch": 3.67,
      "learning_rate": 2.4940828402366864e-06,
      "loss": 0.0009,
      "step": 6200
    },
    {
      "epoch": 3.7,
      "learning_rate": 2.272189349112426e-06,
      "loss": 0.0015,
      "step": 6250
    },
    {
      "epoch": 3.73,
      "learning_rate": 2.0502958579881658e-06,
      "loss": 0.0004,
      "step": 6300
    },
    {
      "epoch": 3.76,
      "learning_rate": 1.8284023668639055e-06,
      "loss": 0.0003,
      "step": 6350
    },
    {
      "epoch": 3.79,
      "learning_rate": 1.606508875739645e-06,
      "loss": 0.0017,
      "step": 6400
    },
    {
      "epoch": 3.82,
      "learning_rate": 1.3846153846153846e-06,
      "loss": 0.0004,
      "step": 6450
    },
    {
      "epoch": 3.85,
      "learning_rate": 1.1627218934911243e-06,
      "loss": 0.0003,
      "step": 6500
    },
    {
      "epoch": 3.85,
      "eval_accuracy": 0.987944228954817,
      "eval_loss": 0.06903128325939178,
      "eval_runtime": 108.9231,
      "eval_samples_per_second": 87.576,
      "eval_steps_per_second": 10.953,
      "step": 6500
    },
    {
      "epoch": 3.88,
      "learning_rate": 9.40828402366864e-07,
      "loss": 0.0016,
      "step": 6550
    },
    {
      "epoch": 3.91,
      "learning_rate": 7.189349112426036e-07,
      "loss": 0.0002,
      "step": 6600
    },
    {
      "epoch": 3.93,
      "learning_rate": 4.970414201183432e-07,
      "loss": 0.0003,
      "step": 6650
    },
    {
      "epoch": 3.96,
      "learning_rate": 2.751479289940828e-07,
      "loss": 0.0016,
      "step": 6700
    },
    {
      "epoch": 3.99,
      "learning_rate": 5.3254437869822486e-08,
      "loss": 0.0013,
      "step": 6750
    },
    {
      "epoch": 4.0,
      "step": 6760,
      "total_flos": 1.6754388090591707e+19,
      "train_loss": 0.017405257951027365,
      "train_runtime": 6361.011,
      "train_samples_per_second": 33.99,
      "train_steps_per_second": 1.063
    }
  ],
  "max_steps": 6760,
  "num_train_epochs": 4,
  "total_flos": 1.6754388090591707e+19,
  "trial_name": null,
  "trial_params": null
}