{
  "best_metric": 0.8381742738589212,
  "best_model_checkpoint": "videomae-timesformer-surf-analytics\\checkpoint-925",
  "epoch": 4.195675675675676,
  "eval_steps": 500,
  "global_step": 925,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.010810810810810811,
      "grad_norm": 11.895417213439941,
      "learning_rate": 5.376344086021506e-06,
      "loss": 1.5009,
      "step": 10
    },
    {
      "epoch": 0.021621621621621623,
      "grad_norm": 11.518338203430176,
      "learning_rate": 1.0752688172043012e-05,
      "loss": 1.1077,
      "step": 20
    },
    {
      "epoch": 0.032432432432432434,
      "grad_norm": 10.138296127319336,
      "learning_rate": 1.6129032258064517e-05,
      "loss": 1.3124,
      "step": 30
    },
    {
      "epoch": 0.043243243243243246,
      "grad_norm": 9.477767944335938,
      "learning_rate": 2.1505376344086024e-05,
      "loss": 1.0199,
      "step": 40
    },
    {
      "epoch": 0.05405405405405406,
      "grad_norm": 11.046083450317383,
      "learning_rate": 2.6881720430107527e-05,
      "loss": 1.038,
      "step": 50
    },
    {
      "epoch": 0.06486486486486487,
      "grad_norm": 12.39089584350586,
      "learning_rate": 3.2258064516129034e-05,
      "loss": 0.9406,
      "step": 60
    },
    {
      "epoch": 0.07567567567567568,
      "grad_norm": 12.075498580932617,
      "learning_rate": 3.763440860215054e-05,
      "loss": 1.0054,
      "step": 70
    },
    {
      "epoch": 0.08648648648648649,
      "grad_norm": 9.824885368347168,
      "learning_rate": 4.301075268817205e-05,
      "loss": 0.7344,
      "step": 80
    },
    {
      "epoch": 0.0972972972972973,
      "grad_norm": 10.267470359802246,
      "learning_rate": 4.8387096774193554e-05,
      "loss": 0.6901,
      "step": 90
    },
    {
      "epoch": 0.10810810810810811,
      "grad_norm": 4.5673370361328125,
      "learning_rate": 4.957932692307692e-05,
      "loss": 0.4999,
      "step": 100
    },
    {
      "epoch": 0.11891891891891893,
      "grad_norm": 7.499106407165527,
      "learning_rate": 4.897836538461539e-05,
      "loss": 0.7291,
      "step": 110
    },
    {
      "epoch": 0.12972972972972974,
      "grad_norm": 13.209514617919922,
      "learning_rate": 4.8377403846153846e-05,
      "loss": 0.4908,
      "step": 120
    },
    {
      "epoch": 0.14054054054054055,
      "grad_norm": 27.258346557617188,
      "learning_rate": 4.777644230769231e-05,
      "loss": 0.6281,
      "step": 130
    },
    {
      "epoch": 0.15135135135135136,
      "grad_norm": 23.13751220703125,
      "learning_rate": 4.717548076923077e-05,
      "loss": 0.7854,
      "step": 140
    },
    {
      "epoch": 0.16216216216216217,
      "grad_norm": 21.831802368164062,
      "learning_rate": 4.657451923076923e-05,
      "loss": 0.6653,
      "step": 150
    },
    {
      "epoch": 0.17297297297297298,
      "grad_norm": 3.0441360473632812,
      "learning_rate": 4.5973557692307694e-05,
      "loss": 1.2533,
      "step": 160
    },
    {
      "epoch": 0.1837837837837838,
      "grad_norm": 10.554399490356445,
      "learning_rate": 4.5372596153846156e-05,
      "loss": 0.546,
      "step": 170
    },
    {
      "epoch": 0.1945945945945946,
      "grad_norm": 14.284193992614746,
      "learning_rate": 4.477163461538462e-05,
      "loss": 0.4491,
      "step": 180
    },
    {
      "epoch": 0.20108108108108108,
      "eval_accuracy": 0.7385892116182573,
      "eval_loss": 0.6939307451248169,
      "eval_runtime": 407.9813,
      "eval_samples_per_second": 0.591,
      "eval_steps_per_second": 0.15,
      "step": 186
    },
    {
      "epoch": 1.0043243243243243,
      "grad_norm": 11.024110794067383,
      "learning_rate": 4.417067307692308e-05,
      "loss": 0.4377,
      "step": 190
    },
    {
      "epoch": 1.0151351351351352,
      "grad_norm": 4.518445014953613,
      "learning_rate": 4.3569711538461535e-05,
      "loss": 0.0803,
      "step": 200
    },
    {
      "epoch": 1.025945945945946,
      "grad_norm": 0.42581477761268616,
      "learning_rate": 4.2968750000000004e-05,
      "loss": 0.3532,
      "step": 210
    },
    {
      "epoch": 1.0367567567567568,
      "grad_norm": 13.55870246887207,
      "learning_rate": 4.2367788461538466e-05,
      "loss": 0.7917,
      "step": 220
    },
    {
      "epoch": 1.0475675675675675,
      "grad_norm": 13.94046401977539,
      "learning_rate": 4.176682692307692e-05,
      "loss": 0.1946,
      "step": 230
    },
    {
      "epoch": 1.0583783783783784,
      "grad_norm": 24.760360717773438,
      "learning_rate": 4.116586538461539e-05,
      "loss": 0.9298,
      "step": 240
    },
    {
      "epoch": 1.0691891891891891,
      "grad_norm": 7.651761531829834,
      "learning_rate": 4.0564903846153846e-05,
      "loss": 0.7205,
      "step": 250
    },
    {
      "epoch": 1.08,
      "grad_norm": 16.21826171875,
      "learning_rate": 3.996394230769231e-05,
      "loss": 0.6958,
      "step": 260
    },
    {
      "epoch": 1.0908108108108108,
      "grad_norm": 16.531909942626953,
      "learning_rate": 3.936298076923077e-05,
      "loss": 0.2937,
      "step": 270
    },
    {
      "epoch": 1.1016216216216217,
      "grad_norm": 16.31216049194336,
      "learning_rate": 3.876201923076923e-05,
      "loss": 0.6104,
      "step": 280
    },
    {
      "epoch": 1.1124324324324324,
      "grad_norm": 8.48189640045166,
      "learning_rate": 3.8161057692307694e-05,
      "loss": 0.4254,
      "step": 290
    },
    {
      "epoch": 1.1232432432432433,
      "grad_norm": 29.133272171020508,
      "learning_rate": 3.7560096153846156e-05,
      "loss": 0.262,
      "step": 300
    },
    {
      "epoch": 1.134054054054054,
      "grad_norm": 20.123014450073242,
      "learning_rate": 3.695913461538462e-05,
      "loss": 0.4921,
      "step": 310
    },
    {
      "epoch": 1.144864864864865,
      "grad_norm": 9.895979881286621,
      "learning_rate": 3.635817307692308e-05,
      "loss": 0.2059,
      "step": 320
    },
    {
      "epoch": 1.1556756756756756,
      "grad_norm": 11.974223136901855,
      "learning_rate": 3.5757211538461535e-05,
      "loss": 0.2591,
      "step": 330
    },
    {
      "epoch": 1.1664864864864866,
      "grad_norm": 0.1094137504696846,
      "learning_rate": 3.5156250000000004e-05,
      "loss": 0.437,
      "step": 340
    },
    {
      "epoch": 1.1772972972972973,
      "grad_norm": 0.12011051177978516,
      "learning_rate": 3.4555288461538466e-05,
      "loss": 0.5299,
      "step": 350
    },
    {
      "epoch": 1.1881081081081082,
      "grad_norm": 0.32698747515678406,
      "learning_rate": 3.395432692307692e-05,
      "loss": 0.1667,
      "step": 360
    },
    {
      "epoch": 1.1989189189189189,
      "grad_norm": 2.423952102661133,
      "learning_rate": 3.335336538461539e-05,
      "loss": 0.5627,
      "step": 370
    },
    {
      "epoch": 1.201081081081081,
      "eval_accuracy": 0.7759336099585062,
      "eval_loss": 0.6805610060691833,
      "eval_runtime": 399.3517,
      "eval_samples_per_second": 0.603,
      "eval_steps_per_second": 0.153,
      "step": 372
    },
    {
      "epoch": 2.0086486486486486,
      "grad_norm": 0.037202008068561554,
      "learning_rate": 3.2752403846153846e-05,
      "loss": 0.0415,
      "step": 380
    },
    {
      "epoch": 2.0194594594594593,
      "grad_norm": 23.98035430908203,
      "learning_rate": 3.215144230769231e-05,
      "loss": 0.3379,
      "step": 390
    },
    {
      "epoch": 2.0302702702702704,
      "grad_norm": 31.957252502441406,
      "learning_rate": 3.155048076923077e-05,
      "loss": 0.357,
      "step": 400
    },
    {
      "epoch": 2.041081081081081,
      "grad_norm": 1.1118696928024292,
      "learning_rate": 3.094951923076923e-05,
      "loss": 0.1775,
      "step": 410
    },
    {
      "epoch": 2.051891891891892,
      "grad_norm": 0.05329513177275658,
      "learning_rate": 3.0348557692307694e-05,
      "loss": 0.0175,
      "step": 420
    },
    {
      "epoch": 2.0627027027027025,
      "grad_norm": 32.40390396118164,
      "learning_rate": 2.974759615384616e-05,
      "loss": 0.4569,
      "step": 430
    },
    {
      "epoch": 2.0735135135135137,
      "grad_norm": 0.15056586265563965,
      "learning_rate": 2.9146634615384614e-05,
      "loss": 0.3911,
      "step": 440
    },
    {
      "epoch": 2.0843243243243244,
      "grad_norm": 0.2210923731327057,
      "learning_rate": 2.854567307692308e-05,
      "loss": 0.112,
      "step": 450
    },
    {
      "epoch": 2.095135135135135,
      "grad_norm": 0.9941416382789612,
      "learning_rate": 2.794471153846154e-05,
      "loss": 0.3037,
      "step": 460
    },
    {
      "epoch": 2.1059459459459458,
      "grad_norm": 0.035190433263778687,
      "learning_rate": 2.734375e-05,
      "loss": 0.6371,
      "step": 470
    },
    {
      "epoch": 2.116756756756757,
      "grad_norm": 0.04186534136533737,
      "learning_rate": 2.6742788461538466e-05,
      "loss": 0.4244,
      "step": 480
    },
    {
      "epoch": 2.1275675675675676,
      "grad_norm": 0.5409103631973267,
      "learning_rate": 2.6141826923076925e-05,
      "loss": 0.311,
      "step": 490
    },
    {
      "epoch": 2.1383783783783783,
      "grad_norm": 0.04897398501634598,
      "learning_rate": 2.5540865384615387e-05,
      "loss": 0.0555,
      "step": 500
    },
    {
      "epoch": 2.149189189189189,
      "grad_norm": 16.756502151489258,
      "learning_rate": 2.493990384615385e-05,
      "loss": 0.264,
      "step": 510
    },
    {
      "epoch": 2.16,
      "grad_norm": 1.5812554359436035,
      "learning_rate": 2.4338942307692307e-05,
      "loss": 0.1869,
      "step": 520
    },
    {
      "epoch": 2.170810810810811,
      "grad_norm": 0.20836937427520752,
      "learning_rate": 2.373798076923077e-05,
      "loss": 0.2804,
      "step": 530
    },
    {
      "epoch": 2.1816216216216215,
      "grad_norm": 0.14942559599876404,
      "learning_rate": 2.313701923076923e-05,
      "loss": 0.2552,
      "step": 540
    },
    {
      "epoch": 2.1924324324324322,
      "grad_norm": 0.09908430278301239,
      "learning_rate": 2.2536057692307694e-05,
      "loss": 0.5189,
      "step": 550
    },
    {
      "epoch": 2.2010810810810812,
      "eval_accuracy": 0.8174273858921162,
      "eval_loss": 0.651021420955658,
      "eval_runtime": 406.013,
      "eval_samples_per_second": 0.594,
      "eval_steps_per_second": 0.15,
      "step": 558
    },
    {
      "epoch": 3.002162162162162,
      "grad_norm": 15.29819107055664,
      "learning_rate": 2.1935096153846156e-05,
      "loss": 0.7952,
      "step": 560
    },
    {
      "epoch": 3.012972972972973,
      "grad_norm": 7.53931188583374,
      "learning_rate": 2.1334134615384614e-05,
      "loss": 0.0482,
      "step": 570
    },
    {
      "epoch": 3.023783783783784,
      "grad_norm": 0.01849238947033882,
      "learning_rate": 2.073317307692308e-05,
      "loss": 0.0809,
      "step": 580
    },
    {
      "epoch": 3.0345945945945947,
      "grad_norm": 28.94001007080078,
      "learning_rate": 2.0132211538461542e-05,
      "loss": 0.0708,
      "step": 590
    },
    {
      "epoch": 3.0454054054054054,
      "grad_norm": 0.031998779624700546,
      "learning_rate": 1.953125e-05,
      "loss": 0.0426,
      "step": 600
    },
    {
      "epoch": 3.056216216216216,
      "grad_norm": 6.872684001922607,
      "learning_rate": 1.8930288461538462e-05,
      "loss": 0.1062,
      "step": 610
    },
    {
      "epoch": 3.0670270270270272,
      "grad_norm": 0.02178012765944004,
      "learning_rate": 1.832932692307692e-05,
      "loss": 0.1908,
      "step": 620
    },
    {
      "epoch": 3.077837837837838,
      "grad_norm": 12.06849193572998,
      "learning_rate": 1.7728365384615387e-05,
      "loss": 0.0114,
      "step": 630
    },
    {
      "epoch": 3.0886486486486486,
      "grad_norm": 0.18928392231464386,
      "learning_rate": 1.712740384615385e-05,
      "loss": 0.1763,
      "step": 640
    },
    {
      "epoch": 3.0994594594594593,
      "grad_norm": 10.532954216003418,
      "learning_rate": 1.6526442307692307e-05,
      "loss": 0.3634,
      "step": 650
    },
    {
      "epoch": 3.1102702702702705,
      "grad_norm": 18.90543556213379,
      "learning_rate": 1.592548076923077e-05,
      "loss": 0.728,
      "step": 660
    },
    {
      "epoch": 3.121081081081081,
      "grad_norm": 0.35980573296546936,
      "learning_rate": 1.532451923076923e-05,
      "loss": 0.144,
      "step": 670
    },
    {
      "epoch": 3.131891891891892,
      "grad_norm": 0.03495261073112488,
      "learning_rate": 1.4723557692307693e-05,
      "loss": 0.0163,
      "step": 680
    },
    {
      "epoch": 3.1427027027027026,
      "grad_norm": 24.22405242919922,
      "learning_rate": 1.4122596153846154e-05,
      "loss": 0.1584,
      "step": 690
    },
    {
      "epoch": 3.1535135135135137,
      "grad_norm": 0.026546325534582138,
      "learning_rate": 1.3521634615384616e-05,
      "loss": 0.3465,
      "step": 700
    },
    {
      "epoch": 3.1643243243243244,
      "grad_norm": 0.43370339274406433,
      "learning_rate": 1.292067307692308e-05,
      "loss": 0.3739,
      "step": 710
    },
    {
      "epoch": 3.175135135135135,
      "grad_norm": 0.02699950709939003,
      "learning_rate": 1.231971153846154e-05,
      "loss": 0.1644,
      "step": 720
    },
    {
      "epoch": 3.185945945945946,
      "grad_norm": 2.032027006149292,
      "learning_rate": 1.171875e-05,
      "loss": 0.1225,
      "step": 730
    },
    {
      "epoch": 3.1967567567567565,
      "grad_norm": 0.006064319983124733,
      "learning_rate": 1.111778846153846e-05,
      "loss": 0.2503,
      "step": 740
    },
    {
      "epoch": 3.2010810810810812,
      "eval_accuracy": 0.8174273858921162,
      "eval_loss": 0.6731572151184082,
      "eval_runtime": 400.1385,
      "eval_samples_per_second": 0.602,
      "eval_steps_per_second": 0.152,
      "step": 744
    },
    {
      "epoch": 4.006486486486486,
      "grad_norm": 0.4040788412094116,
      "learning_rate": 1.0516826923076924e-05,
      "loss": 0.2375,
      "step": 750
    },
    {
      "epoch": 4.017297297297297,
      "grad_norm": 11.23088264465332,
      "learning_rate": 9.915865384615385e-06,
      "loss": 0.1009,
      "step": 760
    },
    {
      "epoch": 4.028108108108108,
      "grad_norm": 44.4477424621582,
      "learning_rate": 9.314903846153847e-06,
      "loss": 0.1268,
      "step": 770
    },
    {
      "epoch": 4.0389189189189185,
      "grad_norm": 0.01056050043553114,
      "learning_rate": 8.713942307692307e-06,
      "loss": 0.0738,
      "step": 780
    },
    {
      "epoch": 4.04972972972973,
      "grad_norm": 14.111398696899414,
      "learning_rate": 8.112980769230769e-06,
      "loss": 0.2126,
      "step": 790
    },
    {
      "epoch": 4.060540540540541,
      "grad_norm": 0.014713864773511887,
      "learning_rate": 7.512019230769231e-06,
      "loss": 0.0011,
      "step": 800
    },
    {
      "epoch": 4.0713513513513515,
      "grad_norm": 0.010851687751710415,
      "learning_rate": 6.911057692307693e-06,
      "loss": 0.1786,
      "step": 810
    },
    {
      "epoch": 4.082162162162162,
      "grad_norm": 0.050808586180210114,
      "learning_rate": 6.310096153846154e-06,
      "loss": 0.0783,
      "step": 820
    },
    {
      "epoch": 4.092972972972973,
      "grad_norm": 0.037915121763944626,
      "learning_rate": 5.709134615384616e-06,
      "loss": 0.2292,
      "step": 830
    },
    {
      "epoch": 4.103783783783784,
      "grad_norm": 2.1745355129241943,
      "learning_rate": 5.108173076923077e-06,
      "loss": 0.1868,
      "step": 840
    },
    {
      "epoch": 4.114594594594594,
      "grad_norm": 0.020182184875011444,
      "learning_rate": 4.507211538461539e-06,
      "loss": 0.0016,
      "step": 850
    },
    {
      "epoch": 4.125405405405405,
      "grad_norm": 23.419614791870117,
      "learning_rate": 3.90625e-06,
      "loss": 0.0688,
      "step": 860
    },
    {
      "epoch": 4.136216216216217,
      "grad_norm": 0.5133500695228577,
      "learning_rate": 3.3052884615384617e-06,
      "loss": 0.0782,
      "step": 870
    },
    {
      "epoch": 4.147027027027027,
      "grad_norm": 1.0855499505996704,
      "learning_rate": 2.7043269230769233e-06,
      "loss": 0.0486,
      "step": 880
    },
    {
      "epoch": 4.157837837837838,
      "grad_norm": 0.05244865640997887,
      "learning_rate": 2.103365384615385e-06,
      "loss": 0.0217,
      "step": 890
    },
    {
      "epoch": 4.168648648648649,
      "grad_norm": 0.013233544304966927,
      "learning_rate": 1.5024038461538464e-06,
      "loss": 0.2434,
      "step": 900
    },
    {
      "epoch": 4.179459459459459,
      "grad_norm": 12.086763381958008,
      "learning_rate": 9.014423076923077e-07,
      "loss": 0.0501,
      "step": 910
    },
    {
      "epoch": 4.19027027027027,
      "grad_norm": 0.0869203507900238,
      "learning_rate": 3.0048076923076924e-07,
      "loss": 0.0159,
      "step": 920
    },
    {
      "epoch": 4.195675675675676,
      "eval_accuracy": 0.8381742738589212,
      "eval_loss": 0.6192311644554138,
      "eval_runtime": 401.9033,
      "eval_samples_per_second": 0.6,
      "eval_steps_per_second": 0.152,
      "step": 925
    },
    {
      "epoch": 4.195675675675676,
      "step": 925,
      "total_flos": 3.2312722580082524e+18,
      "train_loss": 0.3796664474924674,
      "train_runtime": 18773.5619,
      "train_samples_per_second": 0.197,
      "train_steps_per_second": 0.049
    },
    {
      "epoch": 4.195675675675676,
      "eval_accuracy": 0.9770580296896086,
      "eval_loss": 0.11411414295434952,
      "eval_runtime": 1291.965,
      "eval_samples_per_second": 0.574,
      "eval_steps_per_second": 0.144,
      "step": 925
    },
    {
      "epoch": 4.195675675675676,
      "eval_accuracy": 0.8134831460674158,
      "eval_loss": 0.7141955494880676,
      "eval_runtime": 720.0647,
      "eval_samples_per_second": 0.618,
      "eval_steps_per_second": 0.156,
      "step": 925
    },
    {
      "epoch": 4.195675675675676,
      "eval_accuracy": 0.8381742738589212,
      "eval_loss": 0.6192311644554138,
      "eval_runtime": 391.274,
      "eval_samples_per_second": 0.616,
      "eval_steps_per_second": 0.156,
      "step": 925
    }
  ],
  "logging_steps": 10,
  "max_steps": 925,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 9223372036854775807,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 3.2312722580082524e+18,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}