{
  "best_metric": 0.16163970530033112,
  "best_model_checkpoint": "./fine_tuned_mistral/checkpoint-2502",
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 2502,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.003996802557953637,
      "grad_norm": 187.7821502685547,
      "learning_rate": 0.00019968025579536372,
      "loss": 8.0944,
      "step": 10
    },
    {
      "epoch": 0.007993605115907274,
      "grad_norm": 0.9809202551841736,
      "learning_rate": 0.00019888089528377297,
      "loss": 0.5985,
      "step": 20
    },
    {
      "epoch": 0.011990407673860911,
      "grad_norm": 1.3078651428222656,
      "learning_rate": 0.00019808153477218226,
      "loss": 0.2975,
      "step": 30
    },
    {
      "epoch": 0.01598721023181455,
      "grad_norm": 1.6677987575531006,
      "learning_rate": 0.00019728217426059154,
      "loss": 0.4915,
      "step": 40
    },
    {
      "epoch": 0.019984012789768184,
      "grad_norm": 1.3078293800354004,
      "learning_rate": 0.00019648281374900082,
      "loss": 0.1599,
      "step": 50
    },
    {
      "epoch": 0.023980815347721823,
      "grad_norm": 0.7252454161643982,
      "learning_rate": 0.00019568345323741008,
      "loss": 0.2224,
      "step": 60
    },
    {
      "epoch": 0.027977617905675458,
      "grad_norm": 0.7883257865905762,
      "learning_rate": 0.00019488409272581936,
      "loss": 0.2254,
      "step": 70
    },
    {
      "epoch": 0.0319744204636291,
      "grad_norm": 0.2993629276752472,
      "learning_rate": 0.00019408473221422861,
      "loss": 0.2191,
      "step": 80
    },
    {
      "epoch": 0.03597122302158273,
      "grad_norm": 1.1389100551605225,
      "learning_rate": 0.0001932853717026379,
      "loss": 0.2474,
      "step": 90
    },
    {
      "epoch": 0.03996802557953637,
      "grad_norm": 0.1363106220960617,
      "learning_rate": 0.00019248601119104715,
      "loss": 0.0983,
      "step": 100
    },
    {
      "epoch": 0.04396482813749001,
      "grad_norm": 0.2188768833875656,
      "learning_rate": 0.00019168665067945644,
      "loss": 0.1767,
      "step": 110
    },
    {
      "epoch": 0.047961630695443645,
      "grad_norm": 0.5281928777694702,
      "learning_rate": 0.00019088729016786572,
      "loss": 0.2052,
      "step": 120
    },
    {
      "epoch": 0.05195843325339728,
      "grad_norm": 0.716292679309845,
      "learning_rate": 0.000190087929656275,
      "loss": 0.2613,
      "step": 130
    },
    {
      "epoch": 0.055955235811350916,
      "grad_norm": 0.6606540679931641,
      "learning_rate": 0.00018928856914468426,
      "loss": 0.2156,
      "step": 140
    },
    {
      "epoch": 0.05995203836930456,
      "grad_norm": 0.7895898222923279,
      "learning_rate": 0.00018848920863309354,
      "loss": 0.1875,
      "step": 150
    },
    {
      "epoch": 0.0639488409272582,
      "grad_norm": 1.9056642055511475,
      "learning_rate": 0.00018768984812150282,
      "loss": 0.1542,
      "step": 160
    },
    {
      "epoch": 0.06794564348521183,
      "grad_norm": 0.6912965774536133,
      "learning_rate": 0.00018689048760991208,
      "loss": 0.1603,
      "step": 170
    },
    {
      "epoch": 0.07194244604316546,
      "grad_norm": 0.33341923356056213,
      "learning_rate": 0.00018609112709832136,
      "loss": 0.199,
      "step": 180
    },
    {
      "epoch": 0.0759392486011191,
      "grad_norm": 0.30143648386001587,
      "learning_rate": 0.00018529176658673062,
      "loss": 0.1626,
      "step": 190
    },
    {
      "epoch": 0.07993605115907274,
      "grad_norm": 0.5107619166374207,
      "learning_rate": 0.0001844924060751399,
      "loss": 0.2186,
      "step": 200
    },
    {
      "epoch": 0.08393285371702638,
      "grad_norm": 0.6437661647796631,
      "learning_rate": 0.00018369304556354915,
      "loss": 0.1858,
      "step": 210
    },
    {
      "epoch": 0.08792965627498002,
      "grad_norm": 1.3543767929077148,
      "learning_rate": 0.00018289368505195846,
      "loss": 0.2575,
      "step": 220
    },
    {
      "epoch": 0.09192645883293366,
      "grad_norm": 0.10899212956428528,
      "learning_rate": 0.00018209432454036772,
      "loss": 0.236,
      "step": 230
    },
    {
      "epoch": 0.09592326139088729,
      "grad_norm": 0.8948803544044495,
      "learning_rate": 0.000181294964028777,
      "loss": 0.2171,
      "step": 240
    },
    {
      "epoch": 0.09992006394884093,
      "grad_norm": 0.8165796995162964,
      "learning_rate": 0.00018049560351718626,
      "loss": 0.1654,
      "step": 250
    },
    {
      "epoch": 0.10391686650679456,
      "grad_norm": 0.8842654824256897,
      "learning_rate": 0.00017969624300559554,
      "loss": 0.2257,
      "step": 260
    },
    {
      "epoch": 0.1079136690647482,
      "grad_norm": 0.6311301589012146,
      "learning_rate": 0.0001788968824940048,
      "loss": 0.1598,
      "step": 270
    },
    {
      "epoch": 0.11191047162270183,
      "grad_norm": 0.19081318378448486,
      "learning_rate": 0.00017809752198241408,
      "loss": 0.2578,
      "step": 280
    },
    {
      "epoch": 0.11590727418065548,
      "grad_norm": 0.7785463333129883,
      "learning_rate": 0.00017729816147082333,
      "loss": 0.2017,
      "step": 290
    },
    {
      "epoch": 0.11990407673860912,
      "grad_norm": 0.5954427123069763,
      "learning_rate": 0.00017649880095923262,
      "loss": 0.1798,
      "step": 300
    },
    {
      "epoch": 0.12390087929656275,
      "grad_norm": 0.9389680027961731,
      "learning_rate": 0.0001756994404476419,
      "loss": 0.2168,
      "step": 310
    },
    {
      "epoch": 0.1278976818545164,
      "grad_norm": 0.741294264793396,
      "learning_rate": 0.00017490007993605118,
      "loss": 0.1646,
      "step": 320
    },
    {
      "epoch": 0.13189448441247004,
      "grad_norm": 0.6614203453063965,
      "learning_rate": 0.00017410071942446044,
      "loss": 0.1309,
      "step": 330
    },
    {
      "epoch": 0.13589128697042366,
      "grad_norm": 0.31702834367752075,
      "learning_rate": 0.00017330135891286972,
      "loss": 0.25,
      "step": 340
    },
    {
      "epoch": 0.1398880895283773,
      "grad_norm": 0.9767606854438782,
      "learning_rate": 0.00017250199840127898,
      "loss": 0.223,
      "step": 350
    },
    {
      "epoch": 0.14388489208633093,
      "grad_norm": 0.7476068139076233,
      "learning_rate": 0.00017170263788968826,
      "loss": 0.2379,
      "step": 360
    },
    {
      "epoch": 0.14788169464428458,
      "grad_norm": 0.5389412045478821,
      "learning_rate": 0.00017090327737809751,
      "loss": 0.1281,
      "step": 370
    },
    {
      "epoch": 0.1518784972022382,
      "grad_norm": 0.3029794991016388,
      "learning_rate": 0.0001701039168665068,
      "loss": 0.1842,
      "step": 380
    },
    {
      "epoch": 0.15587529976019185,
      "grad_norm": 0.7127607464790344,
      "learning_rate": 0.00016930455635491608,
      "loss": 0.1654,
      "step": 390
    },
    {
      "epoch": 0.15987210231814547,
      "grad_norm": 0.6440016031265259,
      "learning_rate": 0.00016850519584332536,
      "loss": 0.2095,
      "step": 400
    },
    {
      "epoch": 0.16386890487609912,
      "grad_norm": 0.7137667536735535,
      "learning_rate": 0.00016770583533173462,
      "loss": 0.2133,
      "step": 410
    },
    {
      "epoch": 0.16786570743405277,
      "grad_norm": 0.5804216861724854,
      "learning_rate": 0.0001669064748201439,
      "loss": 0.1748,
      "step": 420
    },
    {
      "epoch": 0.1718625099920064,
      "grad_norm": 0.8831233978271484,
      "learning_rate": 0.00016610711430855316,
      "loss": 0.2371,
      "step": 430
    },
    {
      "epoch": 0.17585931254996004,
      "grad_norm": 0.47154247760772705,
      "learning_rate": 0.00016530775379696244,
      "loss": 0.1595,
      "step": 440
    },
    {
      "epoch": 0.17985611510791366,
      "grad_norm": 0.5801379680633545,
      "learning_rate": 0.00016450839328537172,
      "loss": 0.2059,
      "step": 450
    },
    {
      "epoch": 0.1838529176658673,
      "grad_norm": 0.6498193144798279,
      "learning_rate": 0.00016370903277378098,
      "loss": 0.1884,
      "step": 460
    },
    {
      "epoch": 0.18784972022382093,
      "grad_norm": 0.6178190112113953,
      "learning_rate": 0.00016290967226219026,
      "loss": 0.1875,
      "step": 470
    },
    {
      "epoch": 0.19184652278177458,
      "grad_norm": 0.4150198698043823,
      "learning_rate": 0.00016211031175059952,
      "loss": 0.163,
      "step": 480
    },
    {
      "epoch": 0.19584332533972823,
      "grad_norm": 0.6352556943893433,
      "learning_rate": 0.00016131095123900883,
      "loss": 0.2324,
      "step": 490
    },
    {
      "epoch": 0.19984012789768185,
      "grad_norm": 0.6494005918502808,
      "learning_rate": 0.00016051159072741808,
      "loss": 0.2352,
      "step": 500
    },
    {
      "epoch": 0.2038369304556355,
      "grad_norm": 0.41544780135154724,
      "learning_rate": 0.00015971223021582736,
      "loss": 0.132,
      "step": 510
    },
    {
      "epoch": 0.20783373301358912,
      "grad_norm": 0.5191593170166016,
      "learning_rate": 0.00015891286970423662,
      "loss": 0.1876,
      "step": 520
    },
    {
      "epoch": 0.21183053557154277,
      "grad_norm": 0.8121716976165771,
      "learning_rate": 0.0001581135091926459,
      "loss": 0.0978,
      "step": 530
    },
    {
      "epoch": 0.2158273381294964,
      "grad_norm": 0.8463426828384399,
      "learning_rate": 0.00015731414868105516,
      "loss": 0.1591,
      "step": 540
    },
    {
      "epoch": 0.21982414068745004,
      "grad_norm": 0.7256947159767151,
      "learning_rate": 0.00015651478816946444,
      "loss": 0.2144,
      "step": 550
    },
    {
      "epoch": 0.22382094324540366,
      "grad_norm": 0.817537248134613,
      "learning_rate": 0.0001557154276578737,
      "loss": 0.1262,
      "step": 560
    },
    {
      "epoch": 0.2278177458033573,
      "grad_norm": 0.6075506806373596,
      "learning_rate": 0.00015491606714628298,
      "loss": 0.1707,
      "step": 570
    },
    {
      "epoch": 0.23181454836131096,
      "grad_norm": 0.4158308804035187,
      "learning_rate": 0.00015411670663469223,
      "loss": 0.1992,
      "step": 580
    },
    {
      "epoch": 0.23581135091926458,
      "grad_norm": 0.8251807689666748,
      "learning_rate": 0.00015331734612310154,
      "loss": 0.188,
      "step": 590
    },
    {
      "epoch": 0.23980815347721823,
      "grad_norm": 0.3950844705104828,
      "learning_rate": 0.0001525179856115108,
      "loss": 0.1346,
      "step": 600
    },
    {
      "epoch": 0.24380495603517185,
      "grad_norm": 0.7561228275299072,
      "learning_rate": 0.00015171862509992008,
      "loss": 0.2248,
      "step": 610
    },
    {
      "epoch": 0.2478017585931255,
      "grad_norm": 1.5557297468185425,
      "learning_rate": 0.00015091926458832934,
      "loss": 0.1497,
      "step": 620
    },
    {
      "epoch": 0.2517985611510791,
      "grad_norm": 0.5314338207244873,
      "learning_rate": 0.00015011990407673862,
      "loss": 0.1718,
      "step": 630
    },
    {
      "epoch": 0.2557953637090328,
      "grad_norm": 0.622250497341156,
      "learning_rate": 0.00014932054356514788,
      "loss": 0.1844,
      "step": 640
    },
    {
      "epoch": 0.2597921662669864,
      "grad_norm": 0.7836592197418213,
      "learning_rate": 0.00014852118305355716,
      "loss": 0.1056,
      "step": 650
    },
    {
      "epoch": 0.2637889688249401,
      "grad_norm": 0.608813464641571,
      "learning_rate": 0.00014772182254196641,
      "loss": 0.2293,
      "step": 660
    },
    {
      "epoch": 0.26778577138289367,
      "grad_norm": 0.7594510316848755,
      "learning_rate": 0.0001469224620303757,
      "loss": 0.1408,
      "step": 670
    },
    {
      "epoch": 0.2717825739408473,
      "grad_norm": 0.7624852061271667,
      "learning_rate": 0.00014612310151878498,
      "loss": 0.1451,
      "step": 680
    },
    {
      "epoch": 0.27577937649880097,
      "grad_norm": 0.29632413387298584,
      "learning_rate": 0.00014532374100719426,
      "loss": 0.2376,
      "step": 690
    },
    {
      "epoch": 0.2797761790567546,
      "grad_norm": 0.5400770306587219,
      "learning_rate": 0.00014452438049560352,
      "loss": 0.1359,
      "step": 700
    },
    {
      "epoch": 0.2837729816147082,
      "grad_norm": 0.6905292272567749,
      "learning_rate": 0.0001437250199840128,
      "loss": 0.1188,
      "step": 710
    },
    {
      "epoch": 0.28776978417266186,
      "grad_norm": 0.43142080307006836,
      "learning_rate": 0.00014292565947242206,
      "loss": 0.1628,
      "step": 720
    },
    {
      "epoch": 0.2917665867306155,
      "grad_norm": 0.5245963335037231,
      "learning_rate": 0.00014212629896083134,
      "loss": 0.2371,
      "step": 730
    },
    {
      "epoch": 0.29576338928856916,
      "grad_norm": 0.2653980553150177,
      "learning_rate": 0.00014132693844924062,
      "loss": 0.1396,
      "step": 740
    },
    {
      "epoch": 0.2997601918465228,
      "grad_norm": 0.4195268154144287,
      "learning_rate": 0.00014052757793764988,
      "loss": 0.1438,
      "step": 750
    },
    {
      "epoch": 0.3037569944044764,
      "grad_norm": 0.48287633061408997,
      "learning_rate": 0.00013972821742605916,
      "loss": 0.1826,
      "step": 760
    },
    {
      "epoch": 0.30775379696243005,
      "grad_norm": 0.3496572971343994,
      "learning_rate": 0.00013892885691446844,
      "loss": 0.1042,
      "step": 770
    },
    {
      "epoch": 0.3117505995203837,
      "grad_norm": 0.4998539388179779,
      "learning_rate": 0.00013812949640287772,
      "loss": 0.1642,
      "step": 780
    },
    {
      "epoch": 0.31574740207833735,
      "grad_norm": 0.3192286789417267,
      "learning_rate": 0.00013733013589128698,
      "loss": 0.1511,
      "step": 790
    },
    {
      "epoch": 0.31974420463629094,
      "grad_norm": 0.19637304544448853,
      "learning_rate": 0.00013653077537969626,
      "loss": 0.1756,
      "step": 800
    },
    {
      "epoch": 0.3237410071942446,
      "grad_norm": 0.3817007839679718,
      "learning_rate": 0.00013573141486810552,
      "loss": 0.1729,
      "step": 810
    },
    {
      "epoch": 0.32773780975219824,
      "grad_norm": 0.5161091685295105,
      "learning_rate": 0.0001349320543565148,
      "loss": 0.1724,
      "step": 820
    },
    {
      "epoch": 0.3317346123101519,
      "grad_norm": 0.41696417331695557,
      "learning_rate": 0.00013413269384492406,
      "loss": 0.2164,
      "step": 830
    },
    {
      "epoch": 0.33573141486810554,
      "grad_norm": 0.7698529362678528,
      "learning_rate": 0.00013333333333333334,
      "loss": 0.1703,
      "step": 840
    },
    {
      "epoch": 0.33972821742605913,
      "grad_norm": 0.6981766819953918,
      "learning_rate": 0.0001325339728217426,
      "loss": 0.1563,
      "step": 850
    },
    {
      "epoch": 0.3437250199840128,
      "grad_norm": 0.8318856358528137,
      "learning_rate": 0.0001317346123101519,
      "loss": 0.185,
      "step": 860
    },
    {
      "epoch": 0.34772182254196643,
      "grad_norm": 0.7456117272377014,
      "learning_rate": 0.00013093525179856116,
      "loss": 0.1273,
      "step": 870
    },
    {
      "epoch": 0.3517186250999201,
      "grad_norm": 0.32372888922691345,
      "learning_rate": 0.00013013589128697044,
      "loss": 0.2206,
      "step": 880
    },
    {
      "epoch": 0.35571542765787373,
      "grad_norm": 0.2670097351074219,
      "learning_rate": 0.0001293365307753797,
      "loss": 0.1818,
      "step": 890
    },
    {
      "epoch": 0.3597122302158273,
      "grad_norm": 0.38958919048309326,
      "learning_rate": 0.00012853717026378898,
      "loss": 0.1137,
      "step": 900
    },
    {
      "epoch": 0.36370903277378097,
      "grad_norm": 0.47033756971359253,
      "learning_rate": 0.00012773780975219824,
      "loss": 0.1577,
      "step": 910
    },
    {
      "epoch": 0.3677058353317346,
      "grad_norm": 0.9784656167030334,
      "learning_rate": 0.00012693844924060752,
      "loss": 0.2,
      "step": 920
    },
    {
      "epoch": 0.37170263788968827,
      "grad_norm": 0.5877514481544495,
      "learning_rate": 0.00012613908872901678,
      "loss": 0.1277,
      "step": 930
    },
    {
      "epoch": 0.37569944044764186,
      "grad_norm": 0.5953680276870728,
      "learning_rate": 0.00012533972821742606,
      "loss": 0.1541,
      "step": 940
    },
    {
      "epoch": 0.3796962430055955,
      "grad_norm": 1.5499800443649292,
      "learning_rate": 0.00012454036770583534,
      "loss": 0.2633,
      "step": 950
    },
    {
      "epoch": 0.38369304556354916,
      "grad_norm": 0.8279550075531006,
      "learning_rate": 0.00012374100719424462,
      "loss": 0.2287,
      "step": 960
    },
    {
      "epoch": 0.3876898481215028,
      "grad_norm": 0.6293567419052124,
      "learning_rate": 0.00012294164668265388,
      "loss": 0.1654,
      "step": 970
    },
    {
      "epoch": 0.39168665067945646,
      "grad_norm": 0.897535502910614,
      "learning_rate": 0.00012214228617106316,
      "loss": 0.1163,
      "step": 980
    },
    {
      "epoch": 0.39568345323741005,
      "grad_norm": 0.2581837773323059,
      "learning_rate": 0.00012134292565947243,
      "loss": 0.1061,
      "step": 990
    },
    {
      "epoch": 0.3996802557953637,
      "grad_norm": 0.3749452531337738,
      "learning_rate": 0.0001205435651478817,
      "loss": 0.1198,
      "step": 1000
    },
    {
      "epoch": 0.40367705835331735,
      "grad_norm": 1.0581694841384888,
      "learning_rate": 0.00011974420463629097,
      "loss": 0.2201,
      "step": 1010
    },
    {
      "epoch": 0.407673860911271,
      "grad_norm": 0.5319008827209473,
      "learning_rate": 0.00011894484412470024,
      "loss": 0.1652,
      "step": 1020
    },
    {
      "epoch": 0.4116706634692246,
      "grad_norm": 0.5321987867355347,
      "learning_rate": 0.00011814548361310951,
      "loss": 0.1187,
      "step": 1030
    },
    {
      "epoch": 0.41566746602717825,
      "grad_norm": 0.764324426651001,
      "learning_rate": 0.0001173461231015188,
      "loss": 0.24,
      "step": 1040
    },
    {
      "epoch": 0.4196642685851319,
      "grad_norm": 0.19670064747333527,
      "learning_rate": 0.00011654676258992807,
      "loss": 0.1465,
      "step": 1050
    },
    {
      "epoch": 0.42366107114308554,
      "grad_norm": 0.6624041795730591,
      "learning_rate": 0.00011574740207833734,
      "loss": 0.1659,
      "step": 1060
    },
    {
      "epoch": 0.4276578737010392,
      "grad_norm": 0.3703235983848572,
      "learning_rate": 0.00011494804156674661,
      "loss": 0.1845,
      "step": 1070
    },
    {
      "epoch": 0.4316546762589928,
      "grad_norm": 0.7150025367736816,
      "learning_rate": 0.00011414868105515588,
      "loss": 0.1728,
      "step": 1080
    },
    {
      "epoch": 0.43565147881694644,
      "grad_norm": 0.46187496185302734,
      "learning_rate": 0.00011334932054356515,
      "loss": 0.1614,
      "step": 1090
    },
    {
      "epoch": 0.4396482813749001,
      "grad_norm": 0.614830493927002,
      "learning_rate": 0.00011254996003197442,
      "loss": 0.1669,
      "step": 1100
    },
    {
      "epoch": 0.44364508393285373,
      "grad_norm": 0.5666422247886658,
      "learning_rate": 0.00011175059952038369,
      "loss": 0.1387,
      "step": 1110
    },
    {
      "epoch": 0.44764188649080733,
      "grad_norm": 0.38144662976264954,
      "learning_rate": 0.00011095123900879296,
      "loss": 0.1492,
      "step": 1120
    },
    {
      "epoch": 0.451638689048761,
      "grad_norm": 0.46162304282188416,
      "learning_rate": 0.00011015187849720225,
      "loss": 0.1946,
      "step": 1130
    },
    {
      "epoch": 0.4556354916067146,
      "grad_norm": 0.305127888917923,
      "learning_rate": 0.00010935251798561152,
      "loss": 0.1498,
      "step": 1140
    },
    {
      "epoch": 0.4596322941646683,
      "grad_norm": 0.5906249284744263,
      "learning_rate": 0.00010855315747402079,
      "loss": 0.119,
      "step": 1150
    },
    {
      "epoch": 0.4636290967226219,
      "grad_norm": 0.10765030235052109,
      "learning_rate": 0.00010775379696243006,
      "loss": 0.1357,
      "step": 1160
    },
    {
      "epoch": 0.4676258992805755,
      "grad_norm": 0.6704568862915039,
      "learning_rate": 0.00010695443645083933,
      "loss": 0.1257,
      "step": 1170
    },
    {
      "epoch": 0.47162270183852917,
      "grad_norm": 0.5508949756622314,
      "learning_rate": 0.0001061550759392486,
      "loss": 0.1837,
      "step": 1180
    },
    {
      "epoch": 0.4756195043964828,
      "grad_norm": 0.5210421085357666,
      "learning_rate": 0.00010535571542765788,
      "loss": 0.2463,
      "step": 1190
    },
    {
      "epoch": 0.47961630695443647,
      "grad_norm": 0.8721128702163696,
      "learning_rate": 0.00010455635491606715,
      "loss": 0.1465,
      "step": 1200
    },
    {
      "epoch": 0.48361310951239006,
      "grad_norm": 1.4497110843658447,
      "learning_rate": 0.00010375699440447642,
      "loss": 0.1743,
      "step": 1210
    },
    {
      "epoch": 0.4876099120703437,
      "grad_norm": 0.46403270959854126,
      "learning_rate": 0.00010295763389288569,
      "loss": 0.1449,
      "step": 1220
    },
    {
      "epoch": 0.49160671462829736,
      "grad_norm": 0.6765905618667603,
      "learning_rate": 0.00010215827338129497,
      "loss": 0.1533,
      "step": 1230
    },
    {
      "epoch": 0.495603517186251,
      "grad_norm": 0.6220802068710327,
      "learning_rate": 0.00010135891286970425,
      "loss": 0.2103,
      "step": 1240
    },
    {
      "epoch": 0.49960031974420466,
      "grad_norm": 0.26665735244750977,
      "learning_rate": 0.00010055955235811352,
      "loss": 0.1589,
      "step": 1250
    },
    {
      "epoch": 0.5035971223021583,
      "grad_norm": 0.6205161213874817,
      "learning_rate": 9.976019184652279e-05,
      "loss": 0.1655,
      "step": 1260
    },
    {
      "epoch": 0.5075939248601119,
      "grad_norm": 0.6908559799194336,
      "learning_rate": 9.896083133493206e-05,
      "loss": 0.1659,
      "step": 1270
    },
    {
      "epoch": 0.5115907274180655,
      "grad_norm": 0.7626561522483826,
      "learning_rate": 9.816147082334133e-05,
      "loss": 0.1768,
      "step": 1280
    },
    {
      "epoch": 0.5155875299760192,
      "grad_norm": 0.6223479509353638,
      "learning_rate": 9.736211031175061e-05,
      "loss": 0.1931,
      "step": 1290
    },
    {
      "epoch": 0.5195843325339728,
      "grad_norm": 1.0529000759124756,
      "learning_rate": 9.656274980015988e-05,
      "loss": 0.2085,
      "step": 1300
    },
    {
      "epoch": 0.5235811350919265,
      "grad_norm": 1.199170708656311,
      "learning_rate": 9.576338928856915e-05,
      "loss": 0.1647,
      "step": 1310
    },
    {
      "epoch": 0.5275779376498801,
      "grad_norm": 0.8140775561332703,
      "learning_rate": 9.496402877697842e-05,
      "loss": 0.1954,
      "step": 1320
    },
    {
      "epoch": 0.5315747402078337,
      "grad_norm": 0.5082156658172607,
      "learning_rate": 9.416466826538769e-05,
      "loss": 0.211,
      "step": 1330
    },
    {
      "epoch": 0.5355715427657873,
      "grad_norm": 0.7366045117378235,
      "learning_rate": 9.336530775379697e-05,
      "loss": 0.2314,
      "step": 1340
    },
    {
      "epoch": 0.539568345323741,
      "grad_norm": 0.45685720443725586,
      "learning_rate": 9.256594724220624e-05,
      "loss": 0.1322,
      "step": 1350
    },
    {
      "epoch": 0.5435651478816946,
      "grad_norm": 0.9116412401199341,
      "learning_rate": 9.176658673061551e-05,
      "loss": 0.2288,
      "step": 1360
    },
    {
      "epoch": 0.5475619504396483,
      "grad_norm": 0.04045610502362251,
      "learning_rate": 9.096722621902478e-05,
      "loss": 0.1568,
      "step": 1370
    },
    {
      "epoch": 0.5515587529976019,
      "grad_norm": 0.5173507928848267,
      "learning_rate": 9.016786570743405e-05,
      "loss": 0.1515,
      "step": 1380
    },
    {
      "epoch": 0.5555555555555556,
      "grad_norm": 0.9522683620452881,
      "learning_rate": 8.936850519584333e-05,
      "loss": 0.2538,
      "step": 1390
    },
    {
      "epoch": 0.5595523581135092,
      "grad_norm": 0.5445271134376526,
      "learning_rate": 8.85691446842526e-05,
      "loss": 0.1693,
      "step": 1400
    },
    {
      "epoch": 0.5635491606714629,
      "grad_norm": 0.4531286954879761,
      "learning_rate": 8.776978417266187e-05,
      "loss": 0.1973,
      "step": 1410
    },
    {
      "epoch": 0.5675459632294164,
      "grad_norm": 0.7141970992088318,
      "learning_rate": 8.697042366107114e-05,
      "loss": 0.1563,
      "step": 1420
    },
    {
      "epoch": 0.5715427657873701,
      "grad_norm": 0.3411566913127899,
      "learning_rate": 8.617106314948042e-05,
      "loss": 0.1146,
      "step": 1430
    },
    {
      "epoch": 0.5755395683453237,
      "grad_norm": 0.33047667145729065,
      "learning_rate": 8.537170263788969e-05,
      "loss": 0.1565,
      "step": 1440
    },
    {
      "epoch": 0.5795363709032774,
      "grad_norm": 0.9432806372642517,
      "learning_rate": 8.457234212629896e-05,
      "loss": 0.1469,
      "step": 1450
    },
    {
      "epoch": 0.583533173461231,
      "grad_norm": 0.379395455121994,
      "learning_rate": 8.377298161470823e-05,
      "loss": 0.1181,
      "step": 1460
    },
    {
      "epoch": 0.5875299760191847,
      "grad_norm": 1.003814697265625,
      "learning_rate": 8.29736211031175e-05,
      "loss": 0.1335,
      "step": 1470
    },
    {
      "epoch": 0.5915267785771383,
      "grad_norm": 0.6143353581428528,
      "learning_rate": 8.217426059152678e-05,
      "loss": 0.1922,
      "step": 1480
    },
    {
      "epoch": 0.595523581135092,
      "grad_norm": 1.0933538675308228,
      "learning_rate": 8.137490007993605e-05,
      "loss": 0.1876,
      "step": 1490
    },
    {
      "epoch": 0.5995203836930456,
      "grad_norm": 0.7725152373313904,
      "learning_rate": 8.057553956834533e-05,
      "loss": 0.1944,
      "step": 1500
    },
    {
      "epoch": 0.6035171862509991,
      "grad_norm": 0.7263298034667969,
      "learning_rate": 7.97761790567546e-05,
      "loss": 0.2342,
      "step": 1510
    },
    {
      "epoch": 0.6075139888089528,
      "grad_norm": 0.9801928400993347,
      "learning_rate": 7.897681854516387e-05,
      "loss": 0.1932,
      "step": 1520
    },
    {
      "epoch": 0.6115107913669064,
      "grad_norm": 0.8123881816864014,
      "learning_rate": 7.817745803357315e-05,
      "loss": 0.1853,
      "step": 1530
    },
    {
      "epoch": 0.6155075939248601,
      "grad_norm": 0.19386546313762665,
      "learning_rate": 7.737809752198242e-05,
      "loss": 0.1773,
      "step": 1540
    },
    {
      "epoch": 0.6195043964828137,
      "grad_norm": 0.6336228251457214,
      "learning_rate": 7.657873701039169e-05,
      "loss": 0.1537,
      "step": 1550
    },
    {
      "epoch": 0.6235011990407674,
      "grad_norm": 0.5702576041221619,
      "learning_rate": 7.577937649880096e-05,
      "loss": 0.1771,
      "step": 1560
    },
    {
      "epoch": 0.627498001598721,
      "grad_norm": 0.35963866114616394,
      "learning_rate": 7.498001598721024e-05,
      "loss": 0.1688,
      "step": 1570
    },
    {
      "epoch": 0.6314948041566747,
      "grad_norm": 0.5332326889038086,
      "learning_rate": 7.418065547561951e-05,
      "loss": 0.169,
      "step": 1580
    },
    {
      "epoch": 0.6354916067146283,
      "grad_norm": 0.6524598598480225,
      "learning_rate": 7.338129496402878e-05,
      "loss": 0.1548,
      "step": 1590
    },
    {
      "epoch": 0.6394884092725819,
      "grad_norm": 0.5481794476509094,
      "learning_rate": 7.258193445243805e-05,
      "loss": 0.1428,
      "step": 1600
    },
    {
      "epoch": 0.6434852118305355,
      "grad_norm": 0.3422311842441559,
      "learning_rate": 7.178257394084733e-05,
      "loss": 0.1782,
      "step": 1610
    },
    {
      "epoch": 0.6474820143884892,
      "grad_norm": 0.3071247935295105,
      "learning_rate": 7.09832134292566e-05,
      "loss": 0.1486,
      "step": 1620
    },
    {
      "epoch": 0.6514788169464428,
      "grad_norm": 0.5833027958869934,
      "learning_rate": 7.018385291766587e-05,
      "loss": 0.239,
      "step": 1630
    },
    {
      "epoch": 0.6554756195043965,
      "grad_norm": 0.6470035910606384,
      "learning_rate": 6.938449240607514e-05,
      "loss": 0.1646,
      "step": 1640
    },
    {
      "epoch": 0.6594724220623501,
      "grad_norm": 0.8131123185157776,
      "learning_rate": 6.858513189448441e-05,
      "loss": 0.1891,
      "step": 1650
    },
    {
      "epoch": 0.6634692246203038,
      "grad_norm": 0.281305193901062,
      "learning_rate": 6.778577138289369e-05,
      "loss": 0.1694,
      "step": 1660
    },
    {
      "epoch": 0.6674660271782574,
      "grad_norm": 0.4203711748123169,
      "learning_rate": 6.698641087130296e-05,
      "loss": 0.1765,
      "step": 1670
    },
    {
      "epoch": 0.6714628297362111,
      "grad_norm": 0.3434304893016815,
      "learning_rate": 6.618705035971223e-05,
      "loss": 0.2065,
      "step": 1680
    },
    {
      "epoch": 0.6754596322941646,
      "grad_norm": 1.1291146278381348,
      "learning_rate": 6.53876898481215e-05,
      "loss": 0.2437,
      "step": 1690
    },
    {
      "epoch": 0.6794564348521183,
      "grad_norm": 0.5707736015319824,
      "learning_rate": 6.458832933653078e-05,
      "loss": 0.1552,
      "step": 1700
    },
    {
      "epoch": 0.6834532374100719,
      "grad_norm": 0.15203942358493805,
      "learning_rate": 6.378896882494005e-05,
      "loss": 0.1222,
      "step": 1710
    },
    {
      "epoch": 0.6874500399680256,
      "grad_norm": 1.4667842388153076,
      "learning_rate": 6.298960831334932e-05,
      "loss": 0.2537,
      "step": 1720
    },
    {
      "epoch": 0.6914468425259792,
      "grad_norm": 0.5294522643089294,
      "learning_rate": 6.219024780175859e-05,
      "loss": 0.1668,
      "step": 1730
    },
    {
      "epoch": 0.6954436450839329,
      "grad_norm": 0.7321768999099731,
      "learning_rate": 6.139088729016786e-05,
      "loss": 0.2347,
      "step": 1740
    },
    {
      "epoch": 0.6994404476418865,
      "grad_norm": 0.6722360849380493,
      "learning_rate": 6.059152677857714e-05,
      "loss": 0.1461,
      "step": 1750
    },
    {
      "epoch": 0.7034372501998402,
      "grad_norm": 0.6050254702568054,
      "learning_rate": 5.979216626698642e-05,
      "loss": 0.1743,
      "step": 1760
    },
    {
      "epoch": 0.7074340527577938,
      "grad_norm": 0.6438426971435547,
      "learning_rate": 5.899280575539569e-05,
      "loss": 0.134,
      "step": 1770
    },
    {
      "epoch": 0.7114308553157475,
      "grad_norm": 0.3928489685058594,
      "learning_rate": 5.8193445243804957e-05,
      "loss": 0.1741,
      "step": 1780
    },
    {
      "epoch": 0.715427657873701,
      "grad_norm": 0.2181692123413086,
      "learning_rate": 5.7394084732214226e-05,
      "loss": 0.1901,
      "step": 1790
    },
    {
      "epoch": 0.7194244604316546,
      "grad_norm": 0.17083604633808136,
      "learning_rate": 5.659472422062351e-05,
      "loss": 0.1611,
      "step": 1800
    },
    {
      "epoch": 0.7234212629896083,
      "grad_norm": 0.8438991904258728,
      "learning_rate": 5.579536370903278e-05,
      "loss": 0.1516,
      "step": 1810
    },
    {
      "epoch": 0.7274180655475619,
      "grad_norm": 0.5326571464538574,
      "learning_rate": 5.4996003197442047e-05,
      "loss": 0.1638,
      "step": 1820
    },
    {
      "epoch": 0.7314148681055156,
      "grad_norm": 0.17157304286956787,
      "learning_rate": 5.4196642685851316e-05,
      "loss": 0.1372,
      "step": 1830
    },
    {
      "epoch": 0.7354116706634692,
      "grad_norm": 0.5487836003303528,
      "learning_rate": 5.33972821742606e-05,
      "loss": 0.1297,
      "step": 1840
    },
    {
      "epoch": 0.7394084732214229,
      "grad_norm": 0.5392123460769653,
      "learning_rate": 5.259792166266987e-05,
      "loss": 0.1717,
      "step": 1850
    },
    {
      "epoch": 0.7434052757793765,
      "grad_norm": 0.9712696671485901,
      "learning_rate": 5.179856115107914e-05,
      "loss": 0.1538,
      "step": 1860
    },
    {
      "epoch": 0.7474020783373302,
      "grad_norm": 0.0985872820019722,
      "learning_rate": 5.0999200639488406e-05,
      "loss": 0.1218,
      "step": 1870
    },
    {
      "epoch": 0.7513988808952837,
      "grad_norm": 0.5596745014190674,
      "learning_rate": 5.019984012789768e-05,
      "loss": 0.1305,
      "step": 1880
    },
    {
      "epoch": 0.7553956834532374,
      "grad_norm": 0.6789805889129639,
      "learning_rate": 4.940047961630696e-05,
      "loss": 0.1586,
      "step": 1890
    },
    {
      "epoch": 0.759392486011191,
      "grad_norm": 0.6722508668899536,
      "learning_rate": 4.8601119104716234e-05,
      "loss": 0.1545,
      "step": 1900
    },
    {
      "epoch": 0.7633892885691447,
      "grad_norm": 0.9916704893112183,
      "learning_rate": 4.78017585931255e-05,
      "loss": 0.1132,
      "step": 1910
    },
    {
      "epoch": 0.7673860911270983,
      "grad_norm": 0.558546781539917,
      "learning_rate": 4.700239808153478e-05,
      "loss": 0.1211,
      "step": 1920
    },
    {
      "epoch": 0.771382893685052,
      "grad_norm": 0.5568097233772278,
      "learning_rate": 4.620303756994405e-05,
      "loss": 0.1415,
      "step": 1930
    },
    {
      "epoch": 0.7753796962430056,
      "grad_norm": 1.2026703357696533,
      "learning_rate": 4.5403677058353324e-05,
      "loss": 0.2147,
      "step": 1940
    },
    {
      "epoch": 0.7793764988009593,
      "grad_norm": 0.3532833456993103,
      "learning_rate": 4.460431654676259e-05,
      "loss": 0.1856,
      "step": 1950
    },
    {
      "epoch": 0.7833733013589129,
      "grad_norm": 0.7475394010543823,
      "learning_rate": 4.380495603517186e-05,
      "loss": 0.1259,
      "step": 1960
    },
    {
      "epoch": 0.7873701039168665,
      "grad_norm": 0.584240734577179,
      "learning_rate": 4.300559552358114e-05,
      "loss": 0.1643,
      "step": 1970
    },
    {
      "epoch": 0.7913669064748201,
      "grad_norm": 0.9373254179954529,
      "learning_rate": 4.220623501199041e-05,
      "loss": 0.1365,
      "step": 1980
    },
    {
      "epoch": 0.7953637090327738,
      "grad_norm": 0.09720321744680405,
      "learning_rate": 4.140687450039968e-05,
      "loss": 0.1569,
      "step": 1990
    },
    {
      "epoch": 0.7993605115907274,
      "grad_norm": 0.3333890438079834,
      "learning_rate": 4.060751398880895e-05,
      "loss": 0.1588,
      "step": 2000
    },
    {
      "epoch": 0.8033573141486811,
      "grad_norm": 0.19799380004405975,
      "learning_rate": 3.980815347721823e-05,
      "loss": 0.1463,
      "step": 2010
    },
    {
      "epoch": 0.8073541167066347,
      "grad_norm": 0.37868112325668335,
      "learning_rate": 3.90087929656275e-05,
      "loss": 0.1312,
      "step": 2020
    },
    {
      "epoch": 0.8113509192645884,
      "grad_norm": 0.48458245396614075,
      "learning_rate": 3.820943245403677e-05,
      "loss": 0.1252,
      "step": 2030
    },
    {
      "epoch": 0.815347721822542,
      "grad_norm": 0.14216120541095734,
      "learning_rate": 3.741007194244605e-05,
      "loss": 0.1453,
      "step": 2040
    },
    {
      "epoch": 0.8193445243804957,
      "grad_norm": 0.7967089414596558,
      "learning_rate": 3.661071143085532e-05,
      "loss": 0.139,
      "step": 2050
    },
    {
      "epoch": 0.8233413269384492,
      "grad_norm": 1.3931658267974854,
      "learning_rate": 3.5811350919264594e-05,
      "loss": 0.202,
      "step": 2060
    },
    {
      "epoch": 0.8273381294964028,
      "grad_norm": 0.6114959120750427,
      "learning_rate": 3.501199040767386e-05,
      "loss": 0.1687,
      "step": 2070
    },
    {
      "epoch": 0.8313349320543565,
      "grad_norm": 0.8499948382377625,
      "learning_rate": 3.421262989608314e-05,
      "loss": 0.1564,
      "step": 2080
    },
    {
      "epoch": 0.8353317346123101,
      "grad_norm": 1.8267698287963867,
      "learning_rate": 3.341326938449241e-05,
      "loss": 0.2304,
      "step": 2090
    },
    {
      "epoch": 0.8393285371702638,
      "grad_norm": 0.572331428527832,
      "learning_rate": 3.2613908872901684e-05,
      "loss": 0.0897,
      "step": 2100
    },
    {
      "epoch": 0.8433253397282174,
      "grad_norm": 0.6183590888977051,
      "learning_rate": 3.181454836131095e-05,
      "loss": 0.127,
      "step": 2110
    },
    {
      "epoch": 0.8473221422861711,
      "grad_norm": 0.44028234481811523,
      "learning_rate": 3.101518784972022e-05,
      "loss": 0.2057,
      "step": 2120
    },
    {
      "epoch": 0.8513189448441247,
      "grad_norm": 0.6345455050468445,
      "learning_rate": 3.0215827338129498e-05,
      "loss": 0.1591,
      "step": 2130
    },
    {
      "epoch": 0.8553157474020784,
      "grad_norm": 1.1488350629806519,
      "learning_rate": 2.9416466826538767e-05,
      "loss": 0.2077,
      "step": 2140
    },
    {
      "epoch": 0.8593125499600319,
      "grad_norm": 0.7199774384498596,
      "learning_rate": 2.8617106314948043e-05,
      "loss": 0.1261,
      "step": 2150
    },
    {
      "epoch": 0.8633093525179856,
      "grad_norm": 0.8149317502975464,
      "learning_rate": 2.7817745803357316e-05,
      "loss": 0.1791,
      "step": 2160
    },
    {
      "epoch": 0.8673061550759392,
      "grad_norm": 0.33090534806251526,
      "learning_rate": 2.701838529176659e-05,
      "loss": 0.2044,
      "step": 2170
    },
    {
      "epoch": 0.8713029576338929,
      "grad_norm": 1.0773526430130005,
      "learning_rate": 2.621902478017586e-05,
      "loss": 0.1776,
      "step": 2180
    },
    {
      "epoch": 0.8752997601918465,
      "grad_norm": 0.5906376838684082,
      "learning_rate": 2.541966426858513e-05,
      "loss": 0.1361,
      "step": 2190
    },
    {
      "epoch": 0.8792965627498002,
      "grad_norm": 0.6298534870147705,
      "learning_rate": 2.4620303756994406e-05,
      "loss": 0.178,
      "step": 2200
    },
    {
      "epoch": 0.8832933653077538,
      "grad_norm": 1.040358066558838,
      "learning_rate": 2.3820943245403678e-05,
      "loss": 0.2245,
      "step": 2210
    },
    {
      "epoch": 0.8872901678657075,
      "grad_norm": 0.6028202176094055,
      "learning_rate": 2.302158273381295e-05,
      "loss": 0.0818,
      "step": 2220
    },
    {
      "epoch": 0.8912869704236611,
      "grad_norm": 0.43753504753112793,
      "learning_rate": 2.2222222222222223e-05,
      "loss": 0.1271,
      "step": 2230
    },
    {
      "epoch": 0.8952837729816147,
      "grad_norm": 0.34733837842941284,
      "learning_rate": 2.1422861710631496e-05,
      "loss": 0.1148,
      "step": 2240
    },
    {
      "epoch": 0.8992805755395683,
      "grad_norm": 0.33790159225463867,
      "learning_rate": 2.062350119904077e-05,
      "loss": 0.2148,
      "step": 2250
    },
    {
      "epoch": 0.903277378097522,
      "grad_norm": 1.4219551086425781,
      "learning_rate": 1.982414068745004e-05,
      "loss": 0.181,
      "step": 2260
    },
    {
      "epoch": 0.9072741806554756,
      "grad_norm": 1.1721121072769165,
      "learning_rate": 1.9024780175859313e-05,
      "loss": 0.2161,
      "step": 2270
    },
    {
      "epoch": 0.9112709832134293,
      "grad_norm": 1.0592964887619019,
      "learning_rate": 1.8225419664268586e-05,
      "loss": 0.1566,
      "step": 2280
    },
    {
      "epoch": 0.9152677857713829,
      "grad_norm": 0.2788325250148773,
      "learning_rate": 1.742605915267786e-05,
      "loss": 0.1555,
      "step": 2290
    },
    {
      "epoch": 0.9192645883293366,
      "grad_norm": 0.7817983031272888,
      "learning_rate": 1.662669864108713e-05,
      "loss": 0.1883,
      "step": 2300
    },
    {
      "epoch": 0.9232613908872902,
      "grad_norm": 0.26448529958724976,
      "learning_rate": 1.5827338129496403e-05,
      "loss": 0.2078,
      "step": 2310
    },
    {
      "epoch": 0.9272581934452439,
      "grad_norm": 0.92426997423172,
      "learning_rate": 1.5027977617905676e-05,
      "loss": 0.1933,
      "step": 2320
    },
    {
      "epoch": 0.9312549960031974,
      "grad_norm": 0.7494958639144897,
      "learning_rate": 1.4228617106314948e-05,
      "loss": 0.1466,
      "step": 2330
    },
    {
      "epoch": 0.935251798561151,
      "grad_norm": 0.3624798357486725,
      "learning_rate": 1.3429256594724221e-05,
      "loss": 0.1308,
      "step": 2340
    },
    {
      "epoch": 0.9392486011191047,
      "grad_norm": 0.6994137763977051,
      "learning_rate": 1.2629896083133494e-05,
      "loss": 0.1665,
      "step": 2350
    },
    {
      "epoch": 0.9432454036770583,
      "grad_norm": 0.9269047975540161,
      "learning_rate": 1.1830535571542766e-05,
      "loss": 0.193,
      "step": 2360
    },
    {
      "epoch": 0.947242206235012,
      "grad_norm": 0.5760440230369568,
      "learning_rate": 1.1031175059952039e-05,
      "loss": 0.109,
      "step": 2370
    },
    {
      "epoch": 0.9512390087929656,
      "grad_norm": 0.9416897296905518,
      "learning_rate": 1.0231814548361311e-05,
      "loss": 0.1273,
      "step": 2380
    },
    {
      "epoch": 0.9552358113509193,
      "grad_norm": 0.7053970098495483,
      "learning_rate": 9.432454036770584e-06,
      "loss": 0.1861,
      "step": 2390
    },
    {
      "epoch": 0.9592326139088729,
      "grad_norm": 1.9960613250732422,
      "learning_rate": 8.633093525179858e-06,
      "loss": 0.1811,
      "step": 2400
    },
    {
      "epoch": 0.9632294164668266,
      "grad_norm": 1.1505160331726074,
      "learning_rate": 7.833733013589129e-06,
      "loss": 0.2093,
      "step": 2410
    },
    {
      "epoch": 0.9672262190247801,
      "grad_norm": 0.7323939204216003,
      "learning_rate": 7.034372501998401e-06,
      "loss": 0.1221,
      "step": 2420
    },
    {
      "epoch": 0.9712230215827338,
      "grad_norm": 0.810534656047821,
      "learning_rate": 6.2350119904076745e-06,
      "loss": 0.1261,
      "step": 2430
    },
    {
      "epoch": 0.9752198241406874,
      "grad_norm": 0.25207674503326416,
      "learning_rate": 5.435651478816946e-06,
      "loss": 0.1268,
      "step": 2440
    },
    {
      "epoch": 0.9792166266986411,
      "grad_norm": 0.6386179327964783,
      "learning_rate": 4.6362909672262196e-06,
      "loss": 0.1437,
      "step": 2450
    },
    {
      "epoch": 0.9832134292565947,
      "grad_norm": 0.9403632879257202,
      "learning_rate": 3.836930455635491e-06,
      "loss": 0.2184,
      "step": 2460
    },
    {
      "epoch": 0.9872102318145484,
      "grad_norm": 0.7406390905380249,
      "learning_rate": 3.0375699440447646e-06,
      "loss": 0.1826,
      "step": 2470
    },
    {
      "epoch": 0.991207034372502,
      "grad_norm": 0.5971857309341431,
      "learning_rate": 2.238209432454037e-06,
      "loss": 0.1409,
      "step": 2480
    },
    {
      "epoch": 0.9952038369304557,
      "grad_norm": 0.8005123734474182,
      "learning_rate": 1.4388489208633094e-06,
      "loss": 0.18,
      "step": 2490
    },
    {
      "epoch": 0.9992006394884093,
      "grad_norm": 0.33795028924942017,
      "learning_rate": 6.394884092725819e-07,
      "loss": 0.1293,
      "step": 2500
    },
    {
      "epoch": 1.0,
      "eval_loss": 0.16163970530033112,
      "eval_runtime": 143.7398,
      "eval_samples_per_second": 1.934,
      "eval_steps_per_second": 1.934,
      "step": 2502
    }
  ],
  "logging_steps": 10,
  "max_steps": 2502,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.467975082783539e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}