{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.5514959327174962,
  "eval_steps": 500,
  "global_step": 4000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0013787398317937406,
      "grad_norm": 1.0734721422195435,
      "learning_rate": 0.0002,
      "loss": 2.8003,
      "step": 10
    },
    {
      "epoch": 0.002757479663587481,
      "grad_norm": 0.19643057882785797,
      "learning_rate": 0.0002,
      "loss": 0.3727,
      "step": 20
    },
    {
      "epoch": 0.004136219495381221,
      "grad_norm": 0.8137874007225037,
      "learning_rate": 0.0002,
      "loss": 0.3519,
      "step": 30
    },
    {
      "epoch": 0.005514959327174962,
      "grad_norm": 0.4064357578754425,
      "learning_rate": 0.0002,
      "loss": 0.357,
      "step": 40
    },
    {
      "epoch": 0.0068936991589687024,
      "grad_norm": 1.0673978328704834,
      "learning_rate": 0.0002,
      "loss": 0.3515,
      "step": 50
    },
    {
      "epoch": 0.008272438990762443,
      "grad_norm": 2.2639453411102295,
      "learning_rate": 0.0002,
      "loss": 0.4068,
      "step": 60
    },
    {
      "epoch": 0.009651178822556184,
      "grad_norm": 1.2247616052627563,
      "learning_rate": 0.0002,
      "loss": 0.5108,
      "step": 70
    },
    {
      "epoch": 0.011029918654349925,
      "grad_norm": 0.07231716066598892,
      "learning_rate": 0.0002,
      "loss": 0.3644,
      "step": 80
    },
    {
      "epoch": 0.012408658486143665,
      "grad_norm": 1.4329577684402466,
      "learning_rate": 0.0002,
      "loss": 0.3571,
      "step": 90
    },
    {
      "epoch": 0.013787398317937405,
      "grad_norm": 0.8798255920410156,
      "learning_rate": 0.0002,
      "loss": 0.3521,
      "step": 100
    },
    {
      "epoch": 0.015166138149731145,
      "grad_norm": 0.06499180942773819,
      "learning_rate": 0.0002,
      "loss": 0.3597,
      "step": 110
    },
    {
      "epoch": 0.016544877981524885,
      "grad_norm": 0.284839928150177,
      "learning_rate": 0.0002,
      "loss": 0.4099,
      "step": 120
    },
    {
      "epoch": 0.017923617813318627,
      "grad_norm": 0.12186373770236969,
      "learning_rate": 0.0002,
      "loss": 0.3572,
      "step": 130
    },
    {
      "epoch": 0.01930235764511237,
      "grad_norm": 0.21747085452079773,
      "learning_rate": 0.0002,
      "loss": 0.3497,
      "step": 140
    },
    {
      "epoch": 0.020681097476906107,
      "grad_norm": 0.22812429070472717,
      "learning_rate": 0.0002,
      "loss": 0.3778,
      "step": 150
    },
    {
      "epoch": 0.02205983730869985,
      "grad_norm": 1.1110987663269043,
      "learning_rate": 0.0002,
      "loss": 0.3607,
      "step": 160
    },
    {
      "epoch": 0.023438577140493588,
      "grad_norm": 0.8457236886024475,
      "learning_rate": 0.0002,
      "loss": 0.9183,
      "step": 170
    },
    {
      "epoch": 0.02481731697228733,
      "grad_norm": 0.46427470445632935,
      "learning_rate": 0.0002,
      "loss": 0.3503,
      "step": 180
    },
    {
      "epoch": 0.02619605680408107,
      "grad_norm": 2.4730632305145264,
      "learning_rate": 0.0002,
      "loss": 0.3824,
      "step": 190
    },
    {
      "epoch": 0.02757479663587481,
      "grad_norm": 0.3589227497577667,
      "learning_rate": 0.0002,
      "loss": 1.0105,
      "step": 200
    },
    {
      "epoch": 0.02895353646766855,
      "grad_norm": 3.5308260917663574,
      "learning_rate": 0.0002,
      "loss": 0.3605,
      "step": 210
    },
    {
      "epoch": 0.03033227629946229,
      "grad_norm": 0.5237946510314941,
      "learning_rate": 0.0002,
      "loss": 0.3983,
      "step": 220
    },
    {
      "epoch": 0.031711016131256035,
      "grad_norm": 0.5702632665634155,
      "learning_rate": 0.0002,
      "loss": 0.3521,
      "step": 230
    },
    {
      "epoch": 0.03308975596304977,
      "grad_norm": 1.1318608522415161,
      "learning_rate": 0.0002,
      "loss": 0.3617,
      "step": 240
    },
    {
      "epoch": 0.03446849579484351,
      "grad_norm": 0.536571741104126,
      "learning_rate": 0.0002,
      "loss": 0.3555,
      "step": 250
    },
    {
      "epoch": 0.035847235626637254,
      "grad_norm": 0.12125778943300247,
      "learning_rate": 0.0002,
      "loss": 0.3733,
      "step": 260
    },
    {
      "epoch": 0.037225975458430996,
      "grad_norm": 0.045536063611507416,
      "learning_rate": 0.0002,
      "loss": 0.3514,
      "step": 270
    },
    {
      "epoch": 0.03860471529022474,
      "grad_norm": 0.31765612959861755,
      "learning_rate": 0.0002,
      "loss": 0.3536,
      "step": 280
    },
    {
      "epoch": 0.03998345512201847,
      "grad_norm": 0.27900660037994385,
      "learning_rate": 0.0002,
      "loss": 0.3495,
      "step": 290
    },
    {
      "epoch": 0.041362194953812215,
      "grad_norm": 0.15112566947937012,
      "learning_rate": 0.0002,
      "loss": 0.355,
      "step": 300
    },
    {
      "epoch": 0.04274093478560596,
      "grad_norm": 0.2682303786277771,
      "learning_rate": 0.0002,
      "loss": 0.3609,
      "step": 310
    },
    {
      "epoch": 0.0441196746173997,
      "grad_norm": 1.0106860399246216,
      "learning_rate": 0.0002,
      "loss": 0.3697,
      "step": 320
    },
    {
      "epoch": 0.04549841444919344,
      "grad_norm": 1.0782426595687866,
      "learning_rate": 0.0002,
      "loss": 0.3594,
      "step": 330
    },
    {
      "epoch": 0.046877154280987175,
      "grad_norm": 2.294581651687622,
      "learning_rate": 0.0002,
      "loss": 0.3676,
      "step": 340
    },
    {
      "epoch": 0.04825589411278092,
      "grad_norm": 0.6223801970481873,
      "learning_rate": 0.0002,
      "loss": 0.3741,
      "step": 350
    },
    {
      "epoch": 0.04963463394457466,
      "grad_norm": 0.2735952138900757,
      "learning_rate": 0.0002,
      "loss": 0.3628,
      "step": 360
    },
    {
      "epoch": 0.0510133737763684,
      "grad_norm": 0.7569056153297424,
      "learning_rate": 0.0002,
      "loss": 1.1827,
      "step": 370
    },
    {
      "epoch": 0.05239211360816214,
      "grad_norm": 0.6536706686019897,
      "learning_rate": 0.0002,
      "loss": 0.3543,
      "step": 380
    },
    {
      "epoch": 0.05377085343995588,
      "grad_norm": 0.3573110103607178,
      "learning_rate": 0.0002,
      "loss": 0.3529,
      "step": 390
    },
    {
      "epoch": 0.05514959327174962,
      "grad_norm": 0.8121228218078613,
      "learning_rate": 0.0002,
      "loss": 0.3566,
      "step": 400
    },
    {
      "epoch": 0.05652833310354336,
      "grad_norm": 0.7444269061088562,
      "learning_rate": 0.0002,
      "loss": 0.7803,
      "step": 410
    },
    {
      "epoch": 0.0579070729353371,
      "grad_norm": 0.5015038847923279,
      "learning_rate": 0.0002,
      "loss": 0.6134,
      "step": 420
    },
    {
      "epoch": 0.059285812767130845,
      "grad_norm": 0.08748292177915573,
      "learning_rate": 0.0002,
      "loss": 0.3501,
      "step": 430
    },
    {
      "epoch": 0.06066455259892458,
      "grad_norm": 0.3987080156803131,
      "learning_rate": 0.0002,
      "loss": 0.3484,
      "step": 440
    },
    {
      "epoch": 0.06204329243071832,
      "grad_norm": 347.7005920410156,
      "learning_rate": 0.0002,
      "loss": 0.8413,
      "step": 450
    },
    {
      "epoch": 0.06342203226251207,
      "grad_norm": 88.2750473022461,
      "learning_rate": 0.0002,
      "loss": 2.8013,
      "step": 460
    },
    {
      "epoch": 0.0648007720943058,
      "grad_norm": 0.8716701865196228,
      "learning_rate": 0.0002,
      "loss": 0.8356,
      "step": 470
    },
    {
      "epoch": 0.06617951192609954,
      "grad_norm": 0.8243119120597839,
      "learning_rate": 0.0002,
      "loss": 0.3616,
      "step": 480
    },
    {
      "epoch": 0.06755825175789329,
      "grad_norm": 1.1744294166564941,
      "learning_rate": 0.0002,
      "loss": 0.3998,
      "step": 490
    },
    {
      "epoch": 0.06893699158968702,
      "grad_norm": 0.03163053095340729,
      "learning_rate": 0.0002,
      "loss": 0.3549,
      "step": 500
    },
    {
      "epoch": 0.07031573142148077,
      "grad_norm": 3.4403915405273438,
      "learning_rate": 0.0002,
      "loss": 0.3722,
      "step": 510
    },
    {
      "epoch": 0.07169447125327451,
      "grad_norm": 1.0608879327774048,
      "learning_rate": 0.0002,
      "loss": 0.3701,
      "step": 520
    },
    {
      "epoch": 0.07307321108506824,
      "grad_norm": 1.2809940576553345,
      "learning_rate": 0.0002,
      "loss": 0.3602,
      "step": 530
    },
    {
      "epoch": 0.07445195091686199,
      "grad_norm": 0.40460118651390076,
      "learning_rate": 0.0002,
      "loss": 0.3532,
      "step": 540
    },
    {
      "epoch": 0.07583069074865573,
      "grad_norm": 0.6290703415870667,
      "learning_rate": 0.0002,
      "loss": 0.3477,
      "step": 550
    },
    {
      "epoch": 0.07720943058044948,
      "grad_norm": 0.2159261256456375,
      "learning_rate": 0.0002,
      "loss": 0.405,
      "step": 560
    },
    {
      "epoch": 0.07858817041224321,
      "grad_norm": 0.37101301550865173,
      "learning_rate": 0.0002,
      "loss": 0.3752,
      "step": 570
    },
    {
      "epoch": 0.07996691024403695,
      "grad_norm": 1.3007190227508545,
      "learning_rate": 0.0002,
      "loss": 0.3503,
      "step": 580
    },
    {
      "epoch": 0.0813456500758307,
      "grad_norm": 0.4508918225765228,
      "learning_rate": 0.0002,
      "loss": 0.3531,
      "step": 590
    },
    {
      "epoch": 0.08272438990762443,
      "grad_norm": 0.46898791193962097,
      "learning_rate": 0.0002,
      "loss": 0.347,
      "step": 600
    },
    {
      "epoch": 0.08410312973941818,
      "grad_norm": 0.8449831604957581,
      "learning_rate": 0.0002,
      "loss": 0.3546,
      "step": 610
    },
    {
      "epoch": 0.08548186957121191,
      "grad_norm": 0.7988163232803345,
      "learning_rate": 0.0002,
      "loss": 0.3505,
      "step": 620
    },
    {
      "epoch": 0.08686060940300565,
      "grad_norm": 0.4426226317882538,
      "learning_rate": 0.0002,
      "loss": 0.3649,
      "step": 630
    },
    {
      "epoch": 0.0882393492347994,
      "grad_norm": 0.2260913848876953,
      "learning_rate": 0.0002,
      "loss": 0.3499,
      "step": 640
    },
    {
      "epoch": 0.08961808906659313,
      "grad_norm": 1.476747751235962,
      "learning_rate": 0.0002,
      "loss": 0.3546,
      "step": 650
    },
    {
      "epoch": 0.09099682889838688,
      "grad_norm": 0.7640777230262756,
      "learning_rate": 0.0002,
      "loss": 0.3568,
      "step": 660
    },
    {
      "epoch": 0.09237556873018062,
      "grad_norm": 0.8559088706970215,
      "learning_rate": 0.0002,
      "loss": 0.3507,
      "step": 670
    },
    {
      "epoch": 0.09375430856197435,
      "grad_norm": 0.20833595097064972,
      "learning_rate": 0.0002,
      "loss": 0.3556,
      "step": 680
    },
    {
      "epoch": 0.0951330483937681,
      "grad_norm": 1.1485021114349365,
      "learning_rate": 0.0002,
      "loss": 0.3516,
      "step": 690
    },
    {
      "epoch": 0.09651178822556183,
      "grad_norm": 1.0206815004348755,
      "learning_rate": 0.0002,
      "loss": 0.3591,
      "step": 700
    },
    {
      "epoch": 0.09789052805735558,
      "grad_norm": 0.9966775178909302,
      "learning_rate": 0.0002,
      "loss": 0.359,
      "step": 710
    },
    {
      "epoch": 0.09926926788914932,
      "grad_norm": 0.8833585977554321,
      "learning_rate": 0.0002,
      "loss": 0.3546,
      "step": 720
    },
    {
      "epoch": 0.10064800772094305,
      "grad_norm": 1.0842584371566772,
      "learning_rate": 0.0002,
      "loss": 0.3556,
      "step": 730
    },
    {
      "epoch": 0.1020267475527368,
      "grad_norm": 0.3791511058807373,
      "learning_rate": 0.0002,
      "loss": 0.3571,
      "step": 740
    },
    {
      "epoch": 0.10340548738453054,
      "grad_norm": 0.24666732549667358,
      "learning_rate": 0.0002,
      "loss": 0.3502,
      "step": 750
    },
    {
      "epoch": 0.10478422721632429,
      "grad_norm": 0.21794968843460083,
      "learning_rate": 0.0002,
      "loss": 0.3483,
      "step": 760
    },
    {
      "epoch": 0.10616296704811802,
      "grad_norm": 0.47017499804496765,
      "learning_rate": 0.0002,
      "loss": 0.3499,
      "step": 770
    },
    {
      "epoch": 0.10754170687991176,
      "grad_norm": 0.2813131809234619,
      "learning_rate": 0.0002,
      "loss": 0.3482,
      "step": 780
    },
    {
      "epoch": 0.1089204467117055,
      "grad_norm": 1.2175363302230835,
      "learning_rate": 0.0002,
      "loss": 0.3524,
      "step": 790
    },
    {
      "epoch": 0.11029918654349924,
      "grad_norm": 0.2712210416793823,
      "learning_rate": 0.0002,
      "loss": 0.3526,
      "step": 800
    },
    {
      "epoch": 0.11167792637529299,
      "grad_norm": 0.1428445726633072,
      "learning_rate": 0.0002,
      "loss": 0.3518,
      "step": 810
    },
    {
      "epoch": 0.11305666620708672,
      "grad_norm": 0.23716595768928528,
      "learning_rate": 0.0002,
      "loss": 0.3501,
      "step": 820
    },
    {
      "epoch": 0.11443540603888046,
      "grad_norm": 0.07993923872709274,
      "learning_rate": 0.0002,
      "loss": 0.349,
      "step": 830
    },
    {
      "epoch": 0.1158141458706742,
      "grad_norm": 1.2958595752716064,
      "learning_rate": 0.0002,
      "loss": 0.352,
      "step": 840
    },
    {
      "epoch": 0.11719288570246794,
      "grad_norm": 1.6257132291793823,
      "learning_rate": 0.0002,
      "loss": 0.3589,
      "step": 850
    },
    {
      "epoch": 0.11857162553426169,
      "grad_norm": 0.20367591083049774,
      "learning_rate": 0.0002,
      "loss": 0.3602,
      "step": 860
    },
    {
      "epoch": 0.11995036536605543,
      "grad_norm": 1.147210955619812,
      "learning_rate": 0.0002,
      "loss": 0.3619,
      "step": 870
    },
    {
      "epoch": 0.12132910519784916,
      "grad_norm": 0.19706425070762634,
      "learning_rate": 0.0002,
      "loss": 0.3536,
      "step": 880
    },
    {
      "epoch": 0.12270784502964291,
      "grad_norm": 0.17990930378437042,
      "learning_rate": 0.0002,
      "loss": 0.3501,
      "step": 890
    },
    {
      "epoch": 0.12408658486143664,
      "grad_norm": 0.5770463943481445,
      "learning_rate": 0.0002,
      "loss": 0.3509,
      "step": 900
    },
    {
      "epoch": 0.12546532469323038,
      "grad_norm": 0.24645955860614777,
      "learning_rate": 0.0002,
      "loss": 0.3526,
      "step": 910
    },
    {
      "epoch": 0.12684406452502414,
      "grad_norm": 0.15745119750499725,
      "learning_rate": 0.0002,
      "loss": 0.3503,
      "step": 920
    },
    {
      "epoch": 0.12822280435681788,
      "grad_norm": 0.054484590888023376,
      "learning_rate": 0.0002,
      "loss": 0.3508,
      "step": 930
    },
    {
      "epoch": 0.1296015441886116,
      "grad_norm": 0.30564025044441223,
      "learning_rate": 0.0002,
      "loss": 0.3489,
      "step": 940
    },
    {
      "epoch": 0.13098028402040535,
      "grad_norm": 0.3614678382873535,
      "learning_rate": 0.0002,
      "loss": 0.3477,
      "step": 950
    },
    {
      "epoch": 0.13235902385219908,
      "grad_norm": 0.703029990196228,
      "learning_rate": 0.0002,
      "loss": 0.3552,
      "step": 960
    },
    {
      "epoch": 0.13373776368399284,
      "grad_norm": 1.1954560279846191,
      "learning_rate": 0.0002,
      "loss": 0.3528,
      "step": 970
    },
    {
      "epoch": 0.13511650351578658,
      "grad_norm": 0.8106504678726196,
      "learning_rate": 0.0002,
      "loss": 0.3586,
      "step": 980
    },
    {
      "epoch": 0.13649524334758031,
      "grad_norm": 0.40758854150772095,
      "learning_rate": 0.0002,
      "loss": 0.3908,
      "step": 990
    },
    {
      "epoch": 0.13787398317937405,
      "grad_norm": 0.613096296787262,
      "learning_rate": 0.0002,
      "loss": 0.3519,
      "step": 1000
    },
    {
      "epoch": 0.13925272301116778,
      "grad_norm": 0.38185614347457886,
      "learning_rate": 0.0002,
      "loss": 0.3506,
      "step": 1010
    },
    {
      "epoch": 0.14063146284296155,
      "grad_norm": 0.07220327854156494,
      "learning_rate": 0.0002,
      "loss": 0.3472,
      "step": 1020
    },
    {
      "epoch": 0.14201020267475528,
      "grad_norm": 0.1451689898967743,
      "learning_rate": 0.0002,
      "loss": 0.3534,
      "step": 1030
    },
    {
      "epoch": 0.14338894250654902,
      "grad_norm": 0.08052591234445572,
      "learning_rate": 0.0002,
      "loss": 0.3476,
      "step": 1040
    },
    {
      "epoch": 0.14476768233834275,
      "grad_norm": 1.0108163356781006,
      "learning_rate": 0.0002,
      "loss": 0.3508,
      "step": 1050
    },
    {
      "epoch": 0.1461464221701365,
      "grad_norm": 0.5895722508430481,
      "learning_rate": 0.0002,
      "loss": 0.3541,
      "step": 1060
    },
    {
      "epoch": 0.14752516200193025,
      "grad_norm": 0.6988415718078613,
      "learning_rate": 0.0002,
      "loss": 0.3512,
      "step": 1070
    },
    {
      "epoch": 0.14890390183372398,
      "grad_norm": 0.54078608751297,
      "learning_rate": 0.0002,
      "loss": 0.3479,
      "step": 1080
    },
    {
      "epoch": 0.15028264166551772,
      "grad_norm": 0.19162333011627197,
      "learning_rate": 0.0002,
      "loss": 0.3518,
      "step": 1090
    },
    {
      "epoch": 0.15166138149731145,
      "grad_norm": 0.36928215622901917,
      "learning_rate": 0.0002,
      "loss": 0.3505,
      "step": 1100
    },
    {
      "epoch": 0.1530401213291052,
      "grad_norm": 0.572607696056366,
      "learning_rate": 0.0002,
      "loss": 0.355,
      "step": 1110
    },
    {
      "epoch": 0.15441886116089895,
      "grad_norm": 0.20841191709041595,
      "learning_rate": 0.0002,
      "loss": 0.348,
      "step": 1120
    },
    {
      "epoch": 0.1557976009926927,
      "grad_norm": 0.04682110995054245,
      "learning_rate": 0.0002,
      "loss": 0.3502,
      "step": 1130
    },
    {
      "epoch": 0.15717634082448642,
      "grad_norm": 0.867899477481842,
      "learning_rate": 0.0002,
      "loss": 0.3476,
      "step": 1140
    },
    {
      "epoch": 0.15855508065628016,
      "grad_norm": 0.2828502655029297,
      "learning_rate": 0.0002,
      "loss": 0.3525,
      "step": 1150
    },
    {
      "epoch": 0.1599338204880739,
      "grad_norm": 0.44510889053344727,
      "learning_rate": 0.0002,
      "loss": 0.35,
      "step": 1160
    },
    {
      "epoch": 0.16131256031986765,
      "grad_norm": 0.1896822154521942,
      "learning_rate": 0.0002,
      "loss": 0.3493,
      "step": 1170
    },
    {
      "epoch": 0.1626913001516614,
      "grad_norm": 0.15781590342521667,
      "learning_rate": 0.0002,
      "loss": 0.3477,
      "step": 1180
    },
    {
      "epoch": 0.16407003998345512,
      "grad_norm": 0.2315225899219513,
      "learning_rate": 0.0002,
      "loss": 0.3498,
      "step": 1190
    },
    {
      "epoch": 0.16544877981524886,
      "grad_norm": 0.2198018729686737,
      "learning_rate": 0.0002,
      "loss": 0.3484,
      "step": 1200
    },
    {
      "epoch": 0.1668275196470426,
      "grad_norm": 0.2039571851491928,
      "learning_rate": 0.0002,
      "loss": 0.348,
      "step": 1210
    },
    {
      "epoch": 0.16820625947883636,
      "grad_norm": 0.009352603927254677,
      "learning_rate": 0.0002,
      "loss": 0.3481,
      "step": 1220
    },
    {
      "epoch": 0.1695849993106301,
      "grad_norm": 0.2558707892894745,
      "learning_rate": 0.0002,
      "loss": 0.3475,
      "step": 1230
    },
    {
      "epoch": 0.17096373914242383,
      "grad_norm": 0.07278712838888168,
      "learning_rate": 0.0002,
      "loss": 0.3471,
      "step": 1240
    },
    {
      "epoch": 0.17234247897421756,
      "grad_norm": 0.4133436381816864,
      "learning_rate": 0.0002,
      "loss": 0.3453,
      "step": 1250
    },
    {
      "epoch": 0.1737212188060113,
      "grad_norm": 0.16729828715324402,
      "learning_rate": 0.0002,
      "loss": 0.3527,
      "step": 1260
    },
    {
      "epoch": 0.17509995863780506,
      "grad_norm": 0.33326980471611023,
      "learning_rate": 0.0002,
      "loss": 0.3463,
      "step": 1270
    },
    {
      "epoch": 0.1764786984695988,
      "grad_norm": 0.7140666246414185,
      "learning_rate": 0.0002,
      "loss": 0.3627,
      "step": 1280
    },
    {
      "epoch": 0.17785743830139253,
      "grad_norm": 0.17751634120941162,
      "learning_rate": 0.0002,
      "loss": 0.3506,
      "step": 1290
    },
    {
      "epoch": 0.17923617813318626,
      "grad_norm": 0.44009125232696533,
      "learning_rate": 0.0002,
      "loss": 0.3516,
      "step": 1300
    },
    {
      "epoch": 0.18061491796498,
      "grad_norm": 0.07371579110622406,
      "learning_rate": 0.0002,
      "loss": 0.3479,
      "step": 1310
    },
    {
      "epoch": 0.18199365779677376,
      "grad_norm": 0.6804266571998596,
      "learning_rate": 0.0002,
      "loss": 0.3476,
      "step": 1320
    },
    {
      "epoch": 0.1833723976285675,
      "grad_norm": 0.19634029269218445,
      "learning_rate": 0.0002,
      "loss": 0.354,
      "step": 1330
    },
    {
      "epoch": 0.18475113746036123,
      "grad_norm": 0.34020882844924927,
      "learning_rate": 0.0002,
      "loss": 0.3481,
      "step": 1340
    },
    {
      "epoch": 0.18612987729215497,
      "grad_norm": 0.38502731919288635,
      "learning_rate": 0.0002,
      "loss": 0.3501,
      "step": 1350
    },
    {
      "epoch": 0.1875086171239487,
      "grad_norm": 0.0810522586107254,
      "learning_rate": 0.0002,
      "loss": 0.3473,
      "step": 1360
    },
    {
      "epoch": 0.18888735695574246,
      "grad_norm": 0.4057389497756958,
      "learning_rate": 0.0002,
      "loss": 0.3489,
      "step": 1370
    },
    {
      "epoch": 0.1902660967875362,
      "grad_norm": 0.17514599859714508,
      "learning_rate": 0.0002,
      "loss": 0.3472,
      "step": 1380
    },
    {
      "epoch": 0.19164483661932993,
      "grad_norm": 0.10964088141918182,
      "learning_rate": 0.0002,
      "loss": 0.3479,
      "step": 1390
    },
    {
      "epoch": 0.19302357645112367,
      "grad_norm": 0.20920871198177338,
      "learning_rate": 0.0002,
      "loss": 0.3488,
      "step": 1400
    },
    {
      "epoch": 0.1944023162829174,
      "grad_norm": 1.149121880531311,
      "learning_rate": 0.0002,
      "loss": 0.3548,
      "step": 1410
    },
    {
      "epoch": 0.19578105611471117,
      "grad_norm": 1.3394649028778076,
      "learning_rate": 0.0002,
      "loss": 0.3495,
      "step": 1420
    },
    {
      "epoch": 0.1971597959465049,
      "grad_norm": 1.2763960361480713,
      "learning_rate": 0.0002,
      "loss": 0.3679,
      "step": 1430
    },
    {
      "epoch": 0.19853853577829864,
      "grad_norm": 0.5421571731567383,
      "learning_rate": 0.0002,
      "loss": 0.3538,
      "step": 1440
    },
    {
      "epoch": 0.19991727561009237,
      "grad_norm": 0.22273503243923187,
      "learning_rate": 0.0002,
      "loss": 0.3518,
      "step": 1450
    },
    {
      "epoch": 0.2012960154418861,
      "grad_norm": 0.6335702538490295,
      "learning_rate": 0.0002,
      "loss": 0.3481,
      "step": 1460
    },
    {
      "epoch": 0.20267475527367987,
      "grad_norm": 0.7090324759483337,
      "learning_rate": 0.0002,
      "loss": 0.3486,
      "step": 1470
    },
    {
      "epoch": 0.2040534951054736,
      "grad_norm": 0.011333847418427467,
      "learning_rate": 0.0002,
      "loss": 0.3476,
      "step": 1480
    },
    {
      "epoch": 0.20543223493726734,
      "grad_norm": 0.24088676273822784,
      "learning_rate": 0.0002,
      "loss": 0.3569,
      "step": 1490
    },
    {
      "epoch": 0.20681097476906107,
      "grad_norm": 0.8654371500015259,
      "learning_rate": 0.0002,
      "loss": 0.3528,
      "step": 1500
    },
    {
      "epoch": 0.2081897146008548,
      "grad_norm": 0.06135034188628197,
      "learning_rate": 0.0002,
      "loss": 0.3509,
      "step": 1510
    },
    {
      "epoch": 0.20956845443264857,
      "grad_norm": 0.38141730427742004,
      "learning_rate": 0.0002,
      "loss": 0.3474,
      "step": 1520
    },
    {
      "epoch": 0.2109471942644423,
      "grad_norm": 1.1622456312179565,
      "learning_rate": 0.0002,
      "loss": 0.353,
      "step": 1530
    },
    {
      "epoch": 0.21232593409623604,
      "grad_norm": 0.5747712254524231,
      "learning_rate": 0.0002,
      "loss": 0.3513,
      "step": 1540
    },
    {
      "epoch": 0.21370467392802978,
      "grad_norm": 0.09723293781280518,
      "learning_rate": 0.0002,
      "loss": 0.3482,
      "step": 1550
    },
    {
      "epoch": 0.2150834137598235,
      "grad_norm": 0.18574804067611694,
      "learning_rate": 0.0002,
      "loss": 0.3512,
      "step": 1560
    },
    {
      "epoch": 0.21646215359161727,
      "grad_norm": 0.33651217818260193,
      "learning_rate": 0.0002,
      "loss": 0.349,
      "step": 1570
    },
    {
      "epoch": 0.217840893423411,
      "grad_norm": 0.07309216260910034,
      "learning_rate": 0.0002,
      "loss": 0.3493,
      "step": 1580
    },
    {
      "epoch": 0.21921963325520474,
      "grad_norm": 0.19346486032009125,
      "learning_rate": 0.0002,
      "loss": 0.3478,
      "step": 1590
    },
    {
      "epoch": 0.22059837308699848,
      "grad_norm": 0.3398933708667755,
      "learning_rate": 0.0002,
      "loss": 0.3496,
      "step": 1600
    },
    {
      "epoch": 0.2219771129187922,
      "grad_norm": 0.34032130241394043,
      "learning_rate": 0.0002,
      "loss": 0.3488,
      "step": 1610
    },
    {
      "epoch": 0.22335585275058598,
      "grad_norm": 0.901030421257019,
      "learning_rate": 0.0002,
      "loss": 0.3531,
      "step": 1620
    },
    {
      "epoch": 0.2247345925823797,
      "grad_norm": 0.500088632106781,
      "learning_rate": 0.0002,
      "loss": 0.3516,
      "step": 1630
    },
    {
      "epoch": 0.22611333241417345,
      "grad_norm": 0.3230324387550354,
      "learning_rate": 0.0002,
      "loss": 0.3546,
      "step": 1640
    },
    {
      "epoch": 0.22749207224596718,
      "grad_norm": 1.2476601600646973,
      "learning_rate": 0.0002,
      "loss": 0.3512,
      "step": 1650
    },
    {
      "epoch": 0.22887081207776092,
      "grad_norm": 0.23318485915660858,
      "learning_rate": 0.0002,
      "loss": 0.3456,
      "step": 1660
    },
    {
      "epoch": 0.23024955190955468,
      "grad_norm": 0.472400963306427,
      "learning_rate": 0.0002,
      "loss": 0.3551,
      "step": 1670
    },
    {
      "epoch": 0.2316282917413484,
      "grad_norm": 0.04836912825703621,
      "learning_rate": 0.0002,
      "loss": 0.3505,
      "step": 1680
    },
    {
      "epoch": 0.23300703157314215,
      "grad_norm": 0.34590113162994385,
      "learning_rate": 0.0002,
      "loss": 0.3507,
      "step": 1690
    },
    {
      "epoch": 0.23438577140493588,
      "grad_norm": 0.23341989517211914,
      "learning_rate": 0.0002,
      "loss": 0.3478,
      "step": 1700
    },
    {
      "epoch": 0.23576451123672962,
      "grad_norm": 0.001271920627914369,
      "learning_rate": 0.0002,
      "loss": 0.3562,
      "step": 1710
    },
    {
      "epoch": 0.23714325106852338,
      "grad_norm": 0.20549911260604858,
      "learning_rate": 0.0002,
      "loss": 0.3479,
      "step": 1720
    },
    {
      "epoch": 0.23852199090031712,
      "grad_norm": 0.3825775384902954,
      "learning_rate": 0.0002,
      "loss": 0.3482,
      "step": 1730
    },
    {
      "epoch": 0.23990073073211085,
      "grad_norm": 0.028804048895835876,
      "learning_rate": 0.0002,
      "loss": 0.3481,
      "step": 1740
    },
    {
      "epoch": 0.24127947056390459,
      "grad_norm": 0.04462611302733421,
      "learning_rate": 0.0002,
      "loss": 0.3477,
      "step": 1750
    },
    {
      "epoch": 0.24265821039569832,
      "grad_norm": 0.6634818315505981,
      "learning_rate": 0.0002,
      "loss": 0.3501,
      "step": 1760
    },
    {
      "epoch": 0.24403695022749208,
      "grad_norm": 1.3807406425476074,
      "learning_rate": 0.0002,
      "loss": 0.3547,
      "step": 1770
    },
    {
      "epoch": 0.24541569005928582,
      "grad_norm": 0.24347831308841705,
      "learning_rate": 0.0002,
      "loss": 0.3473,
      "step": 1780
    },
    {
      "epoch": 0.24679442989107955,
      "grad_norm": 0.61258465051651,
      "learning_rate": 0.0002,
      "loss": 0.3492,
      "step": 1790
    },
    {
      "epoch": 0.2481731697228733,
      "grad_norm": 0.011543272994458675,
      "learning_rate": 0.0002,
      "loss": 0.347,
      "step": 1800
    },
    {
      "epoch": 0.24955190955466702,
      "grad_norm": 0.09996844828128815,
      "learning_rate": 0.0002,
      "loss": 0.3474,
      "step": 1810
    },
    {
      "epoch": 0.25093064938646076,
      "grad_norm": 0.17044603824615479,
      "learning_rate": 0.0002,
      "loss": 0.3471,
      "step": 1820
    },
    {
      "epoch": 0.2523093892182545,
      "grad_norm": 0.17940489947795868,
      "learning_rate": 0.0002,
      "loss": 0.3478,
      "step": 1830
    },
    {
      "epoch": 0.2536881290500483,
      "grad_norm": 0.21834205090999603,
      "learning_rate": 0.0002,
      "loss": 0.3461,
      "step": 1840
    },
    {
      "epoch": 0.255066868881842,
      "grad_norm": 0.2272634655237198,
      "learning_rate": 0.0002,
      "loss": 0.347,
      "step": 1850
    },
    {
      "epoch": 0.25644560871363575,
      "grad_norm": 0.18734070658683777,
      "learning_rate": 0.0002,
      "loss": 0.3497,
      "step": 1860
    },
    {
      "epoch": 0.25782434854542946,
      "grad_norm": 0.04078834876418114,
      "learning_rate": 0.0002,
      "loss": 0.3493,
      "step": 1870
    },
    {
      "epoch": 0.2592030883772232,
      "grad_norm": 0.3463903069496155,
      "learning_rate": 0.0002,
      "loss": 0.3494,
      "step": 1880
    },
    {
      "epoch": 0.260581828209017,
      "grad_norm": 0.3256634771823883,
      "learning_rate": 0.0002,
      "loss": 0.3488,
      "step": 1890
    },
    {
      "epoch": 0.2619605680408107,
      "grad_norm": 0.04404434189200401,
      "learning_rate": 0.0002,
      "loss": 0.3509,
      "step": 1900
    },
    {
      "epoch": 0.26333930787260446,
      "grad_norm": 0.20446011424064636,
      "learning_rate": 0.0002,
      "loss": 0.3526,
      "step": 1910
    },
    {
      "epoch": 0.26471804770439816,
      "grad_norm": 0.06089179962873459,
      "learning_rate": 0.0002,
      "loss": 0.3491,
      "step": 1920
    },
    {
      "epoch": 0.2660967875361919,
      "grad_norm": 0.32661405205726624,
      "learning_rate": 0.0002,
      "loss": 0.35,
      "step": 1930
    },
    {
      "epoch": 0.2674755273679857,
      "grad_norm": 0.09823151677846909,
      "learning_rate": 0.0002,
      "loss": 0.3482,
      "step": 1940
    },
    {
      "epoch": 0.2688542671997794,
      "grad_norm": 0.11397412419319153,
      "learning_rate": 0.0002,
      "loss": 0.347,
      "step": 1950
    },
    {
      "epoch": 0.27023300703157316,
      "grad_norm": 0.2632172703742981,
      "learning_rate": 0.0002,
      "loss": 0.352,
      "step": 1960
    },
    {
      "epoch": 0.27161174686336687,
      "grad_norm": 0.27215296030044556,
      "learning_rate": 0.0002,
      "loss": 0.3482,
      "step": 1970
    },
    {
      "epoch": 0.27299048669516063,
      "grad_norm": 0.20016005635261536,
      "learning_rate": 0.0002,
      "loss": 0.3489,
      "step": 1980
    },
    {
      "epoch": 0.2743692265269544,
      "grad_norm": 0.3071637749671936,
      "learning_rate": 0.0002,
      "loss": 0.354,
      "step": 1990
    },
    {
      "epoch": 0.2757479663587481,
      "grad_norm": 1.0373337268829346,
      "learning_rate": 0.0002,
      "loss": 0.3481,
      "step": 2000
    },
    {
      "epoch": 0.27712670619054186,
      "grad_norm": 0.49023887515068054,
      "learning_rate": 0.0002,
      "loss": 0.3501,
      "step": 2010
    },
    {
      "epoch": 0.27850544602233557,
      "grad_norm": 0.7107551097869873,
      "learning_rate": 0.0002,
      "loss": 0.3515,
      "step": 2020
    },
    {
      "epoch": 0.27988418585412933,
      "grad_norm": 1.5392403602600098,
      "learning_rate": 0.0002,
      "loss": 0.3468,
      "step": 2030
    },
    {
      "epoch": 0.2812629256859231,
      "grad_norm": 2.7259292602539062,
      "learning_rate": 0.0002,
      "loss": 0.3585,
      "step": 2040
    },
    {
      "epoch": 0.2826416655177168,
      "grad_norm": 2.783911943435669,
      "learning_rate": 0.0002,
      "loss": 0.3703,
      "step": 2050
    },
    {
      "epoch": 0.28402040534951056,
      "grad_norm": 0.015472312457859516,
      "learning_rate": 0.0002,
      "loss": 0.3506,
      "step": 2060
    },
    {
      "epoch": 0.28539914518130427,
      "grad_norm": 0.7833682298660278,
      "learning_rate": 0.0002,
      "loss": 0.3483,
      "step": 2070
    },
    {
      "epoch": 0.28677788501309803,
      "grad_norm": 0.4828648269176483,
      "learning_rate": 0.0002,
      "loss": 0.3479,
      "step": 2080
    },
    {
      "epoch": 0.2881566248448918,
      "grad_norm": 1.035452961921692,
      "learning_rate": 0.0002,
      "loss": 0.348,
      "step": 2090
    },
    {
      "epoch": 0.2895353646766855,
      "grad_norm": 0.14552044868469238,
      "learning_rate": 0.0002,
      "loss": 0.3478,
      "step": 2100
    },
    {
      "epoch": 0.29091410450847927,
      "grad_norm": 0.9040600061416626,
      "learning_rate": 0.0002,
      "loss": 0.3508,
      "step": 2110
    },
    {
      "epoch": 0.292292844340273,
      "grad_norm": 0.36334726214408875,
      "learning_rate": 0.0002,
      "loss": 0.3472,
      "step": 2120
    },
    {
      "epoch": 0.29367158417206674,
      "grad_norm": 0.6746371984481812,
      "learning_rate": 0.0002,
      "loss": 0.3546,
      "step": 2130
    },
    {
      "epoch": 0.2950503240038605,
      "grad_norm": 0.49551185965538025,
      "learning_rate": 0.0002,
      "loss": 0.3464,
      "step": 2140
    },
    {
      "epoch": 0.2964290638356542,
      "grad_norm": 0.04471205919981003,
      "learning_rate": 0.0002,
      "loss": 0.347,
      "step": 2150
    },
    {
      "epoch": 0.29780780366744797,
      "grad_norm": 0.12050581723451614,
      "learning_rate": 0.0002,
      "loss": 0.3475,
      "step": 2160
    },
    {
      "epoch": 0.2991865434992417,
      "grad_norm": 0.04496219381690025,
      "learning_rate": 0.0002,
      "loss": 0.346,
      "step": 2170
    },
    {
      "epoch": 0.30056528333103544,
      "grad_norm": 0.11529727280139923,
      "learning_rate": 0.0002,
      "loss": 0.3475,
      "step": 2180
    },
    {
      "epoch": 0.3019440231628292,
      "grad_norm": 0.12811416387557983,
      "learning_rate": 0.0002,
      "loss": 0.3466,
      "step": 2190
    },
    {
      "epoch": 0.3033227629946229,
      "grad_norm": 0.0471930056810379,
      "learning_rate": 0.0002,
      "loss": 0.3468,
      "step": 2200
    },
    {
      "epoch": 0.30470150282641667,
      "grad_norm": 2.6910486221313477,
      "learning_rate": 0.0002,
      "loss": 0.351,
      "step": 2210
    },
    {
      "epoch": 0.3060802426582104,
      "grad_norm": 0.3302823305130005,
      "learning_rate": 0.0002,
      "loss": 0.3498,
      "step": 2220
    },
    {
      "epoch": 0.30745898249000414,
      "grad_norm": 10.41483211517334,
      "learning_rate": 0.0002,
      "loss": 0.3694,
      "step": 2230
    },
    {
      "epoch": 0.3088377223217979,
      "grad_norm": 12.485153198242188,
      "learning_rate": 0.0002,
      "loss": 0.3816,
      "step": 2240
    },
    {
      "epoch": 0.3102164621535916,
      "grad_norm": 1.1303633451461792,
      "learning_rate": 0.0002,
      "loss": 0.3554,
      "step": 2250
    },
    {
      "epoch": 0.3115952019853854,
      "grad_norm": 0.08931141346693039,
      "learning_rate": 0.0002,
      "loss": 0.3824,
      "step": 2260
    },
    {
      "epoch": 0.3129739418171791,
      "grad_norm": 0.20106171071529388,
      "learning_rate": 0.0002,
      "loss": 0.3483,
      "step": 2270
    },
    {
      "epoch": 0.31435268164897284,
      "grad_norm": 1.7839974164962769,
      "learning_rate": 0.0002,
      "loss": 0.354,
      "step": 2280
    },
    {
      "epoch": 0.3157314214807666,
      "grad_norm": 0.6848243474960327,
      "learning_rate": 0.0002,
      "loss": 0.356,
      "step": 2290
    },
    {
      "epoch": 0.3171101613125603,
      "grad_norm": 1.5771201848983765,
      "learning_rate": 0.0002,
      "loss": 0.35,
      "step": 2300
    },
    {
      "epoch": 0.3184889011443541,
      "grad_norm": 1.2237290143966675,
      "learning_rate": 0.0002,
      "loss": 0.3521,
      "step": 2310
    },
    {
      "epoch": 0.3198676409761478,
      "grad_norm": 0.04607740044593811,
      "learning_rate": 0.0002,
      "loss": 0.3455,
      "step": 2320
    },
    {
      "epoch": 0.32124638080794155,
      "grad_norm": 0.0691566988825798,
      "learning_rate": 0.0002,
      "loss": 0.6835,
      "step": 2330
    },
    {
      "epoch": 0.3226251206397353,
      "grad_norm": 0.17569975554943085,
      "learning_rate": 0.0002,
      "loss": 0.3471,
      "step": 2340
    },
    {
      "epoch": 0.324003860471529,
      "grad_norm": 0.23300354182720184,
      "learning_rate": 0.0002,
      "loss": 0.3496,
      "step": 2350
    },
    {
      "epoch": 0.3253826003033228,
      "grad_norm": 0.2859944999217987,
      "learning_rate": 0.0002,
      "loss": 0.3544,
      "step": 2360
    },
    {
      "epoch": 0.3267613401351165,
      "grad_norm": 140.8813018798828,
      "learning_rate": 0.0002,
      "loss": 0.5267,
      "step": 2370
    },
    {
      "epoch": 0.32814007996691025,
      "grad_norm": 0.4637663960456848,
      "learning_rate": 0.0002,
      "loss": 0.3538,
      "step": 2380
    },
    {
      "epoch": 0.329518819798704,
      "grad_norm": 0.2639578580856323,
      "learning_rate": 0.0002,
      "loss": 0.3482,
      "step": 2390
    },
    {
      "epoch": 0.3308975596304977,
      "grad_norm": 0.02536751516163349,
      "learning_rate": 0.0002,
      "loss": 0.3468,
      "step": 2400
    },
    {
      "epoch": 0.3322762994622915,
      "grad_norm": 0.20604869723320007,
      "learning_rate": 0.0002,
      "loss": 0.3494,
      "step": 2410
    },
    {
      "epoch": 0.3336550392940852,
      "grad_norm": 0.3395155668258667,
      "learning_rate": 0.0002,
      "loss": 0.3492,
      "step": 2420
    },
    {
      "epoch": 0.33503377912587895,
      "grad_norm": 0.07880198955535889,
      "learning_rate": 0.0002,
      "loss": 0.3474,
      "step": 2430
    },
    {
      "epoch": 0.3364125189576727,
      "grad_norm": 0.18549232184886932,
      "learning_rate": 0.0002,
      "loss": 0.3515,
      "step": 2440
    },
    {
      "epoch": 0.3377912587894664,
      "grad_norm": 0.030663492158055305,
      "learning_rate": 0.0002,
      "loss": 0.3475,
      "step": 2450
    },
    {
      "epoch": 0.3391699986212602,
      "grad_norm": 0.2116040587425232,
      "learning_rate": 0.0002,
      "loss": 0.3484,
      "step": 2460
    },
    {
      "epoch": 0.3405487384530539,
      "grad_norm": 0.06269362568855286,
      "learning_rate": 0.0002,
      "loss": 0.3476,
      "step": 2470
    },
    {
      "epoch": 0.34192747828484765,
      "grad_norm": 0.13339760899543762,
      "learning_rate": 0.0002,
      "loss": 0.3477,
      "step": 2480
    },
    {
      "epoch": 0.3433062181166414,
      "grad_norm": 0.3202485740184784,
      "learning_rate": 0.0002,
      "loss": 0.3477,
      "step": 2490
    },
    {
      "epoch": 0.3446849579484351,
      "grad_norm": 0.2552681565284729,
      "learning_rate": 0.0002,
      "loss": 0.3488,
      "step": 2500
    },
    {
      "epoch": 0.3460636977802289,
      "grad_norm": 0.5942317247390747,
      "learning_rate": 0.0002,
      "loss": 0.3513,
      "step": 2510
    },
    {
      "epoch": 0.3474424376120226,
      "grad_norm": 0.865836501121521,
      "learning_rate": 0.0002,
      "loss": 0.3517,
      "step": 2520
    },
    {
      "epoch": 0.34882117744381635,
      "grad_norm": 0.47955092787742615,
      "learning_rate": 0.0002,
      "loss": 0.3508,
      "step": 2530
    },
    {
      "epoch": 0.3501999172756101,
      "grad_norm": 0.07025259733200073,
      "learning_rate": 0.0002,
      "loss": 0.3476,
      "step": 2540
    },
    {
      "epoch": 0.3515786571074038,
      "grad_norm": 0.08550610393285751,
      "learning_rate": 0.0002,
      "loss": 0.3483,
      "step": 2550
    },
    {
      "epoch": 0.3529573969391976,
      "grad_norm": 0.07628582417964935,
      "learning_rate": 0.0002,
      "loss": 0.3503,
      "step": 2560
    },
    {
      "epoch": 0.3543361367709913,
      "grad_norm": 0.09870871156454086,
      "learning_rate": 0.0002,
      "loss": 0.3506,
      "step": 2570
    },
    {
      "epoch": 0.35571487660278506,
      "grad_norm": 0.03877127543091774,
      "learning_rate": 0.0002,
      "loss": 0.3477,
      "step": 2580
    },
    {
      "epoch": 0.3570936164345788,
      "grad_norm": 0.01288803294301033,
      "learning_rate": 0.0002,
      "loss": 0.3472,
      "step": 2590
    },
    {
      "epoch": 0.3584723562663725,
      "grad_norm": 0.2647448778152466,
      "learning_rate": 0.0002,
      "loss": 0.3499,
      "step": 2600
    },
    {
      "epoch": 0.3598510960981663,
      "grad_norm": 0.5602620840072632,
      "learning_rate": 0.0002,
      "loss": 0.3478,
      "step": 2610
    },
    {
      "epoch": 0.36122983592996,
      "grad_norm": 0.788970947265625,
      "learning_rate": 0.0002,
      "loss": 0.3513,
      "step": 2620
    },
    {
      "epoch": 0.36260857576175376,
      "grad_norm": 0.6836252212524414,
      "learning_rate": 0.0002,
      "loss": 0.3502,
      "step": 2630
    },
    {
      "epoch": 0.3639873155935475,
      "grad_norm": 0.14788776636123657,
      "learning_rate": 0.0002,
      "loss": 0.3654,
      "step": 2640
    },
    {
      "epoch": 0.36536605542534123,
      "grad_norm": 0.7003443837165833,
      "learning_rate": 0.0002,
      "loss": 0.3526,
      "step": 2650
    },
    {
      "epoch": 0.366744795257135,
      "grad_norm": 0.600309431552887,
      "learning_rate": 0.0002,
      "loss": 0.3475,
      "step": 2660
    },
    {
      "epoch": 0.3681235350889287,
      "grad_norm": 0.10942361503839493,
      "learning_rate": 0.0002,
      "loss": 0.3467,
      "step": 2670
    },
    {
      "epoch": 0.36950227492072246,
      "grad_norm": 0.2913811504840851,
      "learning_rate": 0.0002,
      "loss": 0.3473,
      "step": 2680
    },
    {
      "epoch": 0.3708810147525162,
      "grad_norm": 0.11519110947847366,
      "learning_rate": 0.0002,
      "loss": 0.3492,
      "step": 2690
    },
    {
      "epoch": 0.37225975458430993,
      "grad_norm": 0.04020654037594795,
      "learning_rate": 0.0002,
      "loss": 0.3465,
      "step": 2700
    },
    {
      "epoch": 0.3736384944161037,
      "grad_norm": 0.3286079168319702,
      "learning_rate": 0.0002,
      "loss": 0.3481,
      "step": 2710
    },
    {
      "epoch": 0.3750172342478974,
      "grad_norm": 0.09239518642425537,
      "learning_rate": 0.0002,
      "loss": 0.3478,
      "step": 2720
    },
    {
      "epoch": 0.37639597407969116,
      "grad_norm": 0.18499009311199188,
      "learning_rate": 0.0002,
      "loss": 0.3474,
      "step": 2730
    },
    {
      "epoch": 0.3777747139114849,
      "grad_norm": 0.031250640749931335,
      "learning_rate": 0.0002,
      "loss": 0.3478,
      "step": 2740
    },
    {
      "epoch": 0.37915345374327863,
      "grad_norm": 0.013040668331086636,
      "learning_rate": 0.0002,
      "loss": 0.3463,
      "step": 2750
    },
    {
      "epoch": 0.3805321935750724,
      "grad_norm": 0.11167818307876587,
      "learning_rate": 0.0002,
      "loss": 0.35,
      "step": 2760
    },
    {
      "epoch": 0.3819109334068661,
      "grad_norm": 0.9923171401023865,
      "learning_rate": 0.0002,
      "loss": 0.3483,
      "step": 2770
    },
    {
      "epoch": 0.38328967323865987,
      "grad_norm": 0.7532585263252258,
      "learning_rate": 0.0002,
      "loss": 0.3493,
      "step": 2780
    },
    {
      "epoch": 0.38466841307045363,
      "grad_norm": 0.17266100645065308,
      "learning_rate": 0.0002,
      "loss": 0.3472,
      "step": 2790
    },
    {
      "epoch": 0.38604715290224734,
      "grad_norm": 0.07107715308666229,
      "learning_rate": 0.0002,
      "loss": 0.3465,
      "step": 2800
    },
    {
      "epoch": 0.3874258927340411,
      "grad_norm": 0.05912484973669052,
      "learning_rate": 0.0002,
      "loss": 0.3454,
      "step": 2810
    },
    {
      "epoch": 0.3888046325658348,
      "grad_norm": 0.18973538279533386,
      "learning_rate": 0.0002,
      "loss": 0.3485,
      "step": 2820
    },
    {
      "epoch": 0.39018337239762857,
      "grad_norm": 0.3309797942638397,
      "learning_rate": 0.0002,
      "loss": 0.3477,
      "step": 2830
    },
    {
      "epoch": 0.39156211222942233,
      "grad_norm": 0.1095886304974556,
      "learning_rate": 0.0002,
      "loss": 0.3479,
      "step": 2840
    },
    {
      "epoch": 0.39294085206121604,
      "grad_norm": 0.7054705619812012,
      "learning_rate": 0.0002,
      "loss": 0.3457,
      "step": 2850
    },
    {
      "epoch": 0.3943195918930098,
      "grad_norm": 0.35234084725379944,
      "learning_rate": 0.0002,
      "loss": 0.3519,
      "step": 2860
    },
    {
      "epoch": 0.3956983317248035,
      "grad_norm": 1.1833308935165405,
      "learning_rate": 0.0002,
      "loss": 0.3461,
      "step": 2870
    },
    {
      "epoch": 0.39707707155659727,
      "grad_norm": 0.046266939491033554,
      "learning_rate": 0.0002,
      "loss": 0.3478,
      "step": 2880
    },
    {
      "epoch": 0.39845581138839103,
      "grad_norm": 0.47695428133010864,
      "learning_rate": 0.0002,
      "loss": 0.3494,
      "step": 2890
    },
    {
      "epoch": 0.39983455122018474,
      "grad_norm": 0.14398415386676788,
      "learning_rate": 0.0002,
      "loss": 0.347,
      "step": 2900
    },
    {
      "epoch": 0.4012132910519785,
      "grad_norm": 0.0672549456357956,
      "learning_rate": 0.0002,
      "loss": 0.3446,
      "step": 2910
    },
    {
      "epoch": 0.4025920308837722,
      "grad_norm": 0.7201761603355408,
      "learning_rate": 0.0002,
      "loss": 0.3536,
      "step": 2920
    },
    {
      "epoch": 0.403970770715566,
      "grad_norm": 1.0536067485809326,
      "learning_rate": 0.0002,
      "loss": 0.3448,
      "step": 2930
    },
    {
      "epoch": 0.40534951054735974,
      "grad_norm": 0.4652802050113678,
      "learning_rate": 0.0002,
      "loss": 0.3572,
      "step": 2940
    },
    {
      "epoch": 0.40672825037915344,
      "grad_norm": 0.2714202404022217,
      "learning_rate": 0.0002,
      "loss": 0.3506,
      "step": 2950
    },
    {
      "epoch": 0.4081069902109472,
      "grad_norm": 0.056970611214637756,
      "learning_rate": 0.0002,
      "loss": 0.3466,
      "step": 2960
    },
    {
      "epoch": 0.4094857300427409,
      "grad_norm": 0.08109589666128159,
      "learning_rate": 0.0002,
      "loss": 0.3478,
      "step": 2970
    },
    {
      "epoch": 0.4108644698745347,
      "grad_norm": 0.2987334728240967,
      "learning_rate": 0.0002,
      "loss": 0.3482,
      "step": 2980
    },
    {
      "epoch": 0.41224320970632844,
      "grad_norm": 0.3519710898399353,
      "learning_rate": 0.0002,
      "loss": 0.351,
      "step": 2990
    },
    {
      "epoch": 0.41362194953812215,
      "grad_norm": 0.20754824578762054,
      "learning_rate": 0.0002,
      "loss": 0.3455,
      "step": 3000
    },
    {
      "epoch": 0.4150006893699159,
      "grad_norm": 0.00901414267718792,
      "learning_rate": 0.0002,
      "loss": 0.3497,
      "step": 3010
    },
    {
      "epoch": 0.4163794292017096,
      "grad_norm": 0.5444782972335815,
      "learning_rate": 0.0002,
      "loss": 0.3698,
      "step": 3020
    },
    {
      "epoch": 0.4177581690335034,
      "grad_norm": 1.026206374168396,
      "learning_rate": 0.0002,
      "loss": 0.3516,
      "step": 3030
    },
    {
      "epoch": 0.41913690886529714,
      "grad_norm": 0.4680636525154114,
      "learning_rate": 0.0002,
      "loss": 0.3488,
      "step": 3040
    },
    {
      "epoch": 0.42051564869709085,
      "grad_norm": 0.07180967926979065,
      "learning_rate": 0.0002,
      "loss": 0.3491,
      "step": 3050
    },
    {
      "epoch": 0.4218943885288846,
      "grad_norm": 1.1045209169387817,
      "learning_rate": 0.0002,
      "loss": 0.3497,
      "step": 3060
    },
    {
      "epoch": 0.4232731283606783,
      "grad_norm": 1.6731029748916626,
      "learning_rate": 0.0002,
      "loss": 0.3587,
      "step": 3070
    },
    {
      "epoch": 0.4246518681924721,
      "grad_norm": 0.44295939803123474,
      "learning_rate": 0.0002,
      "loss": 0.348,
      "step": 3080
    },
    {
      "epoch": 0.42603060802426584,
      "grad_norm": 0.011876541189849377,
      "learning_rate": 0.0002,
      "loss": 0.3471,
      "step": 3090
    },
    {
      "epoch": 0.42740934785605955,
      "grad_norm": 0.5493383407592773,
      "learning_rate": 0.0002,
      "loss": 0.3474,
      "step": 3100
    },
    {
      "epoch": 0.4287880876878533,
      "grad_norm": 0.013986635021865368,
      "learning_rate": 0.0002,
      "loss": 0.3467,
      "step": 3110
    },
    {
      "epoch": 0.430166827519647,
      "grad_norm": 0.11136174947023392,
      "learning_rate": 0.0002,
      "loss": 0.3465,
      "step": 3120
    },
    {
      "epoch": 0.4315455673514408,
      "grad_norm": 0.18628475069999695,
      "learning_rate": 0.0002,
      "loss": 0.347,
      "step": 3130
    },
    {
      "epoch": 0.43292430718323455,
      "grad_norm": 0.07927969843149185,
      "learning_rate": 0.0002,
      "loss": 0.3477,
      "step": 3140
    },
    {
      "epoch": 0.43430304701502825,
      "grad_norm": 0.32330620288848877,
      "learning_rate": 0.0002,
      "loss": 0.3477,
      "step": 3150
    },
    {
      "epoch": 0.435681786846822,
      "grad_norm": 0.4383370578289032,
      "learning_rate": 0.0002,
      "loss": 0.3491,
      "step": 3160
    },
    {
      "epoch": 0.4370605266786157,
      "grad_norm": 0.5952053070068359,
      "learning_rate": 0.0002,
      "loss": 0.3482,
      "step": 3170
    },
    {
      "epoch": 0.4384392665104095,
      "grad_norm": 1.106527328491211,
      "learning_rate": 0.0002,
      "loss": 0.3569,
      "step": 3180
    },
    {
      "epoch": 0.43981800634220325,
      "grad_norm": 0.6171185970306396,
      "learning_rate": 0.0002,
      "loss": 0.3474,
      "step": 3190
    },
    {
      "epoch": 0.44119674617399696,
      "grad_norm": 0.16300754249095917,
      "learning_rate": 0.0002,
      "loss": 0.3474,
      "step": 3200
    },
    {
      "epoch": 0.4425754860057907,
      "grad_norm": 0.03562415391206741,
      "learning_rate": 0.0002,
      "loss": 0.3474,
      "step": 3210
    },
    {
      "epoch": 0.4439542258375844,
      "grad_norm": 0.30006295442581177,
      "learning_rate": 0.0002,
      "loss": 0.3475,
      "step": 3220
    },
    {
      "epoch": 0.4453329656693782,
      "grad_norm": 0.4837903678417206,
      "learning_rate": 0.0002,
      "loss": 0.3618,
      "step": 3230
    },
    {
      "epoch": 0.44671170550117195,
      "grad_norm": 0.4861944317817688,
      "learning_rate": 0.0002,
      "loss": 0.3536,
      "step": 3240
    },
    {
      "epoch": 0.44809044533296566,
      "grad_norm": 1.048814296722412,
      "learning_rate": 0.0002,
      "loss": 0.3538,
      "step": 3250
    },
    {
      "epoch": 0.4494691851647594,
      "grad_norm": 0.6691413521766663,
      "learning_rate": 0.0002,
      "loss": 0.3515,
      "step": 3260
    },
    {
      "epoch": 0.45084792499655313,
      "grad_norm": 0.24432829022407532,
      "learning_rate": 0.0002,
      "loss": 0.3561,
      "step": 3270
    },
    {
      "epoch": 0.4522266648283469,
      "grad_norm": 0.529619038105011,
      "learning_rate": 0.0002,
      "loss": 0.3488,
      "step": 3280
    },
    {
      "epoch": 0.45360540466014065,
      "grad_norm": 0.10819733142852783,
      "learning_rate": 0.0002,
      "loss": 0.3464,
      "step": 3290
    },
    {
      "epoch": 0.45498414449193436,
      "grad_norm": 0.3517175614833832,
      "learning_rate": 0.0002,
      "loss": 0.347,
      "step": 3300
    },
    {
      "epoch": 0.4563628843237281,
      "grad_norm": 0.23464784026145935,
      "learning_rate": 0.0002,
      "loss": 0.3484,
      "step": 3310
    },
    {
      "epoch": 0.45774162415552183,
      "grad_norm": 0.1873646080493927,
      "learning_rate": 0.0002,
      "loss": 0.3517,
      "step": 3320
    },
    {
      "epoch": 0.4591203639873156,
      "grad_norm": 0.20844773948192596,
      "learning_rate": 0.0002,
      "loss": 0.352,
      "step": 3330
    },
    {
      "epoch": 0.46049910381910936,
      "grad_norm": 0.45384037494659424,
      "learning_rate": 0.0002,
      "loss": 0.3492,
      "step": 3340
    },
    {
      "epoch": 0.46187784365090306,
      "grad_norm": 0.32738834619522095,
      "learning_rate": 0.0002,
      "loss": 0.3509,
      "step": 3350
    },
    {
      "epoch": 0.4632565834826968,
      "grad_norm": 1.4738928079605103,
      "learning_rate": 0.0002,
      "loss": 0.3483,
      "step": 3360
    },
    {
      "epoch": 0.46463532331449053,
      "grad_norm": 0.5815828442573547,
      "learning_rate": 0.0002,
      "loss": 0.3509,
      "step": 3370
    },
    {
      "epoch": 0.4660140631462843,
      "grad_norm": 0.6721034049987793,
      "learning_rate": 0.0002,
      "loss": 0.3467,
      "step": 3380
    },
    {
      "epoch": 0.46739280297807806,
      "grad_norm": 0.8967618346214294,
      "learning_rate": 0.0002,
      "loss": 0.3548,
      "step": 3390
    },
    {
      "epoch": 0.46877154280987177,
      "grad_norm": 2.0388050079345703,
      "learning_rate": 0.0002,
      "loss": 0.3612,
      "step": 3400
    },
    {
      "epoch": 0.47015028264166553,
      "grad_norm": 0.2570609450340271,
      "learning_rate": 0.0002,
      "loss": 0.3489,
      "step": 3410
    },
    {
      "epoch": 0.47152902247345924,
      "grad_norm": 0.6751313209533691,
      "learning_rate": 0.0002,
      "loss": 0.3486,
      "step": 3420
    },
    {
      "epoch": 0.472907762305253,
      "grad_norm": 0.4083470106124878,
      "learning_rate": 0.0002,
      "loss": 0.3471,
      "step": 3430
    },
    {
      "epoch": 0.47428650213704676,
      "grad_norm": 0.09502824395895004,
      "learning_rate": 0.0002,
      "loss": 0.3479,
      "step": 3440
    },
    {
      "epoch": 0.47566524196884047,
      "grad_norm": 0.40624290704727173,
      "learning_rate": 0.0002,
      "loss": 0.3484,
      "step": 3450
    },
    {
      "epoch": 0.47704398180063423,
      "grad_norm": 0.1163792535662651,
      "learning_rate": 0.0002,
      "loss": 0.3469,
      "step": 3460
    },
    {
      "epoch": 0.47842272163242794,
      "grad_norm": 0.06055830419063568,
      "learning_rate": 0.0002,
      "loss": 0.3476,
      "step": 3470
    },
    {
      "epoch": 0.4798014614642217,
      "grad_norm": 0.21893176436424255,
      "learning_rate": 0.0002,
      "loss": 0.3465,
      "step": 3480
    },
    {
      "epoch": 0.48118020129601546,
      "grad_norm": 0.24376049637794495,
      "learning_rate": 0.0002,
      "loss": 0.3497,
      "step": 3490
    },
    {
      "epoch": 0.48255894112780917,
      "grad_norm": 0.14530010521411896,
      "learning_rate": 0.0002,
      "loss": 0.3441,
      "step": 3500
    },
    {
      "epoch": 0.48393768095960293,
      "grad_norm": 0.4784521460533142,
      "learning_rate": 0.0002,
      "loss": 0.3585,
      "step": 3510
    },
    {
      "epoch": 0.48531642079139664,
      "grad_norm": 1.5072555541992188,
      "learning_rate": 0.0002,
      "loss": 0.352,
      "step": 3520
    },
    {
      "epoch": 0.4866951606231904,
      "grad_norm": 1.2513431310653687,
      "learning_rate": 0.0002,
      "loss": 0.3495,
      "step": 3530
    },
    {
      "epoch": 0.48807390045498417,
      "grad_norm": 1.5765403509140015,
      "learning_rate": 0.0002,
      "loss": 0.3575,
      "step": 3540
    },
    {
      "epoch": 0.4894526402867779,
      "grad_norm": 2.110595703125,
      "learning_rate": 0.0002,
      "loss": 0.3464,
      "step": 3550
    },
    {
      "epoch": 0.49083138011857164,
      "grad_norm": 3.3377187252044678,
      "learning_rate": 0.0002,
      "loss": 0.3518,
      "step": 3560
    },
    {
      "epoch": 0.49221011995036534,
      "grad_norm": 1.1348721981048584,
      "learning_rate": 0.0002,
      "loss": 0.3461,
      "step": 3570
    },
    {
      "epoch": 0.4935888597821591,
      "grad_norm": 1.115633249282837,
      "learning_rate": 0.0002,
      "loss": 0.3477,
      "step": 3580
    },
    {
      "epoch": 0.49496759961395287,
      "grad_norm": 0.46544066071510315,
      "learning_rate": 0.0002,
      "loss": 0.3508,
      "step": 3590
    },
    {
      "epoch": 0.4963463394457466,
      "grad_norm": 1.0968178510665894,
      "learning_rate": 0.0002,
      "loss": 0.3491,
      "step": 3600
    },
    {
      "epoch": 0.49772507927754034,
      "grad_norm": 0.1590128093957901,
      "learning_rate": 0.0002,
      "loss": 0.3493,
      "step": 3610
    },
    {
      "epoch": 0.49910381910933405,
      "grad_norm": 0.0618724524974823,
      "learning_rate": 0.0002,
      "loss": 0.3467,
      "step": 3620
    },
    {
      "epoch": 0.5004825589411278,
      "grad_norm": 0.038633331656455994,
      "learning_rate": 0.0002,
      "loss": 0.347,
      "step": 3630
    },
    {
      "epoch": 0.5018612987729215,
      "grad_norm": 0.08056730031967163,
      "learning_rate": 0.0002,
      "loss": 0.3469,
      "step": 3640
    },
    {
      "epoch": 0.5032400386047153,
      "grad_norm": 0.07540078461170197,
      "learning_rate": 0.0002,
      "loss": 0.347,
      "step": 3650
    },
    {
      "epoch": 0.504618778436509,
      "grad_norm": 0.0971425250172615,
      "learning_rate": 0.0002,
      "loss": 0.349,
      "step": 3660
    },
    {
      "epoch": 0.5059975182683027,
      "grad_norm": 0.1605909764766693,
      "learning_rate": 0.0002,
      "loss": 0.3472,
      "step": 3670
    },
    {
      "epoch": 0.5073762581000966,
      "grad_norm": 0.2239840030670166,
      "learning_rate": 0.0002,
| "loss": 0.3489, | |
| "step": 3680 | |
| }, | |
| { | |
| "epoch": 0.5087549979318903, | |
| "grad_norm": 0.25974148511886597, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3478, | |
| "step": 3690 | |
| }, | |
| { | |
| "epoch": 0.510133737763684, | |
| "grad_norm": 0.7621527314186096, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3501, | |
| "step": 3700 | |
| }, | |
| { | |
| "epoch": 0.5115124775954777, | |
| "grad_norm": 0.6531065702438354, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3473, | |
| "step": 3710 | |
| }, | |
| { | |
| "epoch": 0.5128912174272715, | |
| "grad_norm": 0.0937981903553009, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3494, | |
| "step": 3720 | |
| }, | |
| { | |
| "epoch": 0.5142699572590652, | |
| "grad_norm": 0.353502094745636, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3487, | |
| "step": 3730 | |
| }, | |
| { | |
| "epoch": 0.5156486970908589, | |
| "grad_norm": 0.5151197910308838, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3485, | |
| "step": 3740 | |
| }, | |
| { | |
| "epoch": 0.5170274369226527, | |
| "grad_norm": 0.15747584402561188, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3473, | |
| "step": 3750 | |
| }, | |
| { | |
| "epoch": 0.5184061767544464, | |
| "grad_norm": 0.18814824521541595, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3474, | |
| "step": 3760 | |
| }, | |
| { | |
| "epoch": 0.5197849165862402, | |
| "grad_norm": 0.040331218391656876, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3474, | |
| "step": 3770 | |
| }, | |
| { | |
| "epoch": 0.521163656418034, | |
| "grad_norm": 0.21510355174541473, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3471, | |
| "step": 3780 | |
| }, | |
| { | |
| "epoch": 0.5225423962498277, | |
| "grad_norm": 0.12755675613880157, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3477, | |
| "step": 3790 | |
| }, | |
| { | |
| "epoch": 0.5239211360816214, | |
| "grad_norm": 0.5592456459999084, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3509, | |
| "step": 3800 | |
| }, | |
| { | |
| "epoch": 0.5252998759134151, | |
| "grad_norm": 0.30668896436691284, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3475, | |
| "step": 3810 | |
| }, | |
| { | |
| "epoch": 0.5266786157452089, | |
| "grad_norm": 0.12264347821474075, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3691, | |
| "step": 3820 | |
| }, | |
| { | |
| "epoch": 0.5280573555770026, | |
| "grad_norm": 0.21281813085079193, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3493, | |
| "step": 3830 | |
| }, | |
| { | |
| "epoch": 0.5294360954087963, | |
| "grad_norm": 0.547156572341919, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3485, | |
| "step": 3840 | |
| }, | |
| { | |
| "epoch": 0.5308148352405901, | |
| "grad_norm": 0.2829332947731018, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3459, | |
| "step": 3850 | |
| }, | |
| { | |
| "epoch": 0.5321935750723839, | |
| "grad_norm": 0.6659385561943054, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3492, | |
| "step": 3860 | |
| }, | |
| { | |
| "epoch": 0.5335723149041776, | |
| "grad_norm": 0.9354788064956665, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3501, | |
| "step": 3870 | |
| }, | |
| { | |
| "epoch": 0.5349510547359714, | |
| "grad_norm": 0.64557945728302, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3544, | |
| "step": 3880 | |
| }, | |
| { | |
| "epoch": 0.5363297945677651, | |
| "grad_norm": 0.07203350216150284, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3485, | |
| "step": 3890 | |
| }, | |
| { | |
| "epoch": 0.5377085343995588, | |
| "grad_norm": 0.8220388293266296, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3472, | |
| "step": 3900 | |
| }, | |
| { | |
| "epoch": 0.5390872742313525, | |
| "grad_norm": 0.2928883135318756, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3549, | |
| "step": 3910 | |
| }, | |
| { | |
| "epoch": 0.5404660140631463, | |
| "grad_norm": 0.14233669638633728, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3465, | |
| "step": 3920 | |
| }, | |
| { | |
| "epoch": 0.54184475389494, | |
| "grad_norm": 0.3891246020793915, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3463, | |
| "step": 3930 | |
| }, | |
| { | |
| "epoch": 0.5432234937267337, | |
| "grad_norm": 1.5544075965881348, | |
| "learning_rate": 0.0002, | |
| "loss": 0.357, | |
| "step": 3940 | |
| }, | |
| { | |
| "epoch": 0.5446022335585275, | |
| "grad_norm": 0.28166115283966064, | |
| "learning_rate": 0.0002, | |
| "loss": 0.351, | |
| "step": 3950 | |
| }, | |
| { | |
| "epoch": 0.5459809733903213, | |
| "grad_norm": 0.4519428014755249, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3523, | |
| "step": 3960 | |
| }, | |
| { | |
| "epoch": 0.547359713222115, | |
| "grad_norm": 0.05194510146975517, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3518, | |
| "step": 3970 | |
| }, | |
| { | |
| "epoch": 0.5487384530539088, | |
| "grad_norm": 0.5450552105903625, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3492, | |
| "step": 3980 | |
| }, | |
| { | |
| "epoch": 0.5501171928857025, | |
| "grad_norm": 0.24036559462547302, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3473, | |
| "step": 3990 | |
| }, | |
| { | |
| "epoch": 0.5514959327174962, | |
| "grad_norm": 0.15362729132175446, | |
| "learning_rate": 0.0002, | |
| "loss": 0.3471, | |
| "step": 4000 | |
| } | |
| ], | |
| "logging_steps": 10, | |
| "max_steps": 100000, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 14, | |
| "save_steps": 1000, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": false | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 8.182773334612378e+18, | |
| "train_batch_size": 16, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |
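
The state above follows the Hugging Face Trainer checkpoint schema (`log_history` entries keyed by `epoch`, `grad_norm`, `learning_rate`, `loss`, and `step`, plus top-level run metadata). Below is a minimal sketch for inspecting such a file; it assumes the JSON has been saved locally as `trainer_state.json` (the standard filename inside a Trainer checkpoint directory) and that a `grad_norm` threshold of 2.0 is a reasonable cut for flagging spikes, which is a choice made here for illustration, not anything the log itself prescribes.

```python
# Minimal sketch for inspecting a trainer_state.json like the one above.
# Assumption: the file is saved as "trainer_state.json"; field names
# ("log_history", "loss", "step", "grad_norm") match the Hugging Face
# Trainer checkpoint format shown in this document.
import json
from statistics import mean

with open("trainer_state.json") as f:
    state = json.load(f)

history = state["log_history"]

# Overall progress: this log stops at global_step 4000 of max_steps 100000.
print(f"global_step: {state['global_step']} / {state['max_steps']}")
print(f"epoch: {state['epoch']:.4f} of {state['num_train_epochs']} planned")

# Loss trend over the last 20 logging intervals (logging_steps = 10).
recent = [e["loss"] for e in history[-20:] if "loss" in e]
print(f"mean loss over last {len(recent)} logs: {mean(recent):.4f}")

# Flag logging steps with unusually large gradient norms; the 2.0 cutoff
# is an illustrative threshold (e.g. the 3.34 at step 3560 stands out
# against a grad_norm that mostly sits well below 1.0 late in the run).
spikes = [(e["step"], e["grad_norm"])
          for e in history if e.get("grad_norm", 0.0) > 2.0]
for step, gnorm in spikes:
    print(f"grad_norm spike: step {step}, norm {gnorm:.2f}")
```

Two things such a script would surface from this particular run: the `learning_rate` is pinned at 0.0002 for all 4000 logged steps, consistent with a constant schedule, and after the noisy first ~200 steps the loss plateaus in a narrow 0.344-0.36 band, so the occasional `grad_norm` spikes (2.04, 2.11, 3.34 in this section) do not visibly disturb the loss.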