{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 10.0,
  "global_step": 1462,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 0.0004999422837239569,
      "loss": 0.841,
      "step": 10
    },
    {
      "epoch": 0.03,
      "learning_rate": 0.000499769161545176,
      "loss": 0.5851,
      "step": 20
    },
    {
      "epoch": 0.04,
      "learning_rate": 0.0004994807133993966,
      "loss": 0.582,
      "step": 30
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0004990770724718415,
      "loss": 0.5389,
      "step": 40
    },
    {
      "epoch": 0.07,
      "learning_rate": 0.0004985584251357201,
      "loss": 0.5349,
      "step": 50
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.000497925010866175,
      "loss": 0.5181,
      "step": 60
    },
    {
      "epoch": 0.1,
      "learning_rate": 0.0004971771221297088,
      "loss": 0.5041,
      "step": 70
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.0004963151042491437,
      "loss": 0.5311,
      "step": 80
    },
    {
      "epoch": 0.12,
      "learning_rate": 0.0004953393552441752,
      "loss": 0.4979,
      "step": 90
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.0004942503256475948,
      "loss": 0.5101,
      "step": 100
    },
    {
      "epoch": 0.15,
      "learning_rate": 0.000493048518297265,
      "loss": 0.5458,
      "step": 110
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.0004917344881039438,
      "loss": 0.5137,
      "step": 120
    },
    {
      "epoch": 0.18,
      "learning_rate": 0.0004903088417950664,
      "loss": 0.4865,
      "step": 130
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.0004887722376345999,
      "loss": 0.5004,
      "step": 140
    },
    {
      "epoch": 0.21,
      "learning_rate": 0.00048712538511910353,
      "loss": 0.4931,
      "step": 150
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.0004853690446501323,
      "loss": 0.4855,
      "step": 160
    },
    {
      "epoch": 0.23,
      "learning_rate": 0.00048350402718313703,
      "loss": 0.5288,
      "step": 170
    },
    {
      "epoch": 0.25,
      "learning_rate": 0.00048153119385302114,
      "loss": 0.4931,
      "step": 180
    },
    {
      "epoch": 0.26,
      "learning_rate": 0.00047945145557652923,
      "loss": 0.5064,
      "step": 190
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.000477265772631649,
      "loss": 0.5217,
      "step": 200
    },
    {
      "epoch": 0.29,
      "learning_rate": 0.00047497515421422205,
      "loss": 0.4952,
      "step": 210
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00047258065797196744,
      "loss": 0.5501,
      "step": 220
    },
    {
      "epoch": 0.31,
      "learning_rate": 0.00047008338951613397,
      "loss": 0.5033,
      "step": 230
    },
    {
      "epoch": 0.33,
      "learning_rate": 0.000467484501911006,
      "loss": 0.5217,
      "step": 240
    },
    {
      "epoch": 0.34,
      "learning_rate": 0.000464785195141499,
      "loss": 0.5051,
      "step": 250
    },
    {
      "epoch": 0.36,
      "learning_rate": 0.00046198671555909,
      "loss": 0.5066,
      "step": 260
    },
    {
      "epoch": 0.37,
      "learning_rate": 0.0004590903553063396,
      "loss": 0.4972,
      "step": 270
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.0004560974517202708,
      "loss": 0.5008,
      "step": 280
    },
    {
      "epoch": 0.4,
      "learning_rate": 0.00045300938671487994,
      "loss": 0.5091,
      "step": 290
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.0004498275861430654,
      "loss": 0.4764,
      "step": 300
    },
    {
      "epoch": 0.42,
      "learning_rate": 0.00044655351913826794,
      "loss": 0.506,
      "step": 310
    },
    {
      "epoch": 0.44,
      "learning_rate": 0.00044318869743612796,
      "loss": 0.521,
      "step": 320
    },
    {
      "epoch": 0.45,
      "learning_rate": 0.000439734674676471,
      "loss": 0.4923,
      "step": 330
    },
    {
      "epoch": 0.47,
      "learning_rate": 0.00043619304568594545,
      "loss": 0.5319,
      "step": 340
    },
    {
      "epoch": 0.48,
      "learning_rate": 0.0004325654457416431,
      "loss": 0.4804,
      "step": 350
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00042922848418325,
      "loss": 0.5062,
      "step": 360
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.0004254421861612533,
      "loss": 0.4685,
      "step": 370
    },
    {
      "epoch": 0.52,
      "learning_rate": 0.000421574881182088,
      "loss": 0.502,
      "step": 380
    },
    {
      "epoch": 0.53,
      "learning_rate": 0.0004176283548972877,
      "loss": 0.5225,
      "step": 390
    },
    {
      "epoch": 0.55,
      "learning_rate": 0.0004136044295372563,
      "loss": 0.5375,
      "step": 400
    },
    {
      "epoch": 0.56,
      "learning_rate": 0.00040950496306988853,
      "loss": 0.5128,
      "step": 410
    },
    {
      "epoch": 0.57,
      "learning_rate": 0.00040533184834269057,
      "loss": 0.5079,
      "step": 420
    },
    {
      "epoch": 0.59,
      "learning_rate": 0.0004010870122087948,
      "loss": 0.5297,
      "step": 430
    },
    {
      "epoch": 0.6,
      "learning_rate": 0.0003967724146372738,
      "loss": 0.5069,
      "step": 440
    },
    {
      "epoch": 0.62,
      "learning_rate": 0.0003923900478081629,
      "loss": 0.5238,
      "step": 450
    },
    {
      "epoch": 0.63,
      "learning_rate": 0.0003879419351926115,
      "loss": 0.5052,
      "step": 460
    },
    {
      "epoch": 0.64,
      "learning_rate": 0.0003834301306185841,
      "loss": 0.5068,
      "step": 470
    },
    {
      "epoch": 0.66,
      "learning_rate": 0.0003788567173225469,
      "loss": 0.497,
      "step": 480
    },
    {
      "epoch": 0.67,
      "learning_rate": 0.0003742238069875736,
      "loss": 0.5203,
      "step": 490
    },
    {
      "epoch": 0.68,
      "learning_rate": 0.00036953353876831866,
      "loss": 0.4843,
      "step": 500
    },
    {
      "epoch": 0.7,
      "learning_rate": 0.000364788078303304,
      "loss": 0.4976,
      "step": 510
    },
    {
      "epoch": 0.71,
      "learning_rate": 0.000359989616714979,
      "loss": 0.5101,
      "step": 520
    },
    {
      "epoch": 0.73,
      "learning_rate": 0.0003551403695980124,
      "loss": 0.485,
      "step": 530
    },
    {
      "epoch": 0.74,
      "learning_rate": 0.0003502425759962859,
      "loss": 0.5139,
      "step": 540
    },
    {
      "epoch": 0.75,
      "learning_rate": 0.0003452984973690597,
      "loss": 0.507,
      "step": 550
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.0003403104165467883,
      "loss": 0.4868,
      "step": 560
    },
    {
      "epoch": 0.78,
      "learning_rate": 0.000335280636677069,
      "loss": 0.5278,
      "step": 570
    },
    {
      "epoch": 0.79,
      "learning_rate": 0.00033021148016120915,
      "loss": 0.4709,
      "step": 580
    },
    {
      "epoch": 0.81,
      "learning_rate": 0.0003251052875819029,
      "loss": 0.5023,
      "step": 590
    },
    {
      "epoch": 0.82,
      "learning_rate": 0.0003199644166225136,
      "loss": 0.499,
      "step": 600
    },
    {
      "epoch": 0.83,
      "learning_rate": 0.00031479124097846047,
      "loss": 0.5014,
      "step": 610
    },
    {
      "epoch": 0.85,
      "learning_rate": 0.0003095881492612115,
      "loss": 0.5224,
      "step": 620
    },
    {
      "epoch": 0.86,
      "learning_rate": 0.0003043575438953893,
      "loss": 0.5063,
      "step": 630
    },
    {
      "epoch": 0.88,
      "learning_rate": 0.00029910184000949916,
      "loss": 0.5092,
      "step": 640
    },
    {
      "epoch": 0.89,
      "learning_rate": 0.0002938234643207913,
      "loss": 0.498,
      "step": 650
    },
    {
      "epoch": 0.9,
      "learning_rate": 0.0002885248540147722,
      "loss": 0.4861,
      "step": 660
    },
    {
      "epoch": 0.92,
      "learning_rate": 0.00028320845561988255,
      "loss": 0.5027,
      "step": 670
    },
    {
      "epoch": 0.93,
      "learning_rate": 0.0002778767238778607,
      "loss": 0.5162,
      "step": 680
    },
    {
      "epoch": 0.94,
      "learning_rate": 0.0002725321206103147,
      "loss": 0.495,
      "step": 690
    },
    {
      "epoch": 0.96,
      "learning_rate": 0.0002671771135820249,
      "loss": 0.4804,
      "step": 700
    },
    {
      "epoch": 0.97,
      "learning_rate": 0.0002618141753615021,
      "loss": 0.4684,
      "step": 710
    },
    {
      "epoch": 0.98,
      "learning_rate": 0.0002564457821793282,
      "loss": 0.4736,
      "step": 720
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.00025107441278480563,
      "loss": 0.4928,
      "step": 730
    },
    {
      "epoch": 1.01,
      "learning_rate": 0.00024570254730144417,
      "loss": 0.5076,
      "step": 740
    },
    {
      "epoch": 1.03,
      "learning_rate": 0.00024033266608181257,
      "loss": 0.4936,
      "step": 750
    },
    {
      "epoch": 1.04,
      "learning_rate": 0.00023496724856228514,
      "loss": 0.5076,
      "step": 760
    },
    {
      "epoch": 1.05,
      "learning_rate": 0.00022960877211821102,
      "loss": 0.4881,
      "step": 770
    },
    {
      "epoch": 1.07,
      "learning_rate": 0.0002242597109200352,
      "loss": 0.5276,
      "step": 780
    },
    {
      "epoch": 1.08,
      "learning_rate": 0.00021892253479089907,
      "loss": 0.4963,
      "step": 790
    },
    {
      "epoch": 1.09,
      "learning_rate": 0.00021359970806624883,
      "loss": 0.4923,
      "step": 800
    },
    {
      "epoch": 1.11,
      "learning_rate": 0.00020829368845597693,
      "loss": 0.5217,
      "step": 810
    },
    {
      "epoch": 1.12,
      "learning_rate": 0.0002030069259096234,
      "loss": 0.4848,
      "step": 820
    },
    {
      "epoch": 1.14,
      "learning_rate": 0.0001977418614851605,
      "loss": 0.4759,
      "step": 830
    },
    {
      "epoch": 1.15,
      "learning_rate": 0.0001925009262218818,
      "loss": 0.5065,
      "step": 840
    },
    {
      "epoch": 1.16,
      "learning_rate": 0.0001872865400179182,
      "loss": 0.4642,
      "step": 850
    },
    {
      "epoch": 1.18,
      "learning_rate": 0.00018210111051289815,
      "loss": 0.4869,
      "step": 860
    },
    {
      "epoch": 1.19,
      "learning_rate": 0.00017694703197626734,
      "loss": 0.4859,
      "step": 870
    },
    {
      "epoch": 1.2,
      "learning_rate": 0.0001718266842017823,
      "loss": 0.4529,
      "step": 880
    },
    {
      "epoch": 1.22,
      "learning_rate": 0.00016674243140868777,
      "loss": 0.5037,
      "step": 890
    },
    {
      "epoch": 1.23,
      "learning_rate": 0.0001616966211500851,
      "loss": 0.475,
      "step": 900
    },
    {
      "epoch": 1.24,
      "learning_rate": 0.0001566915832289964,
      "loss": 0.4951,
      "step": 910
    },
    {
      "epoch": 1.26,
      "learning_rate": 0.00015172962862262362,
      "loss": 0.4845,
      "step": 920
    },
    {
      "epoch": 1.27,
      "learning_rate": 0.00014681304841530107,
      "loss": 0.4528,
      "step": 930
    },
    {
      "epoch": 1.29,
      "learning_rate": 0.00014194411274063212,
      "loss": 0.4856,
      "step": 940
    },
    {
      "epoch": 1.3,
      "learning_rate": 0.00013712506973330036,
      "loss": 0.4764,
      "step": 950
    },
    {
      "epoch": 1.31,
      "learning_rate": 0.00013235814449103736,
      "loss": 0.4775,
      "step": 960
    },
    {
      "epoch": 1.33,
      "learning_rate": 0.00012764553804722867,
      "loss": 0.4961,
      "step": 970
    },
    {
      "epoch": 1.34,
      "learning_rate": 0.00012298942635462923,
      "loss": 0.4862,
      "step": 980
    },
    {
      "epoch": 1.35,
      "learning_rate": 0.00011839195928066102,
      "loss": 0.485,
      "step": 990
    },
    {
      "epoch": 1.37,
      "learning_rate": 0.00011385525961475404,
      "loss": 0.4862,
      "step": 1000
    },
    {
      "epoch": 1.38,
      "learning_rate": 0.00010938142208819007,
      "loss": 0.4768,
      "step": 1010
    },
    {
      "epoch": 1.4,
      "learning_rate": 0.00010497251240690254,
      "loss": 0.4885,
      "step": 1020
    },
    {
      "epoch": 1.41,
      "learning_rate": 0.00010063056629767691,
      "loss": 0.4758,
      "step": 1030
    },
    {
      "epoch": 1.42,
      "learning_rate": 9.635758856819508e-05,
      "loss": 0.4676,
      "step": 1040
    },
    {
      "epoch": 1.44,
      "learning_rate": 9.215555218135416e-05,
      "loss": 0.4858,
      "step": 1050
    },
    {
      "epoch": 1.45,
      "learning_rate": 8.802639734429043e-05,
      "loss": 0.456,
      "step": 1060
    },
    {
      "epoch": 1.46,
      "learning_rate": 8.39720306125274e-05,
      "loss": 0.4776,
      "step": 1070
    },
    {
      "epoch": 1.48,
      "learning_rate": 7.999432400966063e-05,
      "loss": 0.461,
      "step": 1080
    },
    {
      "epoch": 1.49,
      "learning_rate": 7.609511416298859e-05,
      "loss": 0.4893,
      "step": 1090
    },
    {
      "epoch": 1.5,
      "learning_rate": 7.227620145548624e-05,
      "loss": 0.4917,
      "step": 1100
    },
    {
      "epoch": 1.52,
      "learning_rate": 6.853934919451357e-05,
      "loss": 0.5223,
      "step": 1110
    },
    {
      "epoch": 1.53,
      "learning_rate": 6.488628279764369e-05,
      "loss": 0.4687,
      "step": 1120
    },
    {
      "epoch": 1.55,
      "learning_rate": 6.131868899598503e-05,
      "loss": 0.4788,
      "step": 1130
    },
    {
      "epoch": 1.56,
      "learning_rate": 5.783821505536696e-05,
      "loss": 0.472,
      "step": 1140
    },
    {
      "epoch": 1.57,
      "learning_rate": 5.444646801574729e-05,
      "loss": 0.4664,
      "step": 1150
    },
    {
      "epoch": 1.59,
      "learning_rate": 5.114501394919316e-05,
      "loss": 0.4547,
      "step": 1160
    },
    {
      "epoch": 1.6,
      "learning_rate": 4.7935377236778654e-05,
      "loss": 0.4927,
      "step": 1170
    },
    {
      "epoch": 1.61,
      "learning_rate": 4.4819039864731625e-05,
      "loss": 0.4814,
      "step": 1180
    },
    {
      "epoch": 1.63,
      "learning_rate": 4.1797440740156204e-05,
      "loss": 0.4681,
      "step": 1190
    },
    {
      "epoch": 1.64,
      "learning_rate": 3.887197502664572e-05,
      "loss": 0.4979,
      "step": 1200
    },
    {
      "epoch": 1.66,
      "learning_rate": 3.6043993500093466e-05,
      "loss": 0.4743,
      "step": 1210
    },
    {
      "epoch": 1.67,
      "learning_rate": 3.3314801924998973e-05,
      "loss": 0.4792,
      "step": 1220
    },
    {
      "epoch": 1.68,
      "learning_rate": 3.068566045155682e-05,
      "loss": 0.4791,
      "step": 1230
    },
    {
      "epoch": 1.7,
      "learning_rate": 2.8157783033807365e-05,
      "loss": 0.4846,
      "step": 1240
    },
    {
      "epoch": 1.71,
      "learning_rate": 2.57323368691173e-05,
      "loss": 0.4684,
      "step": 1250
    },
    {
      "epoch": 1.72,
      "learning_rate": 2.3410441859249538e-05,
      "loss": 0.4645,
      "step": 1260
    },
    {
      "epoch": 1.74,
      "learning_rate": 2.1193170093270847e-05,
      "loss": 0.51,
      "step": 1270
    },
    {
      "epoch": 1.75,
      "learning_rate": 1.908154535253559e-05,
      "loss": 0.4606,
      "step": 1280
    },
    {
      "epoch": 1.76,
      "learning_rate": 1.707654263797534e-05,
      "loss": 0.4711,
      "step": 1290
    },
    {
      "epoch": 1.78,
      "learning_rate": 1.5179087719911138e-05,
      "loss": 0.4523,
      "step": 1300
    },
    {
      "epoch": 1.79,
      "learning_rate": 1.3390056710597648e-05,
      "loss": 0.4997,
      "step": 1310
    },
    {
      "epoch": 1.81,
      "learning_rate": 1.1710275659695596e-05,
      "loss": 0.4838,
      "step": 1320
    },
    {
      "epoch": 1.82,
      "learning_rate": 1.0140520172859518e-05,
      "loss": 0.4573,
      "step": 1330
    },
    {
      "epoch": 1.83,
      "learning_rate": 8.681515053617473e-06,
      "loss": 0.4858,
      "step": 1340
    },
    {
      "epoch": 1.85,
      "learning_rate": 7.333933968707046e-06,
      "loss": 0.4855,
      "step": 1350
    },
    {
      "epoch": 1.86,
      "learning_rate": 6.098399137023425e-06,
      "loss": 0.462,
      "step": 1360
    },
    {
      "epoch": 1.87,
      "learning_rate": 4.975481042321816e-06,
      "loss": 0.4766,
      "step": 1370
    },
    {
      "epoch": 1.89,
      "learning_rate": 3.965698169808018e-06,
      "loss": 0.5164,
      "step": 1380
    },
    {
      "epoch": 1.9,
      "learning_rate": 3.069516766738195e-06,
      "loss": 0.472,
      "step": 1390
    },
    {
      "epoch": 1.92,
      "learning_rate": 2.2873506271382515e-06,
      "loss": 0.4915,
      "step": 1400
    },
    {
      "epoch": 1.93,
      "learning_rate": 1.6195609007428057e-06,
      "loss": 0.4754,
      "step": 1410
    },
    {
      "epoch": 1.94,
      "learning_rate": 1.066455926241383e-06,
      "loss": 0.4655,
      "step": 1420
    },
    {
      "epoch": 1.96,
      "learning_rate": 6.282910889090499e-07,
      "loss": 0.4948,
      "step": 1430
    },
    {
      "epoch": 1.97,
      "learning_rate": 3.052687026874612e-07,
      "loss": 0.501,
      "step": 1440
    },
    {
      "epoch": 1.98,
      "learning_rate": 9.753791677027945e-08,
      "loss": 0.473,
      "step": 1450
    },
    {
      "epoch": 2.0,
      "learning_rate": 5.194646736622621e-09,
      "loss": 0.4731,
      "step": 1460
    },
    {
      "epoch": 2.0,
      "step": 1462,
      "total_flos": 3.404194232389337e+18,
      "train_loss": 0.49735359360353076,
      "train_runtime": 25276.0399,
      "train_samples_per_second": 3.7,
      "train_steps_per_second": 0.058
    }
  ],
  "logging_steps": 10,
  "max_steps": 1462,
  "num_train_epochs": 2,
  "save_steps": 150,
  "total_flos": 3.404194232389337e+18,
  "trial_name": null,
  "trial_params": null
}