Invalid JSON:
Unexpected token 'N', ..."ad_norm": NaN,
"... is not valid JSON
| { | |
| "best_global_step": 1000, | |
| "best_metric": 22.1346121489995, | |
| "best_model_checkpoint": null, | |
| "epoch": 3.105658198614319, | |
| "eval_steps": 1000, | |
| "global_step": 5379, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "epoch": 0.014434180138568129, | |
| "grad_norm": 259.8495788574219, | |
| "learning_rate": 3.4000000000000003e-07, | |
| "loss": 45.0368, | |
| "step": 25 | |
| }, | |
| { | |
| "epoch": 0.028868360277136258, | |
| "grad_norm": 113.2914810180664, | |
| "learning_rate": 8.400000000000001e-07, | |
| "loss": 45.4851, | |
| "step": 50 | |
| }, | |
| { | |
| "epoch": 0.04330254041570439, | |
| "grad_norm": 265.2807312011719, | |
| "learning_rate": 1.32e-06, | |
| "loss": 45.0325, | |
| "step": 75 | |
| }, | |
| { | |
| "epoch": 0.057736720554272515, | |
| "grad_norm": 235.03025817871094, | |
| "learning_rate": 1.8000000000000001e-06, | |
| "loss": 44.3926, | |
| "step": 100 | |
| }, | |
| { | |
| "epoch": 0.07217090069284064, | |
| "grad_norm": 186.48748779296875, | |
| "learning_rate": 2.3000000000000004e-06, | |
| "loss": 43.7641, | |
| "step": 125 | |
| }, | |
| { | |
| "epoch": 0.08660508083140878, | |
| "grad_norm": 160.73260498046875, | |
| "learning_rate": 2.8000000000000003e-06, | |
| "loss": 42.7003, | |
| "step": 150 | |
| }, | |
| { | |
| "epoch": 0.10103926096997691, | |
| "grad_norm": 206.27857971191406, | |
| "learning_rate": 3.3000000000000006e-06, | |
| "loss": 41.574, | |
| "step": 175 | |
| }, | |
| { | |
| "epoch": 0.11547344110854503, | |
| "grad_norm": 112.64932250976562, | |
| "learning_rate": 3.8000000000000005e-06, | |
| "loss": 40.4041, | |
| "step": 200 | |
| }, | |
| { | |
| "epoch": 0.12990762124711316, | |
| "grad_norm": 114.56853485107422, | |
| "learning_rate": 4.3e-06, | |
| "loss": 38.7346, | |
| "step": 225 | |
| }, | |
| { | |
| "epoch": 0.14434180138568128, | |
| "grad_norm": 222.75160217285156, | |
| "learning_rate": 4.800000000000001e-06, | |
| "loss": 37.2739, | |
| "step": 250 | |
| }, | |
| { | |
| "epoch": 0.15877598152424943, | |
| "grad_norm": 124.86521911621094, | |
| "learning_rate": 5.300000000000001e-06, | |
| "loss": 35.3557, | |
| "step": 275 | |
| }, | |
| { | |
| "epoch": 0.17321016166281755, | |
| "grad_norm": 186.18553161621094, | |
| "learning_rate": 5.8e-06, | |
| "loss": 31.8464, | |
| "step": 300 | |
| }, | |
| { | |
| "epoch": 0.18764434180138567, | |
| "grad_norm": 125.71898651123047, | |
| "learning_rate": 6.300000000000001e-06, | |
| "loss": 25.2322, | |
| "step": 325 | |
| }, | |
| { | |
| "epoch": 0.20207852193995382, | |
| "grad_norm": 536.0091552734375, | |
| "learning_rate": 6.800000000000001e-06, | |
| "loss": 18.1581, | |
| "step": 350 | |
| }, | |
| { | |
| "epoch": 0.21651270207852194, | |
| "grad_norm": 45.61540603637695, | |
| "learning_rate": 7.280000000000001e-06, | |
| "loss": 13.44, | |
| "step": 375 | |
| }, | |
| { | |
| "epoch": 0.23094688221709006, | |
| "grad_norm": 124.8426284790039, | |
| "learning_rate": 7.78e-06, | |
| "loss": 10.8561, | |
| "step": 400 | |
| }, | |
| { | |
| "epoch": 0.2453810623556582, | |
| "grad_norm": 63.8853874206543, | |
| "learning_rate": 8.28e-06, | |
| "loss": 10.0624, | |
| "step": 425 | |
| }, | |
| { | |
| "epoch": 0.25981524249422633, | |
| "grad_norm": 62.360931396484375, | |
| "learning_rate": 8.740000000000001e-06, | |
| "loss": 10.1205, | |
| "step": 450 | |
| }, | |
| { | |
| "epoch": 0.27424942263279445, | |
| "grad_norm": 393.94854736328125, | |
| "learning_rate": 9.100000000000001e-06, | |
| "loss": 10.668, | |
| "step": 475 | |
| }, | |
| { | |
| "epoch": 0.28868360277136257, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.440000000000001e-06, | |
| "loss": 11.4325, | |
| "step": 500 | |
| }, | |
| { | |
| "epoch": 0.3031177829099307, | |
| "grad_norm": 0.0, | |
| "learning_rate": 9.86e-06, | |
| "loss": 10.0437, | |
| "step": 525 | |
| }, | |
| { | |
| "epoch": 0.31755196304849886, | |
| "grad_norm": 0.0, | |
| "learning_rate": 9.967206394753023e-06, | |
| "loss": 9.8891, | |
| "step": 550 | |
| }, | |
| { | |
| "epoch": 0.331986143187067, | |
| "grad_norm": 0.0, | |
| "learning_rate": 9.918015986882559e-06, | |
| "loss": 9.6078, | |
| "step": 575 | |
| }, | |
| { | |
| "epoch": 0.3464203233256351, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.868825579012093e-06, | |
| "loss": 9.7898, | |
| "step": 600 | |
| }, | |
| { | |
| "epoch": 0.3608545034642032, | |
| "grad_norm": 0.0, | |
| "learning_rate": 9.829883172781308e-06, | |
| "loss": 9.9106, | |
| "step": 625 | |
| }, | |
| { | |
| "epoch": 0.37528868360277134, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.784791965566716e-06, | |
| "loss": 10.1107, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 0.38972286374133946, | |
| "grad_norm": 0.0, | |
| "learning_rate": 9.737651158024185e-06, | |
| "loss": 9.7033, | |
| "step": 675 | |
| }, | |
| { | |
| "epoch": 0.40415704387990764, | |
| "grad_norm": 0.0, | |
| "learning_rate": 9.69460955113753e-06, | |
| "loss": 9.7232, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 0.41859122401847576, | |
| "grad_norm": 0.0, | |
| "learning_rate": 9.645419143267064e-06, | |
| "loss": 9.6178, | |
| "step": 725 | |
| }, | |
| { | |
| "epoch": 0.4330254041570439, | |
| "grad_norm": 0.0, | |
| "learning_rate": 9.604427136708342e-06, | |
| "loss": 10.3478, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 0.447459584295612, | |
| "grad_norm": 0.0, | |
| "learning_rate": 9.555236728837878e-06, | |
| "loss": 9.6163, | |
| "step": 775 | |
| }, | |
| { | |
| "epoch": 0.4618937644341801, | |
| "grad_norm": 0.0, | |
| "learning_rate": 9.506046320967412e-06, | |
| "loss": 9.5966, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 0.47632794457274824, | |
| "grad_norm": 0.0, | |
| "learning_rate": 9.460955113752818e-06, | |
| "loss": 9.9709, | |
| "step": 825 | |
| }, | |
| { | |
| "epoch": 0.4907621247113164, | |
| "grad_norm": 0.0, | |
| "learning_rate": 9.413814306210289e-06, | |
| "loss": 9.4316, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 0.5051963048498845, | |
| "grad_norm": 0.0, | |
| "learning_rate": 9.364623898339825e-06, | |
| "loss": 9.68, | |
| "step": 875 | |
| }, | |
| { | |
| "epoch": 0.5196304849884527, | |
| "grad_norm": 0.0, | |
| "learning_rate": 9.321582291453168e-06, | |
| "loss": 9.8841, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 0.5340646651270208, | |
| "grad_norm": 0.0, | |
| "learning_rate": 9.276491084238574e-06, | |
| "loss": 10.0515, | |
| "step": 925 | |
| }, | |
| { | |
| "epoch": 0.5484988452655889, | |
| "grad_norm": 0.0, | |
| "learning_rate": 9.22730067636811e-06, | |
| "loss": 9.5889, | |
| "step": 950 | |
| }, | |
| { | |
| "epoch": 0.562933025404157, | |
| "grad_norm": 0.0, | |
| "learning_rate": 9.186308669809388e-06, | |
| "loss": 10.4236, | |
| "step": 975 | |
| }, | |
| { | |
| "epoch": 0.5773672055427251, | |
| "grad_norm": 0.0, | |
| "learning_rate": 9.141217462594794e-06, | |
| "loss": 9.9601, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 0.5773672055427251, | |
| "eval_bleu": 0.00020263912584436567, | |
| "eval_loss": 8.906854629516602, | |
| "eval_rouge1": 0.0, | |
| "eval_rouge2": 0.0, | |
| "eval_rougeL": 0.0, | |
| "eval_runtime": 176.2389, | |
| "eval_samples_per_second": 16.79, | |
| "eval_sari": 22.1346121489995, | |
| "eval_steps_per_second": 4.199, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 0.5918013856812933, | |
| "grad_norm": 0.0, | |
| "learning_rate": 9.09202705472433e-06, | |
| "loss": 9.7195, | |
| "step": 1025 | |
| }, | |
| { | |
| "epoch": 0.6062355658198614, | |
| "grad_norm": NaN, | |
| "learning_rate": 9.042836646853864e-06, | |
| "loss": 9.8418, | |
| "step": 1050 | |
| }, | |
| { | |
| "epoch": 0.6206697459584296, | |
| "grad_norm": 0.0, | |
| "learning_rate": 8.995695839311335e-06, | |
| "loss": 9.7688, | |
| "step": 1075 | |
| }, | |
| { | |
| "epoch": 0.6351039260969977, | |
| "grad_norm": 0.0, | |
| "learning_rate": 8.950604632096742e-06, | |
| "loss": 9.9273, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 0.6495381062355658, | |
| "grad_norm": 0.0, | |
| "learning_rate": 8.901414224226276e-06, | |
| "loss": 9.5548, | |
| "step": 1125 | |
| }, | |
| { | |
| "epoch": 0.663972286374134, | |
| "grad_norm": 0.0, | |
| "learning_rate": 8.852223816355812e-06, | |
| "loss": 9.6378, | |
| "step": 1150 | |
| }, | |
| { | |
| "epoch": 0.6784064665127021, | |
| "grad_norm": 0.0, | |
| "learning_rate": 8.807132609141218e-06, | |
| "loss": 9.9517, | |
| "step": 1175 | |
| }, | |
| { | |
| "epoch": 0.6928406466512702, | |
| "grad_norm": 0.0, | |
| "learning_rate": 8.757942201270752e-06, | |
| "loss": 9.6338, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 0.7072748267898383, | |
| "grad_norm": 0.0, | |
| "learning_rate": 8.710801393728223e-06, | |
| "loss": 9.6898, | |
| "step": 1225 | |
| }, | |
| { | |
| "epoch": 0.7217090069284064, | |
| "grad_norm": 0.0, | |
| "learning_rate": 8.66571018651363e-06, | |
| "loss": 10.0, | |
| "step": 1250 | |
| }, | |
| { | |
| "epoch": 0.7361431870669746, | |
| "grad_norm": 0.0, | |
| "learning_rate": 8.622668579626974e-06, | |
| "loss": 10.2099, | |
| "step": 1275 | |
| }, | |
| { | |
| "epoch": 0.7505773672055427, | |
| "grad_norm": 0.0, | |
| "learning_rate": 8.581676573068252e-06, | |
| "loss": 10.4226, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 0.7650115473441108, | |
| "grad_norm": 0.0, | |
| "learning_rate": 8.532486165197788e-06, | |
| "loss": 9.6729, | |
| "step": 1325 | |
| }, | |
| { | |
| "epoch": 0.7794457274826789, | |
| "grad_norm": NaN, | |
| "learning_rate": 8.487394957983194e-06, | |
| "loss": 9.9616, | |
| "step": 1350 | |
| }, | |
| { | |
| "epoch": 0.7938799076212472, | |
| "grad_norm": 0.0, | |
| "learning_rate": 8.440254150440665e-06, | |
| "loss": 9.6195, | |
| "step": 1375 | |
| }, | |
| { | |
| "epoch": 0.8083140877598153, | |
| "grad_norm": 0.0, | |
| "learning_rate": 8.391063742570199e-06, | |
| "loss": 9.6636, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 0.8227482678983834, | |
| "grad_norm": 0.0, | |
| "learning_rate": 8.339823734371798e-06, | |
| "loss": 9.4639, | |
| "step": 1425 | |
| }, | |
| { | |
| "epoch": 0.8371824480369515, | |
| "grad_norm": 0.0, | |
| "learning_rate": 8.292682926829268e-06, | |
| "loss": 9.761, | |
| "step": 1450 | |
| }, | |
| { | |
| "epoch": 0.8516166281755196, | |
| "grad_norm": 0.0, | |
| "learning_rate": 8.249641319942612e-06, | |
| "loss": 10.4667, | |
| "step": 1475 | |
| }, | |
| { | |
| "epoch": 0.8660508083140878, | |
| "grad_norm": 0.0, | |
| "learning_rate": 8.200450912072147e-06, | |
| "loss": 9.6719, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 0.8804849884526559, | |
| "grad_norm": 0.0, | |
| "learning_rate": 8.155359704857553e-06, | |
| "loss": 10.0053, | |
| "step": 1525 | |
| }, | |
| { | |
| "epoch": 0.894919168591224, | |
| "grad_norm": 0.0, | |
| "learning_rate": 8.108218897315024e-06, | |
| "loss": 9.8738, | |
| "step": 1550 | |
| }, | |
| { | |
| "epoch": 0.9093533487297921, | |
| "grad_norm": 0.0, | |
| "learning_rate": 8.05902848944456e-06, | |
| "loss": 9.7024, | |
| "step": 1575 | |
| }, | |
| { | |
| "epoch": 0.9237875288683602, | |
| "grad_norm": 0.0, | |
| "learning_rate": 8.015986882557903e-06, | |
| "loss": 10.2625, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 0.9382217090069284, | |
| "grad_norm": 0.0, | |
| "learning_rate": 7.974994875999181e-06, | |
| "loss": 10.1737, | |
| "step": 1625 | |
| }, | |
| { | |
| "epoch": 0.9526558891454965, | |
| "grad_norm": 0.0, | |
| "learning_rate": 7.925804468128715e-06, | |
| "loss": 9.6332, | |
| "step": 1650 | |
| }, | |
| { | |
| "epoch": 0.9670900692840647, | |
| "grad_norm": 0.0, | |
| "learning_rate": 7.876614060258251e-06, | |
| "loss": 9.6052, | |
| "step": 1675 | |
| }, | |
| { | |
| "epoch": 0.9815242494226328, | |
| "grad_norm": 0.0, | |
| "learning_rate": 7.831522853043657e-06, | |
| "loss": 10.0193, | |
| "step": 1700 | |
| }, | |
| { | |
| "epoch": 0.995958429561201, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 24.3393, | |
| "step": 1725 | |
| }, | |
| { | |
| "epoch": 1.010392609699769, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 1750 | |
| }, | |
| { | |
| "epoch": 1.0248267898383372, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 1775 | |
| }, | |
| { | |
| "epoch": 1.0392609699769053, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 1800 | |
| }, | |
| { | |
| "epoch": 1.0536951501154734, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 1825 | |
| }, | |
| { | |
| "epoch": 1.0681293302540416, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 1850 | |
| }, | |
| { | |
| "epoch": 1.0825635103926097, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 1875 | |
| }, | |
| { | |
| "epoch": 1.0969976905311778, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 1900 | |
| }, | |
| { | |
| "epoch": 1.111431870669746, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 1925 | |
| }, | |
| { | |
| "epoch": 1.125866050808314, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 1950 | |
| }, | |
| { | |
| "epoch": 1.1403002309468822, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 1975 | |
| }, | |
| { | |
| "epoch": 1.1547344110854503, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 1.1547344110854503, | |
| "eval_bleu": 0.0, | |
| "eval_loss": NaN, | |
| "eval_rouge1": 0.0, | |
| "eval_rouge2": 0.0, | |
| "eval_rougeL": 0.0, | |
| "eval_runtime": 4368.391, | |
| "eval_samples_per_second": 0.677, | |
| "eval_sari": 29.13427959896359, | |
| "eval_steps_per_second": 0.169, | |
| "step": 2000 | |
| }, | |
| { | |
| "epoch": 1.1691685912240184, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2025 | |
| }, | |
| { | |
| "epoch": 1.1836027713625865, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2050 | |
| }, | |
| { | |
| "epoch": 1.1980369515011549, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2075 | |
| }, | |
| { | |
| "epoch": 1.212471131639723, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2100 | |
| }, | |
| { | |
| "epoch": 1.226905311778291, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2125 | |
| }, | |
| { | |
| "epoch": 1.2413394919168592, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2150 | |
| }, | |
| { | |
| "epoch": 1.2557736720554273, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2175 | |
| }, | |
| { | |
| "epoch": 1.2702078521939955, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2200 | |
| }, | |
| { | |
| "epoch": 1.2846420323325636, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2225 | |
| }, | |
| { | |
| "epoch": 1.2990762124711317, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2250 | |
| }, | |
| { | |
| "epoch": 1.3135103926096998, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2275 | |
| }, | |
| { | |
| "epoch": 1.327944572748268, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2300 | |
| }, | |
| { | |
| "epoch": 1.342378752886836, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2325 | |
| }, | |
| { | |
| "epoch": 1.3568129330254042, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2350 | |
| }, | |
| { | |
| "epoch": 1.3712471131639723, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2375 | |
| }, | |
| { | |
| "epoch": 1.3856812933025404, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2400 | |
| }, | |
| { | |
| "epoch": 1.4001154734411085, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2425 | |
| }, | |
| { | |
| "epoch": 1.4145496535796767, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2450 | |
| }, | |
| { | |
| "epoch": 1.4289838337182448, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2475 | |
| }, | |
| { | |
| "epoch": 1.443418013856813, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2500 | |
| }, | |
| { | |
| "epoch": 1.457852193995381, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2525 | |
| }, | |
| { | |
| "epoch": 1.4722863741339491, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2550 | |
| }, | |
| { | |
| "epoch": 1.4867205542725173, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2575 | |
| }, | |
| { | |
| "epoch": 1.5011547344110854, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2600 | |
| }, | |
| { | |
| "epoch": 1.5155889145496535, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2625 | |
| }, | |
| { | |
| "epoch": 1.5300230946882216, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2650 | |
| }, | |
| { | |
| "epoch": 1.5444572748267897, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2675 | |
| }, | |
| { | |
| "epoch": 1.5588914549653579, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2700 | |
| }, | |
| { | |
| "epoch": 1.573325635103926, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2725 | |
| }, | |
| { | |
| "epoch": 1.587759815242494, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2750 | |
| }, | |
| { | |
| "epoch": 1.6021939953810622, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2775 | |
| }, | |
| { | |
| "epoch": 1.6166281755196303, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2800 | |
| }, | |
| { | |
| "epoch": 1.6310623556581985, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2825 | |
| }, | |
| { | |
| "epoch": 1.6454965357967666, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2850 | |
| }, | |
| { | |
| "epoch": 1.659930715935335, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2875 | |
| }, | |
| { | |
| "epoch": 1.674364896073903, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2900 | |
| }, | |
| { | |
| "epoch": 1.6887990762124712, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2925 | |
| }, | |
| { | |
| "epoch": 1.7032332563510393, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2950 | |
| }, | |
| { | |
| "epoch": 1.7176674364896074, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 2975 | |
| }, | |
| { | |
| "epoch": 1.7321016166281755, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 1.7321016166281755, | |
| "eval_bleu": 0.0, | |
| "eval_loss": NaN, | |
| "eval_rouge1": 0.0, | |
| "eval_rouge2": 0.0, | |
| "eval_rougeL": 0.0, | |
| "eval_runtime": 4354.8586, | |
| "eval_samples_per_second": 0.679, | |
| "eval_sari": 29.13427959896359, | |
| "eval_steps_per_second": 0.17, | |
| "step": 3000 | |
| }, | |
| { | |
| "epoch": 1.7465357967667436, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3025 | |
| }, | |
| { | |
| "epoch": 1.7609699769053118, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3050 | |
| }, | |
| { | |
| "epoch": 1.7754041570438799, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3075 | |
| }, | |
| { | |
| "epoch": 1.789838337182448, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3100 | |
| }, | |
| { | |
| "epoch": 1.8042725173210161, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3125 | |
| }, | |
| { | |
| "epoch": 1.8187066974595842, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3150 | |
| }, | |
| { | |
| "epoch": 1.8331408775981526, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3175 | |
| }, | |
| { | |
| "epoch": 1.8475750577367207, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3200 | |
| }, | |
| { | |
| "epoch": 1.8620092378752888, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3225 | |
| }, | |
| { | |
| "epoch": 1.876443418013857, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3250 | |
| }, | |
| { | |
| "epoch": 1.890877598152425, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3275 | |
| }, | |
| { | |
| "epoch": 1.9053117782909932, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3300 | |
| }, | |
| { | |
| "epoch": 1.9197459584295613, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3325 | |
| }, | |
| { | |
| "epoch": 1.9341801385681294, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3350 | |
| }, | |
| { | |
| "epoch": 1.9486143187066975, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3375 | |
| }, | |
| { | |
| "epoch": 1.9630484988452657, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3400 | |
| }, | |
| { | |
| "epoch": 1.9774826789838338, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3425 | |
| }, | |
| { | |
| "epoch": 1.991916859122402, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3450 | |
| }, | |
| { | |
| "epoch": 2.00635103926097, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3475 | |
| }, | |
| { | |
| "epoch": 2.020785219399538, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3500 | |
| }, | |
| { | |
| "epoch": 2.0352193995381063, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3525 | |
| }, | |
| { | |
| "epoch": 2.0496535796766744, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3550 | |
| }, | |
| { | |
| "epoch": 2.0640877598152425, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3575 | |
| }, | |
| { | |
| "epoch": 2.0785219399538106, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3600 | |
| }, | |
| { | |
| "epoch": 2.0929561200923787, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3625 | |
| }, | |
| { | |
| "epoch": 2.107390300230947, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3650 | |
| }, | |
| { | |
| "epoch": 2.121824480369515, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3675 | |
| }, | |
| { | |
| "epoch": 2.136258660508083, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3700 | |
| }, | |
| { | |
| "epoch": 2.1506928406466512, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3725 | |
| }, | |
| { | |
| "epoch": 2.1651270207852193, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3750 | |
| }, | |
| { | |
| "epoch": 2.1795612009237875, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3775 | |
| }, | |
| { | |
| "epoch": 2.1939953810623556, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3800 | |
| }, | |
| { | |
| "epoch": 2.2084295612009237, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3825 | |
| }, | |
| { | |
| "epoch": 2.222863741339492, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3850 | |
| }, | |
| { | |
| "epoch": 2.23729792147806, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3875 | |
| }, | |
| { | |
| "epoch": 2.251732101616628, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3900 | |
| }, | |
| { | |
| "epoch": 2.266166281755196, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3925 | |
| }, | |
| { | |
| "epoch": 2.2806004618937643, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3950 | |
| }, | |
| { | |
| "epoch": 2.2950346420323324, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 3975 | |
| }, | |
| { | |
| "epoch": 2.3094688221709005, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 2.3094688221709005, | |
| "eval_bleu": 0.0, | |
| "eval_loss": NaN, | |
| "eval_rouge1": 0.0, | |
| "eval_rouge2": 0.0, | |
| "eval_rougeL": 0.0, | |
| "eval_runtime": 4387.6137, | |
| "eval_samples_per_second": 0.674, | |
| "eval_sari": 29.13427959896359, | |
| "eval_steps_per_second": 0.169, | |
| "step": 4000 | |
| }, | |
| { | |
| "epoch": 2.3239030023094687, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4025 | |
| }, | |
| { | |
| "epoch": 2.338337182448037, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4050 | |
| }, | |
| { | |
| "epoch": 2.352771362586605, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4075 | |
| }, | |
| { | |
| "epoch": 2.367205542725173, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4100 | |
| }, | |
| { | |
| "epoch": 2.381639722863741, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4125 | |
| }, | |
| { | |
| "epoch": 2.3960739030023097, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4150 | |
| }, | |
| { | |
| "epoch": 2.4105080831408774, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4175 | |
| }, | |
| { | |
| "epoch": 2.424942263279446, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4200 | |
| }, | |
| { | |
| "epoch": 2.4393764434180136, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4225 | |
| }, | |
| { | |
| "epoch": 2.453810623556582, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4250 | |
| }, | |
| { | |
| "epoch": 2.46824480369515, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4275 | |
| }, | |
| { | |
| "epoch": 2.4826789838337184, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4300 | |
| }, | |
| { | |
| "epoch": 2.4971131639722866, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4325 | |
| }, | |
| { | |
| "epoch": 2.5115473441108547, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4350 | |
| }, | |
| { | |
| "epoch": 2.525981524249423, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4375 | |
| }, | |
| { | |
| "epoch": 2.540415704387991, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4400 | |
| }, | |
| { | |
| "epoch": 2.554849884526559, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4425 | |
| }, | |
| { | |
| "epoch": 2.569284064665127, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4450 | |
| }, | |
| { | |
| "epoch": 2.5837182448036953, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4475 | |
| }, | |
| { | |
| "epoch": 2.5981524249422634, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4500 | |
| }, | |
| { | |
| "epoch": 2.6125866050808315, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4525 | |
| }, | |
| { | |
| "epoch": 2.6270207852193996, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4550 | |
| }, | |
| { | |
| "epoch": 2.6414549653579678, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4575 | |
| }, | |
| { | |
| "epoch": 2.655889145496536, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4600 | |
| }, | |
| { | |
| "epoch": 2.670323325635104, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4625 | |
| }, | |
| { | |
| "epoch": 2.684757505773672, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4650 | |
| }, | |
| { | |
| "epoch": 2.6991916859122402, | |
| "grad_norm": NaN, | |
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4675 | |
| }, | |
| { | |
| "epoch": 2.7136258660508084, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4700 | |
| }, | |
| { | |
| "epoch": 2.7280600461893765, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4725 | |
| }, | |
| { | |
| "epoch": 2.7424942263279446, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4750 | |
| }, | |
| { | |
| "epoch": 2.7569284064665127, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4775 | |
| }, | |
| { | |
| "epoch": 2.771362586605081, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4800 | |
| }, | |
| { | |
| "epoch": 2.785796766743649, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4825 | |
| }, | |
| { | |
| "epoch": 2.800230946882217, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4850 | |
| }, | |
| { | |
| "epoch": 2.814665127020785, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4875 | |
| }, | |
| { | |
| "epoch": 2.8290993071593533, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4900 | |
| }, | |
| { | |
| "epoch": 2.8435334872979214, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4925 | |
| }, | |
| { | |
| "epoch": 2.8579676674364896, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4950 | |
| }, | |
| { | |
| "epoch": 2.8724018475750577, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 4975 | |
| }, | |
| { | |
| "epoch": 2.886836027713626, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 5000 | |
| }, | |
| { | |
| "epoch": 2.886836027713626, | |
| "eval_bleu": 0.0, | |
      "eval_loss": null,
| "eval_rouge1": 0.0, | |
| "eval_rouge2": 0.0, | |
| "eval_rougeL": 0.0, | |
| "eval_runtime": 4379.0209, | |
| "eval_samples_per_second": 0.676, | |
| "eval_sari": 29.13427959896359, | |
| "eval_steps_per_second": 0.169, | |
| "step": 5000 | |
| }, | |
| { | |
| "epoch": 2.901270207852194, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 5025 | |
| }, | |
| { | |
| "epoch": 2.915704387990762, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 5050 | |
| }, | |
| { | |
| "epoch": 2.93013856812933, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 5075 | |
| }, | |
| { | |
| "epoch": 2.9445727482678983, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 5100 | |
| }, | |
| { | |
| "epoch": 2.9590069284064664, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 5125 | |
| }, | |
| { | |
| "epoch": 2.9734411085450345, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 5150 | |
| }, | |
| { | |
| "epoch": 2.9878752886836026, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 5175 | |
| }, | |
| { | |
| "epoch": 3.0023094688221708, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 5200 | |
| }, | |
| { | |
| "epoch": 3.016743648960739, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 5225 | |
| }, | |
| { | |
| "epoch": 3.031177829099307, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 5250 | |
| }, | |
| { | |
| "epoch": 3.045612009237875, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 5275 | |
| }, | |
| { | |
| "epoch": 3.0600461893764432, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 5300 | |
| }, | |
| { | |
| "epoch": 3.0744803695150114, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 5325 | |
| }, | |
| { | |
| "epoch": 3.0889145496535795, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 5350 | |
| }, | |
| { | |
| "epoch": 3.1033487297921476, | |
      "grad_norm": null,
| "learning_rate": 7.817175650748105e-06, | |
| "loss": 0.0, | |
| "step": 5375 | |
| } | |
| ], | |
| "logging_steps": 25, | |
| "max_steps": 5379, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 4, | |
| "save_steps": 2000, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 9345650817761280.0, | |
| "train_batch_size": 4, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |