{
  "best_metric": 3.1981074810028076,
  "best_model_checkpoint": "checkpoints/it5-base/checkpoint-27001",
  "epoch": 9.751173708920188,
  "eval_steps": 2077,
  "global_step": 27001,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.14987360057782592,
      "eval_g2l_cer": 100.4364,
      "eval_g2l_gen_len": 5.3498,
      "eval_g2l_rouge1": 23.9754,
      "eval_g2l_rouge2": 12.4646,
      "eval_g2l_rougeL": 23.7793,
      "eval_g2l_rougeLsum": 23.7651,
      "eval_l2ex_cer": 108.3181,
      "eval_l2ex_gen_len": 40.6639,
      "eval_l2ex_rouge1": 25.3285,
      "eval_l2ex_rouge2": 11.5065,
      "eval_l2ex_rougeL": 22.7839,
      "eval_l2ex_rougeLsum": 22.7439,
      "eval_l2g_cer": 97.287,
      "eval_l2g_gen_len": 23.9843,
      "eval_l2g_rouge1": 25.9551,
      "eval_l2g_rouge2": 11.8024,
      "eval_l2g_rougeL": 24.1441,
      "eval_l2g_rougeLsum": 24.1774,
      "eval_loss": 3.9933629035949707,
      "eval_runtime": 160.4997,
      "eval_samples_per_second": 61.813,
      "eval_steps_per_second": 0.972,
      "step": 415
    },
    {
      "epoch": 0.15023474178403756,
      "grad_norm": 14.013622283935547,
      "learning_rate": 0.00014026974951830443,
      "loss": 5.5983,
      "step": 416
    },
    {
      "epoch": 0.3004694835680751,
      "grad_norm": 47.43400192260742,
      "learning_rate": 0.00028053949903660886,
      "loss": 4.0514,
      "step": 832
    },
    {
      "epoch": 0.4507042253521127,
      "grad_norm": 87.22700500488281,
      "learning_rate": 0.0004208092485549133,
      "loss": 3.8797,
      "step": 1248
    },
    {
      "epoch": 0.6009389671361502,
      "grad_norm": 117.74351501464844,
      "learning_rate": 0.0005610789980732177,
      "loss": 3.7982,
      "step": 1664
    },
    {
      "epoch": 0.7500902853015529,
      "eval_g2l_cer": 52.9873,
      "eval_g2l_gen_len": 3.4046,
      "eval_g2l_rouge1": 36.7676,
      "eval_g2l_rouge2": 27.4065,
      "eval_g2l_rougeL": 36.653,
      "eval_g2l_rougeLsum": 36.6757,
      "eval_l2ex_cer": 82.3042,
      "eval_l2ex_gen_len": 18.221,
      "eval_l2ex_rouge1": 29.9987,
      "eval_l2ex_rouge2": 15.0081,
      "eval_l2ex_rougeL": 27.336,
      "eval_l2ex_rougeLsum": 27.3153,
      "eval_l2g_cer": 72.0808,
      "eval_l2g_gen_len": 10.2902,
      "eval_l2g_rouge1": 32.6068,
      "eval_l2g_rouge2": 20.974,
      "eval_l2g_rougeL": 31.1844,
      "eval_l2g_rougeLsum": 31.2287,
      "eval_loss": 3.5869500637054443,
      "eval_runtime": 137.2692,
      "eval_samples_per_second": 72.274,
      "eval_steps_per_second": 1.136,
      "step": 2077
    },
    {
      "epoch": 0.7511737089201878,
      "grad_norm": 132.14553833007812,
      "learning_rate": 0.0006999999822586204,
      "loss": 3.7522,
      "step": 2080
    },
    {
      "epoch": 0.9014084507042254,
      "grad_norm": 163.82469177246094,
      "learning_rate": 0.0006998044195197278,
      "loss": 3.7004,
      "step": 2496
    },
    {
      "epoch": 1.051643192488263,
      "grad_norm": 134.45631408691406,
      "learning_rate": 0.0006992253249390411,
      "loss": 3.6596,
      "step": 2912
    },
    {
      "epoch": 1.2018779342723005,
      "grad_norm": 110.51844787597656,
      "learning_rate": 0.0006982633339003477,
      "loss": 3.5916,
      "step": 3328
    },
    {
      "epoch": 1.352112676056338,
      "grad_norm": 119.85750579833984,
      "learning_rate": 0.000696919501902232,
      "loss": 3.5667,
      "step": 3744
    },
    {
      "epoch": 1.500180570603106,
      "eval_g2l_cer": 53.0855,
      "eval_g2l_gen_len": 4.9383,
      "eval_g2l_rouge1": 41.8458,
      "eval_g2l_rouge2": 30.8627,
      "eval_g2l_rougeL": 41.5679,
      "eval_g2l_rougeLsum": 41.5758,
      "eval_l2ex_cer": 85.4973,
      "eval_l2ex_gen_len": 30.0565,
      "eval_l2ex_rouge1": 30.6637,
      "eval_l2ex_rouge2": 14.7084,
      "eval_l2ex_rougeL": 27.5063,
      "eval_l2ex_rougeLsum": 27.5683,
      "eval_l2g_cer": 79.9636,
      "eval_l2g_gen_len": 23.344,
      "eval_l2g_rouge1": 38.4413,
      "eval_l2g_rouge2": 25.3548,
      "eval_l2g_rougeL": 36.2653,
      "eval_l2g_rougeLsum": 36.3346,
      "eval_loss": 3.4180662631988525,
      "eval_runtime": 157.8624,
      "eval_samples_per_second": 62.846,
      "eval_steps_per_second": 0.988,
      "step": 4154
    },
    {
      "epoch": 1.5023474178403755,
      "grad_norm": 130.5960235595703,
      "learning_rate": 0.000695195303399979,
      "loss": 3.5481,
      "step": 4160
    },
    {
      "epoch": 1.652582159624413,
      "grad_norm": 128.95892333984375,
      "learning_rate": 0.0006930926301877994,
      "loss": 3.5237,
      "step": 4576
    },
    {
      "epoch": 1.8028169014084507,
      "grad_norm": 129.52719116210938,
      "learning_rate": 0.000690613789323149,
      "loss": 3.5171,
      "step": 4992
    },
    {
      "epoch": 1.9530516431924883,
      "grad_norm": 122.73914337158203,
      "learning_rate": 0.0006877615005954206,
      "loss": 3.5051,
      "step": 5408
    },
    {
      "epoch": 2.103286384976526,
      "grad_norm": 100.69551849365234,
      "learning_rate": 0.0006845388935417848,
      "loss": 3.4086,
      "step": 5824
    },
    {
      "epoch": 2.2502708559046587,
      "eval_g2l_cer": 50.7779,
      "eval_g2l_gen_len": 4.8103,
      "eval_g2l_rouge1": 42.8195,
      "eval_g2l_rouge2": 31.8093,
      "eval_g2l_rougeL": 42.5575,
      "eval_g2l_rougeLsum": 42.5624,
      "eval_l2ex_cer": 83.3686,
      "eval_l2ex_gen_len": 29.3011,
      "eval_l2ex_rouge1": 31.5473,
      "eval_l2ex_rouge2": 15.1028,
      "eval_l2ex_rougeL": 28.0691,
      "eval_l2ex_rougeLsum": 28.1825,
      "eval_l2g_cer": 77.4412,
      "eval_l2g_gen_len": 21.7602,
      "eval_l2g_rouge1": 39.093,
      "eval_l2g_rouge2": 25.9346,
      "eval_l2g_rougeL": 37.0244,
      "eval_l2g_rougeLsum": 37.0876,
      "eval_loss": 3.3524162769317627,
      "eval_runtime": 157.4639,
      "eval_samples_per_second": 63.005,
      "eval_steps_per_second": 0.991,
      "step": 6231
    },
    {
      "epoch": 2.2535211267605635,
      "grad_norm": 101.63910675048828,
      "learning_rate": 0.0006809495040134555,
      "loss": 3.3874,
      "step": 6240
    },
    {
      "epoch": 2.403755868544601,
      "grad_norm": 99.6649398803711,
      "learning_rate": 0.000676997270296147,
      "loss": 3.3843,
      "step": 6656
    },
    {
      "epoch": 2.5539906103286385,
      "grad_norm": 102.08024597167969,
      "learning_rate": 0.0006726865287889783,
      "loss": 3.379,
      "step": 7072
    },
    {
      "epoch": 2.704225352112676,
      "grad_norm": 93.55581665039062,
      "learning_rate": 0.0006680220092465666,
      "loss": 3.3681,
      "step": 7488
    },
    {
      "epoch": 2.8544600938967135,
      "grad_norm": 95.19955444335938,
      "learning_rate": 0.0006630088295895313,
      "loss": 3.3544,
      "step": 7904
    },
    {
      "epoch": 3.0003611412062114,
      "eval_g2l_cer": 47.3806,
      "eval_g2l_gen_len": 3.8524,
      "eval_g2l_rouge1": 44.4831,
      "eval_g2l_rouge2": 34.1168,
      "eval_g2l_rougeL": 44.3015,
      "eval_g2l_rougeLsum": 44.3284,
      "eval_l2ex_cer": 85.008,
      "eval_l2ex_gen_len": 20.6814,
      "eval_l2ex_rouge1": 31.6888,
      "eval_l2ex_rouge2": 16.0577,
      "eval_l2ex_rougeL": 28.2507,
      "eval_l2ex_rougeLsum": 28.3238,
      "eval_l2g_cer": 75.025,
      "eval_l2g_gen_len": 15.1432,
      "eval_l2g_rouge1": 38.7981,
      "eval_l2g_rouge2": 26.3158,
      "eval_l2g_rougeL": 36.9181,
      "eval_l2g_rougeLsum": 36.9696,
      "eval_loss": 3.283109188079834,
      "eval_runtime": 151.9761,
      "eval_samples_per_second": 65.28,
      "eval_steps_per_second": 1.026,
      "step": 8308
    },
    {
      "epoch": 3.004694835680751,
      "grad_norm": 93.19324493408203,
      "learning_rate": 0.0006576524902890998,
      "loss": 3.3457,
      "step": 8320
    },
    {
      "epoch": 3.1549295774647885,
      "grad_norm": 104.09616088867188,
      "learning_rate": 0.0006519588683319802,
      "loss": 3.2525,
      "step": 8736
    },
    {
      "epoch": 3.3051643192488265,
      "grad_norm": 104.19088745117188,
      "learning_rate": 0.0006459342107721181,
      "loss": 3.2478,
      "step": 9152
    },
    {
      "epoch": 3.455399061032864,
      "grad_norm": 83.33271789550781,
      "learning_rate": 0.0006395851278764164,
      "loss": 3.2538,
      "step": 9568
    },
    {
      "epoch": 3.6056338028169015,
      "grad_norm": 97.30130767822266,
      "learning_rate": 0.0006329185858719365,
      "loss": 3.2493,
      "step": 9984
    },
    {
      "epoch": 3.7504514265077646,
      "eval_g2l_cer": 48.347,
      "eval_g2l_gen_len": 4.6986,
      "eval_g2l_rouge1": 45.126,
      "eval_g2l_rouge2": 34.0899,
      "eval_g2l_rougeL": 44.8883,
      "eval_g2l_rougeLsum": 44.8974,
      "eval_l2ex_cer": 83.8663,
      "eval_l2ex_gen_len": 27.5262,
      "eval_l2ex_rouge1": 32.7987,
      "eval_l2ex_rouge2": 15.8604,
      "eval_l2ex_rougeL": 28.7942,
      "eval_l2ex_rougeLsum": 28.8651,
      "eval_l2g_cer": 76.6028,
      "eval_l2g_gen_len": 21.6109,
      "eval_l2g_rouge1": 39.8458,
      "eval_l2g_rouge2": 26.7789,
      "eval_l2g_rougeL": 37.6794,
      "eval_l2g_rougeLsum": 37.7404,
      "eval_loss": 3.271592855453491,
      "eval_runtime": 157.264,
      "eval_samples_per_second": 63.085,
      "eval_steps_per_second": 0.992,
      "step": 10385
    },
    {
      "epoch": 3.755868544600939,
      "grad_norm": 87.18195343017578,
      "learning_rate": 0.0006259418993025392,
      "loss": 3.2387,
      "step": 10400
    },
    {
      "epoch": 3.9061032863849765,
      "grad_norm": 77.52426147460938,
      "learning_rate": 0.0006186627230033526,
      "loss": 3.2361,
      "step": 10816
    },
    {
      "epoch": 4.056338028169014,
      "grad_norm": 101.6712646484375,
      "learning_rate": 0.0006110890437018696,
      "loss": 3.1968,
      "step": 11232
    },
    {
      "epoch": 4.206572769953052,
      "grad_norm": 99.43510437011719,
      "learning_rate": 0.0006032291712548952,
      "loss": 3.1304,
      "step": 11648
    },
    {
      "epoch": 4.356807511737089,
      "grad_norm": 57.04045867919922,
      "learning_rate": 0.0005950917295309531,
      "loss": 3.1457,
      "step": 12064
    },
    {
      "epoch": 4.500541711809317,
      "eval_g2l_cer": 47.8444,
      "eval_g2l_gen_len": 4.6609,
      "eval_g2l_rouge1": 45.1043,
      "eval_g2l_rouge2": 34.1441,
      "eval_g2l_rougeL": 44.9302,
      "eval_g2l_rougeLsum": 44.9463,
      "eval_l2ex_cer": 82.1447,
      "eval_l2ex_gen_len": 26.408,
      "eval_l2ex_rouge1": 33.2278,
      "eval_l2ex_rouge2": 16.1616,
      "eval_l2ex_rougeL": 29.2879,
      "eval_l2ex_rougeLsum": 29.3229,
      "eval_l2g_cer": 76.1324,
      "eval_l2g_gen_len": 20.8059,
      "eval_l2g_rouge1": 40.2785,
      "eval_l2g_rouge2": 27.06,
      "eval_l2g_rougeL": 38.0678,
      "eval_l2g_rougeLsum": 38.1592,
      "eval_loss": 3.266465902328491,
      "eval_runtime": 157.9782,
      "eval_samples_per_second": 62.8,
      "eval_steps_per_second": 0.987,
      "step": 12462
    },
    {
      "epoch": 4.507042253521127,
      "grad_norm": 67.55484771728516,
      "learning_rate": 0.0005866856469481595,
      "loss": 3.145,
      "step": 12480
    },
    {
      "epoch": 4.657276995305164,
      "grad_norm": 76.89288330078125,
      "learning_rate": 0.0005780201466779439,
      "loss": 3.1236,
      "step": 12896
    },
    {
      "epoch": 4.807511737089202,
      "grad_norm": 87.32029724121094,
      "learning_rate": 0.0005691047365253672,
      "loss": 3.1392,
      "step": 13312
    },
    {
      "epoch": 4.957746478873239,
      "grad_norm": 109.48633575439453,
      "learning_rate": 0.0005599491984971357,
      "loss": 3.1438,
      "step": 13728
    },
    {
      "epoch": 5.107981220657277,
      "grad_norm": 66.15205383300781,
      "learning_rate": 0.0005505635780687648,
      "loss": 3.0644,
      "step": 14144
    },
    {
      "epoch": 5.250631997110871,
      "eval_g2l_cer": 46.2679,
      "eval_g2l_gen_len": 3.7853,
      "eval_g2l_rouge1": 45.7284,
      "eval_g2l_rouge2": 35.1094,
      "eval_g2l_rougeL": 45.5565,
      "eval_g2l_rougeLsum": 45.5772,
      "eval_l2ex_cer": 81.8173,
      "eval_l2ex_gen_len": 18.9137,
      "eval_l2ex_rouge1": 33.0179,
      "eval_l2ex_rouge2": 16.8664,
      "eval_l2ex_rougeL": 29.3623,
      "eval_l2ex_rougeLsum": 29.4514,
      "eval_l2g_cer": 70.134,
      "eval_l2g_gen_len": 14.2539,
      "eval_l2g_rouge1": 40.477,
      "eval_l2g_rouge2": 27.4563,
      "eval_l2g_rougeL": 38.398,
      "eval_l2g_rougeLsum": 38.4554,
      "eval_loss": 3.223316192626953,
      "eval_runtime": 147.2815,
      "eval_samples_per_second": 67.361,
      "eval_steps_per_second": 1.059,
      "step": 14539
    },
    {
      "epoch": 5.258215962441315,
      "grad_norm": 120.2764663696289,
      "learning_rate": 0.0005409581731626619,
      "loss": 3.0359,
      "step": 14560
    },
    {
      "epoch": 5.408450704225352,
      "grad_norm": 66.7710189819336,
      "learning_rate": 0.0005311435228492235,
      "loss": 3.0459,
      "step": 14976
    },
    {
      "epoch": 5.55868544600939,
      "grad_norm": 103.15011596679688,
      "learning_rate": 0.0005211303957833462,
      "loss": 3.0499,
      "step": 15392
    },
    {
      "epoch": 5.708920187793427,
      "grad_norm": 91.70545196533203,
      "learning_rate": 0.0005109297783890348,
      "loss": 3.0455,
      "step": 15808
    },
    {
      "epoch": 5.859154929577465,
      "grad_norm": 62.21952819824219,
      "learning_rate": 0.0005005528628050757,
      "loss": 3.0468,
      "step": 16224
    },
    {
      "epoch": 6.000722282412423,
      "eval_g2l_cer": 46.2085,
      "eval_g2l_gen_len": 4.1369,
      "eval_g2l_rouge1": 46.8001,
      "eval_g2l_rouge2": 35.9998,
      "eval_g2l_rougeL": 46.6066,
      "eval_g2l_rougeLsum": 46.6501,
      "eval_l2ex_cer": 79.8794,
      "eval_l2ex_gen_len": 18.741,
      "eval_l2ex_rouge1": 33.8832,
      "eval_l2ex_rouge2": 17.2827,
      "eval_l2ex_rougeL": 29.9003,
      "eval_l2ex_rougeLsum": 29.9671,
      "eval_l2g_cer": 70.1127,
      "eval_l2g_gen_len": 14.8234,
      "eval_l2g_rouge1": 40.9429,
      "eval_l2g_rouge2": 27.7443,
      "eval_l2g_rougeL": 38.7561,
      "eval_l2g_rougeLsum": 38.812,
      "eval_loss": 3.198634624481201,
      "eval_runtime": 147.3585,
      "eval_samples_per_second": 67.326,
      "eval_steps_per_second": 1.059,
      "step": 16616
    },
    {
      "epoch": 6.009389671361502,
      "grad_norm": 60.71475601196289,
      "learning_rate": 0.0004900110346049988,
      "loss": 3.0478,
      "step": 16640
    },
    {
      "epoch": 6.15962441314554,
      "grad_norm": 74.91094207763672,
      "learning_rate": 0.0004793158603048017,
      "loss": 2.9501,
      "step": 17056
    },
    {
      "epoch": 6.309859154929577,
      "grad_norm": 98.47882843017578,
      "learning_rate": 0.00046847907467214455,
      "loss": 2.9655,
      "step": 17472
    },
    {
      "epoch": 6.460093896713615,
      "grad_norm": 56.426727294921875,
      "learning_rate": 0.00045751256785093856,
      "loss": 2.9753,
      "step": 17888
    },
    {
      "epoch": 6.610328638497653,
      "grad_norm": 47.2915153503418,
      "learning_rate": 0.00044642837231545463,
      "loss": 2.9699,
      "step": 18304
    },
    {
      "epoch": 6.750812567713976,
      "eval_g2l_cer": 46.2359,
      "eval_g2l_gen_len": 4.4804,
      "eval_g2l_rouge1": 47.0405,
      "eval_g2l_rouge2": 35.9417,
      "eval_g2l_rougeL": 46.8609,
      "eval_g2l_rougeLsum": 46.8948,
      "eval_l2ex_cer": 81.7484,
      "eval_l2ex_gen_len": 23.0452,
      "eval_l2ex_rouge1": 33.3772,
      "eval_l2ex_rouge2": 16.3629,
      "eval_l2ex_rougeL": 29.2446,
      "eval_l2ex_rougeLsum": 29.3549,
      "eval_l2g_cer": 74.5769,
      "eval_l2g_gen_len": 19.6127,
      "eval_l2g_rouge1": 41.2652,
      "eval_l2g_rouge2": 27.8695,
      "eval_l2g_rougeL": 38.8879,
      "eval_l2g_rougeLsum": 38.9752,
      "eval_loss": 3.2092766761779785,
      "eval_runtime": 156.9963,
      "eval_samples_per_second": 63.193,
      "eval_steps_per_second": 0.994,
      "step": 18693
    },
    {
      "epoch": 6.76056338028169,
      "grad_norm": 40.62468338012695,
      "learning_rate": 0.0004352386496682677,
      "loss": 2.9756,
      "step": 18720
    },
    {
      "epoch": 6.910798122065728,
      "grad_norm": 44.248905181884766,
      "learning_rate": 0.00042395567729652095,
      "loss": 2.9631,
      "step": 19136
    },
    {
      "epoch": 7.061032863849765,
      "grad_norm": 71.54096984863281,
      "learning_rate": 0.00041259183490115084,
      "loss": 2.9346,
      "step": 19552
    },
    {
      "epoch": 7.211267605633803,
      "grad_norm": 63.520111083984375,
      "learning_rate": 0.0004011595909138531,
      "loss": 2.8878,
      "step": 19968
    },
    {
      "epoch": 7.36150234741784,
      "grad_norm": 52.090240478515625,
      "learning_rate": 0.0003896714888166938,
      "loss": 2.8877,
      "step": 20384
    },
    {
      "epoch": 7.500902853015529,
      "eval_g2l_cer": 45.7698,
      "eval_g2l_gen_len": 4.4545,
      "eval_g2l_rouge1": 47.583,
      "eval_g2l_rouge2": 36.2797,
      "eval_g2l_rougeL": 47.433,
      "eval_g2l_rougeLsum": 47.4196,
      "eval_l2ex_cer": 81.5805,
      "eval_l2ex_gen_len": 23.9979,
      "eval_l2ex_rouge1": 33.9624,
      "eval_l2ex_rouge2": 16.6357,
      "eval_l2ex_rougeL": 29.8228,
      "eval_l2ex_rougeLsum": 29.9123,
      "eval_l2g_cer": 73.7736,
      "eval_l2g_gen_len": 19.3481,
      "eval_l2g_rouge1": 41.2698,
      "eval_l2g_rouge2": 27.863,
      "eval_l2g_rougeL": 38.869,
      "eval_l2g_rougeLsum": 38.9562,
      "eval_loss": 3.214210033416748,
      "eval_runtime": 157.1314,
      "eval_samples_per_second": 63.138,
      "eval_steps_per_second": 0.993,
      "step": 20770
    },
    {
      "epoch": 7.511737089201878,
      "grad_norm": 44.08253479003906,
      "learning_rate": 0.0003781401333793737,
      "loss": 2.888,
      "step": 20800
    },
    {
      "epoch": 7.661971830985916,
      "grad_norm": 50.90373611450195,
      "learning_rate": 0.00036657817682924954,
      "loss": 2.9017,
      "step": 21216
    },
    {
      "epoch": 7.812206572769953,
      "grad_norm": 70.20822143554688,
      "learning_rate": 0.0003549983049692832,
      "loss": 2.8951,
      "step": 21632
    },
    {
      "epoch": 7.962441314553991,
      "grad_norm": 24.146835327148438,
      "learning_rate": 0.00034341322325915155,
      "loss": 2.9011,
      "step": 22048
    },
    {
      "epoch": 8.112676056338028,
      "grad_norm": 43.61560821533203,
      "learning_rate": 0.0003318356428747905,
      "loss": 2.8542,
      "step": 22464
    },
    {
      "epoch": 8.250993138317082,
      "eval_g2l_cer": 45.6053,
      "eval_g2l_gen_len": 3.9989,
      "eval_g2l_rouge1": 47.3053,
      "eval_g2l_rouge2": 36.3191,
      "eval_g2l_rougeL": 47.2008,
      "eval_g2l_rougeLsum": 47.2596,
      "eval_l2ex_cer": 81.0598,
      "eval_l2ex_gen_len": 20.2055,
      "eval_l2ex_rouge1": 33.9028,
      "eval_l2ex_rouge2": 16.9085,
      "eval_l2ex_rougeL": 29.7011,
      "eval_l2ex_rougeLsum": 29.7791,
      "eval_l2g_cer": 71.3827,
      "eval_l2g_gen_len": 15.4007,
      "eval_l2g_rouge1": 41.2614,
      "eval_l2g_rouge2": 28.0203,
      "eval_l2g_rougeL": 39.0253,
      "eval_l2g_rougeLsum": 39.0954,
      "eval_loss": 3.200995922088623,
      "eval_runtime": 148.5568,
      "eval_samples_per_second": 66.783,
      "eval_steps_per_second": 1.05,
      "step": 22847
    },
    {
      "epoch": 8.262910798122066,
      "grad_norm": 34.18248748779297,
      "learning_rate": 0.0003202782667616653,
      "loss": 2.8359,
      "step": 22880
    },
    {
      "epoch": 8.413145539906104,
      "grad_norm": 46.180694580078125,
      "learning_rate": 0.00030875377569707275,
      "loss": 2.8378,
      "step": 23296
    },
    {
      "epoch": 8.56338028169014,
      "grad_norm": 42.6771354675293,
      "learning_rate": 0.000297274814376765,
      "loss": 2.8327,
      "step": 23712
    },
    {
      "epoch": 8.713615023474178,
      "grad_norm": 19.82225227355957,
      "learning_rate": 0.0002858539775411638,
      "loss": 2.843,
      "step": 24128
    },
    {
      "epoch": 8.863849765258216,
      "grad_norm": 19.00598907470703,
      "learning_rate": 0.00027450379615638425,
      "loss": 2.8231,
      "step": 24544
    },
    {
      "epoch": 9.001083423618635,
      "eval_g2l_cer": 45.4637,
      "eval_g2l_gen_len": 4.0383,
      "eval_g2l_rouge1": 47.3272,
      "eval_g2l_rouge2": 36.2943,
      "eval_g2l_rougeL": 47.1452,
      "eval_g2l_rougeLsum": 47.2213,
      "eval_l2ex_cer": 80.295,
      "eval_l2ex_gen_len": 19.4255,
      "eval_l2ex_rouge1": 34.2944,
      "eval_l2ex_rouge2": 17.3206,
      "eval_l2ex_rougeL": 30.0912,
      "eval_l2ex_rougeLsum": 30.1909,
      "eval_l2g_cer": 69.8778,
      "eval_l2g_gen_len": 14.8225,
      "eval_l2g_rouge1": 41.2308,
      "eval_l2g_rouge2": 27.9775,
      "eval_l2g_rougeL": 39.0866,
      "eval_l2g_rougeLsum": 39.1425,
      "eval_loss": 3.2024292945861816,
      "eval_runtime": 146.3445,
      "eval_samples_per_second": 67.792,
      "eval_steps_per_second": 1.066,
      "step": 24924
    },
    {
      "epoch": 9.014084507042254,
      "grad_norm": 22.52915382385254,
      "learning_rate": 0.00026323672366523274,
      "loss": 2.8413,
      "step": 24960
    },
    {
      "epoch": 9.164319248826292,
      "grad_norm": 30.310894012451172,
      "learning_rate": 0.0002520651223232633,
      "loss": 2.7777,
      "step": 25376
    },
    {
      "epoch": 9.314553990610328,
      "grad_norm": 31.759746551513672,
      "learning_rate": 0.000241001249634885,
      "loss": 2.7963,
      "step": 25792
    },
    {
      "epoch": 9.464788732394366,
      "grad_norm": 13.962241172790527,
      "learning_rate": 0.00023005724490440243,
      "loss": 2.7968,
      "step": 26208
    },
    {
      "epoch": 9.615023474178404,
      "grad_norm": 20.10390853881836,
      "learning_rate": 0.0002192451159167455,
      "loss": 2.7902,
      "step": 26624
    },
    {
      "epoch": 9.751173708920188,
      "eval_g2l_cer": 45.3083,
      "eval_g2l_gen_len": 4.3145,
      "eval_g2l_rouge1": 47.8559,
      "eval_g2l_rouge2": 36.8346,
      "eval_g2l_rougeL": 47.6856,
      "eval_g2l_rougeLsum": 47.712,
      "eval_l2ex_cer": 80.353,
      "eval_l2ex_gen_len": 21.4861,
      "eval_l2ex_rouge1": 34.306,
      "eval_l2ex_rouge2": 16.8625,
      "eval_l2ex_rougeL": 29.7995,
      "eval_l2ex_rougeLsum": 29.9518,
      "eval_l2g_cer": 71.8262,
      "eval_l2g_gen_len": 17.528,
      "eval_l2g_rouge1": 41.8619,
      "eval_l2g_rouge2": 28.3549,
      "eval_l2g_rougeL": 39.5168,
      "eval_l2g_rougeLsum": 39.5953,
      "eval_loss": 3.1981074810028076,
      "eval_runtime": 153.7484,
      "eval_samples_per_second": 64.527,
      "eval_steps_per_second": 1.015,
      "step": 27001
    }
  ],
  "logging_steps": 416,
  "max_steps": 41535,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 15,
  "save_steps": 2077,
  "total_flos": 2.9580064377012224e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}