{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0269105579241737,
  "eval_steps": 200,
  "global_step": 11200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "eval_bertscore": 0.7401605248451233,
      "eval_loss": 1.9530484676361084,
      "eval_rouge1": 0.6562857460474375,
      "eval_rouge2": 0.3640670727106235,
      "eval_rougeL": 0.5655212336424695,
      "eval_rougeLsum": 0.6414840198810386,
      "eval_runtime": 21.7196,
      "eval_samples_per_second": 1.381,
      "eval_steps_per_second": 0.691,
      "step": 200
    },
    {
      "epoch": 0.02,
      "grad_norm": 0.25105270743370056,
      "learning_rate": 0.00019771674842969145,
      "loss": 1.7353,
      "step": 250
    },
    {
      "epoch": 0.04,
      "eval_bertscore": 0.7432050108909607,
      "eval_loss": 1.9583823680877686,
      "eval_rouge1": 0.6554226269617707,
      "eval_rouge2": 0.36661086995296877,
      "eval_rougeL": 0.5637448790342183,
      "eval_rougeLsum": 0.6419796784912521,
      "eval_runtime": 21.9623,
      "eval_samples_per_second": 1.366,
      "eval_steps_per_second": 0.683,
      "step": 400
    },
    {
      "epoch": 0.05,
      "grad_norm": 0.26550447940826416,
      "learning_rate": 0.00019542432717436156,
      "loss": 1.7786,
      "step": 500
    },
    {
      "epoch": 0.06,
      "eval_bertscore": 0.7469045519828796,
      "eval_loss": 1.9245686531066895,
      "eval_rouge1": 0.6662431635890791,
      "eval_rouge2": 0.3735263724826765,
      "eval_rougeL": 0.5755071616151013,
      "eval_rougeLsum": 0.6538383087686117,
      "eval_runtime": 21.5302,
      "eval_samples_per_second": 1.393,
      "eval_steps_per_second": 0.697,
      "step": 600
    },
    {
      "epoch": 0.07,
      "grad_norm": 0.1538015753030777,
      "learning_rate": 0.0001931319059190317,
      "loss": 1.8851,
      "step": 750
    },
    {
      "epoch": 0.07,
      "eval_bertscore": 0.7442477941513062,
      "eval_loss": 1.9187489748001099,
      "eval_rouge1": 0.6606221897489035,
      "eval_rouge2": 0.368654563659435,
      "eval_rougeL": 0.5731546210408094,
      "eval_rougeLsum": 0.6470590823125606,
      "eval_runtime": 21.9831,
      "eval_samples_per_second": 1.365,
      "eval_steps_per_second": 0.682,
      "step": 800
    },
    {
      "epoch": 0.09,
      "grad_norm": 0.1681252270936966,
      "learning_rate": 0.0001908394846637018,
      "loss": 1.8919,
      "step": 1000
    },
    {
      "epoch": 0.09,
      "eval_bertscore": 0.7458053231239319,
      "eval_loss": 1.9159075021743774,
      "eval_rouge1": 0.6621259186456026,
      "eval_rouge2": 0.372024043683234,
      "eval_rougeL": 0.5743354509339939,
      "eval_rougeLsum": 0.6491550893780276,
      "eval_runtime": 21.7159,
      "eval_samples_per_second": 1.381,
      "eval_steps_per_second": 0.691,
      "step": 1000
    },
    {
      "epoch": 0.11,
      "eval_bertscore": 0.7468854784965515,
      "eval_loss": 1.9140182733535767,
      "eval_rouge1": 0.6626581781149132,
      "eval_rouge2": 0.37318557504782157,
      "eval_rougeL": 0.5759264203594217,
      "eval_rougeLsum": 0.6490702446275723,
      "eval_runtime": 21.6486,
      "eval_samples_per_second": 1.386,
      "eval_steps_per_second": 0.693,
      "step": 1200
    },
    {
      "epoch": 0.11,
      "grad_norm": 0.1552441120147705,
      "learning_rate": 0.00018854706340837193,
      "loss": 1.9052,
      "step": 1250
    },
    {
      "epoch": 0.13,
      "eval_bertscore": 0.7475314736366272,
      "eval_loss": 1.913794755935669,
      "eval_rouge1": 0.6648687174353192,
      "eval_rouge2": 0.3760379232448734,
      "eval_rougeL": 0.5784915488164926,
      "eval_rougeLsum": 0.6513864520108938,
      "eval_runtime": 21.664,
      "eval_samples_per_second": 1.385,
      "eval_steps_per_second": 0.692,
      "step": 1400
    },
    {
      "epoch": 0.14,
      "grad_norm": 0.14638397097587585,
      "learning_rate": 0.00018625464215304204,
      "loss": 1.8843,
      "step": 1500
    },
    {
      "epoch": 0.15,
      "eval_bertscore": 0.747238039970398,
      "eval_loss": 1.9117029905319214,
      "eval_rouge1": 0.6638085237198453,
      "eval_rouge2": 0.3742779818055127,
      "eval_rougeL": 0.5754209460423059,
      "eval_rougeLsum": 0.6506476155592722,
      "eval_runtime": 21.9308,
      "eval_samples_per_second": 1.368,
      "eval_steps_per_second": 0.684,
      "step": 1600
    },
    {
      "epoch": 0.16,
      "grad_norm": 0.15738993883132935,
      "learning_rate": 0.00018396222089771218,
      "loss": 1.8964,
      "step": 1750
    },
    {
      "epoch": 0.17,
      "eval_bertscore": 0.7473016381263733,
      "eval_loss": 1.9117563962936401,
      "eval_rouge1": 0.6620053151663765,
      "eval_rouge2": 0.37406692119411245,
      "eval_rougeL": 0.5758911607323577,
      "eval_rougeLsum": 0.6494070575604445,
      "eval_runtime": 21.6727,
      "eval_samples_per_second": 1.384,
      "eval_steps_per_second": 0.692,
      "step": 1800
    },
    {
      "epoch": 0.18,
      "grad_norm": 0.1588907092809677,
      "learning_rate": 0.00018166979964238228,
      "loss": 1.8827,
      "step": 2000
    },
    {
      "epoch": 0.18,
      "eval_bertscore": 0.7485987544059753,
      "eval_loss": 1.9126006364822388,
      "eval_rouge1": 0.6641836156334741,
      "eval_rouge2": 0.37320215574735827,
      "eval_rougeL": 0.5783015040447993,
      "eval_rougeLsum": 0.6522235940423647,
      "eval_runtime": 21.9759,
      "eval_samples_per_second": 1.365,
      "eval_steps_per_second": 0.683,
      "step": 2000
    },
    {
      "epoch": 0.2,
      "eval_bertscore": 0.7482583522796631,
      "eval_loss": 1.9075205326080322,
      "eval_rouge1": 0.6658219484766166,
      "eval_rouge2": 0.37723364952258465,
      "eval_rougeL": 0.5769040785174693,
      "eval_rougeLsum": 0.6511328888044219,
      "eval_runtime": 21.5892,
      "eval_samples_per_second": 1.39,
      "eval_steps_per_second": 0.695,
      "step": 2200
    },
    {
      "epoch": 0.21,
      "grad_norm": 0.15247465670108795,
      "learning_rate": 0.00017937737838705242,
      "loss": 1.8831,
      "step": 2250
    },
    {
      "epoch": 0.22,
      "eval_bertscore": 0.7460805177688599,
      "eval_loss": 1.9088668823242188,
      "eval_rouge1": 0.6627321043292516,
      "eval_rouge2": 0.3696581195003696,
      "eval_rougeL": 0.5740988544467178,
      "eval_rougeLsum": 0.6478729042661874,
      "eval_runtime": 21.9221,
      "eval_samples_per_second": 1.368,
      "eval_steps_per_second": 0.684,
      "step": 2400
    },
    {
      "epoch": 0.23,
      "grad_norm": 0.1587379276752472,
      "learning_rate": 0.00017708495713172253,
      "loss": 1.8829,
      "step": 2500
    },
    {
      "epoch": 0.24,
      "eval_bertscore": 0.7472203373908997,
      "eval_loss": 1.906219482421875,
      "eval_rouge1": 0.6637415370426804,
      "eval_rouge2": 0.37565276875837994,
      "eval_rougeL": 0.5773879369079004,
      "eval_rougeLsum": 0.6488719947518645,
      "eval_runtime": 21.8112,
      "eval_samples_per_second": 1.375,
      "eval_steps_per_second": 0.688,
      "step": 2600
    },
    {
      "epoch": 0.25,
      "grad_norm": 0.1558646410703659,
      "learning_rate": 0.00017479253587639266,
      "loss": 1.8978,
      "step": 2750
    },
    {
      "epoch": 0.26,
      "eval_bertscore": 0.7466126680374146,
      "eval_loss": 1.9045982360839844,
      "eval_rouge1": 0.6616225540296956,
      "eval_rouge2": 0.37370762164745913,
      "eval_rougeL": 0.5759418528371097,
      "eval_rougeLsum": 0.6479977636906877,
      "eval_runtime": 21.8772,
      "eval_samples_per_second": 1.371,
      "eval_steps_per_second": 0.686,
      "step": 2800
    },
    {
      "epoch": 0.28,
      "grad_norm": 0.14783035218715668,
      "learning_rate": 0.00017250011462106277,
      "loss": 1.8978,
      "step": 3000
    },
    {
      "epoch": 0.28,
      "eval_bertscore": 0.7485571503639221,
      "eval_loss": 1.9035439491271973,
      "eval_rouge1": 0.6664050030501707,
      "eval_rouge2": 0.379492440917784,
      "eval_rougeL": 0.5806973731221475,
      "eval_rougeLsum": 0.6524346156604702,
      "eval_runtime": 21.9217,
      "eval_samples_per_second": 1.369,
      "eval_steps_per_second": 0.684,
      "step": 3000
    },
    {
      "epoch": 0.29,
      "eval_bertscore": 0.7483461499214172,
      "eval_loss": 1.9022458791732788,
      "eval_rouge1": 0.6618989733136488,
      "eval_rouge2": 0.37377379177271053,
      "eval_rougeL": 0.5780989082173933,
      "eval_rougeLsum": 0.6490379362631586,
      "eval_runtime": 21.7847,
      "eval_samples_per_second": 1.377,
      "eval_steps_per_second": 0.689,
      "step": 3200
    },
    {
      "epoch": 0.3,
      "grad_norm": 0.16484151780605316,
      "learning_rate": 0.0001702076933657329,
      "loss": 1.8715,
      "step": 3250
    },
    {
      "epoch": 0.31,
      "eval_bertscore": 0.7490711212158203,
      "eval_loss": 1.9013088941574097,
      "eval_rouge1": 0.6638141306545007,
      "eval_rouge2": 0.37356255553691553,
      "eval_rougeL": 0.577975450251653,
      "eval_rougeLsum": 0.6492478632295806,
      "eval_runtime": 21.8807,
      "eval_samples_per_second": 1.371,
      "eval_steps_per_second": 0.686,
      "step": 3400
    },
    {
      "epoch": 0.32,
      "grad_norm": 0.14130128920078278,
      "learning_rate": 0.000167915272110403,
      "loss": 1.8819,
      "step": 3500
    },
    {
      "epoch": 0.33,
      "eval_bertscore": 0.7475283741950989,
      "eval_loss": 1.9002223014831543,
      "eval_rouge1": 0.6628836314413511,
      "eval_rouge2": 0.37179988805094977,
      "eval_rougeL": 0.5764222388923268,
      "eval_rougeLsum": 0.649864229310889,
      "eval_runtime": 22.124,
      "eval_samples_per_second": 1.356,
      "eval_steps_per_second": 0.678,
      "step": 3600
    },
    {
      "epoch": 0.34,
      "grad_norm": 0.1494186818599701,
      "learning_rate": 0.00016562285085507315,
      "loss": 1.8828,
      "step": 3750
    },
    {
      "epoch": 0.35,
      "eval_bertscore": 0.7486498951911926,
      "eval_loss": 1.9011151790618896,
      "eval_rouge1": 0.6669673680023924,
      "eval_rouge2": 0.3771780440183751,
      "eval_rougeL": 0.5792518624130161,
      "eval_rougeLsum": 0.6534484242953056,
      "eval_runtime": 21.813,
      "eval_samples_per_second": 1.375,
      "eval_steps_per_second": 0.688,
      "step": 3800
    },
    {
      "epoch": 0.37,
      "grad_norm": 0.14803479611873627,
      "learning_rate": 0.00016333042959974325,
      "loss": 1.8761,
      "step": 4000
    },
    {
      "epoch": 0.37,
      "eval_bertscore": 0.7471507787704468,
      "eval_loss": 1.9001713991165161,
      "eval_rouge1": 0.6651735220672027,
      "eval_rouge2": 0.3736698451416937,
      "eval_rougeL": 0.5779938808281732,
      "eval_rougeLsum": 0.6509815118131576,
      "eval_runtime": 21.5004,
      "eval_samples_per_second": 1.395,
      "eval_steps_per_second": 0.698,
      "step": 4000
    },
    {
      "epoch": 0.39,
      "eval_bertscore": 0.7485501766204834,
      "eval_loss": 1.8993827104568481,
      "eval_rouge1": 0.6646424082737133,
      "eval_rouge2": 0.37318485364862475,
      "eval_rougeL": 0.5773338159759467,
      "eval_rougeLsum": 0.6507594353103527,
      "eval_runtime": 21.2963,
      "eval_samples_per_second": 1.409,
      "eval_steps_per_second": 0.704,
      "step": 4200
    },
    {
      "epoch": 0.39,
      "grad_norm": 0.15562959015369415,
      "learning_rate": 0.0001610380083444134,
      "loss": 1.8672,
      "step": 4250
    },
    {
      "epoch": 0.4,
      "eval_bertscore": 0.7469989061355591,
      "eval_loss": 1.900540828704834,
      "eval_rouge1": 0.6620664558691891,
      "eval_rouge2": 0.37299419371215703,
      "eval_rougeL": 0.5765442194831125,
      "eval_rougeLsum": 0.6472642385429858,
      "eval_runtime": 21.9086,
      "eval_samples_per_second": 1.369,
      "eval_steps_per_second": 0.685,
      "step": 4400
    },
    {
      "epoch": 0.41,
      "grad_norm": 0.15420928597450256,
      "learning_rate": 0.0001587455870890835,
      "loss": 1.8754,
      "step": 4500
    },
    {
      "epoch": 0.42,
      "eval_bertscore": 0.7475299835205078,
      "eval_loss": 1.8988685607910156,
      "eval_rouge1": 0.6656661780424216,
      "eval_rouge2": 0.37467258880478527,
      "eval_rougeL": 0.5770800519970718,
      "eval_rougeLsum": 0.6522703864288166,
      "eval_runtime": 22.063,
      "eval_samples_per_second": 1.36,
      "eval_steps_per_second": 0.68,
      "step": 4600
    },
    {
      "epoch": 0.44,
      "grad_norm": 0.15809176862239838,
      "learning_rate": 0.00015645316583375363,
      "loss": 1.8848,
      "step": 4750
    },
    {
      "epoch": 0.44,
      "eval_bertscore": 0.7490234375,
      "eval_loss": 1.8991097211837769,
      "eval_rouge1": 0.6651730257289085,
      "eval_rouge2": 0.3778893043274054,
      "eval_rougeL": 0.5782673838033503,
      "eval_rougeLsum": 0.6516865674488727,
      "eval_runtime": 22.0202,
      "eval_samples_per_second": 1.362,
      "eval_steps_per_second": 0.681,
      "step": 4800
    },
    {
      "epoch": 0.46,
      "grad_norm": 0.17979757487773895,
      "learning_rate": 0.00015416074457842374,
      "loss": 1.8851,
      "step": 5000
    },
    {
      "epoch": 0.46,
      "eval_bertscore": 0.7492111325263977,
      "eval_loss": 1.897339940071106,
      "eval_rouge1": 0.665920573890169,
      "eval_rouge2": 0.37917993898535385,
      "eval_rougeL": 0.5800236892888617,
      "eval_rougeLsum": 0.6529131688355863,
      "eval_runtime": 21.6103,
      "eval_samples_per_second": 1.388,
      "eval_steps_per_second": 0.694,
      "step": 5000
    },
    {
      "epoch": 0.48,
      "eval_bertscore": 0.7491253614425659,
      "eval_loss": 1.897528052330017,
      "eval_rouge1": 0.6653452054219615,
      "eval_rouge2": 0.3759208437918665,
      "eval_rougeL": 0.5776757077854651,
      "eval_rougeLsum": 0.6511876484723524,
      "eval_runtime": 21.3101,
      "eval_samples_per_second": 1.408,
      "eval_steps_per_second": 0.704,
      "step": 5200
    },
    {
      "epoch": 0.48,
      "grad_norm": 0.16869671642780304,
      "learning_rate": 0.00015186832332309387,
      "loss": 1.8783,
      "step": 5250
    },
    {
      "epoch": 0.5,
      "eval_bertscore": 0.7494469881057739,
      "eval_loss": 1.895969271659851,
      "eval_rouge1": 0.6660951369469854,
      "eval_rouge2": 0.3764077134133328,
      "eval_rougeL": 0.578785826234568,
      "eval_rougeLsum": 0.6525967284041656,
      "eval_runtime": 21.7955,
      "eval_samples_per_second": 1.376,
      "eval_steps_per_second": 0.688,
      "step": 5400
    },
    {
      "epoch": 0.5,
      "grad_norm": 0.15996231138706207,
      "learning_rate": 0.00014957590206776398,
      "loss": 1.8805,
      "step": 5500
    },
    {
      "epoch": 0.51,
      "eval_bertscore": 0.7486470341682434,
      "eval_loss": 1.8955131769180298,
      "eval_rouge1": 0.6670292173522965,
      "eval_rouge2": 0.37457018529010144,
      "eval_rougeL": 0.5775243235432015,
      "eval_rougeLsum": 0.652574079807632,
      "eval_runtime": 21.7576,
      "eval_samples_per_second": 1.379,
      "eval_steps_per_second": 0.689,
      "step": 5600
    },
    {
      "epoch": 0.53,
      "grad_norm": 0.17192547023296356,
      "learning_rate": 0.00014728348081243412,
      "loss": 1.8884,
      "step": 5750
    },
    {
      "epoch": 0.53,
      "eval_bertscore": 0.7483081817626953,
      "eval_loss": 1.895763874053955,
      "eval_rouge1": 0.6659275328276997,
      "eval_rouge2": 0.3778666475350364,
      "eval_rougeL": 0.579425140056643,
      "eval_rougeLsum": 0.6515870828784887,
      "eval_runtime": 21.6648,
      "eval_samples_per_second": 1.385,
      "eval_steps_per_second": 0.692,
      "step": 5800
    },
    {
      "epoch": 0.55,
      "grad_norm": 0.15838442742824554,
      "learning_rate": 0.00014499105955710422,
      "loss": 1.8913,
      "step": 6000
    },
    {
      "epoch": 0.55,
      "eval_bertscore": 0.7493732571601868,
      "eval_loss": 1.8914682865142822,
      "eval_rouge1": 0.6669695240447069,
      "eval_rouge2": 0.3769441114214874,
      "eval_rougeL": 0.5798986667152066,
      "eval_rougeLsum": 0.6534527583592111,
      "eval_runtime": 21.4686,
      "eval_samples_per_second": 1.397,
      "eval_steps_per_second": 0.699,
      "step": 6000
    },
    {
      "epoch": 0.57,
      "eval_bertscore": 0.7510559558868408,
      "eval_loss": 1.8923884630203247,
      "eval_rouge1": 0.6677938121282943,
      "eval_rouge2": 0.37854575387307554,
      "eval_rougeL": 0.5817052753830161,
      "eval_rougeLsum": 0.6534737907551461,
      "eval_runtime": 21.593,
      "eval_samples_per_second": 1.389,
      "eval_steps_per_second": 0.695,
      "step": 6200
    },
    {
      "epoch": 0.57,
      "grad_norm": 0.15312573313713074,
      "learning_rate": 0.00014269863830177433,
      "loss": 1.8705,
      "step": 6250
    },
    {
      "epoch": 0.59,
      "eval_bertscore": 0.7479371428489685,
      "eval_loss": 1.891802430152893,
      "eval_rouge1": 0.6658674357402252,
      "eval_rouge2": 0.3757712649269345,
      "eval_rougeL": 0.5791817270712349,
      "eval_rougeLsum": 0.6509960265397259,
      "eval_runtime": 21.8726,
      "eval_samples_per_second": 1.372,
      "eval_steps_per_second": 0.686,
      "step": 6400
    },
    {
      "epoch": 0.6,
      "grad_norm": 0.15844614803791046,
      "learning_rate": 0.00014040621704644447,
      "loss": 1.8643,
      "step": 6500
    },
    {
      "epoch": 0.61,
      "eval_bertscore": 0.7484550476074219,
      "eval_loss": 1.8903728723526,
      "eval_rouge1": 0.6683828816523312,
      "eval_rouge2": 0.37811618722345436,
      "eval_rougeL": 0.5802581730590705,
      "eval_rougeLsum": 0.6534402764651661,
      "eval_runtime": 21.8343,
      "eval_samples_per_second": 1.374,
      "eval_steps_per_second": 0.687,
      "step": 6600
    },
    {
      "epoch": 0.62,
      "grad_norm": 0.1661410629749298,
      "learning_rate": 0.00013811379579111458,
      "loss": 1.877,
      "step": 6750
    },
    {
      "epoch": 0.62,
      "eval_bertscore": 0.747416615486145,
      "eval_loss": 1.8915189504623413,
      "eval_rouge1": 0.6644777881148224,
      "eval_rouge2": 0.3747657029706615,
      "eval_rougeL": 0.5793454557198501,
      "eval_rougeLsum": 0.6521716611395593,
      "eval_runtime": 21.523,
      "eval_samples_per_second": 1.394,
      "eval_steps_per_second": 0.697,
      "step": 6800
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.16483080387115479,
      "learning_rate": 0.00013582137453578468,
      "loss": 1.8792,
      "step": 7000
    },
    {
      "epoch": 0.64,
      "eval_bertscore": 0.7480576634407043,
      "eval_loss": 1.8913365602493286,
      "eval_rouge1": 0.6655764268912302,
      "eval_rouge2": 0.3757671289735428,
      "eval_rougeL": 0.577951380212153,
      "eval_rougeLsum": 0.6507587412359694,
      "eval_runtime": 21.3067,
      "eval_samples_per_second": 1.408,
      "eval_steps_per_second": 0.704,
      "step": 7000
    },
    {
      "epoch": 0.66,
      "eval_bertscore": 0.7505319714546204,
      "eval_loss": 1.889721155166626,
      "eval_rouge1": 0.6706532239207523,
      "eval_rouge2": 0.37986537729431724,
      "eval_rougeL": 0.5824624008038861,
      "eval_rougeLsum": 0.6571986550416876,
      "eval_runtime": 21.8193,
      "eval_samples_per_second": 1.375,
      "eval_steps_per_second": 0.687,
      "step": 7200
    },
    {
      "epoch": 0.66,
      "grad_norm": 0.1685444712638855,
      "learning_rate": 0.00013352895328045482,
      "loss": 1.8748,
      "step": 7250
    },
    {
      "epoch": 0.68,
      "eval_bertscore": 0.7472131252288818,
      "eval_loss": 1.889514684677124,
      "eval_rouge1": 0.6647481520892182,
      "eval_rouge2": 0.3727968089505218,
      "eval_rougeL": 0.5772333167389081,
      "eval_rougeLsum": 0.6503920840351167,
      "eval_runtime": 21.5794,
      "eval_samples_per_second": 1.39,
      "eval_steps_per_second": 0.695,
      "step": 7400
    },
    {
      "epoch": 0.69,
      "grad_norm": 0.16196218132972717,
      "learning_rate": 0.00013123653202512493,
      "loss": 1.8958,
      "step": 7500
    },
    {
      "epoch": 0.7,
      "eval_bertscore": 0.7467525005340576,
      "eval_loss": 1.8874704837799072,
      "eval_rouge1": 0.6652789954777591,
      "eval_rouge2": 0.3747211875622626,
      "eval_rougeL": 0.5781018250975862,
      "eval_rougeLsum": 0.6512065884264598,
      "eval_runtime": 21.6436,
      "eval_samples_per_second": 1.386,
      "eval_steps_per_second": 0.693,
      "step": 7600
    },
    {
      "epoch": 0.71,
      "grad_norm": 0.17379231750965118,
      "learning_rate": 0.00012894411076979506,
      "loss": 1.8655,
      "step": 7750
    },
    {
      "epoch": 0.72,
      "eval_bertscore": 0.7478018403053284,
      "eval_loss": 1.8879252672195435,
      "eval_rouge1": 0.6676077444849423,
      "eval_rouge2": 0.37550824667101645,
      "eval_rougeL": 0.5792625587400696,
      "eval_rougeLsum": 0.6537654224373248,
      "eval_runtime": 21.8026,
      "eval_samples_per_second": 1.376,
      "eval_steps_per_second": 0.688,
      "step": 7800
    },
    {
      "epoch": 0.73,
      "grad_norm": 0.17975503206253052,
      "learning_rate": 0.00012665168951446517,
      "loss": 1.8593,
      "step": 8000
    },
    {
      "epoch": 0.73,
      "eval_bertscore": 0.7490061521530151,
      "eval_loss": 1.8872514963150024,
      "eval_rouge1": 0.6677074837057098,
      "eval_rouge2": 0.37723681410973775,
      "eval_rougeL": 0.5806554105436175,
      "eval_rougeLsum": 0.6531691046113964,
      "eval_runtime": 21.2682,
      "eval_samples_per_second": 1.411,
      "eval_steps_per_second": 0.705,
      "step": 8000
    },
    {
      "epoch": 0.75,
      "eval_bertscore": 0.7476587295532227,
      "eval_loss": 1.8857940435409546,
      "eval_rouge1": 0.6675733171919529,
      "eval_rouge2": 0.37667421034338344,
      "eval_rougeL": 0.5804128987718613,
      "eval_rougeLsum": 0.6534287804714597,
      "eval_runtime": 21.5325,
      "eval_samples_per_second": 1.393,
      "eval_steps_per_second": 0.697,
      "step": 8200
    },
    {
      "epoch": 0.76,
      "grad_norm": 0.1596900373697281,
      "learning_rate": 0.0001243592682591353,
      "loss": 1.8627,
      "step": 8250
    },
    {
      "epoch": 0.77,
      "eval_bertscore": 0.7444086074829102,
      "eval_loss": 1.8874648809432983,
      "eval_rouge1": 0.6633779669482168,
      "eval_rouge2": 0.3710094509675216,
      "eval_rougeL": 0.5760576627400225,
      "eval_rougeLsum": 0.6499803336918719,
      "eval_runtime": 21.4464,
      "eval_samples_per_second": 1.399,
      "eval_steps_per_second": 0.699,
      "step": 8400
    },
    {
      "epoch": 0.78,
      "grad_norm": 0.16890183091163635,
      "learning_rate": 0.00012206684700380542,
      "loss": 1.8534,
      "step": 8500
    },
    {
      "epoch": 0.79,
      "eval_bertscore": 0.7483052611351013,
      "eval_loss": 1.8880757093429565,
      "eval_rouge1": 0.6686948143176776,
      "eval_rouge2": 0.3803796130427515,
      "eval_rougeL": 0.5802459813261722,
      "eval_rougeLsum": 0.6536962466082527,
      "eval_runtime": 21.5416,
      "eval_samples_per_second": 1.393,
      "eval_steps_per_second": 0.696,
      "step": 8600
    },
    {
      "epoch": 0.8,
      "grad_norm": 0.1596900373697281,
      "learning_rate": 0.00011977442574847555,
      "loss": 1.882,
      "step": 8750
    },
    {
      "epoch": 0.81,
      "eval_bertscore": 0.748338520526886,
      "eval_loss": 1.8871524333953857,
      "eval_rouge1": 0.6673919143770407,
      "eval_rouge2": 0.3761761743795482,
      "eval_rougeL": 0.5797615995019129,
      "eval_rougeLsum": 0.6526650363891257,
      "eval_runtime": 21.8432,
      "eval_samples_per_second": 1.373,
      "eval_steps_per_second": 0.687,
      "step": 8800
    },
    {
      "epoch": 0.83,
      "grad_norm": 0.16380883753299713,
      "learning_rate": 0.00011748200449314565,
      "loss": 1.8781,
      "step": 9000
    },
    {
      "epoch": 0.83,
      "eval_bertscore": 0.7473989129066467,
      "eval_loss": 1.885389804840088,
      "eval_rouge1": 0.6660513187618474,
      "eval_rouge2": 0.3728645884799071,
      "eval_rougeL": 0.5767833607673931,
      "eval_rougeLsum": 0.6518177265346137,
      "eval_runtime": 21.5415,
      "eval_samples_per_second": 1.393,
      "eval_steps_per_second": 0.696,
      "step": 9000
    },
    {
      "epoch": 0.84,
      "eval_bertscore": 0.7469697594642639,
      "eval_loss": 1.8835673332214355,
      "eval_rouge1": 0.6655382276884847,
      "eval_rouge2": 0.3743925229327822,
      "eval_rougeL": 0.5808516524350132,
      "eval_rougeLsum": 0.6518276923554284,
      "eval_runtime": 21.7289,
      "eval_samples_per_second": 1.381,
      "eval_steps_per_second": 0.69,
      "step": 9200
    },
    {
      "epoch": 0.85,
      "grad_norm": 0.17286422848701477,
      "learning_rate": 0.00011518958323781579,
      "loss": 1.8672,
      "step": 9250
    },
    {
      "epoch": 0.86,
      "eval_bertscore": 0.7491498589515686,
      "eval_loss": 1.8845998048782349,
      "eval_rouge1": 0.6670160490080832,
      "eval_rouge2": 0.37860182825781935,
      "eval_rougeL": 0.5797856034485049,
      "eval_rougeLsum": 0.6531203725936218,
      "eval_runtime": 21.5625,
      "eval_samples_per_second": 1.391,
      "eval_steps_per_second": 0.696,
      "step": 9400
    },
    {
      "epoch": 0.87,
      "grad_norm": 0.16658568382263184,
      "learning_rate": 0.0001128971619824859,
      "loss": 1.8691,
      "step": 9500
    },
    {
      "epoch": 0.88,
      "eval_bertscore": 0.7493313550949097,
      "eval_loss": 1.8821747303009033,
      "eval_rouge1": 0.6659791441681278,
      "eval_rouge2": 0.3796033834485131,
      "eval_rougeL": 0.580414529806212,
      "eval_rougeLsum": 0.6528068238734432,
      "eval_runtime": 21.8698,
      "eval_samples_per_second": 1.372,
      "eval_steps_per_second": 0.686,
      "step": 9600
    },
    {
      "epoch": 0.89,
      "grad_norm": 0.1733073741197586,
      "learning_rate": 0.00011060474072715603,
      "loss": 1.8575,
      "step": 9750
    },
    {
      "epoch": 0.9,
      "eval_bertscore": 0.7497690320014954,
      "eval_loss": 1.8809062242507935,
      "eval_rouge1": 0.6683202809005669,
      "eval_rouge2": 0.379647408271533,
      "eval_rougeL": 0.5812799059293663,
      "eval_rougeLsum": 0.6549076224428805,
      "eval_runtime": 21.461,
      "eval_samples_per_second": 1.398,
      "eval_steps_per_second": 0.699,
      "step": 9800
    },
    {
      "epoch": 0.92,
      "grad_norm": 0.16828681528568268,
      "learning_rate": 0.00010831231947182614,
      "loss": 1.8799,
      "step": 10000
    },
    {
      "epoch": 0.92,
      "eval_bertscore": 0.7487274408340454,
      "eval_loss": 1.8800114393234253,
      "eval_rouge1": 0.6694707226380743,
      "eval_rouge2": 0.37780830529690856,
      "eval_rougeL": 0.5789377835641822,
      "eval_rougeLsum": 0.6540561492044448,
      "eval_runtime": 21.6228,
      "eval_samples_per_second": 1.387,
      "eval_steps_per_second": 0.694,
      "step": 10000
    },
    {
      "epoch": 0.94,
      "eval_bertscore": 0.7495086789131165,
      "eval_loss": 1.8811218738555908,
      "eval_rouge1": 0.6714277794869861,
      "eval_rouge2": 0.3814957239141348,
      "eval_rougeL": 0.5817721016839257,
      "eval_rougeLsum": 0.6566092952916721,
      "eval_runtime": 23.1282,
      "eval_samples_per_second": 1.297,
      "eval_steps_per_second": 0.649,
      "step": 10200
    },
    {
      "epoch": 0.94,
      "grad_norm": 0.16498848795890808,
      "learning_rate": 0.00010601989821649627,
      "loss": 1.8656,
      "step": 10250
    },
    {
      "epoch": 0.95,
      "eval_bertscore": 0.749505877494812,
      "eval_loss": 1.8809926509857178,
      "eval_rouge1": 0.6720420767359538,
      "eval_rouge2": 0.38239237549289784,
      "eval_rougeL": 0.5825845512902208,
      "eval_rougeLsum": 0.6590116525116119,
      "eval_runtime": 21.5266,
      "eval_samples_per_second": 1.394,
      "eval_steps_per_second": 0.697,
      "step": 10400
    },
    {
      "epoch": 0.96,
      "grad_norm": 0.1661728322505951,
      "learning_rate": 0.00010372747696116638,
      "loss": 1.8633,
      "step": 10500
    },
    {
      "epoch": 0.97,
      "eval_bertscore": 0.7484509944915771,
      "eval_loss": 1.8795918226242065,
      "eval_rouge1": 0.66861224256168,
      "eval_rouge2": 0.3810938571231235,
      "eval_rougeL": 0.581338929419374,
      "eval_rougeLsum": 0.6556287448758898,
      "eval_runtime": 21.6144,
      "eval_samples_per_second": 1.388,
      "eval_steps_per_second": 0.694,
      "step": 10600
    },
    {
      "epoch": 0.99,
      "grad_norm": 0.1695539355278015,
      "learning_rate": 0.00010143505570583652,
      "loss": 1.8778,
      "step": 10750
    },
    {
      "epoch": 0.99,
      "eval_bertscore": 0.747430145740509,
      "eval_loss": 1.8807307481765747,
      "eval_rouge1": 0.6659775067192504,
      "eval_rouge2": 0.37723044840422537,
      "eval_rougeL": 0.5790798830214317,
      "eval_rougeLsum": 0.6509981906464294,
      "eval_runtime": 21.9658,
      "eval_samples_per_second": 1.366,
      "eval_steps_per_second": 0.683,
      "step": 10800
    },
    {
      "epoch": 1.01,
      "grad_norm": 0.18244074285030365,
      "learning_rate": 9.914263445050664e-05,
      "loss": 1.8425,
      "step": 11000
    },
    {
      "epoch": 1.01,
      "eval_bertscore": 0.7464674711227417,
      "eval_loss": 1.8850181102752686,
      "eval_rouge1": 0.6682062462715245,
      "eval_rouge2": 0.377961045305675,
      "eval_rougeL": 0.5785946041032981,
      "eval_rougeLsum": 0.6544658695180745,
      "eval_runtime": 21.4985,
      "eval_samples_per_second": 1.395,
      "eval_steps_per_second": 0.698,
      "step": 11000
    },
    {
      "epoch": 1.03,
      "eval_bertscore": 0.748903214931488,
      "eval_loss": 1.8819694519042969,
      "eval_rouge1": 0.6702994540242251,
      "eval_rouge2": 0.38293287997414793,
      "eval_rougeL": 0.5814513237567966,
      "eval_rougeLsum": 0.6559726946972199,
      "eval_runtime": 21.4139,
      "eval_samples_per_second": 1.401,
      "eval_steps_per_second": 0.7,
      "step": 11200
    }
  ],
  "logging_steps": 250,
  "max_steps": 21812,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 800,
  "total_flos": 7.549926966086861e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}