{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.5675488430095608,
  "eval_steps": 1024,
  "global_step": 12288,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.011823934229365849,
      "grad_norm": 1.1337779760360718,
      "learning_rate": 1.9615384615384617e-05,
      "loss": 10.3487,
      "step": 256
    },
    {
      "epoch": 0.023647868458731697,
      "grad_norm": 1.2268586158752441,
      "learning_rate": 3.930769230769231e-05,
      "loss": 7.8944,
      "step": 512
    },
    {
      "epoch": 0.03547180268809755,
      "grad_norm": 1.0524195432662964,
      "learning_rate": 4.999617095521894e-05,
      "loss": 5.6243,
      "step": 768
    },
    {
      "epoch": 0.047295736917463395,
      "grad_norm": 0.627362072467804,
      "learning_rate": 4.9961092368776736e-05,
      "loss": 3.7534,
      "step": 1024
    },
    {
      "epoch": 0.047295736917463395,
      "eval_across_var": 0.0030250440715263558,
      "eval_bleu": 0.5480678351145061,
      "eval_ce_loss": 2.361846002813888,
      "eval_cos_loss": 0.9333138501263101,
      "eval_cov": 0.07032504147046233,
      "eval_global_var": 0.2857983732876712,
      "eval_loss": 2.7384781478202505,
      "eval_mse_loss": 1.9327595538744644,
      "eval_per_var": 0.2780265633918379,
      "eval_within_var": 0.2828233463170866,
      "step": 1024
    },
    {
      "epoch": 0.047295736917463395,
      "eval_across_var": 0.0030250440715263558,
      "eval_bleu": 0.5480678351145061,
      "eval_ce_loss": 2.361846002813888,
      "eval_cos_loss": 0.9333138501263101,
      "eval_cov": 0.07032504147046233,
      "eval_global_var": 0.2857983732876712,
      "eval_loss": 2.7384781478202505,
      "eval_mse_loss": 1.9327595538744644,
      "eval_per_var": 0.2780265633918379,
      "eval_runtime": 151.7756,
      "eval_samples_per_second": 184.437,
      "eval_steps_per_second": 2.886,
      "eval_within_var": 0.2828233463170866,
      "step": 1024
    },
    {
      "epoch": 0.05911967114682925,
      "grad_norm": 0.3857733905315399,
      "learning_rate": 4.988941132556799e-05,
      "loss": 2.6721,
      "step": 1280
    },
    {
      "epoch": 0.0709436053761951,
      "grad_norm": 0.3293663561344147,
      "learning_rate": 4.9781232937269974e-05,
      "loss": 2.0559,
      "step": 1536
    },
    {
      "epoch": 0.08276753960556095,
      "grad_norm": 0.2576558291912079,
      "learning_rate": 4.963671583455164e-05,
      "loss": 1.6534,
      "step": 1792
    },
    {
      "epoch": 0.09459147383492679,
      "grad_norm": 0.24152210354804993,
      "learning_rate": 4.945607193446079e-05,
      "loss": 1.3739,
      "step": 2048
    },
    {
      "epoch": 0.09459147383492679,
      "eval_across_var": 0.0044681047000093,
      "eval_bleu": 0.6452645235714047,
      "eval_ce_loss": 0.7001769141247284,
      "eval_cos_loss": 0.7716340200813938,
      "eval_cov": 0.07174250633204908,
      "eval_global_var": 0.4126723476740868,
      "eval_loss": 1.0148827189996363,
      "eval_mse_loss": 1.6643117905751754,
      "eval_per_var": 0.4014117793949772,
      "eval_within_var": 0.40839217984240894,
      "step": 2048
    },
    {
      "epoch": 0.09459147383492679,
      "eval_across_var": 0.0044681047000093,
      "eval_bleu": 0.6452645235714047,
      "eval_ce_loss": 0.7001769141247284,
      "eval_cos_loss": 0.7716340200813938,
      "eval_cov": 0.07174250633204908,
      "eval_global_var": 0.4126723476740868,
      "eval_loss": 1.0148827189996363,
      "eval_mse_loss": 1.6643117905751754,
      "eval_per_var": 0.4014117793949772,
      "eval_runtime": 150.792,
      "eval_samples_per_second": 185.64,
      "eval_steps_per_second": 2.905,
      "eval_within_var": 0.40839217984240894,
      "step": 2048
    },
    {
      "epoch": 0.10641540806429264,
      "grad_norm": 0.18892866373062134,
      "learning_rate": 4.923956612967301e-05,
      "loss": 1.1708,
      "step": 2304
    },
    {
      "epoch": 0.1182393422936585,
      "grad_norm": 0.18101266026496887,
      "learning_rate": 4.898751590005826e-05,
      "loss": 1.0095,
      "step": 2560
    },
    {
      "epoch": 0.13006327652302435,
      "grad_norm": 0.15639935433864594,
      "learning_rate": 4.870029084713462e-05,
      "loss": 0.8841,
      "step": 2816
    },
    {
      "epoch": 0.1418872107523902,
      "grad_norm": 0.14697179198265076,
      "learning_rate": 4.837831215209188e-05,
      "loss": 0.7841,
      "step": 3072
    },
    {
      "epoch": 0.1418872107523902,
      "eval_across_var": 0.0055386394200650935,
      "eval_bleu": 0.7978306727589363,
      "eval_ce_loss": 0.32526156282452146,
      "eval_cos_loss": 0.6155619193974151,
      "eval_cov": 0.0707132696561073,
      "eval_global_var": 0.508560529038242,
      "eval_loss": 0.5787542584143817,
      "eval_mse_loss": 1.3764822866818676,
      "eval_per_var": 0.4943646992722603,
      "eval_within_var": 0.5033302515336912,
      "step": 3072
    },
    {
      "epoch": 0.1418872107523902,
      "eval_across_var": 0.0055386394200650935,
      "eval_bleu": 0.7978306727589363,
      "eval_ce_loss": 0.32526156282452146,
      "eval_cos_loss": 0.6155619193974151,
      "eval_cov": 0.0707132696561073,
      "eval_global_var": 0.508560529038242,
      "eval_loss": 0.5787542584143817,
      "eval_mse_loss": 1.3764822866818676,
      "eval_per_var": 0.4943646992722603,
      "eval_runtime": 150.3038,
      "eval_samples_per_second": 186.243,
      "eval_steps_per_second": 2.914,
      "eval_within_var": 0.5033302515336912,
      "step": 3072
    },
    {
      "epoch": 0.15371114498175603,
      "grad_norm": 0.13465629518032074,
      "learning_rate": 4.802205195817963e-05,
      "loss": 0.7042,
      "step": 3328
    },
    {
      "epoch": 0.1655350792111219,
      "grad_norm": 0.12159380316734314,
      "learning_rate": 4.763203267836576e-05,
      "loss": 0.6359,
      "step": 3584
    },
    {
      "epoch": 0.17735901344048774,
      "grad_norm": 0.11759158223867416,
      "learning_rate": 4.720882622928019e-05,
      "loss": 0.5793,
      "step": 3840
    },
    {
      "epoch": 0.18918294766985358,
      "grad_norm": 0.11546578258275986,
      "learning_rate": 4.675305319256765e-05,
      "loss": 0.5323,
      "step": 4096
    },
    {
      "epoch": 0.18918294766985358,
      "eval_across_var": 0.006188327843136235,
      "eval_bleu": 0.830698384342328,
      "eval_ce_loss": 0.18860791746067673,
      "eval_cos_loss": 0.4954083457793275,
      "eval_cov": 0.0701905690371718,
      "eval_global_var": 0.574416069135274,
      "eval_loss": 0.3946296600582393,
      "eval_mse_loss": 1.1479846312575144,
      "eval_per_var": 0.558419841609589,
      "eval_within_var": 0.5686683095481297,
      "step": 4096
    },
    {
      "epoch": 0.18918294766985358,
      "eval_across_var": 0.006188327843136235,
      "eval_bleu": 0.830698384342328,
      "eval_ce_loss": 0.18860791746067673,
      "eval_cos_loss": 0.4954083457793275,
      "eval_cov": 0.0701905690371718,
      "eval_global_var": 0.574416069135274,
      "eval_loss": 0.3946296600582393,
      "eval_mse_loss": 1.1479846312575144,
      "eval_per_var": 0.558419841609589,
      "eval_runtime": 149.9304,
      "eval_samples_per_second": 186.707,
      "eval_steps_per_second": 2.921,
      "eval_within_var": 0.5686683095481297,
      "step": 4096
    },
    {
      "epoch": 0.20100688189921945,
      "grad_norm": 0.11826465278863907,
      "learning_rate": 4.6265381904878854e-05,
      "loss": 0.4896,
      "step": 4352
    },
    {
      "epoch": 0.2128308161285853,
      "grad_norm": 0.10021153837442398,
      "learning_rate": 4.57465274778347e-05,
      "loss": 0.4576,
      "step": 4608
    },
    {
      "epoch": 0.22465475035795113,
      "grad_norm": 0.10055450350046158,
      "learning_rate": 4.519725074940068e-05,
      "loss": 0.4235,
      "step": 4864
    },
    {
      "epoch": 0.236478684587317,
      "grad_norm": 0.0877346396446228,
      "learning_rate": 4.461835716820895e-05,
      "loss": 0.3965,
      "step": 5120
    },
    {
      "epoch": 0.236478684587317,
      "eval_across_var": 0.006611716227614444,
      "eval_bleu": 0.9513684417284821,
      "eval_ce_loss": 0.12323284007983121,
      "eval_cos_loss": 0.4073775019411627,
      "eval_cov": 0.06992845230450913,
      "eval_global_var": 0.621496191852169,
      "eval_loss": 0.2943976874969321,
      "eval_mse_loss": 0.9790318277600694,
      "eval_per_var": 0.604084171660959,
      "eval_within_var": 0.615415731113251,
      "step": 5120
    },
    {
      "epoch": 0.236478684587317,
      "eval_across_var": 0.006611716227614444,
      "eval_bleu": 0.9513684417284821,
      "eval_ce_loss": 0.12323284007983121,
      "eval_cos_loss": 0.4073775019411627,
      "eval_cov": 0.06992845230450913,
      "eval_global_var": 0.621496191852169,
      "eval_loss": 0.2943976874969321,
      "eval_mse_loss": 0.9790318277600694,
      "eval_per_var": 0.604084171660959,
      "eval_runtime": 150.6714,
      "eval_samples_per_second": 185.788,
      "eval_steps_per_second": 2.907,
      "eval_within_var": 0.615415731113251,
      "step": 5120
    },
    {
      "epoch": 0.24830261881668284,
      "grad_norm": 0.0914112776517868,
      "learning_rate": 4.401069561246422e-05,
      "loss": 0.3725,
      "step": 5376
    },
    {
      "epoch": 0.2601265530460487,
      "grad_norm": 0.10334540158510208,
      "learning_rate": 4.337515714516545e-05,
      "loss": 0.3507,
      "step": 5632
    },
    {
      "epoch": 0.27195048727541454,
      "grad_norm": 0.09348654747009277,
      "learning_rate": 4.2712673707468434e-05,
      "loss": 0.335,
      "step": 5888
    },
    {
      "epoch": 0.2837744215047804,
      "grad_norm": 0.08614271879196167,
      "learning_rate": 4.202421675210565e-05,
      "loss": 0.3182,
      "step": 6144
    },
    {
      "epoch": 0.2837744215047804,
      "eval_across_var": 0.006925537744239313,
      "eval_bleu": 0.9630394430269803,
      "eval_ce_loss": 0.08711560829181104,
      "eval_cos_loss": 0.34602166433312576,
      "eval_cov": 0.06972263283925513,
      "eval_global_var": 0.6587248501712328,
      "eval_loss": 0.23407978237900015,
      "eval_mse_loss": 0.8631533832038374,
      "eval_per_var": 0.6400832084760274,
      "eval_within_var": 0.6524244369981496,
      "step": 6144
    },
    {
      "epoch": 0.2837744215047804,
      "eval_across_var": 0.006925537744239313,
      "eval_bleu": 0.9630394430269803,
      "eval_ce_loss": 0.08711560829181104,
      "eval_cos_loss": 0.34602166433312576,
      "eval_cov": 0.06972263283925513,
      "eval_global_var": 0.6587248501712328,
      "eval_loss": 0.23407978237900015,
      "eval_mse_loss": 0.8631533832038374,
      "eval_per_var": 0.6400832084760274,
      "eval_runtime": 149.2005,
      "eval_samples_per_second": 187.62,
      "eval_steps_per_second": 2.936,
      "eval_within_var": 0.6524244369981496,
      "step": 6144
    },
    {
      "epoch": 0.2955983557341462,
      "grad_norm": 0.08157943934202194,
      "learning_rate": 4.131079581886694e-05,
      "loss": 0.3007,
      "step": 6400
    },
    {
      "epoch": 0.30742228996351206,
      "grad_norm": 0.08066653460264206,
      "learning_rate": 4.057345705423016e-05,
      "loss": 0.2877,
      "step": 6656
    },
    {
      "epoch": 0.3192462241928779,
      "grad_norm": 0.08315503597259521,
      "learning_rate": 3.981328167731251e-05,
      "loss": 0.277,
      "step": 6912
    },
    {
      "epoch": 0.3310701584222438,
      "grad_norm": 0.09480269253253937,
      "learning_rate": 3.9031384394391954e-05,
      "loss": 0.263,
      "step": 7168
    },
    {
      "epoch": 0.3310701584222438,
      "eval_across_var": 0.007185437002104439,
      "eval_bleu": 0.7796619623453035,
      "eval_ce_loss": 0.06453313103549557,
      "eval_cos_loss": 0.3031023931285562,
      "eval_cov": 0.06950845239369292,
      "eval_global_var": 0.6862046054509132,
      "eval_loss": 0.19471138987911346,
      "eval_mse_loss": 0.7849507213455357,
      "eval_per_var": 0.6667993275542238,
      "eval_within_var": 0.679719850761161,
      "step": 7168
    },
    {
      "epoch": 0.3310701584222438,
      "eval_across_var": 0.007185437002104439,
      "eval_bleu": 0.7796619623453035,
      "eval_ce_loss": 0.06453313103549557,
      "eval_cos_loss": 0.3031023931285562,
      "eval_cov": 0.06950845239369292,
      "eval_global_var": 0.6862046054509132,
      "eval_loss": 0.19471138987911346,
      "eval_mse_loss": 0.7849507213455357,
      "eval_per_var": 0.6667993275542238,
      "eval_runtime": 153.3468,
      "eval_samples_per_second": 182.547,
      "eval_steps_per_second": 2.856,
      "eval_within_var": 0.679719850761161,
      "step": 7168
    },
    {
      "epoch": 0.34289409265160964,
      "grad_norm": 0.08880002796649933,
      "learning_rate": 3.822891176432382e-05,
      "loss": 0.2551,
      "step": 7424
    },
    {
      "epoch": 0.3547180268809755,
      "grad_norm": 0.0781559944152832,
      "learning_rate": 3.7407040517249335e-05,
      "loss": 0.2457,
      "step": 7680
    },
    {
      "epoch": 0.3665419611103413,
      "grad_norm": 0.07617169618606567,
      "learning_rate": 3.6566975829061614e-05,
      "loss": 0.2364,
      "step": 7936
    },
    {
      "epoch": 0.37836589533970716,
      "grad_norm": 0.06962994486093521,
      "learning_rate": 3.5709949554159355e-05,
      "loss": 0.2276,
      "step": 8192
    },
    {
      "epoch": 0.37836589533970716,
      "eval_across_var": 0.007326101608889084,
      "eval_bleu": 0.8712716886983086,
      "eval_ce_loss": 0.05016031793688666,
      "eval_cos_loss": 0.2737333217644256,
      "eval_cov": 0.06918223481200057,
      "eval_global_var": 0.7070301352026256,
      "eval_loss": 0.16903498234678077,
      "eval_mse_loss": 0.7350932731203836,
      "eval_per_var": 0.6870072595605022,
      "eval_within_var": 0.7004638244844463,
      "step": 8192
    },
    {
      "epoch": 0.37836589533970716,
      "eval_across_var": 0.007326101608889084,
      "eval_bleu": 0.8712716886983086,
      "eval_ce_loss": 0.05016031793688666,
      "eval_cos_loss": 0.2737333217644256,
      "eval_cov": 0.06918223481200057,
      "eval_global_var": 0.7070301352026256,
      "eval_loss": 0.16903498234678077,
      "eval_mse_loss": 0.7350932731203836,
      "eval_per_var": 0.6870072595605022,
      "eval_runtime": 253.015,
      "eval_samples_per_second": 110.638,
      "eval_steps_per_second": 1.731,
      "eval_within_var": 0.7004638244844463,
      "step": 8192
    },
    {
      "epoch": 0.390189829569073,
      "grad_norm": 0.07793359458446503,
      "learning_rate": 3.483721841907964e-05,
      "loss": 0.2218,
      "step": 8448
    },
    {
      "epoch": 0.4020137637984389,
      "grad_norm": 0.09409929066896439,
      "learning_rate": 3.395006217965885e-05,
      "loss": 0.2157,
      "step": 8704
    },
    {
      "epoch": 0.41383769802780473,
      "grad_norm": 0.07621984928846359,
      "learning_rate": 3.3049781744423665e-05,
      "loss": 0.2083,
      "step": 8960
    },
    {
      "epoch": 0.4256616322571706,
      "grad_norm": 0.07183349132537842,
      "learning_rate": 3.213769726696439e-05,
      "loss": 0.2036,
      "step": 9216
    },
    {
      "epoch": 0.4256616322571706,
      "eval_across_var": 0.007464996506098539,
      "eval_bleu": 0.8745999403603312,
      "eval_ce_loss": 0.04043501646568378,
      "eval_cos_loss": 0.2525300460114871,
      "eval_cov": 0.0689811532355879,
      "eval_global_var": 0.7244934360730594,
      "eval_loss": 0.15128187069745913,
      "eval_mse_loss": 0.7017567277498985,
      "eval_per_var": 0.703834011130137,
      "eval_within_var": 0.717845245039082,
      "step": 9216
    },
    {
      "epoch": 0.4256616322571706,
      "eval_across_var": 0.007464996506098539,
      "eval_bleu": 0.8745999403603312,
      "eval_ce_loss": 0.04043501646568378,
      "eval_cos_loss": 0.2525300460114871,
      "eval_cov": 0.0689811532355879,
      "eval_global_var": 0.7244934360730594,
      "eval_loss": 0.15128187069745913,
      "eval_mse_loss": 0.7017567277498985,
      "eval_per_var": 0.703834011130137,
      "eval_runtime": 147.2422,
      "eval_samples_per_second": 190.115,
      "eval_steps_per_second": 2.975,
      "eval_within_var": 0.717845245039082,
      "step": 9216
    },
    {
      "epoch": 0.4374855664865364,
      "grad_norm": 0.07419689744710922,
      "learning_rate": 3.121514621008757e-05,
      "loss": 0.1988,
      "step": 9472
    },
    {
      "epoch": 0.44930950071590225,
      "grad_norm": 0.07019222527742386,
      "learning_rate": 3.0283481384586697e-05,
      "loss": 0.1953,
      "step": 9728
    },
    {
      "epoch": 0.4611334349452681,
      "grad_norm": 0.08114204555749893,
      "learning_rate": 2.9344068965507027e-05,
      "loss": 0.1903,
      "step": 9984
    },
    {
      "epoch": 0.472957369174634,
      "grad_norm": 0.07998168468475342,
      "learning_rate": 2.839828648881323e-05,
      "loss": 0.1878,
      "step": 10240
    },
    {
      "epoch": 0.472957369174634,
      "eval_across_var": 0.00757191778101095,
      "eval_bleu": 0.8768266780689754,
      "eval_ce_loss": 0.033580983376564226,
      "eval_cos_loss": 0.23719462929250987,
      "eval_cov": 0.06886953414847317,
      "eval_global_var": 0.7387516944920092,
      "eval_loss": 0.1387409362856928,
      "eval_mse_loss": 0.6800311979365675,
      "eval_per_var": 0.7177277308076484,
      "eval_within_var": 0.7320368596135753,
      "step": 10240
    },
    {
      "epoch": 0.472957369174634,
      "eval_across_var": 0.00757191778101095,
      "eval_bleu": 0.8768266780689754,
      "eval_ce_loss": 0.033580983376564226,
      "eval_cos_loss": 0.23719462929250987,
      "eval_cov": 0.06886953414847317,
      "eval_global_var": 0.7387516944920092,
      "eval_loss": 0.1387409362856928,
      "eval_mse_loss": 0.6800311979365675,
      "eval_per_var": 0.7177277308076484,
      "eval_runtime": 156.8381,
      "eval_samples_per_second": 178.483,
      "eval_steps_per_second": 2.793,
      "eval_within_var": 0.7320368596135753,
      "step": 10240
    },
    {
      "epoch": 0.48478130340399983,
      "grad_norm": 0.08175234496593475,
      "learning_rate": 2.7447520831397623e-05,
      "loss": 0.1832,
      "step": 10496
    },
    {
      "epoch": 0.49660523763336567,
      "grad_norm": 0.07749126106500626,
      "learning_rate": 2.6493166177391138e-05,
      "loss": 0.1793,
      "step": 10752
    },
    {
      "epoch": 0.5084291718627315,
      "grad_norm": 0.06436038762331009,
      "learning_rate": 2.5536621973758952e-05,
      "loss": 0.1781,
      "step": 11008
    },
    {
      "epoch": 0.5202531060920974,
      "grad_norm": 0.08284164220094681,
      "learning_rate": 2.4579290878178904e-05,
      "loss": 0.1743,
      "step": 11264
    },
    {
      "epoch": 0.5202531060920974,
      "eval_across_var": 0.00768219434962391,
      "eval_bleu": 0.87828493142736,
      "eval_ce_loss": 0.028931678281288984,
      "eval_cos_loss": 0.22550881647219942,
      "eval_cov": 0.06867876444777397,
      "eval_global_var": 0.7501025613584474,
      "eval_loss": 0.1298226385492168,
      "eval_mse_loss": 0.6647662283623055,
      "eval_per_var": 0.7285691352739726,
      "eval_within_var": 0.7433075608183805,
      "step": 11264
    },
    {
      "epoch": 0.5202531060920974,
      "eval_across_var": 0.00768219434962391,
      "eval_bleu": 0.87828493142736,
      "eval_ce_loss": 0.028931678281288984,
      "eval_cos_loss": 0.22550881647219942,
      "eval_cov": 0.06867876444777397,
      "eval_global_var": 0.7501025613584474,
      "eval_loss": 0.1298226385492168,
      "eval_mse_loss": 0.6647662283623055,
      "eval_per_var": 0.7285691352739726,
      "eval_runtime": 147.0377,
      "eval_samples_per_second": 190.38,
      "eval_steps_per_second": 2.979,
      "eval_within_var": 0.7433075608183805,
      "step": 11264
    },
    {
      "epoch": 0.5320770403214632,
      "grad_norm": 0.08053084462881088,
      "learning_rate": 2.362257670221181e-05,
      "loss": 0.1714,
      "step": 11520
    },
    {
      "epoch": 0.5439009745508291,
      "grad_norm": 0.06910347938537598,
      "learning_rate": 2.2667882352779608e-05,
      "loss": 0.1699,
      "step": 11776
    },
    {
      "epoch": 0.5557249087801949,
      "grad_norm": 0.06619574874639511,
      "learning_rate": 2.17166077749702e-05,
      "loss": 0.1677,
      "step": 12032
    },
    {
      "epoch": 0.5675488430095608,
      "grad_norm": 0.07791094481945038,
      "learning_rate": 2.0770147899185404e-05,
      "loss": 0.1665,
      "step": 12288
    },
    {
      "epoch": 0.5675488430095608,
      "eval_across_var": 0.007755262901598256,
      "eval_bleu": 0.8793172460885812,
      "eval_ce_loss": 0.02565789532520251,
      "eval_cos_loss": 0.2167755546022768,
      "eval_cov": 0.06850555280572203,
      "eval_global_var": 0.759103435359589,
      "eval_loss": 0.12342031796773274,
      "eval_mse_loss": 0.6545950485963256,
      "eval_per_var": 0.73747079230879,
      "eval_within_var": 0.7522586970568792,
      "step": 12288
    },
    {
      "epoch": 0.5675488430095608,
      "eval_across_var": 0.007755262901598256,
      "eval_bleu": 0.8793172460885812,
      "eval_ce_loss": 0.02565789532520251,
      "eval_cos_loss": 0.2167755546022768,
      "eval_cov": 0.06850555280572203,
      "eval_global_var": 0.759103435359589,
      "eval_loss": 0.12342031796773274,
      "eval_mse_loss": 0.6545950485963256,
      "eval_per_var": 0.73747079230879,
      "eval_runtime": 145.6821,
      "eval_samples_per_second": 192.151,
      "eval_steps_per_second": 3.007,
      "eval_within_var": 0.7522586970568792,
      "step": 12288
    }
  ],
  "logging_steps": 256,
  "max_steps": 21651,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 1024,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 0.0,
  "train_batch_size": 64,
  "trial_name": null,
  "trial_params": null
}