{
  "best_metric": 42.9505,
  "best_model_checkpoint": "opus_base_adapt_wce_gloss_train-sampled_bands_5_ubweight_1.25/checkpoint-80000",
  "epoch": 3.943217665615142,
  "global_step": 80000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2,
      "learning_rate": 1.9753918572555207e-05,
      "loss": 0.2034,
      "step": 4000
    },
    {
      "epoch": 0.2,
      "eval_bleu": 35.1893,
      "eval_gen_len": 40.3663,
      "eval_loss": 0.10897237807512283,
      "eval_runtime": 177.0634,
      "eval_samples_per_second": 5.891,
      "eval_steps_per_second": 0.186,
      "step": 4000
    },
    {
      "epoch": 0.39,
      "learning_rate": 1.950746746845426e-05,
      "loss": 0.1705,
      "step": 8000
    },
    {
      "epoch": 0.39,
      "eval_bleu": 37.7679,
      "eval_gen_len": 37.7632,
      "eval_loss": 0.1064961776137352,
      "eval_runtime": 157.7862,
      "eval_samples_per_second": 6.61,
      "eval_steps_per_second": 0.209,
      "step": 8000
    },
    {
      "epoch": 0.59,
      "learning_rate": 1.9261016364353314e-05,
      "loss": 0.1637,
      "step": 12000
    },
    {
      "epoch": 0.59,
      "eval_bleu": 41.5566,
      "eval_gen_len": 34.2157,
      "eval_loss": 0.10526148974895477,
      "eval_runtime": 141.3674,
      "eval_samples_per_second": 7.378,
      "eval_steps_per_second": 0.233,
      "step": 12000
    },
    {
      "epoch": 0.79,
      "learning_rate": 1.9014565260252367e-05,
      "loss": 0.1583,
      "step": 16000
    },
    {
      "epoch": 0.79,
      "eval_bleu": 41.4171,
      "eval_gen_len": 35.5072,
      "eval_loss": 0.104159876704216,
      "eval_runtime": 123.7589,
      "eval_samples_per_second": 8.428,
      "eval_steps_per_second": 0.267,
      "step": 16000
    },
    {
      "epoch": 0.99,
      "learning_rate": 1.876811415615142e-05,
      "loss": 0.156,
      "step": 20000
    },
    {
      "epoch": 0.99,
      "eval_bleu": 41.7523,
      "eval_gen_len": 35.6031,
      "eval_loss": 0.10359229892492294,
      "eval_runtime": 148.6643,
      "eval_samples_per_second": 7.016,
      "eval_steps_per_second": 0.222,
      "step": 20000
    },
    {
      "epoch": 1.18,
      "learning_rate": 1.8521786277602524e-05,
      "loss": 0.1446,
      "step": 24000
    },
    {
      "epoch": 1.18,
      "eval_bleu": 41.5818,
      "eval_gen_len": 35.7987,
      "eval_loss": 0.10351784527301788,
      "eval_runtime": 131.6747,
      "eval_samples_per_second": 7.921,
      "eval_steps_per_second": 0.251,
      "step": 24000
    },
    {
      "epoch": 1.38,
      "learning_rate": 1.8275520011829655e-05,
      "loss": 0.1444,
      "step": 28000
    },
    {
      "epoch": 1.38,
      "eval_bleu": 41.7604,
      "eval_gen_len": 35.0968,
      "eval_loss": 0.1027546375989914,
      "eval_runtime": 137.6573,
      "eval_samples_per_second": 7.577,
      "eval_steps_per_second": 0.24,
      "step": 28000
    },
    {
      "epoch": 1.58,
      "learning_rate": 1.8029130520504733e-05,
      "loss": 0.144,
      "step": 32000
    },
    {
      "epoch": 1.58,
      "eval_bleu": 41.9671,
      "eval_gen_len": 35.488,
      "eval_loss": 0.10217112302780151,
      "eval_runtime": 124.3235,
      "eval_samples_per_second": 8.389,
      "eval_steps_per_second": 0.265,
      "step": 32000
    },
    {
      "epoch": 1.77,
      "learning_rate": 1.7782802641955836e-05,
      "loss": 0.1426,
      "step": 36000
    },
    {
      "epoch": 1.77,
      "eval_bleu": 41.2336,
      "eval_gen_len": 36.3039,
      "eval_loss": 0.1017456203699112,
      "eval_runtime": 112.1734,
      "eval_samples_per_second": 9.298,
      "eval_steps_per_second": 0.294,
      "step": 36000
    },
    {
      "epoch": 1.97,
      "learning_rate": 1.7536413150630914e-05,
      "loss": 0.1418,
      "step": 40000
    },
    {
      "epoch": 1.97,
      "eval_bleu": 42.312,
      "eval_gen_len": 34.4314,
      "eval_loss": 0.10187986493110657,
      "eval_runtime": 114.096,
      "eval_samples_per_second": 9.141,
      "eval_steps_per_second": 0.289,
      "step": 40000
    },
    {
      "epoch": 2.17,
      "learning_rate": 1.729008527208202e-05,
      "loss": 0.1344,
      "step": 44000
    },
    {
      "epoch": 2.17,
      "eval_bleu": 42.1662,
      "eval_gen_len": 35.4765,
      "eval_loss": 0.10221881419420242,
      "eval_runtime": 123.1753,
      "eval_samples_per_second": 8.468,
      "eval_steps_per_second": 0.268,
      "step": 44000
    },
    {
      "epoch": 2.37,
      "learning_rate": 1.70436957807571e-05,
      "loss": 0.1335,
      "step": 48000
    },
    {
      "epoch": 2.37,
      "eval_bleu": 42.1347,
      "eval_gen_len": 35.303,
      "eval_loss": 0.10202226787805557,
      "eval_runtime": 136.0863,
      "eval_samples_per_second": 7.664,
      "eval_steps_per_second": 0.242,
      "step": 48000
    },
    {
      "epoch": 2.56,
      "learning_rate": 1.6797367902208205e-05,
      "loss": 0.1325,
      "step": 52000
    },
    {
      "epoch": 2.56,
      "eval_bleu": 42.362,
      "eval_gen_len": 34.1179,
      "eval_loss": 0.10167563706636429,
      "eval_runtime": 138.7955,
      "eval_samples_per_second": 7.515,
      "eval_steps_per_second": 0.238,
      "step": 52000
    },
    {
      "epoch": 2.76,
      "learning_rate": 1.6551040023659308e-05,
      "loss": 0.1327,
      "step": 56000
    },
    {
      "epoch": 2.76,
      "eval_bleu": 42.0947,
      "eval_gen_len": 35.8955,
      "eval_loss": 0.10157355666160583,
      "eval_runtime": 162.7774,
      "eval_samples_per_second": 6.408,
      "eval_steps_per_second": 0.203,
      "step": 56000
    },
    {
      "epoch": 2.96,
      "learning_rate": 1.630471214511041e-05,
      "loss": 0.1331,
      "step": 60000
    },
    {
      "epoch": 2.96,
      "eval_bleu": 42.504,
      "eval_gen_len": 34.5292,
      "eval_loss": 0.10091494768857956,
      "eval_runtime": 143.7894,
      "eval_samples_per_second": 7.254,
      "eval_steps_per_second": 0.23,
      "step": 60000
    },
    {
      "epoch": 3.15,
      "learning_rate": 1.6058384266561514e-05,
      "loss": 0.1268,
      "step": 64000
    },
    {
      "epoch": 3.15,
      "eval_bleu": 41.9688,
      "eval_gen_len": 33.8217,
      "eval_loss": 0.10167799890041351,
      "eval_runtime": 128.1156,
      "eval_samples_per_second": 8.141,
      "eval_steps_per_second": 0.258,
      "step": 64000
    },
    {
      "epoch": 3.35,
      "learning_rate": 1.5812118000788645e-05,
      "loss": 0.1263,
      "step": 68000
    },
    {
      "epoch": 3.35,
      "eval_bleu": 42.6248,
      "eval_gen_len": 34.6213,
      "eval_loss": 0.1019180566072464,
      "eval_runtime": 136.0479,
      "eval_samples_per_second": 7.666,
      "eval_steps_per_second": 0.243,
      "step": 68000
    },
    {
      "epoch": 3.55,
      "learning_rate": 1.5565728509463723e-05,
      "loss": 0.1251,
      "step": 72000
    },
    {
      "epoch": 3.55,
      "eval_bleu": 42.3349,
      "eval_gen_len": 35.0364,
      "eval_loss": 0.10193591564893723,
      "eval_runtime": 101.1208,
      "eval_samples_per_second": 10.314,
      "eval_steps_per_second": 0.326,
      "step": 72000
    },
    {
      "epoch": 3.75,
      "learning_rate": 1.5319400630914826e-05,
      "loss": 0.1256,
      "step": 76000
    },
    {
      "epoch": 3.75,
      "eval_bleu": 41.4387,
      "eval_gen_len": 37.6692,
      "eval_loss": 0.10164961963891983,
      "eval_runtime": 125.4417,
      "eval_samples_per_second": 8.315,
      "eval_steps_per_second": 0.263,
      "step": 76000
    },
    {
      "epoch": 3.94,
      "learning_rate": 1.5073011139589906e-05,
      "loss": 0.1264,
      "step": 80000
    },
    {
      "epoch": 3.94,
      "eval_bleu": 42.9505,
      "eval_gen_len": 34.2924,
      "eval_loss": 0.10143885016441345,
      "eval_runtime": 116.3426,
      "eval_samples_per_second": 8.965,
      "eval_steps_per_second": 0.284,
      "step": 80000
    }
  ],
  "max_steps": 324608,
  "num_train_epochs": 16,
  "total_flos": 1.3796012702564352e+17,
  "trial_name": null,
  "trial_params": null
}