{
  "best_metric": 5.84649133682251,
  "best_model_checkpoint": "./results/models/checkpoint-23424",
  "epoch": 12.0,
  "eval_steps": 500,
  "global_step": 23424,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.26,
      "learning_rate": 0.009948770491803278,
      "loss": 5.9831,
      "step": 500
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.009897540983606557,
      "loss": 5.9498,
      "step": 1000
    },
    {
      "epoch": 0.77,
      "learning_rate": 0.009846311475409836,
      "loss": 5.9616,
      "step": 1500
    },
    {
      "epoch": 1.0,
      "eval_loss": 5.937765121459961,
      "eval_runtime": 3.4294,
      "eval_samples_per_second": 291.596,
      "eval_steps_per_second": 0.583,
      "step": 1952
    },
    {
      "epoch": 1.02,
      "learning_rate": 0.009795081967213116,
      "loss": 5.9374,
      "step": 2000
    },
    {
      "epoch": 1.28,
      "learning_rate": 0.009743852459016393,
      "loss": 5.9391,
      "step": 2500
    },
    {
      "epoch": 1.54,
      "learning_rate": 0.009692622950819673,
      "loss": 5.9287,
      "step": 3000
    },
    {
      "epoch": 1.79,
      "learning_rate": 0.00964139344262295,
      "loss": 5.9149,
      "step": 3500
    },
    {
      "epoch": 2.0,
      "eval_loss": 5.907990455627441,
      "eval_runtime": 3.1854,
      "eval_samples_per_second": 313.928,
      "eval_steps_per_second": 0.628,
      "step": 3904
    },
    {
      "epoch": 2.05,
      "learning_rate": 0.00959016393442623,
      "loss": 5.9179,
      "step": 4000
    },
    {
      "epoch": 2.31,
      "learning_rate": 0.009538934426229509,
      "loss": 5.9105,
      "step": 4500
    },
    {
      "epoch": 2.56,
      "learning_rate": 0.009487704918032787,
      "loss": 5.9086,
      "step": 5000
    },
    {
      "epoch": 2.82,
      "learning_rate": 0.009436475409836066,
      "loss": 5.899,
      "step": 5500
    },
    {
      "epoch": 3.0,
      "eval_loss": 5.894569396972656,
      "eval_runtime": 3.0831,
      "eval_samples_per_second": 324.347,
      "eval_steps_per_second": 0.649,
      "step": 5856
    },
    {
      "epoch": 3.07,
      "learning_rate": 0.009385245901639345,
      "loss": 5.8975,
      "step": 6000
    },
    {
      "epoch": 3.33,
      "learning_rate": 0.009334016393442625,
      "loss": 5.8936,
      "step": 6500
    },
    {
      "epoch": 3.59,
      "learning_rate": 0.009282786885245902,
      "loss": 5.8886,
      "step": 7000
    },
    {
      "epoch": 3.84,
      "learning_rate": 0.00923155737704918,
      "loss": 5.8882,
      "step": 7500
    },
    {
      "epoch": 4.0,
      "eval_loss": 5.879088878631592,
      "eval_runtime": 3.2638,
      "eval_samples_per_second": 306.393,
      "eval_steps_per_second": 0.613,
      "step": 7808
    },
    {
      "epoch": 4.1,
      "learning_rate": 0.009180327868852459,
      "loss": 5.883,
      "step": 8000
    },
    {
      "epoch": 4.35,
      "learning_rate": 0.009129098360655738,
      "loss": 5.879,
      "step": 8500
    },
    {
      "epoch": 4.61,
      "learning_rate": 0.009077868852459018,
      "loss": 5.8752,
      "step": 9000
    },
    {
      "epoch": 4.87,
      "learning_rate": 0.009026639344262295,
      "loss": 5.8742,
      "step": 9500
    },
    {
      "epoch": 5.0,
      "eval_loss": 5.866305828094482,
      "eval_runtime": 3.2,
      "eval_samples_per_second": 312.496,
      "eval_steps_per_second": 0.625,
      "step": 9760
    },
    {
      "epoch": 5.12,
      "learning_rate": 0.008975409836065575,
      "loss": 5.8721,
      "step": 10000
    },
    {
      "epoch": 5.38,
      "learning_rate": 0.008924180327868852,
      "loss": 5.8674,
      "step": 10500
    },
    {
      "epoch": 5.64,
      "learning_rate": 0.008872950819672131,
      "loss": 5.8722,
      "step": 11000
    },
    {
      "epoch": 5.89,
      "learning_rate": 0.00882172131147541,
      "loss": 5.8702,
      "step": 11500
    },
    {
      "epoch": 6.0,
      "eval_loss": 5.863623142242432,
      "eval_runtime": 3.1064,
      "eval_samples_per_second": 321.914,
      "eval_steps_per_second": 0.644,
      "step": 11712
    },
    {
      "epoch": 6.15,
      "learning_rate": 0.008770491803278688,
      "loss": 5.8684,
      "step": 12000
    },
    {
      "epoch": 6.4,
      "learning_rate": 0.008719262295081968,
      "loss": 5.8664,
      "step": 12500
    },
    {
      "epoch": 6.66,
      "learning_rate": 0.008668032786885245,
      "loss": 5.8657,
      "step": 13000
    },
    {
      "epoch": 6.92,
      "learning_rate": 0.008616803278688525,
      "loss": 5.8616,
      "step": 13500
    },
    {
      "epoch": 7.0,
      "eval_loss": 5.859433174133301,
      "eval_runtime": 3.1303,
      "eval_samples_per_second": 319.454,
      "eval_steps_per_second": 0.639,
      "step": 13664
    },
    {
      "epoch": 7.17,
      "learning_rate": 0.008565573770491804,
      "loss": 5.8638,
      "step": 14000
    },
    {
      "epoch": 7.43,
      "learning_rate": 0.008514344262295082,
      "loss": 5.8628,
      "step": 14500
    },
    {
      "epoch": 7.68,
      "learning_rate": 0.008463114754098361,
      "loss": 5.8659,
      "step": 15000
    },
    {
      "epoch": 7.94,
      "learning_rate": 0.008411885245901638,
      "loss": 5.8659,
      "step": 15500
    },
    {
      "epoch": 8.0,
      "eval_loss": 5.862513065338135,
      "eval_runtime": 3.0899,
      "eval_samples_per_second": 323.637,
      "eval_steps_per_second": 0.647,
      "step": 15616
    },
    {
      "epoch": 8.2,
      "learning_rate": 0.008360655737704918,
      "loss": 5.8653,
      "step": 16000
    },
    {
      "epoch": 8.45,
      "learning_rate": 0.008309426229508197,
      "loss": 5.8625,
      "step": 16500
    },
    {
      "epoch": 8.71,
      "learning_rate": 0.008258196721311475,
      "loss": 5.8673,
      "step": 17000
    },
    {
      "epoch": 8.97,
      "learning_rate": 0.008206967213114754,
      "loss": 5.8662,
      "step": 17500
    },
    {
      "epoch": 9.0,
      "eval_loss": 5.861349105834961,
      "eval_runtime": 3.0909,
      "eval_samples_per_second": 323.532,
      "eval_steps_per_second": 0.647,
      "step": 17568
    },
    {
      "epoch": 9.22,
      "learning_rate": 0.008155737704918033,
      "loss": 5.8644,
      "step": 18000
    },
    {
      "epoch": 9.48,
      "learning_rate": 0.008104508196721313,
      "loss": 5.8615,
      "step": 18500
    },
    {
      "epoch": 9.73,
      "learning_rate": 0.00805327868852459,
      "loss": 5.8613,
      "step": 19000
    },
    {
      "epoch": 9.99,
      "learning_rate": 0.00800204918032787,
      "loss": 5.8591,
      "step": 19500
    },
    {
      "epoch": 10.0,
      "eval_loss": 5.853126525878906,
      "eval_runtime": 3.1333,
      "eval_samples_per_second": 319.15,
      "eval_steps_per_second": 0.638,
      "step": 19520
    },
    {
      "epoch": 10.25,
      "learning_rate": 0.007950819672131147,
      "loss": 5.8565,
      "step": 20000
    },
    {
      "epoch": 10.5,
      "learning_rate": 0.007899590163934427,
      "loss": 5.8547,
      "step": 20500
    },
    {
      "epoch": 10.76,
      "learning_rate": 0.007848360655737706,
      "loss": 5.8554,
      "step": 21000
    },
    {
      "epoch": 11.0,
      "eval_loss": 5.850937843322754,
      "eval_runtime": 3.092,
      "eval_samples_per_second": 323.419,
      "eval_steps_per_second": 0.647,
      "step": 21472
    },
    {
      "epoch": 11.01,
      "learning_rate": 0.0077971311475409835,
      "loss": 5.8557,
      "step": 21500
    },
    {
      "epoch": 11.27,
      "learning_rate": 0.007745901639344263,
      "loss": 5.8544,
      "step": 22000
    },
    {
      "epoch": 11.53,
      "learning_rate": 0.007694672131147541,
      "loss": 5.8536,
      "step": 22500
    },
    {
      "epoch": 11.78,
      "learning_rate": 0.0076434426229508206,
      "loss": 5.8531,
      "step": 23000
    },
    {
      "epoch": 12.0,
      "eval_loss": 5.84649133682251,
      "eval_runtime": 3.1942,
      "eval_samples_per_second": 313.065,
      "eval_steps_per_second": 0.626,
      "step": 23424
    }
  ],
  "logging_steps": 500,
  "max_steps": 97600,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 50,
  "save_steps": 500,
  "total_flos": 5.51042373648384e+17,
  "train_batch_size": 512,
  "trial_name": null,
  "trial_params": null
}