{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9823182711198428,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.02,
      "learning_rate": 8.999464304375242e-06,
      "loss": 0.7019,
      "step": 50
    },
    {
      "epoch": 0.05,
      "learning_rate": 8.997857345043108e-06,
      "loss": 0.6739,
      "step": 100
    },
    {
      "epoch": 0.07,
      "learning_rate": 8.99517950459963e-06,
      "loss": 0.6677,
      "step": 150
    },
    {
      "epoch": 0.1,
      "learning_rate": 8.99143142060366e-06,
      "loss": 0.6625,
      "step": 200
    },
    {
      "epoch": 0.12,
      "learning_rate": 8.98661398542506e-06,
      "loss": 0.6649,
      "step": 250
    },
    {
      "epoch": 0.15,
      "learning_rate": 8.980728346032255e-06,
      "loss": 0.6528,
      "step": 300
    },
    {
      "epoch": 0.17,
      "learning_rate": 8.973775903719142e-06,
      "loss": 0.6442,
      "step": 350
    },
    {
      "epoch": 0.2,
      "learning_rate": 8.965758313771466e-06,
      "loss": 0.6375,
      "step": 400
    },
    {
      "epoch": 0.22,
      "learning_rate": 8.956677485072722e-06,
      "loss": 0.6303,
      "step": 450
    },
    {
      "epoch": 0.25,
      "learning_rate": 8.946535579649664e-06,
      "loss": 0.634,
      "step": 500
    },
    {
      "epoch": 0.25,
      "eval_accuracy": 0.0,
      "eval_loss": 0.6457224488258362,
      "eval_runtime": 154.2695,
      "eval_samples_per_second": 87.989,
      "eval_steps_per_second": 14.669,
      "step": 500
    },
    {
      "epoch": 0.27,
      "learning_rate": 8.935335012157564e-06,
      "loss": 0.6236,
      "step": 550
    },
    {
      "epoch": 0.29,
      "learning_rate": 8.92307844930531e-06,
      "loss": 0.6235,
      "step": 600
    },
    {
      "epoch": 0.32,
      "learning_rate": 8.909768809220503e-06,
      "loss": 0.6301,
      "step": 650
    },
    {
      "epoch": 0.34,
      "learning_rate": 8.895409260754679e-06,
      "loss": 0.6227,
      "step": 700
    },
    {
      "epoch": 0.37,
      "learning_rate": 8.880003222728855e-06,
      "loss": 0.6187,
      "step": 750
    },
    {
      "epoch": 0.39,
      "learning_rate": 8.863554363119547e-06,
      "loss": 0.6201,
      "step": 800
    },
    {
      "epoch": 0.42,
      "learning_rate": 8.846066598185479e-06,
      "loss": 0.614,
      "step": 850
    },
    {
      "epoch": 0.44,
      "learning_rate": 8.827544091535166e-06,
      "loss": 0.607,
      "step": 900
    },
    {
      "epoch": 0.47,
      "learning_rate": 8.80799125313562e-06,
      "loss": 0.6176,
      "step": 950
    },
    {
      "epoch": 0.49,
      "learning_rate": 8.787412738262386e-06,
      "loss": 0.5999,
      "step": 1000
    },
    {
      "epoch": 0.49,
      "eval_accuracy": 0.0,
      "eval_loss": 0.6475000977516174,
      "eval_runtime": 154.4877,
      "eval_samples_per_second": 87.865,
      "eval_steps_per_second": 14.648,
      "step": 1000
    },
    {
      "epoch": 0.52,
      "learning_rate": 8.765813446391192e-06,
      "loss": 0.6055,
      "step": 1050
    },
    {
      "epoch": 0.54,
      "learning_rate": 8.743198520031437e-06,
      "loss": 0.6142,
      "step": 1100
    },
    {
      "epoch": 0.56,
      "learning_rate": 8.719573343501835e-06,
      "loss": 0.6006,
      "step": 1150
    },
    {
      "epoch": 0.59,
      "learning_rate": 8.694943541648477e-06,
      "loss": 0.5965,
      "step": 1200
    },
    {
      "epoch": 0.61,
      "learning_rate": 8.669314978505624e-06,
      "loss": 0.5951,
      "step": 1250
    },
    {
      "epoch": 0.64,
      "learning_rate": 8.642693755899563e-06,
      "loss": 0.5695,
      "step": 1300
    },
    {
      "epoch": 0.66,
      "learning_rate": 8.615086211995838e-06,
      "loss": 0.5758,
      "step": 1350
    },
    {
      "epoch": 0.69,
      "learning_rate": 8.586498919790219e-06,
      "loss": 0.5733,
      "step": 1400
    },
    {
      "epoch": 0.71,
      "learning_rate": 8.556938685543754e-06,
      "loss": 0.5728,
      "step": 1450
    },
    {
      "epoch": 0.74,
      "learning_rate": 8.526412547162289e-06,
      "loss": 0.5616,
      "step": 1500
    },
    {
      "epoch": 0.74,
      "eval_accuracy": 0.0,
      "eval_loss": 0.6478520035743713,
      "eval_runtime": 154.2746,
      "eval_samples_per_second": 87.986,
      "eval_steps_per_second": 14.669,
      "step": 1500
    },
    {
      "epoch": 0.76,
      "learning_rate": 8.494927772520829e-06,
      "loss": 0.5707,
      "step": 1550
    },
    {
      "epoch": 0.79,
      "learning_rate": 8.46249185773317e-06,
      "loss": 0.5655,
      "step": 1600
    },
    {
      "epoch": 0.81,
      "learning_rate": 8.429112525367142e-06,
      "loss": 0.5504,
      "step": 1650
    },
    {
      "epoch": 0.83,
      "learning_rate": 8.394797722605998e-06,
      "loss": 0.5526,
      "step": 1700
    },
    {
      "epoch": 0.86,
      "learning_rate": 8.35955561935627e-06,
      "loss": 0.5489,
      "step": 1750
    },
    {
      "epoch": 0.88,
      "learning_rate": 8.323394606302632e-06,
      "loss": 0.5429,
      "step": 1800
    },
    {
      "epoch": 0.91,
      "learning_rate": 8.28632329291019e-06,
      "loss": 0.5427,
      "step": 1850
    },
    {
      "epoch": 0.93,
      "learning_rate": 8.248350505374668e-06,
      "loss": 0.536,
      "step": 1900
    },
    {
      "epoch": 0.96,
      "learning_rate": 8.20948528452102e-06,
      "loss": 0.5352,
      "step": 1950
    },
    {
      "epoch": 0.98,
      "learning_rate": 8.169736883650921e-06,
      "loss": 0.5149,
      "step": 2000
    },
    {
      "epoch": 0.98,
      "eval_accuracy": 0.0,
      "eval_loss": 0.6605007648468018,
      "eval_runtime": 154.4529,
      "eval_samples_per_second": 87.884,
      "eval_steps_per_second": 14.652,
      "step": 2000
    }
  ],
  "logging_steps": 50,
  "max_steps": 10180,
  "num_train_epochs": 5,
  "save_steps": 500,
  "total_flos": 0.0,
  "trial_name": null,
  "trial_params": null
}