{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 8.361204013377927,
  "eval_steps": 500,
  "global_step": 7500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.56,
      "learning_rate": 2.8327759197324414e-05,
      "loss": 0.2283,
      "step": 500
    },
    {
      "epoch": 1.0,
      "eval_accuracy": 0.711111111111111,
      "eval_loss": 0.18087100982666016,
      "eval_runtime": 8.0851,
      "eval_samples_per_second": 27.829,
      "eval_steps_per_second": 1.237,
      "step": 897
    },
    {
      "epoch": 1.11,
      "learning_rate": 2.665551839464883e-05,
      "loss": 0.2091,
      "step": 1000
    },
    {
      "epoch": 1.67,
      "learning_rate": 2.4983277591973243e-05,
      "loss": 0.2122,
      "step": 1500
    },
    {
      "epoch": 2.0,
      "eval_accuracy": 0.7644444444444445,
      "eval_loss": 0.1918957531452179,
      "eval_runtime": 8.2002,
      "eval_samples_per_second": 27.438,
      "eval_steps_per_second": 1.219,
      "step": 1794
    },
    {
      "epoch": 2.23,
      "learning_rate": 2.331103678929766e-05,
      "loss": 0.2129,
      "step": 2000
    },
    {
      "epoch": 2.79,
      "learning_rate": 2.1638795986622073e-05,
      "loss": 0.1977,
      "step": 2500
    },
    {
      "epoch": 3.0,
      "eval_accuracy": 0.7733333333333334,
      "eval_loss": 0.1987731158733368,
      "eval_runtime": 8.2946,
      "eval_samples_per_second": 27.126,
      "eval_steps_per_second": 1.206,
      "step": 2691
    },
    {
      "epoch": 3.34,
      "learning_rate": 1.996655518394649e-05,
      "loss": 0.1975,
      "step": 3000
    },
    {
      "epoch": 3.9,
      "learning_rate": 1.8294314381270902e-05,
      "loss": 0.2049,
      "step": 3500
    },
    {
      "epoch": 4.0,
      "eval_accuracy": 0.7377777777777779,
      "eval_loss": 0.20192378759384155,
      "eval_runtime": 8.5276,
      "eval_samples_per_second": 26.385,
      "eval_steps_per_second": 1.173,
      "step": 3588
    },
    {
      "epoch": 4.46,
      "learning_rate": 1.662207357859532e-05,
      "loss": 0.202,
      "step": 4000
    },
    {
      "epoch": 5.0,
      "eval_accuracy": 0.7733333333333332,
      "eval_loss": 0.2126660943031311,
      "eval_runtime": 8.5344,
      "eval_samples_per_second": 26.364,
      "eval_steps_per_second": 1.172,
      "step": 4485
    },
    {
      "epoch": 5.02,
      "learning_rate": 1.4949832775919733e-05,
      "loss": 0.1849,
      "step": 4500
    },
    {
      "epoch": 5.57,
      "learning_rate": 1.3277591973244148e-05,
      "loss": 0.2007,
      "step": 5000
    },
    {
      "epoch": 6.0,
      "eval_accuracy": 0.7955555555555556,
      "eval_loss": 0.20662625133991241,
      "eval_runtime": 8.6442,
      "eval_samples_per_second": 26.029,
      "eval_steps_per_second": 1.157,
      "step": 5382
    },
    {
      "epoch": 6.13,
      "learning_rate": 1.1605351170568563e-05,
      "loss": 0.1798,
      "step": 5500
    },
    {
      "epoch": 6.69,
      "learning_rate": 9.933110367892978e-06,
      "loss": 0.1888,
      "step": 6000
    },
    {
      "epoch": 7.0,
      "eval_accuracy": 0.7911111111111111,
      "eval_loss": 0.20552584528923035,
      "eval_runtime": 8.6299,
      "eval_samples_per_second": 26.072,
      "eval_steps_per_second": 1.159,
      "step": 6279
    },
    {
      "epoch": 7.25,
      "learning_rate": 8.260869565217392e-06,
      "loss": 0.1983,
      "step": 6500
    },
    {
      "epoch": 7.8,
      "learning_rate": 6.588628762541806e-06,
      "loss": 0.1844,
      "step": 7000
    },
    {
      "epoch": 8.0,
      "eval_accuracy": 0.8,
      "eval_loss": 0.21319705247879028,
      "eval_runtime": 8.5588,
      "eval_samples_per_second": 26.289,
      "eval_steps_per_second": 1.168,
      "step": 7176
    },
    {
      "epoch": 8.36,
      "learning_rate": 4.916387959866221e-06,
      "loss": 0.176,
      "step": 7500
    }
  ],
  "logging_steps": 500,
  "max_steps": 8970,
  "num_train_epochs": 10,
  "save_steps": 1500,
  "total_flos": 1834764136380000.0,
  "trial_name": null,
  "trial_params": null
}