{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 37,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.03,
      "learning_rate": 0.0001,
      "loss": 0.4496,
      "step": 1
    },
    {
      "epoch": 0.05,
      "learning_rate": 0.0002,
      "loss": 0.468,
      "step": 2
    },
    {
      "epoch": 0.08,
      "learning_rate": 0.00019959742939952392,
      "loss": 1.2381,
      "step": 3
    },
    {
      "epoch": 0.11,
      "learning_rate": 0.00019839295885986296,
      "loss": 0.4261,
      "step": 4
    },
    {
      "epoch": 0.14,
      "learning_rate": 0.00019639628606958533,
      "loss": 0.5796,
      "step": 5
    },
    {
      "epoch": 0.16,
      "learning_rate": 0.00019362348706397373,
      "loss": 0.4525,
      "step": 6
    },
    {
      "epoch": 0.19,
      "learning_rate": 0.0001900968867902419,
      "loss": 0.338,
      "step": 7
    },
    {
      "epoch": 0.22,
      "learning_rate": 0.00018584487936018661,
      "loss": 0.5157,
      "step": 8
    },
    {
      "epoch": 0.24,
      "learning_rate": 0.00018090169943749476,
      "loss": 0.4248,
      "step": 9
    },
    {
      "epoch": 0.27,
      "learning_rate": 0.00017530714660036112,
      "loss": 0.3697,
      "step": 10
    },
    {
      "epoch": 0.3,
      "learning_rate": 0.00016910626489868649,
      "loss": 0.3531,
      "step": 11
    },
    {
      "epoch": 0.32,
      "learning_rate": 0.00016234898018587337,
      "loss": 0.3644,
      "step": 12
    },
    {
      "epoch": 0.35,
      "learning_rate": 0.00015508969814521025,
      "loss": 0.3464,
      "step": 13
    },
    {
      "epoch": 0.38,
      "learning_rate": 0.00014738686624729986,
      "loss": 0.3658,
      "step": 14
    },
    {
      "epoch": 0.41,
      "learning_rate": 0.00013930250316539238,
      "loss": 0.3644,
      "step": 15
    },
    {
      "epoch": 0.43,
      "learning_rate": 0.00013090169943749476,
      "loss": 0.4509,
      "step": 16
    },
    {
      "epoch": 0.46,
      "learning_rate": 0.00012225209339563145,
      "loss": 0.3305,
      "step": 17
    },
    {
      "epoch": 0.49,
      "learning_rate": 0.00011342332658176555,
      "loss": 0.3533,
      "step": 18
    },
    {
      "epoch": 0.51,
      "learning_rate": 0.00010448648303505151,
      "loss": 0.3492,
      "step": 19
    },
    {
      "epoch": 0.54,
      "learning_rate": 9.551351696494854e-05,
      "loss": 0.355,
      "step": 20
    },
    {
      "epoch": 0.57,
      "learning_rate": 8.657667341823448e-05,
      "loss": 0.3802,
      "step": 21
    },
    {
      "epoch": 0.59,
      "learning_rate": 7.774790660436858e-05,
      "loss": 0.3821,
      "step": 22
    },
    {
      "epoch": 0.62,
      "learning_rate": 6.909830056250527e-05,
      "loss": 0.3527,
      "step": 23
    },
    {
      "epoch": 0.65,
      "learning_rate": 6.069749683460765e-05,
      "loss": 0.3324,
      "step": 24
    },
    {
      "epoch": 0.68,
      "learning_rate": 5.261313375270014e-05,
      "loss": 0.3403,
      "step": 25
    },
    {
      "epoch": 0.7,
      "learning_rate": 4.491030185478976e-05,
      "loss": 0.2953,
      "step": 26
    },
    {
      "epoch": 0.73,
      "learning_rate": 3.7651019814126654e-05,
      "loss": 0.4014,
      "step": 27
    },
    {
      "epoch": 0.76,
      "learning_rate": 3.089373510131354e-05,
      "loss": 0.3394,
      "step": 28
    },
    {
      "epoch": 0.78,
      "learning_rate": 2.4692853399638917e-05,
      "loss": 0.385,
      "step": 29
    },
    {
      "epoch": 0.81,
      "learning_rate": 1.9098300562505266e-05,
      "loss": 0.2792,
      "step": 30
    },
    {
      "epoch": 0.84,
      "learning_rate": 1.415512063981339e-05,
      "loss": 0.3258,
      "step": 31
    },
    {
      "epoch": 0.86,
      "learning_rate": 9.903113209758096e-06,
      "loss": 0.2917,
      "step": 32
    },
    {
      "epoch": 0.89,
      "learning_rate": 6.37651293602628e-06,
      "loss": 0.3328,
      "step": 33
    },
    {
      "epoch": 0.92,
      "learning_rate": 3.6037139304146762e-06,
      "loss": 0.3621,
      "step": 34
    },
    {
      "epoch": 0.95,
      "learning_rate": 1.6070411401370334e-06,
      "loss": 0.3362,
      "step": 35
    },
    {
      "epoch": 0.97,
      "learning_rate": 4.025706004760932e-07,
      "loss": 0.2866,
      "step": 36
    },
    {
      "epoch": 1.0,
      "learning_rate": 0.0,
      "loss": 0.3412,
      "step": 37
    },
    {
      "epoch": 1.0,
      "step": 37,
      "total_flos": 286018191360.0,
      "train_loss": 0.39620911108480916,
      "train_runtime": 157.9012,
      "train_samples_per_second": 1.862,
      "train_steps_per_second": 0.234
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 37,
  "num_train_epochs": 1,
  "save_steps": 50000,
  "total_flos": 286018191360.0,
  "trial_name": null,
  "trial_params": null
}