{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.9835082458770614,
  "eval_steps": 25,
  "global_step": 166,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.13193403298350825,
      "grad_norm": 119.62747955322266,
      "learning_rate": 1.6000000000000003e-05,
      "loss": 60.0685,
      "step": 11
    },
    {
      "epoch": 0.2638680659670165,
      "grad_norm": 76.251220703125,
      "learning_rate": 1.983619906947144e-05,
      "loss": 53.8545,
      "step": 22
    },
    {
      "epoch": 0.29985007496251875,
      "eval_loss": 3.3249454498291016,
      "eval_runtime": 89.9708,
      "eval_samples_per_second": 5.557,
      "eval_steps_per_second": 1.856,
      "step": 25
    },
    {
      "epoch": 0.39580209895052476,
      "grad_norm": 67.91344451904297,
      "learning_rate": 1.9199794436588244e-05,
      "loss": 52.8293,
      "step": 33
    },
    {
      "epoch": 0.527736131934033,
      "grad_norm": 75.1031265258789,
      "learning_rate": 1.811377838556573e-05,
      "loss": 52.3377,
      "step": 44
    },
    {
      "epoch": 0.5997001499250375,
      "eval_loss": 3.2520751953125,
      "eval_runtime": 89.7398,
      "eval_samples_per_second": 5.572,
      "eval_steps_per_second": 1.861,
      "step": 50
    },
    {
      "epoch": 0.6596701649175413,
      "grad_norm": 70.66634368896484,
      "learning_rate": 1.6631226582407954e-05,
      "loss": 52.2969,
      "step": 55
    },
    {
      "epoch": 0.7916041979010495,
      "grad_norm": 74.07559204101562,
      "learning_rate": 1.4824594148071936e-05,
      "loss": 51.8169,
      "step": 66
    },
    {
      "epoch": 0.8995502248875562,
      "eval_loss": 3.2324743270874023,
      "eval_runtime": 90.1521,
      "eval_samples_per_second": 5.546,
      "eval_steps_per_second": 1.852,
      "step": 75
    },
    {
      "epoch": 0.9235382308845578,
      "grad_norm": 72.83753967285156,
      "learning_rate": 1.2782174639164528e-05,
      "loss": 51.5118,
      "step": 77
    },
    {
      "epoch": 1.047976011994003,
      "grad_norm": 79.93111419677734,
      "learning_rate": 1.0603784974222862e-05,
      "loss": 48.0834,
      "step": 88
    },
    {
      "epoch": 1.1799100449775113,
      "grad_norm": 89.3459243774414,
      "learning_rate": 8.395887191422397e-06,
      "loss": 50.378,
      "step": 99
    },
    {
      "epoch": 1.191904047976012,
      "eval_loss": 3.2021567821502686,
      "eval_runtime": 90.5661,
      "eval_samples_per_second": 5.521,
      "eval_steps_per_second": 1.844,
      "step": 100
    },
    {
      "epoch": 1.3118440779610194,
      "grad_norm": 69.37840270996094,
      "learning_rate": 6.266385446673791e-06,
      "loss": 50.3322,
      "step": 110
    },
    {
      "epoch": 1.4437781109445278,
      "grad_norm": 65.35021209716797,
      "learning_rate": 4.319352532688444e-06,
      "loss": 50.1113,
      "step": 121
    },
    {
      "epoch": 1.4917541229385307,
      "eval_loss": 3.19496488571167,
      "eval_runtime": 90.6364,
      "eval_samples_per_second": 5.517,
      "eval_steps_per_second": 1.843,
      "step": 125
    },
    {
      "epoch": 1.575712143928036,
      "grad_norm": 70.10005187988281,
      "learning_rate": 2.6499436440367165e-06,
      "loss": 50.1493,
      "step": 132
    },
    {
      "epoch": 1.707646176911544,
      "grad_norm": 68.84884643554688,
      "learning_rate": 1.339745962155613e-06,
      "loss": 50.1588,
      "step": 143
    },
    {
      "epoch": 1.7916041979010495,
      "eval_loss": 3.176970958709717,
      "eval_runtime": 89.1443,
      "eval_samples_per_second": 5.609,
      "eval_steps_per_second": 1.873,
      "step": 150
    },
    {
      "epoch": 1.8395802098950524,
      "grad_norm": 84.58167266845703,
      "learning_rate": 4.5279133491454406e-07,
      "loss": 49.9598,
      "step": 154
    },
    {
      "epoch": 1.9715142428785608,
      "grad_norm": 62.91596984863281,
      "learning_rate": 3.242691865790071e-08,
      "loss": 49.826,
      "step": 165
    }
  ],
  "logging_steps": 11,
  "max_steps": 166,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.3724710537723904e+17,
  "train_batch_size": 3,
  "trial_name": null,
  "trial_params": null
}