{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 4.0,
  "eval_steps": 50,
  "global_step": 690,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2898550724637681,
      "grad_norm": 0.907157838344574,
      "learning_rate": 7.267441860465117e-06,
      "loss": 1.0645,
      "step": 50
    },
    {
      "epoch": 0.5797101449275363,
      "grad_norm": 0.44142013788223267,
      "learning_rate": 1.4389534883720932e-05,
      "loss": 0.7285,
      "step": 100
    },
    {
      "epoch": 0.8695652173913043,
      "grad_norm": 0.5562382340431213,
      "learning_rate": 2.1656976744186048e-05,
      "loss": 0.6372,
      "step": 150
    },
    {
      "epoch": 0.9971014492753624,
      "eval_loss": 0.7050720453262329,
      "eval_runtime": 45.461,
      "eval_samples_per_second": 3.366,
      "eval_steps_per_second": 0.44,
      "step": 172
    },
    {
      "epoch": 1.1594202898550725,
      "grad_norm": 0.5781270861625671,
      "learning_rate": 2.8924418604651166e-05,
      "loss": 0.6127,
      "step": 200
    },
    {
      "epoch": 1.4492753623188406,
      "grad_norm": 0.6822673082351685,
      "learning_rate": 3.619186046511628e-05,
      "loss": 0.6025,
      "step": 250
    },
    {
      "epoch": 1.7391304347826086,
      "grad_norm": 0.6946871280670166,
      "learning_rate": 4.34593023255814e-05,
      "loss": 0.6322,
      "step": 300
    },
    {
      "epoch": 2.0,
      "eval_loss": 0.6602075099945068,
      "eval_runtime": 45.4656,
      "eval_samples_per_second": 3.365,
      "eval_steps_per_second": 0.44,
      "step": 345
    },
    {
      "epoch": 2.028985507246377,
      "grad_norm": 0.5847777128219604,
      "learning_rate": 4.981831395348838e-05,
      "loss": 0.6062,
      "step": 350
    },
    {
      "epoch": 2.318840579710145,
      "grad_norm": 0.8614991307258606,
      "learning_rate": 4.8001453488372095e-05,
      "loss": 0.6037,
      "step": 400
    },
    {
      "epoch": 2.608695652173913,
      "grad_norm": 0.5000740885734558,
      "learning_rate": 4.618459302325582e-05,
      "loss": 0.5691,
      "step": 450
    },
    {
      "epoch": 2.898550724637681,
      "grad_norm": 0.7337203621864319,
      "learning_rate": 4.436773255813953e-05,
      "loss": 0.541,
      "step": 500
    },
    {
      "epoch": 2.9971014492753625,
      "eval_loss": 0.6448820233345032,
      "eval_runtime": 45.4756,
      "eval_samples_per_second": 3.364,
      "eval_steps_per_second": 0.44,
      "step": 517
    },
    {
      "epoch": 3.1884057971014492,
      "grad_norm": 0.5098931789398193,
      "learning_rate": 4.255087209302326e-05,
      "loss": 0.5184,
      "step": 550
    },
    {
      "epoch": 3.4782608695652173,
      "grad_norm": 0.5694060921669006,
      "learning_rate": 4.073401162790698e-05,
      "loss": 0.5301,
      "step": 600
    },
    {
      "epoch": 3.7681159420289854,
      "grad_norm": 0.5817595720291138,
      "learning_rate": 3.89171511627907e-05,
      "loss": 0.593,
      "step": 650
    },
    {
      "epoch": 4.0,
      "eval_loss": 0.6387717127799988,
      "eval_runtime": 45.4728,
      "eval_samples_per_second": 3.365,
      "eval_steps_per_second": 0.44,
      "step": 690
    }
  ],
  "logging_steps": 50,
  "max_steps": 1720,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "total_flos": 4.715113760321864e+17,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}