{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.857142857142857,
  "eval_steps": 500,
  "global_step": 15,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.19,
      "grad_norm": 1.4144747257232666,
      "learning_rate": 0.0001,
      "loss": 4.8168,
      "step": 1
    },
    {
      "epoch": 0.38,
      "grad_norm": 1.2190855741500854,
      "learning_rate": 0.0002,
      "loss": 4.422,
      "step": 2
    },
    {
      "epoch": 0.57,
      "grad_norm": 1.224570870399475,
      "learning_rate": 0.00018461538461538463,
      "loss": 4.5365,
      "step": 3
    },
    {
      "epoch": 0.76,
      "grad_norm": 1.3508821725845337,
      "learning_rate": 0.00016923076923076923,
      "loss": 4.6879,
      "step": 4
    },
    {
      "epoch": 0.95,
      "grad_norm": 1.2965322732925415,
      "learning_rate": 0.00015384615384615385,
      "loss": 4.7769,
      "step": 5
    },
    {
      "epoch": 1.14,
      "grad_norm": 1.8319215774536133,
      "learning_rate": 0.00013846153846153847,
      "loss": 4.1008,
      "step": 6
    },
    {
      "epoch": 1.33,
      "grad_norm": 1.348449468612671,
      "learning_rate": 0.0001230769230769231,
      "loss": 4.2341,
      "step": 7
    },
    {
      "epoch": 1.52,
      "grad_norm": 1.84274423122406,
      "learning_rate": 0.0001076923076923077,
      "loss": 4.4207,
      "step": 8
    },
    {
      "epoch": 1.71,
      "grad_norm": 1.6617754697799683,
      "learning_rate": 9.230769230769232e-05,
      "loss": 4.115,
      "step": 9
    },
    {
      "epoch": 1.9,
      "grad_norm": 2.2886290550231934,
      "learning_rate": 7.692307692307693e-05,
      "loss": 4.1086,
      "step": 10
    },
    {
      "epoch": 2.1,
      "grad_norm": 2.4682440757751465,
      "learning_rate": 6.153846153846155e-05,
      "loss": 3.9574,
      "step": 11
    },
    {
      "epoch": 2.29,
      "grad_norm": 1.4467887878417969,
      "learning_rate": 4.615384615384616e-05,
      "loss": 3.8121,
      "step": 12
    },
    {
      "epoch": 2.48,
      "grad_norm": 1.9620065689086914,
      "learning_rate": 3.0769230769230774e-05,
      "loss": 3.7146,
      "step": 13
    },
    {
      "epoch": 2.67,
      "grad_norm": 2.561143636703491,
      "learning_rate": 1.5384615384615387e-05,
      "loss": 4.0838,
      "step": 14
    },
    {
      "epoch": 2.86,
      "grad_norm": 2.504836082458496,
      "learning_rate": 0.0,
      "loss": 3.8844,
      "step": 15
    }
  ],
  "logging_steps": 1,
  "max_steps": 15,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 186722426290176.0,
  "train_batch_size": 5,
  "trial_name": null,
  "trial_params": null
}