{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 6411,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.2,
      "grad_norm": 6479046246400.0,
      "learning_rate": 3.3255451713395644e-05,
      "loss": 31699221108.3091,
      "step": 427
    },
    {
      "epoch": 0.4,
      "grad_norm": 8700652158976.0,
      "learning_rate": 4.8162593170393484e-05,
      "loss": 14378762930.6604,
      "step": 854
    },
    {
      "epoch": 0.6,
      "grad_norm": 16.28143310546875,
      "learning_rate": 4.446177847113885e-05,
      "loss": 3635725784.4309,
      "step": 1281
    },
    {
      "epoch": 0.8,
      "grad_norm": 6.706409454345703,
      "learning_rate": 4.076096377188421e-05,
      "loss": 1017393142.4075,
      "step": 1708
    },
    {
      "epoch": 1.0,
      "grad_norm": 9.295268058776855,
      "learning_rate": 3.7060149072629574e-05,
      "loss": 1230655574.3326,
      "step": 2135
    },
    {
      "epoch": 1.2,
      "grad_norm": 7.9164958000183105,
      "learning_rate": 3.335933437337493e-05,
      "loss": 387735187.4848,
      "step": 2562
    },
    {
      "epoch": 1.4,
      "grad_norm": 18.9678897857666,
      "learning_rate": 2.96585196741203e-05,
      "loss": 610998305.5738,
      "step": 2989
    },
    {
      "epoch": 1.6,
      "grad_norm": 22.521636962890625,
      "learning_rate": 2.595770497486566e-05,
      "loss": 286763645.9016,
      "step": 3416
    },
    {
      "epoch": 1.8,
      "grad_norm": 6.2225751876831055,
      "learning_rate": 2.2256890275611026e-05,
      "loss": 146726173.377,
      "step": 3843
    },
    {
      "epoch": 2.0,
      "grad_norm": 4.1397271156311035,
      "learning_rate": 1.8556075576356388e-05,
      "loss": 153057112.1311,
      "step": 4270
    },
    {
      "epoch": 2.2,
      "grad_norm": 20.3540096282959,
      "learning_rate": 1.4855260877101752e-05,
      "loss": 167597749.0585,
      "step": 4697
    },
    {
      "epoch": 2.4,
      "grad_norm": 2.9089903831481934,
      "learning_rate": 1.1154446177847114e-05,
      "loss": 31491796.2342,
      "step": 5124
    },
    {
      "epoch": 2.6,
      "grad_norm": 11.952803611755371,
      "learning_rate": 7.453631478592478e-06,
      "loss": 228096436.459,
      "step": 5551
    },
    {
      "epoch": 2.8,
      "grad_norm": 8.380680084228516,
      "learning_rate": 3.7528167793378402e-06,
      "loss": 170669039.2131,
      "step": 5978
    },
    {
      "epoch": 3.0,
      "grad_norm": 11.055083274841309,
      "learning_rate": 5.2002080083203335e-08,
      "loss": 77525903.2881,
      "step": 6405
    }
  ],
  "logging_steps": 427,
  "max_steps": 6411,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "total_flos": 1.089435891661996e+18,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}