{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 17.219321148825067,
  "eval_steps": 500,
  "global_step": 400,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.4177545691906005,
      "grad_norm": 0.28227752447128296,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 4.1508,
      "step": 10
    },
    {
      "epoch": 0.835509138381201,
      "grad_norm": 0.31433430314064026,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 4.1593,
      "step": 20
    },
    {
      "epoch": 1.2532637075718016,
      "grad_norm": 0.3350953161716461,
      "learning_rate": 8.999999999999999e-05,
      "loss": 4.0414,
      "step": 30
    },
    {
      "epoch": 1.671018276762402,
      "grad_norm": 0.2885706126689911,
      "learning_rate": 0.00011999999999999999,
      "loss": 3.8411,
      "step": 40
    },
    {
      "epoch": 2.0887728459530024,
      "grad_norm": 0.23711609840393066,
      "learning_rate": 0.00015,
      "loss": 3.6434,
      "step": 50
    },
    {
      "epoch": 2.506527415143603,
      "grad_norm": 0.21583135426044464,
      "learning_rate": 0.00017999999999999998,
      "loss": 3.4636,
      "step": 60
    },
    {
      "epoch": 2.9242819843342036,
      "grad_norm": 0.18754692375659943,
      "learning_rate": 0.00020999999999999998,
      "loss": 3.3154,
      "step": 70
    },
    {
      "epoch": 3.342036553524804,
      "grad_norm": 0.15951760113239288,
      "learning_rate": 0.00023999999999999998,
      "loss": 3.2195,
      "step": 80
    },
    {
      "epoch": 3.759791122715405,
      "grad_norm": 0.14639759063720703,
      "learning_rate": 0.00027,
      "loss": 3.122,
      "step": 90
    },
    {
      "epoch": 4.177545691906005,
      "grad_norm": 0.1860765665769577,
      "learning_rate": 0.0003,
      "loss": 3.0677,
      "step": 100
    },
    {
      "epoch": 4.595300261096606,
      "grad_norm": 0.1737535446882248,
      "learning_rate": 0.000285,
      "loss": 2.9992,
      "step": 110
    },
    {
      "epoch": 5.013054830287206,
      "grad_norm": 0.181383416056633,
      "learning_rate": 0.00027,
      "loss": 2.9761,
      "step": 120
    },
    {
      "epoch": 5.430809399477806,
      "grad_norm": 0.1873219609260559,
      "learning_rate": 0.00025499999999999996,
      "loss": 2.9281,
      "step": 130
    },
    {
      "epoch": 5.848563968668407,
      "grad_norm": 0.19864186644554138,
      "learning_rate": 0.00023999999999999998,
      "loss": 2.9168,
      "step": 140
    },
    {
      "epoch": 6.266318537859008,
      "grad_norm": 0.22326301038265228,
      "learning_rate": 0.000225,
      "loss": 2.8549,
      "step": 150
    },
    {
      "epoch": 6.684073107049608,
      "grad_norm": 0.2200121283531189,
      "learning_rate": 0.00020999999999999998,
      "loss": 2.855,
      "step": 160
    },
    {
      "epoch": 7.101827676240209,
      "grad_norm": 0.2546086311340332,
      "learning_rate": 0.000195,
      "loss": 2.8509,
      "step": 170
    },
    {
      "epoch": 7.51958224543081,
      "grad_norm": 0.26345309615135193,
      "learning_rate": 0.00017999999999999998,
      "loss": 2.8144,
      "step": 180
    },
    {
      "epoch": 7.93733681462141,
      "grad_norm": 0.21533280611038208,
      "learning_rate": 0.000165,
      "loss": 2.8006,
      "step": 190
    },
    {
      "epoch": 8.35509138381201,
      "grad_norm": 0.2510657012462616,
      "learning_rate": 0.00015,
      "loss": 2.7816,
      "step": 200
    },
    {
      "epoch": 8.77284595300261,
      "grad_norm": 0.23468665778636932,
      "learning_rate": 0.000135,
      "loss": 2.7762,
      "step": 210
    },
    {
      "epoch": 9.190600522193211,
      "grad_norm": 0.23014432191848755,
      "learning_rate": 0.00011999999999999999,
      "loss": 2.7731,
      "step": 220
    },
    {
      "epoch": 9.608355091383812,
      "grad_norm": 0.247611865401268,
      "learning_rate": 0.00010499999999999999,
      "loss": 2.742,
      "step": 230
    },
    {
      "epoch": 10.026109660574413,
      "grad_norm": 0.2899376451969147,
      "learning_rate": 8.999999999999999e-05,
      "loss": 2.763,
      "step": 240
    },
    {
      "epoch": 10.443864229765014,
      "grad_norm": 0.24601446092128754,
      "learning_rate": 7.5e-05,
      "loss": 2.7529,
      "step": 250
    },
    {
      "epoch": 10.861618798955613,
      "grad_norm": 0.2344890832901001,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 2.7373,
      "step": 260
    },
    {
      "epoch": 11.279373368146214,
      "grad_norm": 0.22882166504859924,
      "learning_rate": 4.4999999999999996e-05,
      "loss": 2.7427,
      "step": 270
    },
    {
      "epoch": 11.697127937336814,
      "grad_norm": 0.26199406385421753,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 2.6814,
      "step": 280
    },
    {
      "epoch": 12.114882506527415,
      "grad_norm": 0.2374505251646042,
      "learning_rate": 1.4999999999999999e-05,
      "loss": 2.758,
      "step": 290
    },
    {
      "epoch": 12.532637075718016,
      "grad_norm": 0.2393040806055069,
      "learning_rate": 0.0,
      "loss": 2.7284,
      "step": 300
    },
    {
      "epoch": 13.459530026109661,
      "grad_norm": 0.2965029180049896,
      "learning_rate": 0.0002668421052631579,
      "loss": 2.7219,
      "step": 310
    },
    {
      "epoch": 13.877284595300262,
      "grad_norm": 0.2831190526485443,
      "learning_rate": 0.0002652631578947368,
      "loss": 2.7316,
      "step": 320
    },
    {
      "epoch": 14.295039164490861,
      "grad_norm": 0.29041996598243713,
      "learning_rate": 0.00026368421052631576,
      "loss": 2.6869,
      "step": 330
    },
    {
      "epoch": 14.712793733681462,
      "grad_norm": 0.27796632051467896,
      "learning_rate": 0.0002621052631578947,
      "loss": 2.7045,
      "step": 340
    },
    {
      "epoch": 15.130548302872063,
      "grad_norm": 0.30092301964759827,
      "learning_rate": 0.0002605263157894737,
      "loss": 2.6589,
      "step": 350
    },
    {
      "epoch": 15.548302872062663,
      "grad_norm": 0.33648282289505005,
      "learning_rate": 0.0002589473684210526,
      "loss": 2.6811,
      "step": 360
    },
    {
      "epoch": 15.966057441253264,
      "grad_norm": 0.3513476550579071,
      "learning_rate": 0.00025736842105263157,
      "loss": 2.6424,
      "step": 370
    },
    {
      "epoch": 16.383812010443865,
      "grad_norm": 0.366802453994751,
      "learning_rate": 0.0002557894736842105,
      "loss": 2.6225,
      "step": 380
    },
    {
      "epoch": 16.801566579634464,
      "grad_norm": 0.3507522642612457,
      "learning_rate": 0.00025421052631578945,
      "loss": 2.6693,
      "step": 390
    },
    {
      "epoch": 17.219321148825067,
      "grad_norm": 0.32098060846328735,
      "learning_rate": 0.00025263157894736836,
      "loss": 2.6372,
      "step": 400
    }
  ],
  "logging_steps": 10,
  "max_steps": 2000,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 87,
  "save_steps": 100,
  "total_flos": 6.460900234089062e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}
|
|