{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 12.532637075718016,
  "eval_steps": 500,
  "global_step": 300,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.4177545691906005,
      "grad_norm": 0.28227752447128296,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 4.1508,
      "step": 10
    },
    {
      "epoch": 0.835509138381201,
      "grad_norm": 0.31433430314064026,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 4.1593,
      "step": 20
    },
    {
      "epoch": 1.2532637075718016,
      "grad_norm": 0.3350953161716461,
      "learning_rate": 8.999999999999999e-05,
      "loss": 4.0414,
      "step": 30
    },
    {
      "epoch": 1.671018276762402,
      "grad_norm": 0.2885706126689911,
      "learning_rate": 0.00011999999999999999,
      "loss": 3.8411,
      "step": 40
    },
    {
      "epoch": 2.0887728459530024,
      "grad_norm": 0.23711609840393066,
      "learning_rate": 0.00015,
      "loss": 3.6434,
      "step": 50
    },
    {
      "epoch": 2.506527415143603,
      "grad_norm": 0.21583135426044464,
      "learning_rate": 0.00017999999999999998,
      "loss": 3.4636,
      "step": 60
    },
    {
      "epoch": 2.9242819843342036,
      "grad_norm": 0.18754692375659943,
      "learning_rate": 0.00020999999999999998,
      "loss": 3.3154,
      "step": 70
    },
    {
      "epoch": 3.342036553524804,
      "grad_norm": 0.15951760113239288,
      "learning_rate": 0.00023999999999999998,
      "loss": 3.2195,
      "step": 80
    },
    {
      "epoch": 3.759791122715405,
      "grad_norm": 0.14639759063720703,
      "learning_rate": 0.00027,
      "loss": 3.122,
      "step": 90
    },
    {
      "epoch": 4.177545691906005,
      "grad_norm": 0.1860765665769577,
      "learning_rate": 0.0003,
      "loss": 3.0677,
      "step": 100
    },
    {
      "epoch": 4.595300261096606,
      "grad_norm": 0.1737535446882248,
      "learning_rate": 0.000285,
      "loss": 2.9992,
      "step": 110
    },
    {
      "epoch": 5.013054830287206,
      "grad_norm": 0.181383416056633,
      "learning_rate": 0.00027,
      "loss": 2.9761,
      "step": 120
    },
    {
      "epoch": 5.430809399477806,
      "grad_norm": 0.1873219609260559,
      "learning_rate": 0.00025499999999999996,
      "loss": 2.9281,
      "step": 130
    },
    {
      "epoch": 5.848563968668407,
      "grad_norm": 0.19864186644554138,
      "learning_rate": 0.00023999999999999998,
      "loss": 2.9168,
      "step": 140
    },
    {
      "epoch": 6.266318537859008,
      "grad_norm": 0.22326301038265228,
      "learning_rate": 0.000225,
      "loss": 2.8549,
      "step": 150
    },
    {
      "epoch": 6.684073107049608,
      "grad_norm": 0.2200121283531189,
      "learning_rate": 0.00020999999999999998,
      "loss": 2.855,
      "step": 160
    },
    {
      "epoch": 7.101827676240209,
      "grad_norm": 0.2546086311340332,
      "learning_rate": 0.000195,
      "loss": 2.8509,
      "step": 170
    },
    {
      "epoch": 7.51958224543081,
      "grad_norm": 0.26345309615135193,
      "learning_rate": 0.00017999999999999998,
      "loss": 2.8144,
      "step": 180
    },
    {
      "epoch": 7.93733681462141,
      "grad_norm": 0.21533280611038208,
      "learning_rate": 0.000165,
      "loss": 2.8006,
      "step": 190
    },
    {
      "epoch": 8.35509138381201,
      "grad_norm": 0.2510657012462616,
      "learning_rate": 0.00015,
      "loss": 2.7816,
      "step": 200
    },
    {
      "epoch": 8.77284595300261,
      "grad_norm": 0.23468665778636932,
      "learning_rate": 0.000135,
      "loss": 2.7762,
      "step": 210
    },
    {
      "epoch": 9.190600522193211,
      "grad_norm": 0.23014432191848755,
      "learning_rate": 0.00011999999999999999,
      "loss": 2.7731,
      "step": 220
    },
    {
      "epoch": 9.608355091383812,
      "grad_norm": 0.247611865401268,
      "learning_rate": 0.00010499999999999999,
      "loss": 2.742,
      "step": 230
    },
    {
      "epoch": 10.026109660574413,
      "grad_norm": 0.2899376451969147,
      "learning_rate": 8.999999999999999e-05,
      "loss": 2.763,
      "step": 240
    },
    {
      "epoch": 10.443864229765014,
      "grad_norm": 0.24601446092128754,
      "learning_rate": 7.5e-05,
      "loss": 2.7529,
      "step": 250
    },
    {
      "epoch": 10.861618798955613,
      "grad_norm": 0.2344890832901001,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 2.7373,
      "step": 260
    },
    {
      "epoch": 11.279373368146214,
      "grad_norm": 0.22882166504859924,
      "learning_rate": 4.4999999999999996e-05,
      "loss": 2.7427,
      "step": 270
    },
    {
      "epoch": 11.697127937336814,
      "grad_norm": 0.26199406385421753,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 2.6814,
      "step": 280
    },
    {
      "epoch": 12.114882506527415,
      "grad_norm": 0.2374505251646042,
      "learning_rate": 1.4999999999999999e-05,
      "loss": 2.758,
      "step": 290
    },
    {
      "epoch": 12.532637075718016,
      "grad_norm": 0.2393040806055069,
      "learning_rate": 0.0,
      "loss": 2.7284,
      "step": 300
    }
  ],
  "logging_steps": 10,
  "max_steps": 300,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 14,
  "save_steps": 100,
  "total_flos": 4.839745858893619e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}