{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 8.35509138381201,
  "eval_steps": 500,
  "global_step": 200,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.4177545691906005,
      "grad_norm": 0.28227752447128296,
      "learning_rate": 2.9999999999999997e-05,
      "loss": 4.1508,
      "step": 10
    },
    {
      "epoch": 0.835509138381201,
      "grad_norm": 0.31433430314064026,
      "learning_rate": 5.9999999999999995e-05,
      "loss": 4.1593,
      "step": 20
    },
    {
      "epoch": 1.2532637075718016,
      "grad_norm": 0.3350953161716461,
      "learning_rate": 8.999999999999999e-05,
      "loss": 4.0414,
      "step": 30
    },
    {
      "epoch": 1.671018276762402,
      "grad_norm": 0.2885706126689911,
      "learning_rate": 0.00011999999999999999,
      "loss": 3.8411,
      "step": 40
    },
    {
      "epoch": 2.0887728459530024,
      "grad_norm": 0.23711609840393066,
      "learning_rate": 0.00015,
      "loss": 3.6434,
      "step": 50
    },
    {
      "epoch": 2.506527415143603,
      "grad_norm": 0.21583135426044464,
      "learning_rate": 0.00017999999999999998,
      "loss": 3.4636,
      "step": 60
    },
    {
      "epoch": 2.9242819843342036,
      "grad_norm": 0.18754692375659943,
      "learning_rate": 0.00020999999999999998,
      "loss": 3.3154,
      "step": 70
    },
    {
      "epoch": 3.342036553524804,
      "grad_norm": 0.15951760113239288,
      "learning_rate": 0.00023999999999999998,
      "loss": 3.2195,
      "step": 80
    },
    {
      "epoch": 3.759791122715405,
      "grad_norm": 0.14639759063720703,
      "learning_rate": 0.00027,
      "loss": 3.122,
      "step": 90
    },
    {
      "epoch": 4.177545691906005,
      "grad_norm": 0.1860765665769577,
      "learning_rate": 0.0003,
      "loss": 3.0677,
      "step": 100
    },
    {
      "epoch": 4.595300261096606,
      "grad_norm": 0.1737535446882248,
      "learning_rate": 0.000285,
      "loss": 2.9992,
      "step": 110
    },
    {
      "epoch": 5.013054830287206,
      "grad_norm": 0.181383416056633,
      "learning_rate": 0.00027,
      "loss": 2.9761,
      "step": 120
    },
    {
      "epoch": 5.430809399477806,
      "grad_norm": 0.1873219609260559,
      "learning_rate": 0.00025499999999999996,
      "loss": 2.9281,
      "step": 130
    },
    {
      "epoch": 5.848563968668407,
      "grad_norm": 0.19864186644554138,
      "learning_rate": 0.00023999999999999998,
      "loss": 2.9168,
      "step": 140
    },
    {
      "epoch": 6.266318537859008,
      "grad_norm": 0.22326301038265228,
      "learning_rate": 0.000225,
      "loss": 2.8549,
      "step": 150
    },
    {
      "epoch": 6.684073107049608,
      "grad_norm": 0.2200121283531189,
      "learning_rate": 0.00020999999999999998,
      "loss": 2.855,
      "step": 160
    },
    {
      "epoch": 7.101827676240209,
      "grad_norm": 0.2546086311340332,
      "learning_rate": 0.000195,
      "loss": 2.8509,
      "step": 170
    },
    {
      "epoch": 7.51958224543081,
      "grad_norm": 0.26345309615135193,
      "learning_rate": 0.00017999999999999998,
      "loss": 2.8144,
      "step": 180
    },
    {
      "epoch": 7.93733681462141,
      "grad_norm": 0.21533280611038208,
      "learning_rate": 0.000165,
      "loss": 2.8006,
      "step": 190
    },
    {
      "epoch": 8.35509138381201,
      "grad_norm": 0.2510657012462616,
      "learning_rate": 0.00015,
      "loss": 2.7816,
      "step": 200
    }
  ],
  "logging_steps": 10,
  "max_steps": 300,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 14,
  "save_steps": 100,
  "total_flos": 3.235965641452339e+16,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}