{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.88,
  "eval_steps": 500,
  "global_step": 30,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.096,
      "grad_norm": 5.814257386763718,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 0.8387,
      "step": 1
    },
    {
      "epoch": 0.192,
      "grad_norm": 6.057112863362473,
      "learning_rate": 6.666666666666667e-06,
      "loss": 0.8858,
      "step": 2
    },
    {
      "epoch": 0.288,
      "grad_norm": 5.276795260814941,
      "learning_rate": 1e-05,
      "loss": 0.8479,
      "step": 3
    },
    {
      "epoch": 0.384,
      "grad_norm": 2.5116217593740937,
      "learning_rate": 9.966191788709716e-06,
      "loss": 0.8012,
      "step": 4
    },
    {
      "epoch": 0.48,
      "grad_norm": 4.289695557521304,
      "learning_rate": 9.86522435289912e-06,
      "loss": 0.7796,
      "step": 5
    },
    {
      "epoch": 0.576,
      "grad_norm": 4.704402355642819,
      "learning_rate": 9.698463103929542e-06,
      "loss": 0.7985,
      "step": 6
    },
    {
      "epoch": 0.672,
      "grad_norm": 5.0676661473868325,
      "learning_rate": 9.468163201617063e-06,
      "loss": 0.7476,
      "step": 7
    },
    {
      "epoch": 0.768,
      "grad_norm": 4.049515326251163,
      "learning_rate": 9.177439057064684e-06,
      "loss": 0.7624,
      "step": 8
    },
    {
      "epoch": 0.864,
      "grad_norm": 2.4656355618004198,
      "learning_rate": 8.83022221559489e-06,
      "loss": 0.752,
      "step": 9
    },
    {
      "epoch": 0.96,
      "grad_norm": 1.7973826091618461,
      "learning_rate": 8.43120818934367e-06,
      "loss": 0.7203,
      "step": 10
    },
    {
      "epoch": 1.056,
      "grad_norm": 2.5631593338110634,
      "learning_rate": 7.985792958513932e-06,
      "loss": 1.0135,
      "step": 11
    },
    {
      "epoch": 1.152,
      "grad_norm": 1.5065858129288634,
      "learning_rate": 7.500000000000001e-06,
      "loss": 0.6443,
      "step": 12
    },
    {
      "epoch": 1.248,
      "grad_norm": 1.0711886577452812,
      "learning_rate": 6.980398830195785e-06,
      "loss": 0.6762,
      "step": 13
    },
    {
      "epoch": 1.3439999999999999,
      "grad_norm": 0.9064356734426154,
      "learning_rate": 6.434016163555452e-06,
      "loss": 0.6513,
      "step": 14
    },
    {
      "epoch": 1.44,
      "grad_norm": 0.8737926206720971,
      "learning_rate": 5.8682408883346535e-06,
      "loss": 0.6318,
      "step": 15
    },
    {
      "epoch": 1.536,
      "grad_norm": 0.8225114336765397,
      "learning_rate": 5.290724144552379e-06,
      "loss": 0.5994,
      "step": 16
    },
    {
      "epoch": 1.6320000000000001,
      "grad_norm": 0.7178723068757664,
      "learning_rate": 4.7092758554476215e-06,
      "loss": 0.6365,
      "step": 17
    },
    {
      "epoch": 1.728,
      "grad_norm": 0.7287121159800549,
      "learning_rate": 4.131759111665349e-06,
      "loss": 0.5871,
      "step": 18
    },
    {
      "epoch": 1.8239999999999998,
      "grad_norm": 0.6335416231009494,
      "learning_rate": 3.5659838364445505e-06,
      "loss": 0.6292,
      "step": 19
    },
    {
      "epoch": 1.92,
      "grad_norm": 0.5476999099926617,
      "learning_rate": 3.019601169804216e-06,
      "loss": 0.5608,
      "step": 20
    },
    {
      "epoch": 2.016,
      "grad_norm": 0.9859250546500561,
      "learning_rate": 2.5000000000000015e-06,
      "loss": 0.955,
      "step": 21
    },
    {
      "epoch": 2.112,
      "grad_norm": 0.5566210559921874,
      "learning_rate": 2.0142070414860704e-06,
      "loss": 0.5769,
      "step": 22
    },
    {
      "epoch": 2.208,
      "grad_norm": 0.4550458467901826,
      "learning_rate": 1.5687918106563326e-06,
      "loss": 0.5528,
      "step": 23
    },
    {
      "epoch": 2.304,
      "grad_norm": 0.49952012286935354,
      "learning_rate": 1.1697777844051105e-06,
      "loss": 0.6085,
      "step": 24
    },
    {
      "epoch": 2.4,
      "grad_norm": 0.4493968503925793,
      "learning_rate": 8.225609429353187e-07,
      "loss": 0.5401,
      "step": 25
    },
    {
      "epoch": 2.496,
      "grad_norm": 0.5067436127469506,
      "learning_rate": 5.318367983829393e-07,
      "loss": 0.5971,
      "step": 26
    },
    {
      "epoch": 2.592,
      "grad_norm": 0.4734369814869573,
      "learning_rate": 3.015368960704584e-07,
      "loss": 0.613,
      "step": 27
    },
    {
      "epoch": 2.6879999999999997,
      "grad_norm": 0.4342131854666511,
      "learning_rate": 1.3477564710088097e-07,
      "loss": 0.5625,
      "step": 28
    },
    {
      "epoch": 2.784,
      "grad_norm": 0.4151771179874899,
      "learning_rate": 3.3808211290284886e-08,
      "loss": 0.539,
      "step": 29
    },
    {
      "epoch": 2.88,
      "grad_norm": 0.4349354473602356,
      "learning_rate": 0.0,
      "loss": 0.5994,
      "step": 30
    },
    {
      "epoch": 2.88,
      "step": 30,
      "total_flos": 5.991907970357658e+16,
      "train_loss": 0.6902768890062968,
      "train_runtime": 1865.0314,
      "train_samples_per_second": 1.602,
      "train_steps_per_second": 0.016
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 30,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 5.991907970357658e+16,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}