{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 8.823529411764707,
  "eval_steps": 500,
  "global_step": 1500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.5882352941176471,
      "grad_norm": 0.6052117347717285,
      "learning_rate": 0.000941764705882353,
      "loss": 0.4992,
      "step": 100
    },
    {
      "epoch": 1.1764705882352942,
      "grad_norm": 0.9228031635284424,
      "learning_rate": 0.0008829411764705883,
      "loss": 0.3017,
      "step": 200
    },
    {
      "epoch": 1.7647058823529411,
      "grad_norm": 0.9054152965545654,
      "learning_rate": 0.0008241176470588235,
      "loss": 0.2262,
      "step": 300
    },
    {
      "epoch": 2.3529411764705883,
      "grad_norm": 1.4709264039993286,
      "learning_rate": 0.0007652941176470588,
      "loss": 0.2054,
      "step": 400
    },
    {
      "epoch": 2.9411764705882355,
      "grad_norm": 0.9707149267196655,
      "learning_rate": 0.0007064705882352941,
      "loss": 0.189,
      "step": 500
    },
    {
      "epoch": 3.5294117647058822,
      "grad_norm": 1.0779001712799072,
      "learning_rate": 0.0006476470588235295,
      "loss": 0.1946,
      "step": 600
    },
    {
      "epoch": 4.117647058823529,
      "grad_norm": 0.618778645992279,
      "learning_rate": 0.0005888235294117648,
      "loss": 0.1707,
      "step": 700
    },
    {
      "epoch": 4.705882352941177,
      "grad_norm": 0.7503094673156738,
      "learning_rate": 0.0005300000000000001,
      "loss": 0.1713,
      "step": 800
    },
    {
      "epoch": 5.294117647058823,
      "grad_norm": 0.9175742268562317,
      "learning_rate": 0.00047117647058823533,
      "loss": 0.1562,
      "step": 900
    },
    {
      "epoch": 5.882352941176471,
      "grad_norm": 1.0091431140899658,
      "learning_rate": 0.0004123529411764706,
      "loss": 0.1596,
      "step": 1000
    },
    {
      "epoch": 6.470588235294118,
      "grad_norm": 0.6883541941642761,
      "learning_rate": 0.0003535294117647059,
      "loss": 0.1435,
      "step": 1100
    },
    {
      "epoch": 7.0588235294117645,
      "grad_norm": 0.7159141898155212,
      "learning_rate": 0.0002947058823529412,
      "loss": 0.1385,
      "step": 1200
    },
    {
      "epoch": 7.647058823529412,
      "grad_norm": 0.5263181924819946,
      "learning_rate": 0.00023588235294117648,
      "loss": 0.1278,
      "step": 1300
    },
    {
      "epoch": 8.235294117647058,
      "grad_norm": 0.7766987681388855,
      "learning_rate": 0.00017705882352941178,
      "loss": 0.1137,
      "step": 1400
    },
    {
      "epoch": 8.823529411764707,
      "grad_norm": 0.5680007338523865,
      "learning_rate": 0.00011823529411764706,
      "loss": 0.109,
      "step": 1500
    }
  ],
  "logging_steps": 100,
  "max_steps": 1700,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 10,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 99203629449216.0,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}