{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.7407407407407407,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 1.9540774886310102,
      "epoch": 0.07407407407407407,
      "grad_norm": 0.8241696953773499,
      "learning_rate": 8.032786885245902e-05,
      "loss": 1.7451,
      "mean_token_accuracy": 0.6189013833552599,
      "num_tokens": 379953.0,
      "step": 50
    },
    {
      "entropy": 0.5046265083923935,
      "epoch": 0.14814814814814814,
      "grad_norm": 4.424446105957031,
      "learning_rate": 9.990765991730485e-05,
      "loss": 0.4302,
      "mean_token_accuracy": 0.8861582314968109,
      "num_tokens": 757047.0,
      "step": 100
    },
    {
      "entropy": 0.24775007627904416,
      "epoch": 0.2222222222222222,
      "grad_norm": 0.30575165152549744,
      "learning_rate": 9.950545603782162e-05,
      "loss": 0.2323,
      "mean_token_accuracy": 0.940535937026143,
      "num_tokens": 1134378.0,
      "step": 150
    },
    {
      "entropy": 0.1925063591822982,
      "epoch": 0.2962962962962963,
      "grad_norm": 0.24072137475013733,
      "learning_rate": 9.878674879048427e-05,
      "loss": 0.1856,
      "mean_token_accuracy": 0.9529062640666962,
      "num_tokens": 1516152.0,
      "step": 200
    },
    {
      "entropy": 0.18322961997240783,
      "epoch": 0.37037037037037035,
      "grad_norm": 0.18854060769081116,
      "learning_rate": 9.775613308830824e-05,
      "loss": 0.1775,
      "mean_token_accuracy": 0.9546041788160801,
      "num_tokens": 1896494.0,
      "step": 250
    },
    {
      "entropy": 0.17187482433393597,
      "epoch": 0.4444444444444444,
      "grad_norm": 0.18019668757915497,
      "learning_rate": 9.642019796948866e-05,
      "loss": 0.164,
      "mean_token_accuracy": 0.9579361644387245,
      "num_tokens": 2277194.0,
      "step": 300
    },
    {
      "entropy": 0.16193925650790333,
      "epoch": 0.5185185185185185,
      "grad_norm": 0.23759332299232483,
      "learning_rate": 9.478748447168449e-05,
      "loss": 0.1539,
      "mean_token_accuracy": 0.9614081564545631,
      "num_tokens": 2658243.0,
      "step": 350
    },
    {
      "entropy": 0.15039873549714686,
      "epoch": 0.5925925925925926,
      "grad_norm": 0.2590758204460144,
      "learning_rate": 9.28684310265789e-05,
      "loss": 0.1443,
      "mean_token_accuracy": 0.9650989197194576,
      "num_tokens": 3041430.0,
      "step": 400
    },
    {
      "entropy": 0.14228019634261727,
      "epoch": 0.6666666666666666,
      "grad_norm": 0.18062612414360046,
      "learning_rate": 9.067530672382544e-05,
      "loss": 0.1366,
      "mean_token_accuracy": 0.9667340110242367,
      "num_tokens": 3422368.0,
      "step": 450
    },
    {
      "entropy": 0.1391275341436267,
      "epoch": 0.7407407407407407,
      "grad_norm": 0.1984509527683258,
      "learning_rate": 8.822213287104348e-05,
      "loss": 0.1352,
      "mean_token_accuracy": 0.9674024738371372,
      "num_tokens": 3801968.0,
      "step": 500
    }
  ],
  "logging_steps": 50,
  "max_steps": 2025,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.588183505388503e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}