{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.7407407407407407,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 1.6574764619767666,
      "epoch": 0.07407407407407407,
      "grad_norm": 0.6482250690460205,
      "learning_rate": 8.032786885245902e-05,
      "loss": 1.4512,
      "mean_token_accuracy": 0.6979378089308739,
      "num_tokens": 408597.0,
      "step": 50
    },
    {
      "entropy": 0.38226877618581057,
      "epoch": 0.14814814814814814,
      "grad_norm": 0.2513386011123657,
      "learning_rate": 9.990765991730485e-05,
      "loss": 0.3105,
      "mean_token_accuracy": 0.92415526881814,
      "num_tokens": 817567.0,
      "step": 100
    },
    {
      "entropy": 0.16286218419671059,
      "epoch": 0.2222222222222222,
      "grad_norm": 0.2887406051158905,
      "learning_rate": 9.950545603782162e-05,
      "loss": 0.1493,
      "mean_token_accuracy": 0.9622085580229759,
      "num_tokens": 1226265.0,
      "step": 150
    },
    {
      "entropy": 0.12771075297147036,
      "epoch": 0.2962962962962963,
      "grad_norm": 0.15249371528625488,
      "learning_rate": 9.878674879048427e-05,
      "loss": 0.1177,
      "mean_token_accuracy": 0.9713320073485374,
      "num_tokens": 1635020.0,
      "step": 200
    },
    {
      "entropy": 0.1111781226657331,
      "epoch": 0.37037037037037035,
      "grad_norm": 0.18299971520900726,
      "learning_rate": 9.775613308830824e-05,
      "loss": 0.1014,
      "mean_token_accuracy": 0.9749903282523156,
      "num_tokens": 2044395.0,
      "step": 250
    },
    {
      "entropy": 0.09641788197681307,
      "epoch": 0.4444444444444444,
      "grad_norm": 0.13050603866577148,
      "learning_rate": 9.642019796948866e-05,
      "loss": 0.0915,
      "mean_token_accuracy": 0.977490707486868,
      "num_tokens": 2453123.0,
      "step": 300
    },
    {
      "entropy": 0.09188288618810475,
      "epoch": 0.5185185185185185,
      "grad_norm": 0.11683323234319687,
      "learning_rate": 9.478748447168449e-05,
      "loss": 0.0883,
      "mean_token_accuracy": 0.9783088724315167,
      "num_tokens": 2862139.0,
      "step": 350
    },
    {
      "entropy": 0.09006990000605583,
      "epoch": 0.5925925925925926,
      "grad_norm": 0.16509227454662323,
      "learning_rate": 9.28684310265789e-05,
      "loss": 0.0874,
      "mean_token_accuracy": 0.9784132435917854,
      "num_tokens": 3270859.0,
      "step": 400
    },
    {
      "entropy": 0.08776539071463048,
      "epoch": 0.6666666666666666,
      "grad_norm": 0.13524411618709564,
      "learning_rate": 9.067530672382544e-05,
      "loss": 0.0866,
      "mean_token_accuracy": 0.9787498603761197,
      "num_tokens": 3680243.0,
      "step": 450
    },
    {
      "entropy": 0.08806963111273944,
      "epoch": 0.7407407407407407,
      "grad_norm": 0.09137360751628876,
      "learning_rate": 8.822213287104348e-05,
      "loss": 0.086,
      "mean_token_accuracy": 0.9788302226364612,
      "num_tokens": 4089386.0,
      "step": 500
    }
  ],
  "logging_steps": 50,
  "max_steps": 2025,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 1.708245674967982e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}