{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.05115089514066496,
  "eval_steps": 500,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 3.2780354261398315,
      "epoch": 0.005115089514066497,
      "grad_norm": 25.268398857866387,
      "learning_rate": 7.666098807495741e-07,
      "loss": 2.5805,
      "mean_token_accuracy": 0.44439449459314345,
      "num_tokens": 201201.0,
      "step": 10
    },
    {
      "entropy": 3.270029139518738,
      "epoch": 0.010230179028132993,
      "grad_norm": 61.66870761901112,
      "learning_rate": 1.6183986371379898e-06,
      "loss": 2.6136,
      "mean_token_accuracy": 0.4849040985107422,
      "num_tokens": 404577.0,
      "step": 20
    },
    {
      "entropy": 3.303619647026062,
      "epoch": 0.015345268542199489,
      "grad_norm": 85.93666778656973,
      "learning_rate": 2.4701873935264056e-06,
      "loss": 2.6838,
      "mean_token_accuracy": 0.4358963340520859,
      "num_tokens": 609983.0,
      "step": 30
    },
    {
      "entropy": 3.291827130317688,
      "epoch": 0.020460358056265986,
      "grad_norm": 15.511161480666924,
      "learning_rate": 3.321976149914821e-06,
      "loss": 1.8307,
      "mean_token_accuracy": 0.5674405485391617,
      "num_tokens": 802203.0,
      "step": 40
    },
    {
      "entropy": 3.367189645767212,
      "epoch": 0.02557544757033248,
      "grad_norm": 14.365831800288149,
      "learning_rate": 4.173764906303237e-06,
      "loss": 1.2772,
      "mean_token_accuracy": 0.6300624251365662,
      "num_tokens": 1024749.0,
      "step": 50
    },
    {
      "entropy": 3.36285605430603,
      "epoch": 0.030690537084398978,
      "grad_norm": 33.84168925997731,
      "learning_rate": 5.025553662691653e-06,
      "loss": 0.9931,
      "mean_token_accuracy": 0.7703894019126892,
      "num_tokens": 1242772.0,
      "step": 60
    },
    {
      "entropy": 3.3700726509094237,
      "epoch": 0.03580562659846547,
      "grad_norm": 62.73563012853256,
      "learning_rate": 5.877342419080068e-06,
      "loss": 1.1674,
      "mean_token_accuracy": 0.74255530834198,
      "num_tokens": 1458515.0,
      "step": 70
    },
    {
      "entropy": 3.516653847694397,
      "epoch": 0.04092071611253197,
      "grad_norm": 3.5686450164550396,
      "learning_rate": 6.7291311754684835e-06,
      "loss": 1.0982,
      "mean_token_accuracy": 0.7218687653541564,
      "num_tokens": 1655834.0,
      "step": 80
    },
    {
      "entropy": 3.517510437965393,
      "epoch": 0.04603580562659847,
      "grad_norm": 22.108671955044745,
      "learning_rate": 7.5809199318569e-06,
      "loss": 0.8307,
      "mean_token_accuracy": 0.771921706199646,
      "num_tokens": 1871549.0,
      "step": 90
    },
    {
      "entropy": 3.653357815742493,
      "epoch": 0.05115089514066496,
      "grad_norm": 21.61728498405485,
      "learning_rate": 8.432708688245315e-06,
      "loss": 0.9384,
      "mean_token_accuracy": 0.7474159002304077,
      "num_tokens": 2079472.0,
      "step": 100
    }
  ],
  "logging_steps": 10,
  "max_steps": 5865,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 100,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 213113167151104.0,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}