{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 339,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 0.3933267489075661,
      "epoch": 0.08888888888888889,
      "grad_norm": 19.091772079467773,
      "learning_rate": 2e-05,
      "loss": 4.2829,
      "mean_token_accuracy": 0.5681655570864678,
      "num_tokens": 29409.0,
      "step": 10
    },
    {
      "entropy": 0.7650148034095764,
      "epoch": 0.17777777777777778,
      "grad_norm": 3.8214948177337646,
      "learning_rate": 2e-05,
      "loss": 1.9461,
      "mean_token_accuracy": 0.6879387736320496,
      "num_tokens": 58827.0,
      "step": 20
    },
    {
      "entropy": 1.1828202903270721,
      "epoch": 0.26666666666666666,
      "grad_norm": 2.6364810466766357,
      "learning_rate": 2e-05,
      "loss": 1.4227,
      "mean_token_accuracy": 0.7338245347142219,
      "num_tokens": 88299.0,
      "step": 30
    },
    {
      "entropy": 1.134617891907692,
      "epoch": 0.35555555555555557,
      "grad_norm": 1.9795665740966797,
      "learning_rate": 2e-05,
      "loss": 1.1307,
      "mean_token_accuracy": 0.7887017637491226,
      "num_tokens": 117759.0,
      "step": 40
    },
    {
      "entropy": 0.877768449485302,
      "epoch": 0.4444444444444444,
      "grad_norm": 1.8397494554519653,
      "learning_rate": 2e-05,
      "loss": 0.8535,
      "mean_token_accuracy": 0.8351033940911293,
      "num_tokens": 147140.0,
      "step": 50
    },
    {
      "entropy": 0.587233804166317,
      "epoch": 0.5333333333333333,
      "grad_norm": 1.7626832723617554,
      "learning_rate": 2e-05,
      "loss": 0.5781,
      "mean_token_accuracy": 0.8860435307025909,
      "num_tokens": 176659.0,
      "step": 60
    },
    {
      "entropy": 0.3405880033969879,
      "epoch": 0.6222222222222222,
      "grad_norm": 1.520534634590149,
      "learning_rate": 2e-05,
      "loss": 0.3419,
      "mean_token_accuracy": 0.9315642505884171,
      "num_tokens": 206147.0,
      "step": 70
    },
    {
      "entropy": 0.19235755391418935,
      "epoch": 0.7111111111111111,
      "grad_norm": 1.268977403640747,
      "learning_rate": 2e-05,
      "loss": 0.1858,
      "mean_token_accuracy": 0.9728681713342666,
      "num_tokens": 235603.0,
      "step": 80
    },
    {
      "entropy": 0.11804858762770891,
      "epoch": 0.8,
      "grad_norm": 0.781975269317627,
      "learning_rate": 2e-05,
      "loss": 0.1064,
      "mean_token_accuracy": 0.9887924045324326,
      "num_tokens": 265047.0,
      "step": 90
    },
    {
      "entropy": 0.09983876422047615,
      "epoch": 0.8888888888888888,
      "grad_norm": 0.4874080419540405,
      "learning_rate": 2e-05,
      "loss": 0.0815,
      "mean_token_accuracy": 0.9895498856902123,
      "num_tokens": 294524.0,
      "step": 100
    },
    {
      "entropy": 0.08479245882481337,
      "epoch": 0.9777777777777777,
      "grad_norm": 0.3734425902366638,
      "learning_rate": 2e-05,
      "loss": 0.0768,
      "mean_token_accuracy": 0.9904760375618935,
      "num_tokens": 324032.0,
      "step": 110
    },
    {
      "entropy": 0.07308715611304108,
      "epoch": 1.0622222222222222,
      "grad_norm": 0.37292563915252686,
      "learning_rate": 2e-05,
      "loss": 0.0646,
      "mean_token_accuracy": 0.9918417679636102,
      "num_tokens": 351993.0,
      "step": 120
    },
    {
      "entropy": 0.06726833172142506,
      "epoch": 1.1511111111111112,
      "grad_norm": 0.5837728977203369,
      "learning_rate": 2e-05,
      "loss": 0.0625,
      "mean_token_accuracy": 0.9917700842022896,
      "num_tokens": 381506.0,
      "step": 130
    },
    {
      "entropy": 0.06390567375347019,
      "epoch": 1.24,
      "grad_norm": 0.3549947738647461,
      "learning_rate": 2e-05,
      "loss": 0.0606,
      "mean_token_accuracy": 0.9916005581617355,
      "num_tokens": 411017.0,
      "step": 140
    },
    {
      "entropy": 0.06265801694244147,
      "epoch": 1.3288888888888888,
      "grad_norm": 0.2998465597629547,
      "learning_rate": 2e-05,
      "loss": 0.0564,
      "mean_token_accuracy": 0.9918477952480316,
      "num_tokens": 440460.0,
      "step": 150
    },
    {
      "entropy": 0.060036468971520665,
      "epoch": 1.4177777777777778,
      "grad_norm": 0.5888973474502563,
      "learning_rate": 2e-05,
      "loss": 0.0557,
      "mean_token_accuracy": 0.9919296875596046,
      "num_tokens": 469970.0,
      "step": 160
    },
    {
      "entropy": 0.05825678938999772,
      "epoch": 1.5066666666666668,
      "grad_norm": 0.27591630816459656,
      "learning_rate": 2e-05,
      "loss": 0.0557,
      "mean_token_accuracy": 0.991797935962677,
      "num_tokens": 499471.0,
      "step": 170
    },
    {
      "entropy": 0.058804288040846586,
      "epoch": 1.5955555555555554,
      "grad_norm": 0.3655984401702881,
      "learning_rate": 2e-05,
      "loss": 0.0537,
      "mean_token_accuracy": 0.9921618834137916,
      "num_tokens": 528959.0,
      "step": 180
    },
    {
      "entropy": 0.05707454737275839,
      "epoch": 1.6844444444444444,
      "grad_norm": 0.3361314535140991,
      "learning_rate": 2e-05,
      "loss": 0.0514,
      "mean_token_accuracy": 0.9922538578510285,
      "num_tokens": 558384.0,
      "step": 190
    },
    {
      "entropy": 0.05885109649971128,
      "epoch": 1.7733333333333334,
      "grad_norm": 0.2926768958568573,
      "learning_rate": 2e-05,
      "loss": 0.0565,
      "mean_token_accuracy": 0.9916261032223701,
      "num_tokens": 587873.0,
      "step": 200
    },
    {
      "entropy": 0.05613070921972394,
      "epoch": 1.8622222222222222,
      "grad_norm": 0.2952570617198944,
      "learning_rate": 2e-05,
      "loss": 0.0495,
      "mean_token_accuracy": 0.9927162423729896,
      "num_tokens": 617263.0,
      "step": 210
    },
    {
      "entropy": 0.05452471813187003,
      "epoch": 1.951111111111111,
      "grad_norm": 0.2781422734260559,
      "learning_rate": 2e-05,
      "loss": 0.0503,
      "mean_token_accuracy": 0.9923128932714462,
      "num_tokens": 646673.0,
      "step": 220
    },
    {
      "entropy": 0.05329967241146063,
      "epoch": 2.0355555555555553,
      "grad_norm": 0.3413642942905426,
      "learning_rate": 2e-05,
      "loss": 0.0495,
      "mean_token_accuracy": 0.9925352836910047,
      "num_tokens": 674550.0,
      "step": 230
    },
    {
      "entropy": 0.055740222427994014,
      "epoch": 2.1244444444444444,
      "grad_norm": 0.3916967213153839,
      "learning_rate": 2e-05,
      "loss": 0.0494,
      "mean_token_accuracy": 0.992403993010521,
      "num_tokens": 704049.0,
      "step": 240
    },
    {
      "entropy": 0.05344773568212986,
      "epoch": 2.2133333333333334,
      "grad_norm": 0.41811126470565796,
      "learning_rate": 2e-05,
      "loss": 0.0458,
      "mean_token_accuracy": 0.9928064867854118,
      "num_tokens": 733527.0,
      "step": 250
    },
    {
      "entropy": 0.05095162307843566,
      "epoch": 2.3022222222222224,
      "grad_norm": 0.5080037117004395,
      "learning_rate": 2e-05,
      "loss": 0.0462,
      "mean_token_accuracy": 0.9926309958100319,
      "num_tokens": 762980.0,
      "step": 260
    },
    {
      "entropy": 0.05230198642238974,
      "epoch": 2.391111111111111,
      "grad_norm": 0.34135064482688904,
      "learning_rate": 2e-05,
      "loss": 0.047,
      "mean_token_accuracy": 0.9925719112157821,
      "num_tokens": 792457.0,
      "step": 270
    },
    {
      "entropy": 0.053009994141757485,
      "epoch": 2.48,
      "grad_norm": 0.261165976524353,
      "learning_rate": 2e-05,
      "loss": 0.044,
      "mean_token_accuracy": 0.9927724987268448,
      "num_tokens": 821928.0,
      "step": 280
    },
    {
      "entropy": 0.052336765173822644,
      "epoch": 2.568888888888889,
      "grad_norm": 0.2925412356853485,
      "learning_rate": 2e-05,
      "loss": 0.0459,
      "mean_token_accuracy": 0.9917180150747299,
      "num_tokens": 851523.0,
      "step": 290
    },
    {
      "entropy": 0.04876706637442112,
      "epoch": 2.6577777777777776,
      "grad_norm": 0.37404191493988037,
      "learning_rate": 2e-05,
      "loss": 0.0406,
      "mean_token_accuracy": 0.9927994713187218,
      "num_tokens": 880960.0,
      "step": 300
    },
    {
      "entropy": 0.051569243893027306,
      "epoch": 2.7466666666666666,
      "grad_norm": 0.33673104643821716,
      "learning_rate": 2e-05,
      "loss": 0.0408,
      "mean_token_accuracy": 0.9931992888450623,
      "num_tokens": 910363.0,
      "step": 310
    },
    {
      "entropy": 0.049042111821472646,
      "epoch": 2.8355555555555556,
      "grad_norm": 0.3380878269672394,
      "learning_rate": 2e-05,
      "loss": 0.0405,
      "mean_token_accuracy": 0.9932568341493606,
      "num_tokens": 939743.0,
      "step": 320
    },
    {
      "entropy": 0.04809595588594675,
      "epoch": 2.924444444444444,
      "grad_norm": 0.27717074751853943,
      "learning_rate": 2e-05,
      "loss": 0.0401,
      "mean_token_accuracy": 0.9928347066044807,
      "num_tokens": 969188.0,
      "step": 330
    }
  ],
  "logging_steps": 10,
  "max_steps": 904,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 8,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 621123938392320.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}