{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 1.6480721473693847,
      "epoch": 0.05,
      "grad_norm": 27.1102352142334,
      "learning_rate": 8e-05,
      "loss": 1.8005,
      "mean_token_accuracy": 0.5941327238082885,
      "num_tokens": 9693.0,
      "step": 25
    },
    {
      "entropy": 1.3281144857406617,
      "epoch": 0.1,
      "grad_norm": 9.47355842590332,
      "learning_rate": 0.00018,
      "loss": 1.2936,
      "mean_token_accuracy": 0.6876855921745301,
      "num_tokens": 19797.0,
      "step": 50
    },
    {
      "entropy": 1.2546486043930054,
      "epoch": 0.15,
      "grad_norm": 7.4732513427734375,
      "learning_rate": 0.00019902680687415705,
      "loss": 1.2329,
      "mean_token_accuracy": 0.691328091621399,
      "num_tokens": 28636.0,
      "step": 75
    },
    {
      "entropy": 1.1987505912780763,
      "epoch": 0.2,
      "grad_norm": 10.677826881408691,
      "learning_rate": 0.00019510565162951537,
      "loss": 1.165,
      "mean_token_accuracy": 0.7062379860877991,
      "num_tokens": 38194.0,
      "step": 100
    },
    {
      "entropy": 1.2390620636940002,
      "epoch": 0.25,
      "grad_norm": 8.650012016296387,
      "learning_rate": 0.00018829475928589271,
      "loss": 1.2323,
      "mean_token_accuracy": 0.686786060333252,
      "num_tokens": 49170.0,
      "step": 125
    },
    {
      "entropy": 1.2445489859580994,
      "epoch": 0.3,
      "grad_norm": 6.545881748199463,
      "learning_rate": 0.00017880107536067218,
      "loss": 1.233,
      "mean_token_accuracy": 0.6857695412635804,
      "num_tokens": 58907.0,
      "step": 150
    },
    {
      "entropy": 1.2420870399475097,
      "epoch": 0.35,
      "grad_norm": 24.16084098815918,
      "learning_rate": 0.00016743023875837233,
      "loss": 1.2297,
      "mean_token_accuracy": 0.6925088000297547,
      "num_tokens": 68206.0,
      "step": 175
    },
    {
      "entropy": 1.2456481051445008,
      "epoch": 0.4,
      "grad_norm": 23.415494918823242,
      "learning_rate": 0.00015358267949789966,
      "loss": 1.2531,
      "mean_token_accuracy": 0.6884878659248352,
      "num_tokens": 77989.0,
      "step": 200
    },
    {
      "entropy": 1.2355119800567627,
      "epoch": 0.45,
      "grad_norm": 7.648189544677734,
      "learning_rate": 0.00013810703763502744,
      "loss": 1.2108,
      "mean_token_accuracy": 0.6975953841209411,
      "num_tokens": 87614.0,
      "step": 225
    },
    {
      "entropy": 1.2422665166854858,
      "epoch": 0.5,
      "grad_norm": 7.272965908050537,
      "learning_rate": 0.00012147353271670634,
      "loss": 1.2552,
      "mean_token_accuracy": 0.687971031665802,
      "num_tokens": 97427.0,
      "step": 250
    },
    {
      "entropy": 1.2536789083480835,
      "epoch": 0.55,
      "grad_norm": 7.070037364959717,
      "learning_rate": 0.00010418756537291996,
      "loss": 1.2526,
      "mean_token_accuracy": 0.6898462009429932,
      "num_tokens": 107088.0,
      "step": 275
    },
    {
      "entropy": 1.2259885239601136,
      "epoch": 0.6,
      "grad_norm": 13.018112182617188,
      "learning_rate": 8.677436097428775e-05,
      "loss": 1.2247,
      "mean_token_accuracy": 0.6857342338562011,
      "num_tokens": 116177.0,
      "step": 300
    },
    {
      "entropy": 1.2294299459457398,
      "epoch": 0.65,
      "grad_norm": 7.353261947631836,
      "learning_rate": 6.976301092495556e-05,
      "loss": 1.2238,
      "mean_token_accuracy": 0.6885941863059998,
      "num_tokens": 126328.0,
      "step": 325
    },
    {
      "entropy": 1.2531250643730163,
      "epoch": 0.7,
      "grad_norm": 7.604862689971924,
      "learning_rate": 5.3670396488013854e-05,
      "loss": 1.2219,
      "mean_token_accuracy": 0.6879838514328003,
      "num_tokens": 135285.0,
      "step": 350
    },
    {
      "entropy": 1.2012500953674317,
      "epoch": 0.75,
      "grad_norm": 7.651504039764404,
      "learning_rate": 3.8985483609873244e-05,
      "loss": 1.2111,
      "mean_token_accuracy": 0.6910693979263306,
      "num_tokens": 145111.0,
      "step": 375
    },
    {
      "entropy": 1.2419355368614198,
      "epoch": 0.8,
      "grad_norm": 8.24601936340332,
      "learning_rate": 2.615446593741161e-05,
      "loss": 1.2345,
      "mean_token_accuracy": 0.685720636844635,
      "num_tokens": 155061.0,
      "step": 400
    },
    {
      "entropy": 1.2203574657440186,
      "epoch": 0.85,
      "grad_norm": 5.648503303527832,
      "learning_rate": 1.5567207449798515e-05,
      "loss": 1.1986,
      "mean_token_accuracy": 0.6945811367034912,
      "num_tokens": 164613.0,
      "step": 425
    },
    {
      "entropy": 1.1166097807884217,
      "epoch": 0.9,
      "grad_norm": 7.915076732635498,
      "learning_rate": 7.545396638768698e-06,
      "loss": 1.1173,
      "mean_token_accuracy": 0.7110410404205322,
      "num_tokens": 174362.0,
      "step": 450
    },
    {
      "entropy": 1.2107400488853455,
      "epoch": 0.95,
      "grad_norm": 8.340585708618164,
      "learning_rate": 2.332772166583208e-06,
      "loss": 1.1966,
      "mean_token_accuracy": 0.7019165325164795,
      "num_tokens": 183648.0,
      "step": 475
    },
    {
      "entropy": 1.2142256331443786,
      "epoch": 1.0,
      "grad_norm": 8.785581588745117,
      "learning_rate": 8.771699011416168e-08,
      "loss": 1.1894,
      "mean_token_accuracy": 0.7058351802825927,
      "num_tokens": 193469.0,
      "step": 500
    }
  ],
  "logging_steps": 25,
  "max_steps": 500,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2045157535088640.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}