{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.4814814814814814,
  "eval_steps": 500,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 1.6574764619767666,
      "epoch": 0.07407407407407407,
      "grad_norm": 0.6482250690460205,
      "learning_rate": 8.032786885245902e-05,
      "loss": 1.4512,
      "mean_token_accuracy": 0.6979378089308739,
      "num_tokens": 408597.0,
      "step": 50
    },
    {
      "entropy": 0.38226877618581057,
      "epoch": 0.14814814814814814,
      "grad_norm": 0.2513386011123657,
      "learning_rate": 9.990765991730485e-05,
      "loss": 0.3105,
      "mean_token_accuracy": 0.92415526881814,
      "num_tokens": 817567.0,
      "step": 100
    },
    {
      "entropy": 0.16286218419671059,
      "epoch": 0.2222222222222222,
      "grad_norm": 0.2887406051158905,
      "learning_rate": 9.950545603782162e-05,
      "loss": 0.1493,
      "mean_token_accuracy": 0.9622085580229759,
      "num_tokens": 1226265.0,
      "step": 150
    },
    {
      "entropy": 0.12771075297147036,
      "epoch": 0.2962962962962963,
      "grad_norm": 0.15249371528625488,
      "learning_rate": 9.878674879048427e-05,
      "loss": 0.1177,
      "mean_token_accuracy": 0.9713320073485374,
      "num_tokens": 1635020.0,
      "step": 200
    },
    {
      "entropy": 0.1111781226657331,
      "epoch": 0.37037037037037035,
      "grad_norm": 0.18299971520900726,
      "learning_rate": 9.775613308830824e-05,
      "loss": 0.1014,
      "mean_token_accuracy": 0.9749903282523156,
      "num_tokens": 2044395.0,
      "step": 250
    },
    {
      "entropy": 0.09641788197681307,
      "epoch": 0.4444444444444444,
      "grad_norm": 0.13050603866577148,
      "learning_rate": 9.642019796948866e-05,
      "loss": 0.0915,
      "mean_token_accuracy": 0.977490707486868,
      "num_tokens": 2453123.0,
      "step": 300
    },
    {
      "entropy": 0.09188288618810475,
      "epoch": 0.5185185185185185,
      "grad_norm": 0.11683323234319687,
      "learning_rate": 9.478748447168449e-05,
      "loss": 0.0883,
      "mean_token_accuracy": 0.9783088724315167,
      "num_tokens": 2862139.0,
      "step": 350
    },
    {
      "entropy": 0.09006990000605583,
      "epoch": 0.5925925925925926,
      "grad_norm": 0.16509227454662323,
      "learning_rate": 9.28684310265789e-05,
      "loss": 0.0874,
      "mean_token_accuracy": 0.9784132435917854,
      "num_tokens": 3270859.0,
      "step": 400
    },
    {
      "entropy": 0.08776539071463048,
      "epoch": 0.6666666666666666,
      "grad_norm": 0.13524411618709564,
      "learning_rate": 9.067530672382544e-05,
      "loss": 0.0866,
      "mean_token_accuracy": 0.9787498603761197,
      "num_tokens": 3680243.0,
      "step": 450
    },
    {
      "entropy": 0.08806963111273944,
      "epoch": 0.7407407407407407,
      "grad_norm": 0.09137360751628876,
      "learning_rate": 8.822213287104348e-05,
      "loss": 0.086,
      "mean_token_accuracy": 0.9788302226364612,
      "num_tokens": 4089386.0,
      "step": 500
    },
    {
      "entropy": 0.08409048081375659,
      "epoch": 0.8148148148148148,
      "grad_norm": 0.18463748693466187,
      "learning_rate": 8.552459335135381e-05,
      "loss": 0.0831,
      "mean_token_accuracy": 0.9793832650780678,
      "num_tokens": 4498160.0,
      "step": 550
    },
    {
      "entropy": 0.08435689969919621,
      "epoch": 0.8888888888888888,
      "grad_norm": 0.09467488527297974,
      "learning_rate": 8.259993435156559e-05,
      "loss": 0.0831,
      "mean_token_accuracy": 0.9795491580665111,
      "num_tokens": 4907148.0,
      "step": 600
    },
    {
      "entropy": 0.08570322282612323,
      "epoch": 0.9629629629629629,
      "grad_norm": 0.11093126982450485,
      "learning_rate": 7.946685410208296e-05,
      "loss": 0.0844,
      "mean_token_accuracy": 0.9790063916146755,
      "num_tokens": 5315908.0,
      "step": 650
    },
    {
      "entropy": 0.08327227901667357,
      "epoch": 1.037037037037037,
      "grad_norm": 0.07517968118190765,
      "learning_rate": 7.614538333345735e-05,
      "loss": 0.0824,
      "mean_token_accuracy": 0.979544368237257,
      "num_tokens": 5724704.0,
      "step": 700
    },
    {
      "entropy": 0.08300686337985098,
      "epoch": 1.1111111111111112,
      "grad_norm": 0.0655263215303421,
      "learning_rate": 7.265675721386285e-05,
      "loss": 0.0818,
      "mean_token_accuracy": 0.9796155346930027,
      "num_tokens": 6133646.0,
      "step": 750
    },
    {
      "entropy": 0.08471525728702545,
      "epoch": 1.1851851851851851,
      "grad_norm": 0.063040591776371,
      "learning_rate": 6.902327958623736e-05,
      "loss": 0.0841,
      "mean_token_accuracy": 0.9790562556684017,
      "num_tokens": 6542365.0,
      "step": 800
    },
    {
      "entropy": 0.08242542044259608,
      "epoch": 1.2592592592592593,
      "grad_norm": 0.16829481720924377,
      "learning_rate": 6.526818037306228e-05,
      "loss": 0.0818,
      "mean_token_accuracy": 0.9798041236400604,
      "num_tokens": 6951319.0,
      "step": 850
    },
    {
      "entropy": 0.08193825788795948,
      "epoch": 1.3333333333333333,
      "grad_norm": 0.08218955248594284,
      "learning_rate": 6.14154670604355e-05,
      "loss": 0.0812,
      "mean_token_accuracy": 0.9799344435334205,
      "num_tokens": 7360266.0,
      "step": 900
    },
    {
      "entropy": 0.0820022343751043,
      "epoch": 1.4074074074074074,
      "grad_norm": 0.07167431712150574,
      "learning_rate": 5.7489771210944564e-05,
      "loss": 0.0814,
      "mean_token_accuracy": 0.979758190214634,
      "num_tokens": 7769643.0,
      "step": 950
    },
    {
      "entropy": 0.08256705107167363,
      "epoch": 1.4814814814814814,
      "grad_norm": 0.1688249707221985,
      "learning_rate": 5.351619098663021e-05,
      "loss": 0.0818,
      "mean_token_accuracy": 0.9795773278176785,
      "num_tokens": 8178399.0,
      "step": 1000
    }
  ],
  "logging_steps": 50,
  "max_steps": 2025,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 3.416335537880863e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}