{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.2222222222222223,
  "eval_steps": 500,
  "global_step": 1500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 1.6574764619767666,
      "epoch": 0.07407407407407407,
      "grad_norm": 0.6482250690460205,
      "learning_rate": 8.032786885245902e-05,
      "loss": 1.4512,
      "mean_token_accuracy": 0.6979378089308739,
      "num_tokens": 408597.0,
      "step": 50
    },
    {
      "entropy": 0.38226877618581057,
      "epoch": 0.14814814814814814,
      "grad_norm": 0.2513386011123657,
      "learning_rate": 9.990765991730485e-05,
      "loss": 0.3105,
      "mean_token_accuracy": 0.92415526881814,
      "num_tokens": 817567.0,
      "step": 100
    },
    {
      "entropy": 0.16286218419671059,
      "epoch": 0.2222222222222222,
      "grad_norm": 0.2887406051158905,
      "learning_rate": 9.950545603782162e-05,
      "loss": 0.1493,
      "mean_token_accuracy": 0.9622085580229759,
      "num_tokens": 1226265.0,
      "step": 150
    },
    {
      "entropy": 0.12771075297147036,
      "epoch": 0.2962962962962963,
      "grad_norm": 0.15249371528625488,
      "learning_rate": 9.878674879048427e-05,
      "loss": 0.1177,
      "mean_token_accuracy": 0.9713320073485374,
      "num_tokens": 1635020.0,
      "step": 200
    },
    {
      "entropy": 0.1111781226657331,
      "epoch": 0.37037037037037035,
      "grad_norm": 0.18299971520900726,
      "learning_rate": 9.775613308830824e-05,
      "loss": 0.1014,
      "mean_token_accuracy": 0.9749903282523156,
      "num_tokens": 2044395.0,
      "step": 250
    },
    {
      "entropy": 0.09641788197681307,
      "epoch": 0.4444444444444444,
      "grad_norm": 0.13050603866577148,
      "learning_rate": 9.642019796948866e-05,
      "loss": 0.0915,
      "mean_token_accuracy": 0.977490707486868,
      "num_tokens": 2453123.0,
      "step": 300
    },
    {
      "entropy": 0.09188288618810475,
      "epoch": 0.5185185185185185,
      "grad_norm": 0.11683323234319687,
      "learning_rate": 9.478748447168449e-05,
      "loss": 0.0883,
      "mean_token_accuracy": 0.9783088724315167,
      "num_tokens": 2862139.0,
      "step": 350
    },
    {
      "entropy": 0.09006990000605583,
      "epoch": 0.5925925925925926,
      "grad_norm": 0.16509227454662323,
      "learning_rate": 9.28684310265789e-05,
      "loss": 0.0874,
      "mean_token_accuracy": 0.9784132435917854,
      "num_tokens": 3270859.0,
      "step": 400
    },
    {
      "entropy": 0.08776539071463048,
      "epoch": 0.6666666666666666,
      "grad_norm": 0.13524411618709564,
      "learning_rate": 9.067530672382544e-05,
      "loss": 0.0866,
      "mean_token_accuracy": 0.9787498603761197,
      "num_tokens": 3680243.0,
      "step": 450
    },
    {
      "entropy": 0.08806963111273944,
      "epoch": 0.7407407407407407,
      "grad_norm": 0.09137360751628876,
      "learning_rate": 8.822213287104348e-05,
      "loss": 0.086,
      "mean_token_accuracy": 0.9788302226364612,
      "num_tokens": 4089386.0,
      "step": 500
    },
    {
      "entropy": 0.08409048081375659,
      "epoch": 0.8148148148148148,
      "grad_norm": 0.18463748693466187,
      "learning_rate": 8.552459335135381e-05,
      "loss": 0.0831,
      "mean_token_accuracy": 0.9793832650780678,
      "num_tokens": 4498160.0,
      "step": 550
    },
    {
      "entropy": 0.08435689969919621,
      "epoch": 0.8888888888888888,
      "grad_norm": 0.09467488527297974,
      "learning_rate": 8.259993435156559e-05,
      "loss": 0.0831,
      "mean_token_accuracy": 0.9795491580665111,
      "num_tokens": 4907148.0,
      "step": 600
    },
    {
      "entropy": 0.08570322282612323,
      "epoch": 0.9629629629629629,
      "grad_norm": 0.11093126982450485,
      "learning_rate": 7.946685410208296e-05,
      "loss": 0.0844,
      "mean_token_accuracy": 0.9790063916146755,
      "num_tokens": 5315908.0,
      "step": 650
    },
    {
      "entropy": 0.08327227901667357,
      "epoch": 1.037037037037037,
      "grad_norm": 0.07517968118190765,
      "learning_rate": 7.614538333345735e-05,
      "loss": 0.0824,
      "mean_token_accuracy": 0.979544368237257,
      "num_tokens": 5724704.0,
      "step": 700
    },
    {
      "entropy": 0.08300686337985098,
      "epoch": 1.1111111111111112,
      "grad_norm": 0.0655263215303421,
      "learning_rate": 7.265675721386285e-05,
      "loss": 0.0818,
      "mean_token_accuracy": 0.9796155346930027,
      "num_tokens": 6133646.0,
      "step": 750
    },
    {
      "entropy": 0.08471525728702545,
      "epoch": 1.1851851851851851,
      "grad_norm": 0.063040591776371,
      "learning_rate": 6.902327958623736e-05,
      "loss": 0.0841,
      "mean_token_accuracy": 0.9790562556684017,
      "num_tokens": 6542365.0,
      "step": 800
    },
    {
      "entropy": 0.08242542044259608,
      "epoch": 1.2592592592592593,
      "grad_norm": 0.16829481720924377,
      "learning_rate": 6.526818037306228e-05,
      "loss": 0.0818,
      "mean_token_accuracy": 0.9798041236400604,
      "num_tokens": 6951319.0,
      "step": 850
    },
    {
      "entropy": 0.08193825788795948,
      "epoch": 1.3333333333333333,
      "grad_norm": 0.08218955248594284,
      "learning_rate": 6.14154670604355e-05,
      "loss": 0.0812,
      "mean_token_accuracy": 0.9799344435334205,
      "num_tokens": 7360266.0,
      "step": 900
    },
    {
      "entropy": 0.0820022343751043,
      "epoch": 1.4074074074074074,
      "grad_norm": 0.07167431712150574,
      "learning_rate": 5.7489771210944564e-05,
      "loss": 0.0814,
      "mean_token_accuracy": 0.979758190214634,
      "num_tokens": 7769643.0,
      "step": 950
    },
    {
      "entropy": 0.08256705107167363,
      "epoch": 1.4814814814814814,
      "grad_norm": 0.1688249707221985,
      "learning_rate": 5.351619098663021e-05,
      "loss": 0.0818,
      "mean_token_accuracy": 0.9795773278176785,
      "num_tokens": 8178399.0,
      "step": 1000
    },
    {
      "entropy": 0.08179685108363628,
      "epoch": 1.5555555555555556,
      "grad_norm": 0.06633222848176956,
      "learning_rate": 4.952013068883795e-05,
      "loss": 0.0814,
      "mean_token_accuracy": 0.979800873696804,
      "num_tokens": 8587121.0,
      "step": 1050
    },
    {
      "entropy": 0.08153917868621648,
      "epoch": 1.6296296296296298,
      "grad_norm": 0.07971940189599991,
      "learning_rate": 4.5527138340828776e-05,
      "loss": 0.0812,
      "mean_token_accuracy": 0.9798491597175598,
      "num_tokens": 8996096.0,
      "step": 1100
    },
    {
      "entropy": 0.0821349940635264,
      "epoch": 1.7037037037037037,
      "grad_norm": 0.0924430713057518,
      "learning_rate": 4.156274235153189e-05,
      "loss": 0.0815,
      "mean_token_accuracy": 0.9795667895674706,
      "num_tokens": 9404772.0,
      "step": 1150
    },
    {
      "entropy": 0.08265135439112782,
      "epoch": 1.7777777777777777,
      "grad_norm": 0.05620681121945381,
      "learning_rate": 3.765228830469794e-05,
      "loss": 0.082,
      "mean_token_accuracy": 0.9795762729644776,
      "num_tokens": 9813274.0,
      "step": 1200
    },
    {
      "entropy": 0.08118220455944539,
      "epoch": 1.8518518518518519,
      "grad_norm": 0.07570701092481613,
      "learning_rate": 3.3820776916908857e-05,
      "loss": 0.0809,
      "mean_token_accuracy": 0.9796575351059437,
      "num_tokens": 10222375.0,
      "step": 1250
    },
    {
      "entropy": 0.08083926324732602,
      "epoch": 1.925925925925926,
      "grad_norm": 0.060712188482284546,
      "learning_rate": 3.0092704200428058e-05,
      "loss": 0.0802,
      "mean_token_accuracy": 0.9798884338140488,
      "num_tokens": 10631168.0,
      "step": 1300
    },
    {
      "entropy": 0.080772600248456,
      "epoch": 2.0,
      "grad_norm": 0.07269751280546188,
      "learning_rate": 2.649190485277792e-05,
      "loss": 0.0796,
      "mean_token_accuracy": 0.9800221265852451,
      "num_tokens": 11040558.0,
      "step": 1350
    },
    {
      "entropy": 0.08113629410974682,
      "epoch": 2.074074074074074,
      "grad_norm": 0.062111906707286835,
      "learning_rate": 2.3041399874302905e-05,
      "loss": 0.081,
      "mean_token_accuracy": 0.979648227095604,
      "num_tokens": 11449495.0,
      "step": 1400
    },
    {
      "entropy": 0.08001561598852276,
      "epoch": 2.148148148148148,
      "grad_norm": 0.05966750532388687,
      "learning_rate": 1.976324938794482e-05,
      "loss": 0.0797,
      "mean_token_accuracy": 0.9802096697688103,
      "num_tokens": 11858437.0,
      "step": 1450
    },
    {
      "entropy": 0.08092246975749731,
      "epoch": 2.2222222222222223,
      "grad_norm": 0.18608996272087097,
      "learning_rate": 1.667841160219835e-05,
      "loss": 0.0801,
      "mean_token_accuracy": 0.980140280276537,
      "num_tokens": 12267300.0,
      "step": 1500
    }
  ],
  "logging_steps": 50,
  "max_steps": 2025,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.124378615404544e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}