{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.99601593625498,
  "eval_steps": 500,
  "global_step": 846,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.035413899955732624,
      "grad_norm": 3.6885943688286362,
      "learning_rate": 5e-06,
      "loss": 1.0358,
      "step": 10
    },
    {
      "epoch": 0.07082779991146525,
      "grad_norm": 1.1292210481988387,
      "learning_rate": 5e-06,
      "loss": 0.9008,
      "step": 20
    },
    {
      "epoch": 0.10624169986719788,
      "grad_norm": 1.0042513955043535,
      "learning_rate": 5e-06,
      "loss": 0.8649,
      "step": 30
    },
    {
      "epoch": 0.1416555998229305,
      "grad_norm": 0.9791530002656006,
      "learning_rate": 5e-06,
      "loss": 0.8405,
      "step": 40
    },
    {
      "epoch": 0.17706949977866313,
      "grad_norm": 1.199424761181154,
      "learning_rate": 5e-06,
      "loss": 0.8236,
      "step": 50
    },
    {
      "epoch": 0.21248339973439576,
      "grad_norm": 1.0089818619573485,
      "learning_rate": 5e-06,
      "loss": 0.8095,
      "step": 60
    },
    {
      "epoch": 0.2478972996901284,
      "grad_norm": 0.8718469920149639,
      "learning_rate": 5e-06,
      "loss": 0.7999,
      "step": 70
    },
    {
      "epoch": 0.283311199645861,
      "grad_norm": 1.054958976208734,
      "learning_rate": 5e-06,
      "loss": 0.7901,
      "step": 80
    },
    {
      "epoch": 0.3187250996015936,
      "grad_norm": 1.062126226641534,
      "learning_rate": 5e-06,
      "loss": 0.7828,
      "step": 90
    },
    {
      "epoch": 0.35413899955732625,
      "grad_norm": 0.8471991021726698,
      "learning_rate": 5e-06,
      "loss": 0.7756,
      "step": 100
    },
    {
      "epoch": 0.3895528995130589,
      "grad_norm": 0.7559286898403187,
      "learning_rate": 5e-06,
      "loss": 0.7751,
      "step": 110
    },
    {
      "epoch": 0.4249667994687915,
      "grad_norm": 0.8330643108897282,
      "learning_rate": 5e-06,
      "loss": 0.7744,
      "step": 120
    },
    {
      "epoch": 0.46038069942452414,
      "grad_norm": 0.6924155308030675,
      "learning_rate": 5e-06,
      "loss": 0.7663,
      "step": 130
    },
    {
      "epoch": 0.4957945993802568,
      "grad_norm": 0.7911653338151806,
      "learning_rate": 5e-06,
      "loss": 0.767,
      "step": 140
    },
    {
      "epoch": 0.5312084993359893,
      "grad_norm": 0.93677966176267,
      "learning_rate": 5e-06,
      "loss": 0.7677,
      "step": 150
    },
    {
      "epoch": 0.566622399291722,
      "grad_norm": 0.5703640067986906,
      "learning_rate": 5e-06,
      "loss": 0.7614,
      "step": 160
    },
    {
      "epoch": 0.6020362992474546,
      "grad_norm": 1.1349145203328255,
      "learning_rate": 5e-06,
      "loss": 0.7642,
      "step": 170
    },
    {
      "epoch": 0.6374501992031872,
      "grad_norm": 0.5934664324102484,
      "learning_rate": 5e-06,
      "loss": 0.7567,
      "step": 180
    },
    {
      "epoch": 0.6728640991589199,
      "grad_norm": 0.9117238778597279,
      "learning_rate": 5e-06,
      "loss": 0.7524,
      "step": 190
    },
    {
      "epoch": 0.7082779991146525,
      "grad_norm": 0.6262203116435334,
      "learning_rate": 5e-06,
      "loss": 0.7572,
      "step": 200
    },
    {
      "epoch": 0.7436918990703851,
      "grad_norm": 0.6120576759661753,
      "learning_rate": 5e-06,
      "loss": 0.7555,
      "step": 210
    },
    {
      "epoch": 0.7791057990261178,
      "grad_norm": 0.9483891667337354,
      "learning_rate": 5e-06,
      "loss": 0.7523,
      "step": 220
    },
    {
      "epoch": 0.8145196989818504,
      "grad_norm": 0.6447490106145711,
      "learning_rate": 5e-06,
      "loss": 0.7503,
      "step": 230
    },
    {
      "epoch": 0.849933598937583,
      "grad_norm": 0.6971087167378522,
      "learning_rate": 5e-06,
      "loss": 0.7479,
      "step": 240
    },
    {
      "epoch": 0.8853474988933157,
      "grad_norm": 0.6593317067328233,
      "learning_rate": 5e-06,
      "loss": 0.751,
      "step": 250
    },
    {
      "epoch": 0.9207613988490483,
      "grad_norm": 0.9848005930026295,
      "learning_rate": 5e-06,
      "loss": 0.7492,
      "step": 260
    },
    {
      "epoch": 0.9561752988047809,
      "grad_norm": 0.6554237313724047,
      "learning_rate": 5e-06,
      "loss": 0.7458,
      "step": 270
    },
    {
      "epoch": 0.9915891987605135,
      "grad_norm": 0.6234170625013281,
      "learning_rate": 5e-06,
      "loss": 0.743,
      "step": 280
    },
    {
      "epoch": 0.99867197875166,
      "eval_loss": 0.7379571199417114,
      "eval_runtime": 300.016,
      "eval_samples_per_second": 25.365,
      "eval_steps_per_second": 0.397,
      "step": 282
    },
    {
      "epoch": 1.0270030987162462,
      "grad_norm": 0.9058803003077476,
      "learning_rate": 5e-06,
      "loss": 0.7573,
      "step": 290
    },
    {
      "epoch": 1.0624169986719787,
      "grad_norm": 0.8196159008408804,
      "learning_rate": 5e-06,
      "loss": 0.6946,
      "step": 300
    },
    {
      "epoch": 1.0978308986277114,
      "grad_norm": 0.6484234307391061,
      "learning_rate": 5e-06,
      "loss": 0.702,
      "step": 310
    },
    {
      "epoch": 1.133244798583444,
      "grad_norm": 0.7262607987594465,
      "learning_rate": 5e-06,
      "loss": 0.6964,
      "step": 320
    },
    {
      "epoch": 1.1686586985391767,
      "grad_norm": 0.6727235471396565,
      "learning_rate": 5e-06,
      "loss": 0.6942,
      "step": 330
    },
    {
      "epoch": 1.2040725984949092,
      "grad_norm": 0.6293915888100363,
      "learning_rate": 5e-06,
      "loss": 0.6962,
      "step": 340
    },
    {
      "epoch": 1.239486498450642,
      "grad_norm": 0.6446128174628161,
      "learning_rate": 5e-06,
      "loss": 0.6908,
      "step": 350
    },
    {
      "epoch": 1.2749003984063745,
      "grad_norm": 0.59705108673909,
      "learning_rate": 5e-06,
      "loss": 0.6953,
      "step": 360
    },
    {
      "epoch": 1.310314298362107,
      "grad_norm": 0.5513995385623432,
      "learning_rate": 5e-06,
      "loss": 0.6944,
      "step": 370
    },
    {
      "epoch": 1.3457281983178397,
      "grad_norm": 0.6549967871007819,
      "learning_rate": 5e-06,
      "loss": 0.6958,
      "step": 380
    },
    {
      "epoch": 1.3811420982735725,
      "grad_norm": 0.6816706733447621,
      "learning_rate": 5e-06,
      "loss": 0.6916,
      "step": 390
    },
    {
      "epoch": 1.416555998229305,
      "grad_norm": 0.7489323214876235,
      "learning_rate": 5e-06,
      "loss": 0.6931,
      "step": 400
    },
    {
      "epoch": 1.4519698981850375,
      "grad_norm": 0.5909372675521306,
      "learning_rate": 5e-06,
      "loss": 0.6942,
      "step": 410
    },
    {
      "epoch": 1.4873837981407703,
      "grad_norm": 0.6172076369672456,
      "learning_rate": 5e-06,
      "loss": 0.6958,
      "step": 420
    },
    {
      "epoch": 1.522797698096503,
      "grad_norm": 0.6675730573535534,
      "learning_rate": 5e-06,
      "loss": 0.6942,
      "step": 430
    },
    {
      "epoch": 1.5582115980522355,
      "grad_norm": 0.8303140429090745,
      "learning_rate": 5e-06,
      "loss": 0.6977,
      "step": 440
    },
    {
      "epoch": 1.593625498007968,
      "grad_norm": 0.9264814000797602,
      "learning_rate": 5e-06,
      "loss": 0.6895,
      "step": 450
    },
    {
      "epoch": 1.6290393979637008,
      "grad_norm": 0.6833603350918256,
      "learning_rate": 5e-06,
      "loss": 0.6953,
      "step": 460
    },
    {
      "epoch": 1.6644532979194335,
      "grad_norm": 0.5496598280997902,
      "learning_rate": 5e-06,
      "loss": 0.6928,
      "step": 470
    },
    {
      "epoch": 1.699867197875166,
      "grad_norm": 0.6230148229630972,
      "learning_rate": 5e-06,
      "loss": 0.695,
      "step": 480
    },
    {
      "epoch": 1.7352810978308986,
      "grad_norm": 0.594432061780042,
      "learning_rate": 5e-06,
      "loss": 0.6967,
      "step": 490
    },
    {
      "epoch": 1.7706949977866313,
      "grad_norm": 0.6575561736689409,
      "learning_rate": 5e-06,
      "loss": 0.6895,
      "step": 500
    },
    {
      "epoch": 1.8061088977423638,
      "grad_norm": 0.6032506999721393,
      "learning_rate": 5e-06,
      "loss": 0.6925,
      "step": 510
    },
    {
      "epoch": 1.8415227976980963,
      "grad_norm": 0.6257609214237119,
      "learning_rate": 5e-06,
      "loss": 0.6852,
      "step": 520
    },
    {
      "epoch": 1.876936697653829,
      "grad_norm": 0.5888797849432607,
      "learning_rate": 5e-06,
      "loss": 0.6906,
      "step": 530
    },
    {
      "epoch": 1.9123505976095618,
      "grad_norm": 0.696420169935596,
      "learning_rate": 5e-06,
      "loss": 0.6943,
      "step": 540
    },
    {
      "epoch": 1.9477644975652944,
      "grad_norm": 0.6937852142616266,
      "learning_rate": 5e-06,
      "loss": 0.6913,
      "step": 550
    },
    {
      "epoch": 1.9831783975210269,
      "grad_norm": 0.6596923964753812,
      "learning_rate": 5e-06,
      "loss": 0.6929,
      "step": 560
    },
    {
      "epoch": 1.99734395750332,
      "eval_loss": 0.7246915698051453,
      "eval_runtime": 300.9531,
      "eval_samples_per_second": 25.286,
      "eval_steps_per_second": 0.395,
      "step": 564
    },
    {
      "epoch": 2.0185922974767596,
      "grad_norm": 0.9940799787486152,
      "learning_rate": 5e-06,
      "loss": 0.7115,
      "step": 570
    },
    {
      "epoch": 2.0540061974324924,
      "grad_norm": 0.8096217900242528,
      "learning_rate": 5e-06,
      "loss": 0.6411,
      "step": 580
    },
    {
      "epoch": 2.089420097388225,
      "grad_norm": 0.6501863394387699,
      "learning_rate": 5e-06,
      "loss": 0.6429,
      "step": 590
    },
    {
      "epoch": 2.1248339973439574,
      "grad_norm": 0.6122196411062536,
      "learning_rate": 5e-06,
      "loss": 0.6386,
      "step": 600
    },
    {
      "epoch": 2.16024789729969,
      "grad_norm": 0.7874769344509056,
      "learning_rate": 5e-06,
      "loss": 0.6393,
      "step": 610
    },
    {
      "epoch": 2.195661797255423,
      "grad_norm": 0.7662409953157251,
      "learning_rate": 5e-06,
      "loss": 0.6446,
      "step": 620
    },
    {
      "epoch": 2.231075697211155,
      "grad_norm": 0.6486701532672472,
      "learning_rate": 5e-06,
      "loss": 0.6435,
      "step": 630
    },
    {
      "epoch": 2.266489597166888,
      "grad_norm": 0.7974784537301154,
      "learning_rate": 5e-06,
      "loss": 0.6403,
      "step": 640
    },
    {
      "epoch": 2.3019034971226207,
      "grad_norm": 0.7420290418626383,
      "learning_rate": 5e-06,
      "loss": 0.6449,
      "step": 650
    },
    {
      "epoch": 2.3373173970783534,
      "grad_norm": 0.6370830145933201,
      "learning_rate": 5e-06,
      "loss": 0.6403,
      "step": 660
    },
    {
      "epoch": 2.3727312970340857,
      "grad_norm": 0.7679039868432358,
      "learning_rate": 5e-06,
      "loss": 0.6438,
      "step": 670
    },
    {
      "epoch": 2.4081451969898184,
      "grad_norm": 0.6382655087029674,
      "learning_rate": 5e-06,
      "loss": 0.6421,
      "step": 680
    },
    {
      "epoch": 2.443559096945551,
      "grad_norm": 0.5885244385610666,
      "learning_rate": 5e-06,
      "loss": 0.6412,
      "step": 690
    },
    {
      "epoch": 2.478972996901284,
      "grad_norm": 0.6582698831139674,
      "learning_rate": 5e-06,
      "loss": 0.6428,
      "step": 700
    },
    {
      "epoch": 2.514386896857016,
      "grad_norm": 0.7037470022291554,
      "learning_rate": 5e-06,
      "loss": 0.6438,
      "step": 710
    },
    {
      "epoch": 2.549800796812749,
      "grad_norm": 1.0213076779608703,
      "learning_rate": 5e-06,
      "loss": 0.6483,
      "step": 720
    },
    {
      "epoch": 2.5852146967684817,
      "grad_norm": 0.5676691827731816,
      "learning_rate": 5e-06,
      "loss": 0.6389,
      "step": 730
    },
    {
      "epoch": 2.620628596724214,
      "grad_norm": 0.6890989468656522,
      "learning_rate": 5e-06,
      "loss": 0.6452,
      "step": 740
    },
    {
      "epoch": 2.6560424966799467,
      "grad_norm": 0.6987502593543923,
      "learning_rate": 5e-06,
      "loss": 0.6444,
      "step": 750
    },
    {
      "epoch": 2.6914563966356795,
      "grad_norm": 0.7481889837431972,
      "learning_rate": 5e-06,
      "loss": 0.6466,
      "step": 760
    },
    {
      "epoch": 2.7268702965914122,
      "grad_norm": 0.6034360676734497,
      "learning_rate": 5e-06,
      "loss": 0.6457,
      "step": 770
    },
    {
      "epoch": 2.762284196547145,
      "grad_norm": 0.7387667210048486,
      "learning_rate": 5e-06,
      "loss": 0.6434,
      "step": 780
    },
    {
      "epoch": 2.7976980965028773,
      "grad_norm": 0.8273606923824004,
      "learning_rate": 5e-06,
      "loss": 0.6416,
      "step": 790
    },
    {
      "epoch": 2.83311199645861,
      "grad_norm": 0.711116483296859,
      "learning_rate": 5e-06,
      "loss": 0.646,
      "step": 800
    },
    {
      "epoch": 2.8685258964143427,
      "grad_norm": 0.7055181130205083,
      "learning_rate": 5e-06,
      "loss": 0.6423,
      "step": 810
    },
    {
      "epoch": 2.903939796370075,
      "grad_norm": 0.6391999459462019,
      "learning_rate": 5e-06,
      "loss": 0.647,
      "step": 820
    },
    {
      "epoch": 2.939353696325808,
      "grad_norm": 0.5945789434525439,
      "learning_rate": 5e-06,
      "loss": 0.6494,
      "step": 830
    },
    {
      "epoch": 2.9747675962815405,
      "grad_norm": 0.6457259379303459,
      "learning_rate": 5e-06,
      "loss": 0.6454,
      "step": 840
    },
    {
      "epoch": 2.99601593625498,
      "eval_loss": 0.7258848547935486,
      "eval_runtime": 300.4749,
      "eval_samples_per_second": 25.327,
      "eval_steps_per_second": 0.396,
      "step": 846
    },
    {
      "epoch": 2.99601593625498,
      "step": 846,
      "total_flos": 1416872129986560.0,
      "train_loss": 0.7094193071338302,
      "train_runtime": 50264.9892,
      "train_samples_per_second": 8.629,
      "train_steps_per_second": 0.017
    }
  ],
  "logging_steps": 10,
  "max_steps": 846,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 1416872129986560.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}