{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.962962962962963,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 1.9540774886310102,
      "epoch": 0.07407407407407407,
      "grad_norm": 0.8241696953773499,
      "learning_rate": 8.032786885245902e-05,
      "loss": 1.7451,
      "mean_token_accuracy": 0.6189013833552599,
      "num_tokens": 379953.0,
      "step": 50
    },
    {
      "entropy": 0.5046265083923935,
      "epoch": 0.14814814814814814,
      "grad_norm": 4.424446105957031,
      "learning_rate": 9.990765991730485e-05,
      "loss": 0.4302,
      "mean_token_accuracy": 0.8861582314968109,
      "num_tokens": 757047.0,
      "step": 100
    },
    {
      "entropy": 0.24775007627904416,
      "epoch": 0.2222222222222222,
      "grad_norm": 0.30575165152549744,
      "learning_rate": 9.950545603782162e-05,
      "loss": 0.2323,
      "mean_token_accuracy": 0.940535937026143,
      "num_tokens": 1134378.0,
      "step": 150
    },
    {
      "entropy": 0.1925063591822982,
      "epoch": 0.2962962962962963,
      "grad_norm": 0.24072137475013733,
      "learning_rate": 9.878674879048427e-05,
      "loss": 0.1856,
      "mean_token_accuracy": 0.9529062640666962,
      "num_tokens": 1516152.0,
      "step": 200
    },
    {
      "entropy": 0.18322961997240783,
      "epoch": 0.37037037037037035,
      "grad_norm": 0.18854060769081116,
      "learning_rate": 9.775613308830824e-05,
      "loss": 0.1775,
      "mean_token_accuracy": 0.9546041788160801,
      "num_tokens": 1896494.0,
      "step": 250
    },
    {
      "entropy": 0.17187482433393597,
      "epoch": 0.4444444444444444,
      "grad_norm": 0.18019668757915497,
      "learning_rate": 9.642019796948866e-05,
      "loss": 0.164,
      "mean_token_accuracy": 0.9579361644387245,
      "num_tokens": 2277194.0,
      "step": 300
    },
    {
      "entropy": 0.16193925650790333,
      "epoch": 0.5185185185185185,
      "grad_norm": 0.23759332299232483,
      "learning_rate": 9.478748447168449e-05,
      "loss": 0.1539,
      "mean_token_accuracy": 0.9614081564545631,
      "num_tokens": 2658243.0,
      "step": 350
    },
    {
      "entropy": 0.15039873549714686,
      "epoch": 0.5925925925925926,
      "grad_norm": 0.2590758204460144,
      "learning_rate": 9.28684310265789e-05,
      "loss": 0.1443,
      "mean_token_accuracy": 0.9650989197194576,
      "num_tokens": 3041430.0,
      "step": 400
    },
    {
      "entropy": 0.14228019634261727,
      "epoch": 0.6666666666666666,
      "grad_norm": 0.18062612414360046,
      "learning_rate": 9.067530672382544e-05,
      "loss": 0.1366,
      "mean_token_accuracy": 0.9667340110242367,
      "num_tokens": 3422368.0,
      "step": 450
    },
    {
      "entropy": 0.1391275341436267,
      "epoch": 0.7407407407407407,
      "grad_norm": 0.1984509527683258,
      "learning_rate": 8.822213287104348e-05,
      "loss": 0.1352,
      "mean_token_accuracy": 0.9674024738371372,
      "num_tokens": 3801968.0,
      "step": 500
    },
    {
      "entropy": 0.13673329239711166,
      "epoch": 0.8148148148148148,
      "grad_norm": 0.13088534772396088,
      "learning_rate": 8.552459335135381e-05,
      "loss": 0.1354,
      "mean_token_accuracy": 0.9673544447124004,
      "num_tokens": 4182102.0,
      "step": 550
    },
    {
      "entropy": 0.13533455861732363,
      "epoch": 0.8888888888888888,
      "grad_norm": 0.33587542176246643,
      "learning_rate": 8.259993435156559e-05,
      "loss": 0.1325,
      "mean_token_accuracy": 0.9676820485293866,
      "num_tokens": 4561998.0,
      "step": 600
    },
    {
      "entropy": 0.13281562993302942,
      "epoch": 0.9629629629629629,
      "grad_norm": 0.11626797914505005,
      "learning_rate": 7.946685410208296e-05,
      "loss": 0.1313,
      "mean_token_accuracy": 0.9678142921626568,
      "num_tokens": 4936347.0,
      "step": 650
    },
    {
      "entropy": 0.1338025047816336,
      "epoch": 1.037037037037037,
      "grad_norm": 0.10883153975009918,
      "learning_rate": 7.614538333345735e-05,
      "loss": 0.1328,
      "mean_token_accuracy": 0.9673172944784164,
      "num_tokens": 5313396.0,
      "step": 700
    },
    {
      "entropy": 0.1330988533422351,
      "epoch": 1.1111111111111112,
      "grad_norm": 0.14333182573318481,
      "learning_rate": 7.265675721386285e-05,
      "loss": 0.1317,
      "mean_token_accuracy": 0.9678454534709453,
      "num_tokens": 5694821.0,
      "step": 750
    },
    {
      "entropy": 0.1315150342695415,
      "epoch": 1.1851851851851851,
      "grad_norm": 0.07893866300582886,
      "learning_rate": 6.902327958623736e-05,
      "loss": 0.131,
      "mean_token_accuracy": 0.9678919970989227,
      "num_tokens": 6071147.0,
      "step": 800
    },
    {
      "entropy": 0.1348141137883067,
      "epoch": 1.2592592592592593,
      "grad_norm": 0.10548827797174454,
      "learning_rate": 6.526818037306228e-05,
      "loss": 0.1337,
      "mean_token_accuracy": 0.9671075843274594,
      "num_tokens": 6454454.0,
      "step": 850
    },
    {
      "entropy": 0.12924523117020725,
      "epoch": 1.3333333333333333,
      "grad_norm": 0.13734276592731476,
      "learning_rate": 6.14154670604355e-05,
      "loss": 0.1294,
      "mean_token_accuracy": 0.9686036820709706,
      "num_tokens": 6831407.0,
      "step": 900
    },
    {
      "entropy": 0.13089272173121572,
      "epoch": 1.4074074074074074,
      "grad_norm": 0.062484513968229294,
      "learning_rate": 5.7489771210944564e-05,
      "loss": 0.1304,
      "mean_token_accuracy": 0.9679886139929295,
      "num_tokens": 7211253.0,
      "step": 950
    },
    {
      "entropy": 0.1295284123532474,
      "epoch": 1.4814814814814814,
      "grad_norm": 0.07434429228305817,
      "learning_rate": 5.351619098663021e-05,
      "loss": 0.1289,
      "mean_token_accuracy": 0.9682336232066154,
      "num_tokens": 7591172.0,
      "step": 1000
    },
    {
      "entropy": 0.13286457041278482,
      "epoch": 1.5555555555555556,
      "grad_norm": 0.1009301245212555,
      "learning_rate": 4.952013068883795e-05,
      "loss": 0.1326,
      "mean_token_accuracy": 0.9677493931353092,
      "num_tokens": 7976360.0,
      "step": 1050
    },
    {
      "entropy": 0.13181614426895977,
      "epoch": 1.6296296296296298,
      "grad_norm": 0.09136403352022171,
      "learning_rate": 4.5527138340828776e-05,
      "loss": 0.1317,
      "mean_token_accuracy": 0.9678143452107907,
      "num_tokens": 8355146.0,
      "step": 1100
    },
    {
      "entropy": 0.12889527762308717,
      "epoch": 1.7037037037037037,
      "grad_norm": 0.11126290261745453,
      "learning_rate": 4.156274235153189e-05,
      "loss": 0.1289,
      "mean_token_accuracy": 0.9681750671565532,
      "num_tokens": 8736037.0,
      "step": 1150
    },
    {
      "entropy": 0.1293605554662645,
      "epoch": 1.7777777777777777,
      "grad_norm": 0.10993292182683945,
      "learning_rate": 3.765228830469794e-05,
      "loss": 0.1293,
      "mean_token_accuracy": 0.9684934197366237,
      "num_tokens": 9113158.0,
      "step": 1200
    },
    {
      "entropy": 0.12954492604359985,
      "epoch": 1.8518518518518519,
      "grad_norm": 0.1266999989748001,
      "learning_rate": 3.3820776916908857e-05,
      "loss": 0.1296,
      "mean_token_accuracy": 0.9680785122513771,
      "num_tokens": 9492245.0,
      "step": 1250
    },
    {
      "entropy": 0.13069430900737644,
      "epoch": 1.925925925925926,
      "grad_norm": 0.07351736724376678,
      "learning_rate": 3.0092704200428058e-05,
      "loss": 0.131,
      "mean_token_accuracy": 0.9679618345201015,
      "num_tokens": 9874560.0,
      "step": 1300
    },
    {
      "entropy": 0.12986605327576398,
      "epoch": 2.0,
      "grad_norm": 0.07602707296609879,
      "learning_rate": 2.649190485277792e-05,
      "loss": 0.1301,
      "mean_token_accuracy": 0.9679882827401162,
      "num_tokens": 10251446.0,
      "step": 1350
    },
    {
      "entropy": 0.13015099691227078,
      "epoch": 2.074074074074074,
      "grad_norm": 0.07422789186239243,
      "learning_rate": 2.3041399874302905e-05,
      "loss": 0.13,
      "mean_token_accuracy": 0.968013653755188,
      "num_tokens": 10637110.0,
      "step": 1400
    },
    {
      "entropy": 0.12833521047607063,
      "epoch": 2.148148148148148,
      "grad_norm": 0.06928899884223938,
      "learning_rate": 1.976324938794482e-05,
      "loss": 0.1287,
      "mean_token_accuracy": 0.9684618780016899,
      "num_tokens": 11014301.0,
      "step": 1450
    },
    {
      "entropy": 0.12765121564269066,
      "epoch": 2.2222222222222223,
      "grad_norm": 0.07737194001674652,
      "learning_rate": 1.667841160219835e-05,
      "loss": 0.1279,
      "mean_token_accuracy": 0.9685468013584614,
      "num_tokens": 11390895.0,
      "step": 1500
    },
    {
      "entropy": 0.13076237022876738,
      "epoch": 2.2962962962962963,
      "grad_norm": 0.0690469890832901,
      "learning_rate": 1.3806608818939203e-05,
      "loss": 0.1308,
      "mean_token_accuracy": 0.9680638153851032,
      "num_tokens": 11773360.0,
      "step": 1550
    },
    {
      "entropy": 0.1271959487348795,
      "epoch": 2.3703703703703702,
      "grad_norm": 0.08058126270771027,
      "learning_rate": 1.1166201342777438e-05,
      "loss": 0.1275,
      "mean_token_accuracy": 0.9684904217720032,
      "num_tokens": 12152692.0,
      "step": 1600
    },
    {
      "entropy": 0.12783636916428803,
      "epoch": 2.4444444444444446,
      "grad_norm": 0.08844149112701416,
      "learning_rate": 8.774070098071668e-06,
      "loss": 0.1278,
      "mean_token_accuracy": 0.9686374716460705,
      "num_tokens": 12532074.0,
      "step": 1650
    },
    {
      "entropy": 0.12949045987799765,
      "epoch": 2.5185185185185186,
      "grad_norm": 0.0735594779253006,
      "learning_rate": 6.645508704069003e-06,
      "loss": 0.13,
      "mean_token_accuracy": 0.9678716999292374,
      "num_tokens": 12914270.0,
      "step": 1700
    },
    {
      "entropy": 0.12908031923696398,
      "epoch": 2.5925925925925926,
      "grad_norm": 0.0713183730840683,
      "learning_rate": 4.794125698167262e-06,
      "loss": 0.1291,
      "mean_token_accuracy": 0.9681964771449566,
      "num_tokens": 13293683.0,
      "step": 1750
    },
    {
      "entropy": 0.12857461655512453,
      "epoch": 2.6666666666666665,
      "grad_norm": 0.06526947021484375,
      "learning_rate": 3.231757532415458e-06,
      "loss": 0.1287,
      "mean_token_accuracy": 0.9684041538834571,
      "num_tokens": 13671470.0,
      "step": 1800
    },
    {
      "entropy": 0.12981407037004827,
      "epoch": 2.7407407407407405,
      "grad_norm": 0.06781283766031265,
      "learning_rate": 1.9683928994924385e-06,
      "loss": 0.1304,
      "mean_token_accuracy": 0.9678549686074257,
      "num_tokens": 14051413.0,
      "step": 1850
    },
    {
      "entropy": 0.1281242691539228,
      "epoch": 2.814814814814815,
      "grad_norm": 0.06520246714353561,
      "learning_rate": 1.0121088719706296e-06,
      "loss": 0.1291,
      "mean_token_accuracy": 0.968293984234333,
      "num_tokens": 14429791.0,
      "step": 1900
    },
    {
      "entropy": 0.12915834257379175,
      "epoch": 2.888888888888889,
      "grad_norm": 0.07606221735477448,
      "learning_rate": 3.6901926314575894e-07,
      "loss": 0.1292,
      "mean_token_accuracy": 0.9681269869208335,
      "num_tokens": 14808357.0,
      "step": 1950
    },
    {
      "entropy": 0.1287903120368719,
      "epoch": 2.962962962962963,
      "grad_norm": 0.07113504409790039,
      "learning_rate": 4.323553957759629e-08,
      "loss": 0.1286,
      "mean_token_accuracy": 0.9684909208118916,
      "num_tokens": 15187872.0,
      "step": 2000
    }
  ],
  "logging_steps": 50,
  "max_steps": 2025,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 6.344379487768412e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}