{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 500,
  "global_step": 782,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01278772378516624,
      "grad_norm": 0.17330682277679443,
      "learning_rate": 0.0001976982097186701,
      "loss": 1.4811,
      "step": 10
    },
    {
      "epoch": 0.02557544757033248,
      "grad_norm": 0.15069860219955444,
      "learning_rate": 0.00019514066496163684,
      "loss": 1.4092,
      "step": 20
    },
    {
      "epoch": 0.03836317135549872,
      "grad_norm": 0.1438504010438919,
      "learning_rate": 0.0001925831202046036,
      "loss": 1.3498,
      "step": 30
    },
    {
      "epoch": 0.05115089514066496,
      "grad_norm": 0.16118377447128296,
      "learning_rate": 0.00019002557544757034,
      "loss": 1.3031,
      "step": 40
    },
    {
      "epoch": 0.0639386189258312,
      "grad_norm": 0.15924590826034546,
      "learning_rate": 0.0001874680306905371,
      "loss": 1.3659,
      "step": 50
    },
    {
      "epoch": 0.07672634271099744,
      "grad_norm": 0.16134314239025116,
      "learning_rate": 0.00018491048593350385,
      "loss": 1.2793,
      "step": 60
    },
    {
      "epoch": 0.08951406649616368,
      "grad_norm": 0.17917372286319733,
      "learning_rate": 0.0001823529411764706,
      "loss": 1.2369,
      "step": 70
    },
    {
      "epoch": 0.10230179028132992,
      "grad_norm": 0.17515508830547333,
      "learning_rate": 0.00017979539641943735,
      "loss": 1.2174,
      "step": 80
    },
    {
      "epoch": 0.11508951406649616,
      "grad_norm": 0.16867513954639435,
      "learning_rate": 0.0001772378516624041,
      "loss": 1.2656,
      "step": 90
    },
    {
      "epoch": 0.1278772378516624,
      "grad_norm": 0.21344934403896332,
      "learning_rate": 0.00017468030690537086,
      "loss": 1.2513,
      "step": 100
    },
    {
      "epoch": 0.14066496163682865,
      "grad_norm": 0.19054637849330902,
      "learning_rate": 0.0001721227621483376,
      "loss": 1.2906,
      "step": 110
    },
    {
      "epoch": 0.1534526854219949,
      "grad_norm": 0.2011404037475586,
      "learning_rate": 0.00016956521739130436,
      "loss": 1.2156,
      "step": 120
    },
    {
      "epoch": 0.16624040920716113,
      "grad_norm": 0.190298929810524,
      "learning_rate": 0.0001670076726342711,
      "loss": 1.1907,
      "step": 130
    },
    {
      "epoch": 0.17902813299232737,
      "grad_norm": 0.20571213960647583,
      "learning_rate": 0.00016445012787723786,
      "loss": 1.2082,
      "step": 140
    },
    {
      "epoch": 0.1918158567774936,
      "grad_norm": 0.20900501310825348,
      "learning_rate": 0.00016189258312020462,
      "loss": 1.1588,
      "step": 150
    },
    {
      "epoch": 0.20460358056265984,
      "grad_norm": 0.21515554189682007,
      "learning_rate": 0.00015933503836317137,
      "loss": 1.2504,
      "step": 160
    },
    {
      "epoch": 0.21739130434782608,
      "grad_norm": 0.21124675869941711,
      "learning_rate": 0.00015677749360613812,
      "loss": 1.209,
      "step": 170
    },
    {
      "epoch": 0.23017902813299232,
      "grad_norm": 0.26808661222457886,
      "learning_rate": 0.00015421994884910487,
      "loss": 1.2274,
      "step": 180
    },
    {
      "epoch": 0.24296675191815856,
      "grad_norm": 0.21091195940971375,
      "learning_rate": 0.00015166240409207163,
      "loss": 1.1682,
      "step": 190
    },
    {
      "epoch": 0.2557544757033248,
      "grad_norm": 0.23372243344783783,
      "learning_rate": 0.00014910485933503838,
      "loss": 1.2151,
      "step": 200
    },
    {
      "epoch": 0.26854219948849106,
      "grad_norm": 0.20886382460594177,
      "learning_rate": 0.00014654731457800513,
      "loss": 1.1707,
      "step": 210
    },
    {
      "epoch": 0.2813299232736573,
      "grad_norm": 0.2718591094017029,
      "learning_rate": 0.00014398976982097188,
      "loss": 1.1818,
      "step": 220
    },
    {
      "epoch": 0.29411764705882354,
      "grad_norm": 0.20646598935127258,
      "learning_rate": 0.00014143222506393863,
      "loss": 1.2143,
      "step": 230
    },
    {
      "epoch": 0.3069053708439898,
      "grad_norm": 0.23880060017108917,
      "learning_rate": 0.00013887468030690539,
      "loss": 1.2,
      "step": 240
    },
    {
      "epoch": 0.319693094629156,
      "grad_norm": 0.21879994869232178,
      "learning_rate": 0.00013631713554987214,
      "loss": 1.1613,
      "step": 250
    },
    {
      "epoch": 0.33248081841432225,
      "grad_norm": 0.21538586914539337,
      "learning_rate": 0.0001337595907928389,
      "loss": 1.2349,
      "step": 260
    },
    {
      "epoch": 0.3452685421994885,
      "grad_norm": 0.20641756057739258,
      "learning_rate": 0.00013120204603580564,
      "loss": 1.1739,
      "step": 270
    },
    {
      "epoch": 0.35805626598465473,
      "grad_norm": 0.24804268777370453,
      "learning_rate": 0.0001286445012787724,
      "loss": 1.1915,
      "step": 280
    },
    {
      "epoch": 0.37084398976982097,
      "grad_norm": 0.24932831525802612,
      "learning_rate": 0.00012608695652173915,
      "loss": 1.1843,
      "step": 290
    },
    {
      "epoch": 0.3836317135549872,
      "grad_norm": 0.26308584213256836,
      "learning_rate": 0.0001235294117647059,
      "loss": 1.1992,
      "step": 300
    },
    {
      "epoch": 0.39641943734015345,
      "grad_norm": 0.2279403656721115,
      "learning_rate": 0.00012097186700767265,
      "loss": 1.1884,
      "step": 310
    },
    {
      "epoch": 0.4092071611253197,
      "grad_norm": 0.2214374542236328,
      "learning_rate": 0.0001184143222506394,
      "loss": 1.1879,
      "step": 320
    },
    {
      "epoch": 0.4219948849104859,
      "grad_norm": 0.20924842357635498,
      "learning_rate": 0.00011585677749360616,
      "loss": 1.1716,
      "step": 330
    },
    {
      "epoch": 0.43478260869565216,
      "grad_norm": 0.2850317358970642,
      "learning_rate": 0.00011329923273657291,
      "loss": 1.1537,
      "step": 340
    },
    {
      "epoch": 0.4475703324808184,
      "grad_norm": 0.2727545499801636,
      "learning_rate": 0.00011074168797953966,
      "loss": 1.1924,
      "step": 350
    },
    {
      "epoch": 0.46035805626598464,
      "grad_norm": 0.21406596899032593,
      "learning_rate": 0.00010818414322250641,
      "loss": 1.1935,
      "step": 360
    },
    {
      "epoch": 0.4731457800511509,
      "grad_norm": 0.26856428384780884,
      "learning_rate": 0.00010562659846547316,
      "loss": 1.1473,
      "step": 370
    },
    {
      "epoch": 0.4859335038363171,
      "grad_norm": 0.31214985251426697,
      "learning_rate": 0.00010306905370843992,
      "loss": 1.1869,
      "step": 380
    },
    {
      "epoch": 0.49872122762148335,
      "grad_norm": 0.25869354605674744,
      "learning_rate": 0.00010051150895140667,
      "loss": 1.1726,
      "step": 390
    },
    {
      "epoch": 0.5115089514066496,
      "grad_norm": 0.25544413924217224,
      "learning_rate": 9.79539641943734e-05,
      "loss": 1.2077,
      "step": 400
    },
    {
      "epoch": 0.5242966751918159,
      "grad_norm": 0.24060559272766113,
      "learning_rate": 9.539641943734016e-05,
      "loss": 1.1651,
      "step": 410
    },
    {
      "epoch": 0.5370843989769821,
      "grad_norm": 0.2562996447086334,
      "learning_rate": 9.283887468030691e-05,
      "loss": 1.1922,
      "step": 420
    },
    {
      "epoch": 0.5498721227621484,
      "grad_norm": 0.24809622764587402,
      "learning_rate": 9.028132992327366e-05,
      "loss": 1.1622,
      "step": 430
    },
    {
      "epoch": 0.5626598465473146,
      "grad_norm": 0.23276326060295105,
      "learning_rate": 8.772378516624042e-05,
      "loss": 1.1461,
      "step": 440
    },
    {
      "epoch": 0.5754475703324808,
      "grad_norm": 0.26975592970848083,
      "learning_rate": 8.516624040920717e-05,
      "loss": 1.1719,
      "step": 450
    },
    {
      "epoch": 0.5882352941176471,
      "grad_norm": 0.2104811817407608,
      "learning_rate": 8.260869565217392e-05,
      "loss": 1.1957,
      "step": 460
    },
    {
      "epoch": 0.6010230179028133,
      "grad_norm": 0.25681132078170776,
      "learning_rate": 8.005115089514067e-05,
      "loss": 1.1508,
      "step": 470
    },
    {
      "epoch": 0.6138107416879796,
      "grad_norm": 0.2409922480583191,
      "learning_rate": 7.749360613810742e-05,
      "loss": 1.169,
      "step": 480
    },
    {
      "epoch": 0.6265984654731458,
      "grad_norm": 0.22748278081417084,
      "learning_rate": 7.493606138107418e-05,
      "loss": 1.1102,
      "step": 490
    },
    {
      "epoch": 0.639386189258312,
      "grad_norm": 0.27332964539527893,
      "learning_rate": 7.237851662404093e-05,
      "loss": 1.162,
      "step": 500
    },
    {
      "epoch": 0.6521739130434783,
      "grad_norm": 0.2509157955646515,
      "learning_rate": 6.982097186700768e-05,
      "loss": 1.1518,
      "step": 510
    },
    {
      "epoch": 0.6649616368286445,
      "grad_norm": 0.2574126720428467,
      "learning_rate": 6.726342710997443e-05,
      "loss": 1.1643,
      "step": 520
    },
    {
      "epoch": 0.6777493606138107,
      "grad_norm": 0.23611821234226227,
      "learning_rate": 6.470588235294118e-05,
      "loss": 1.1975,
      "step": 530
    },
    {
      "epoch": 0.690537084398977,
      "grad_norm": 0.2839568257331848,
      "learning_rate": 6.214833759590794e-05,
      "loss": 1.1611,
      "step": 540
    },
    {
      "epoch": 0.7033248081841432,
      "grad_norm": 0.2545911967754364,
      "learning_rate": 5.959079283887469e-05,
      "loss": 1.1711,
      "step": 550
    },
    {
      "epoch": 0.7161125319693095,
      "grad_norm": 0.24425293505191803,
      "learning_rate": 5.703324808184144e-05,
      "loss": 1.16,
      "step": 560
    },
    {
      "epoch": 0.7289002557544757,
      "grad_norm": 0.21479295194149017,
      "learning_rate": 5.447570332480819e-05,
      "loss": 1.1918,
      "step": 570
    },
    {
      "epoch": 0.7416879795396419,
      "grad_norm": 0.28247973322868347,
      "learning_rate": 5.1918158567774945e-05,
      "loss": 1.1219,
      "step": 580
    },
    {
      "epoch": 0.7544757033248082,
      "grad_norm": 0.2444392889738083,
      "learning_rate": 4.936061381074169e-05,
      "loss": 1.1619,
      "step": 590
    },
    {
      "epoch": 0.7672634271099744,
      "grad_norm": 0.2738865911960602,
      "learning_rate": 4.680306905370844e-05,
      "loss": 1.1875,
      "step": 600
    },
    {
      "epoch": 0.7800511508951407,
      "grad_norm": 0.2427508682012558,
      "learning_rate": 4.4245524296675195e-05,
      "loss": 1.1554,
      "step": 610
    },
    {
      "epoch": 0.7928388746803069,
      "grad_norm": 0.2514194846153259,
      "learning_rate": 4.168797953964195e-05,
      "loss": 1.0994,
      "step": 620
    },
    {
      "epoch": 0.8056265984654731,
      "grad_norm": 0.3031892776489258,
      "learning_rate": 3.91304347826087e-05,
      "loss": 1.1534,
      "step": 630
    },
    {
      "epoch": 0.8184143222506394,
      "grad_norm": 0.26739251613616943,
      "learning_rate": 3.657289002557545e-05,
      "loss": 1.1519,
      "step": 640
    },
    {
      "epoch": 0.8312020460358056,
      "grad_norm": 0.24512003362178802,
      "learning_rate": 3.40153452685422e-05,
      "loss": 1.1461,
      "step": 650
    },
    {
      "epoch": 0.8439897698209718,
      "grad_norm": 0.25903940200805664,
      "learning_rate": 3.145780051150895e-05,
      "loss": 1.2031,
      "step": 660
    },
    {
      "epoch": 0.8567774936061381,
      "grad_norm": 0.2628057599067688,
      "learning_rate": 2.89002557544757e-05,
      "loss": 1.0987,
      "step": 670
    },
    {
      "epoch": 0.8695652173913043,
      "grad_norm": 0.2536948621273041,
      "learning_rate": 2.6342710997442456e-05,
      "loss": 1.1747,
      "step": 680
    },
    {
      "epoch": 0.8823529411764706,
      "grad_norm": 0.24911293387413025,
      "learning_rate": 2.378516624040921e-05,
      "loss": 1.134,
      "step": 690
    },
    {
      "epoch": 0.8951406649616368,
      "grad_norm": 0.25777775049209595,
      "learning_rate": 2.122762148337596e-05,
      "loss": 1.1314,
      "step": 700
    },
    {
      "epoch": 0.907928388746803,
      "grad_norm": 0.2719346582889557,
      "learning_rate": 1.8670076726342713e-05,
      "loss": 1.1481,
      "step": 710
    },
    {
      "epoch": 0.9207161125319693,
      "grad_norm": 0.2559177279472351,
      "learning_rate": 1.6112531969309465e-05,
      "loss": 1.1892,
      "step": 720
    },
    {
      "epoch": 0.9335038363171355,
      "grad_norm": 0.25907379388809204,
      "learning_rate": 1.3554987212276215e-05,
      "loss": 1.1818,
      "step": 730
    },
    {
      "epoch": 0.9462915601023018,
      "grad_norm": 0.24793638288974762,
      "learning_rate": 1.0997442455242967e-05,
      "loss": 1.1825,
      "step": 740
    },
    {
      "epoch": 0.959079283887468,
      "grad_norm": 0.2763022184371948,
      "learning_rate": 8.439897698209718e-06,
      "loss": 1.1252,
      "step": 750
    },
    {
      "epoch": 0.9718670076726342,
      "grad_norm": 0.2603158950805664,
      "learning_rate": 5.882352941176471e-06,
      "loss": 1.1759,
      "step": 760
    },
    {
      "epoch": 0.9846547314578005,
      "grad_norm": 0.2559683620929718,
      "learning_rate": 3.324808184143223e-06,
      "loss": 1.1206,
      "step": 770
    },
    {
      "epoch": 0.9974424552429667,
      "grad_norm": 0.2707849442958832,
      "learning_rate": 7.672634271099745e-07,
      "loss": 1.1607,
      "step": 780
    }
  ],
  "logging_steps": 10,
  "max_steps": 782,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.54046633984e+17,
  "train_batch_size": 32,
  "trial_name": null,
  "trial_params": null
}