{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 3.0,
  "eval_steps": 500,
  "global_step": 1848,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 2.0573711842298508,
      "epoch": 0.016240357287860333,
      "grad_norm": 0.98046875,
      "learning_rate": 3.2142857142857144e-05,
      "loss": 2.3979,
      "mean_token_accuracy": 0.5674526583403349,
      "num_tokens": 319222.0,
      "step": 10
    },
    {
      "entropy": 1.6298254258930682,
      "epoch": 0.032480714575720666,
      "grad_norm": 0.7734375,
      "learning_rate": 6.785714285714286e-05,
      "loss": 1.7067,
      "mean_token_accuracy": 0.6522471688687801,
      "num_tokens": 635410.0,
      "step": 20
    },
    {
      "entropy": 0.8830645218491554,
      "epoch": 0.048721071863580996,
      "grad_norm": 0.28125,
      "learning_rate": 0.00010357142857142859,
      "loss": 0.8894,
      "mean_token_accuracy": 0.791144409775734,
      "num_tokens": 952489.0,
      "step": 30
    },
    {
      "entropy": 0.6340911261737346,
      "epoch": 0.06496142915144133,
      "grad_norm": 0.2236328125,
      "learning_rate": 0.0001392857142857143,
      "loss": 0.6371,
      "mean_token_accuracy": 0.840929938852787,
      "num_tokens": 1271722.0,
      "step": 40
    },
    {
      "entropy": 0.5732324108481407,
      "epoch": 0.08120178643930166,
      "grad_norm": 0.33203125,
      "learning_rate": 0.000175,
      "loss": 0.5747,
      "mean_token_accuracy": 0.8537561506032944,
      "num_tokens": 1590635.0,
      "step": 50
    },
    {
      "entropy": 0.5194318823516368,
      "epoch": 0.09744214372716199,
      "grad_norm": 0.115234375,
      "learning_rate": 0.0001999986169583868,
      "loss": 0.5268,
      "mean_token_accuracy": 0.8640895910561085,
      "num_tokens": 1908010.0,
      "step": 60
    },
    {
      "entropy": 0.49497435204684737,
      "epoch": 0.11368250101502234,
      "grad_norm": 0.125,
      "learning_rate": 0.00019997403061615897,
      "loss": 0.4986,
      "mean_token_accuracy": 0.8717949956655502,
      "num_tokens": 2225569.0,
      "step": 70
    },
    {
      "entropy": 0.4971678379923105,
      "epoch": 0.12992285830288267,
      "grad_norm": 0.11083984375,
      "learning_rate": 0.0001999187187134847,
      "loss": 0.4924,
      "mean_token_accuracy": 0.8731547556817532,
      "num_tokens": 2539488.0,
      "step": 80
    },
    {
      "entropy": 0.5084386952221394,
      "epoch": 0.146163215590743,
      "grad_norm": 0.1083984375,
      "learning_rate": 0.00019983269824967067,
      "loss": 0.5163,
      "mean_token_accuracy": 0.8671703346073627,
      "num_tokens": 2857760.0,
      "step": 90
    },
    {
      "entropy": 0.476475191116333,
      "epoch": 0.16240357287860333,
      "grad_norm": 0.11279296875,
      "learning_rate": 0.00019971599566185206,
      "loss": 0.4758,
      "mean_token_accuracy": 0.876344844698906,
      "num_tokens": 3174136.0,
      "step": 100
    },
    {
      "entropy": 0.48804047554731367,
      "epoch": 0.17864393016646365,
      "grad_norm": 0.1025390625,
      "learning_rate": 0.00019956864681686744,
      "loss": 0.4837,
      "mean_token_accuracy": 0.8742976881563663,
      "num_tokens": 3489505.0,
      "step": 110
    },
    {
      "entropy": 0.4607066061347723,
      "epoch": 0.19488428745432398,
      "grad_norm": 0.255859375,
      "learning_rate": 0.00019939069700023563,
      "loss": 0.4639,
      "mean_token_accuracy": 0.879499676078558,
      "num_tokens": 3807935.0,
      "step": 120
    },
    {
      "entropy": 0.4422083988785744,
      "epoch": 0.21112464474218431,
      "grad_norm": 0.099609375,
      "learning_rate": 0.00019918220090223775,
      "loss": 0.4431,
      "mean_token_accuracy": 0.8831395357847214,
      "num_tokens": 4125137.0,
      "step": 130
    },
    {
      "entropy": 0.44356597438454626,
      "epoch": 0.22736500203004467,
      "grad_norm": 0.10595703125,
      "learning_rate": 0.00019894322260110927,
      "loss": 0.4373,
      "mean_token_accuracy": 0.8849747203290462,
      "num_tokens": 4442538.0,
      "step": 140
    },
    {
      "entropy": 0.44916940554976464,
      "epoch": 0.243605359317905,
      "grad_norm": 0.11083984375,
      "learning_rate": 0.00019867383554334603,
      "loss": 0.4473,
      "mean_token_accuracy": 0.8828106552362442,
      "num_tokens": 4761703.0,
      "step": 150
    },
    {
      "entropy": 0.44491727463901043,
      "epoch": 0.25984571660576533,
      "grad_norm": 0.09716796875,
      "learning_rate": 0.00019837412252113204,
      "loss": 0.446,
      "mean_token_accuracy": 0.8818246781826019,
      "num_tokens": 5080459.0,
      "step": 160
    },
    {
      "entropy": 0.4443864993751049,
      "epoch": 0.27608607389362566,
      "grad_norm": 0.11474609375,
      "learning_rate": 0.00019804417564689403,
      "loss": 0.4424,
      "mean_token_accuracy": 0.8833745121955872,
      "num_tokens": 5394935.0,
      "step": 170
    },
    {
      "entropy": 0.43182576820254326,
      "epoch": 0.292326431181486,
      "grad_norm": 0.10302734375,
      "learning_rate": 0.00019768409632499244,
      "loss": 0.4337,
      "mean_token_accuracy": 0.8853104099631309,
      "num_tokens": 5711130.0,
      "step": 180
    },
    {
      "entropy": 0.4353721059858799,
      "epoch": 0.3085667884693463,
      "grad_norm": 0.10205078125,
      "learning_rate": 0.00019729399522055603,
      "loss": 0.4351,
      "mean_token_accuracy": 0.8850096859037876,
      "num_tokens": 6028009.0,
      "step": 190
    },
    {
      "entropy": 0.4161925382912159,
      "epoch": 0.32480714575720665,
      "grad_norm": 0.10107421875,
      "learning_rate": 0.0001968739922254706,
      "loss": 0.4118,
      "mean_token_accuracy": 0.8901829145848751,
      "num_tokens": 6345284.0,
      "step": 200
    },
    {
      "entropy": 0.4270293299108744,
      "epoch": 0.341047503045067,
      "grad_norm": 0.126953125,
      "learning_rate": 0.00019642421642153198,
      "loss": 0.4266,
      "mean_token_accuracy": 0.885687505453825,
      "num_tokens": 6664555.0,
      "step": 210
    },
    {
      "entropy": 0.4177013225853443,
      "epoch": 0.3572878603329273,
      "grad_norm": 0.09912109375,
      "learning_rate": 0.0001959448060407748,
      "loss": 0.4185,
      "mean_token_accuracy": 0.8890490755438805,
      "num_tokens": 6984869.0,
      "step": 220
    },
    {
      "entropy": 0.4492575516924262,
      "epoch": 0.37352821762078764,
      "grad_norm": 0.125,
      "learning_rate": 0.00019543590842298857,
      "loss": 0.4471,
      "mean_token_accuracy": 0.8821755766868591,
      "num_tokens": 7301522.0,
      "step": 230
    },
    {
      "entropy": 0.4262876145541668,
      "epoch": 0.38976857490864797,
      "grad_norm": 0.126953125,
      "learning_rate": 0.0001948976799704351,
      "loss": 0.4272,
      "mean_token_accuracy": 0.8858069330453873,
      "num_tokens": 7617411.0,
      "step": 240
    },
    {
      "entropy": 0.4227599944919348,
      "epoch": 0.4060089321965083,
      "grad_norm": 0.111328125,
      "learning_rate": 0.0001943302860997807,
      "loss": 0.4183,
      "mean_token_accuracy": 0.889183484762907,
      "num_tokens": 7932287.0,
      "step": 250
    },
    {
      "entropy": 0.4225012965500355,
      "epoch": 0.42224928948436863,
      "grad_norm": 0.173828125,
      "learning_rate": 0.00019373390119125752,
      "loss": 0.4241,
      "mean_token_accuracy": 0.8874775715172291,
      "num_tokens": 8249106.0,
      "step": 260
    },
    {
      "entropy": 0.4166328992694616,
      "epoch": 0.438489646772229,
      "grad_norm": 0.109375,
      "learning_rate": 0.00019310870853507043,
      "loss": 0.4183,
      "mean_token_accuracy": 0.8885860778391361,
      "num_tokens": 8564544.0,
      "step": 270
    },
    {
      "entropy": 0.38806356210261583,
      "epoch": 0.45473000406008934,
      "grad_norm": 0.11328125,
      "learning_rate": 0.00019245490027506546,
      "loss": 0.3836,
      "mean_token_accuracy": 0.8962622597813606,
      "num_tokens": 8883546.0,
      "step": 280
    },
    {
      "entropy": 0.388262290135026,
      "epoch": 0.4709703613479497,
      "grad_norm": 0.09130859375,
      "learning_rate": 0.0001917726773496773,
      "loss": 0.3847,
      "mean_token_accuracy": 0.8955962382256984,
      "num_tokens": 9199902.0,
      "step": 290
    },
    {
      "entropy": 0.40017246957868335,
      "epoch": 0.48721071863581,
      "grad_norm": 0.09716796875,
      "learning_rate": 0.00019106224943017352,
      "loss": 0.3984,
      "mean_token_accuracy": 0.8926815405488014,
      "num_tokens": 9518427.0,
      "step": 300
    },
    {
      "entropy": 0.39343364126980307,
      "epoch": 0.5034510759236703,
      "grad_norm": 0.09521484375,
      "learning_rate": 0.00019032383485621546,
      "loss": 0.3936,
      "mean_token_accuracy": 0.8935227513313293,
      "num_tokens": 9834801.0,
      "step": 310
    },
    {
      "entropy": 0.40088152755051853,
      "epoch": 0.5196914332115307,
      "grad_norm": 0.09423828125,
      "learning_rate": 0.00018955766056875456,
      "loss": 0.4013,
      "mean_token_accuracy": 0.8928539358079434,
      "num_tokens": 10150756.0,
      "step": 320
    },
    {
      "entropy": 0.3935076169669628,
      "epoch": 0.535931790499391,
      "grad_norm": 0.09619140625,
      "learning_rate": 0.0001887639620402854,
      "loss": 0.3894,
      "mean_token_accuracy": 0.8937225684523582,
      "num_tokens": 10470119.0,
      "step": 330
    },
    {
      "entropy": 0.4141834359616041,
      "epoch": 0.5521721477872513,
      "grad_norm": 0.099609375,
      "learning_rate": 0.00018794298320247665,
      "loss": 0.415,
      "mean_token_accuracy": 0.8895114719867706,
      "num_tokens": 10787249.0,
      "step": 340
    },
    {
      "entropy": 0.41944080144166945,
      "epoch": 0.5684125050751117,
      "grad_norm": 0.1162109375,
      "learning_rate": 0.0001870949763712022,
      "loss": 0.4151,
      "mean_token_accuracy": 0.8887378059327602,
      "num_tokens": 11107800.0,
      "step": 350
    },
    {
      "entropy": 0.4085154063999653,
      "epoch": 0.584652862362972,
      "grad_norm": 0.123046875,
      "learning_rate": 0.00018622020216899575,
      "loss": 0.4067,
      "mean_token_accuracy": 0.8906957127153874,
      "num_tokens": 11424520.0,
      "step": 360
    },
    {
      "entropy": 0.41457892414182423,
      "epoch": 0.6008932196508323,
      "grad_norm": 0.1005859375,
      "learning_rate": 0.00018531892944495195,
      "loss": 0.4139,
      "mean_token_accuracy": 0.8893011771142483,
      "num_tokens": 11743854.0,
      "step": 370
    },
    {
      "entropy": 0.3982046090066433,
      "epoch": 0.6171335769386926,
      "grad_norm": 0.095703125,
      "learning_rate": 0.00018439143519209984,
      "loss": 0.3972,
      "mean_token_accuracy": 0.8931242369115353,
      "num_tokens": 12060493.0,
      "step": 380
    },
    {
      "entropy": 0.37617460917681456,
      "epoch": 0.633373934226553,
      "grad_norm": 0.17578125,
      "learning_rate": 0.00018343800446227285,
      "loss": 0.3737,
      "mean_token_accuracy": 0.8988998346030712,
      "num_tokens": 12377614.0,
      "step": 390
    },
    {
      "entropy": 0.38792264480143784,
      "epoch": 0.6496142915144133,
      "grad_norm": 0.1025390625,
      "learning_rate": 0.00018245893027850254,
      "loss": 0.3863,
      "mean_token_accuracy": 0.8964896731078624,
      "num_tokens": 12695713.0,
      "step": 400
    },
    {
      "entropy": 0.3779356569051743,
      "epoch": 0.6658546488022736,
      "grad_norm": 0.09912109375,
      "learning_rate": 0.00018145451354496198,
      "loss": 0.377,
      "mean_token_accuracy": 0.8985928252339364,
      "num_tokens": 13011198.0,
      "step": 410
    },
    {
      "entropy": 0.40948784444481134,
      "epoch": 0.682095006090134,
      "grad_norm": 0.1162109375,
      "learning_rate": 0.0001804250629544874,
      "loss": 0.4077,
      "mean_token_accuracy": 0.8911456301808357,
      "num_tokens": 13329544.0,
      "step": 420
    },
    {
      "entropy": 0.38341086860746143,
      "epoch": 0.6983353633779943,
      "grad_norm": 0.10888671875,
      "learning_rate": 0.00017937089489370594,
      "loss": 0.3829,
      "mean_token_accuracy": 0.8969453655183315,
      "num_tokens": 13645469.0,
      "step": 430
    },
    {
      "entropy": 0.3890859391540289,
      "epoch": 0.7145757206658546,
      "grad_norm": 0.10400390625,
      "learning_rate": 0.0001782923333457987,
      "loss": 0.3875,
      "mean_token_accuracy": 0.8961056731641293,
      "num_tokens": 13964383.0,
      "step": 440
    },
    {
      "entropy": 0.3823310313746333,
      "epoch": 0.730816077953715,
      "grad_norm": 0.09375,
      "learning_rate": 0.0001771897097909294,
      "loss": 0.3857,
      "mean_token_accuracy": 0.8955351069569588,
      "num_tokens": 14284139.0,
      "step": 450
    },
    {
      "entropy": 0.372044226154685,
      "epoch": 0.7470564352415753,
      "grad_norm": 0.1044921875,
      "learning_rate": 0.00017606336310436874,
      "loss": 0.3703,
      "mean_token_accuracy": 0.8989921748638153,
      "num_tokens": 14601084.0,
      "step": 460
    },
    {
      "entropy": 0.3764212913811207,
      "epoch": 0.7632967925294356,
      "grad_norm": 0.10546875,
      "learning_rate": 0.00017491363945234593,
      "loss": 0.3708,
      "mean_token_accuracy": 0.8989950515329838,
      "num_tokens": 14919347.0,
      "step": 470
    },
    {
      "entropy": 0.3796376219019294,
      "epoch": 0.7795371498172959,
      "grad_norm": 0.0986328125,
      "learning_rate": 0.00017374089218565972,
      "loss": 0.3777,
      "mean_token_accuracy": 0.8979221723973752,
      "num_tokens": 15235024.0,
      "step": 480
    },
    {
      "entropy": 0.3780976843088865,
      "epoch": 0.7957775071051563,
      "grad_norm": 0.09326171875,
      "learning_rate": 0.000172545481731081,
      "loss": 0.3814,
      "mean_token_accuracy": 0.8970133177936077,
      "num_tokens": 15555890.0,
      "step": 490
    },
    {
      "entropy": 0.36868189480155705,
      "epoch": 0.8120178643930166,
      "grad_norm": 0.1318359375,
      "learning_rate": 0.00017132777548058102,
      "loss": 0.367,
      "mean_token_accuracy": 0.8997100129723549,
      "num_tokens": 15871127.0,
      "step": 500
    },
    {
      "epoch": 0.8120178643930166,
      "eval_entropy": 0.3875140378834637,
      "eval_loss": 0.38122546672821045,
      "eval_mean_token_accuracy": 0.8967177114868892,
      "eval_num_tokens": 15871127.0,
      "eval_runtime": 177.0916,
      "eval_samples_per_second": 2.953,
      "eval_steps_per_second": 1.479,
      "step": 500
    },
    {
      "entropy": 0.36098182667046785,
      "epoch": 0.8282582216808769,
      "grad_norm": 0.091796875,
      "learning_rate": 0.00017008814767841872,
      "loss": 0.358,
      "mean_token_accuracy": 0.9020477868616581,
      "num_tokens": 16186646.0,
      "step": 510
    },
    {
      "entropy": 0.37107769679278135,
      "epoch": 0.8444985789687373,
      "grad_norm": 0.0966796875,
      "learning_rate": 0.00016882697930612237,
      "loss": 0.3667,
      "mean_token_accuracy": 0.8997148185968399,
      "num_tokens": 16505442.0,
      "step": 520
    },
    {
      "entropy": 0.36859209295362233,
      "epoch": 0.8607389362565977,
      "grad_norm": 0.09228515625,
      "learning_rate": 0.00016754465796540028,
      "loss": 0.3656,
      "mean_token_accuracy": 0.9001396887004376,
      "num_tokens": 16824612.0,
      "step": 530
    },
    {
      "entropy": 0.36337947361171247,
      "epoch": 0.876979293544458,
      "grad_norm": 0.095703125,
      "learning_rate": 0.0001662415777590172,
      "loss": 0.3653,
      "mean_token_accuracy": 0.9004977688193321,
      "num_tokens": 17147012.0,
      "step": 540
    },
    {
      "entropy": 0.38551054075360297,
      "epoch": 0.8932196508323184,
      "grad_norm": 0.10302734375,
      "learning_rate": 0.00016491813916967246,
      "loss": 0.3841,
      "mean_token_accuracy": 0.8968335554003716,
      "num_tokens": 17463603.0,
      "step": 550
    },
    {
      "entropy": 0.3718857761472464,
      "epoch": 0.9094600081201787,
      "grad_norm": 0.1025390625,
      "learning_rate": 0.00016357474893691757,
      "loss": 0.3694,
      "mean_token_accuracy": 0.8994779132306576,
      "num_tokens": 17780634.0,
      "step": 560
    },
    {
      "entropy": 0.3478269662708044,
      "epoch": 0.925700365408039,
      "grad_norm": 0.1005859375,
      "learning_rate": 0.00016221181993215068,
      "loss": 0.3495,
      "mean_token_accuracy": 0.9044472806155681,
      "num_tokens": 18099787.0,
      "step": 570
    },
    {
      "entropy": 0.3706334102898836,
      "epoch": 0.9419407226958993,
      "grad_norm": 0.10107421875,
      "learning_rate": 0.00016082977103172664,
      "loss": 0.3643,
      "mean_token_accuracy": 0.9007264509797096,
      "num_tokens": 18417572.0,
      "step": 580
    },
    {
      "entropy": 0.33980775382369754,
      "epoch": 0.9581810799837597,
      "grad_norm": 0.099609375,
      "learning_rate": 0.00015942902698822136,
      "loss": 0.339,
      "mean_token_accuracy": 0.9067365050315856,
      "num_tokens": 18734911.0,
      "step": 590
    },
    {
      "entropy": 0.36088257618248465,
      "epoch": 0.97442143727162,
      "grad_norm": 0.10693359375,
      "learning_rate": 0.00015801001829989032,
      "loss": 0.3576,
      "mean_token_accuracy": 0.9025222927331924,
      "num_tokens": 19054345.0,
      "step": 600
    },
    {
      "entropy": 0.35844882633537056,
      "epoch": 0.9906617945594803,
      "grad_norm": 0.10400390625,
      "learning_rate": 0.0001565731810783613,
      "loss": 0.3553,
      "mean_token_accuracy": 0.9028041236102581,
      "num_tokens": 19371809.0,
      "step": 610
    },
    {
      "entropy": 0.3564173472233308,
      "epoch": 1.0064961429151442,
      "grad_norm": 0.11181640625,
      "learning_rate": 0.00015511895691460188,
      "loss": 0.353,
      "mean_token_accuracy": 0.9035867773569547,
      "num_tokens": 19684212.0,
      "step": 620
    },
    {
      "entropy": 0.3372783612459898,
      "epoch": 1.0227365002030044,
      "grad_norm": 0.1064453125,
      "learning_rate": 0.00015364779274320255,
      "loss": 0.3362,
      "mean_token_accuracy": 0.9072924487292766,
      "num_tokens": 20003357.0,
      "step": 630
    },
    {
      "entropy": 0.3311926079913974,
      "epoch": 1.0389768574908649,
      "grad_norm": 0.09912109375,
      "learning_rate": 0.00015216014070501834,
      "loss": 0.3244,
      "mean_token_accuracy": 0.9089326687157154,
      "num_tokens": 20322614.0,
      "step": 640
    },
    {
      "entropy": 0.33821379821747544,
      "epoch": 1.055217214778725,
      "grad_norm": 0.09912109375,
      "learning_rate": 0.0001506564580082096,
      "loss": 0.3388,
      "mean_token_accuracy": 0.9062777034938335,
      "num_tokens": 20635631.0,
      "step": 650
    },
    {
      "entropy": 0.3465416576713324,
      "epoch": 1.0714575720665855,
      "grad_norm": 0.1083984375,
      "learning_rate": 0.00014913720678772584,
      "loss": 0.3455,
      "mean_token_accuracy": 0.9043642178177833,
      "num_tokens": 20955241.0,
      "step": 660
    },
    {
      "entropy": 0.33202757611870765,
      "epoch": 1.0876979293544458,
      "grad_norm": 0.10595703125,
      "learning_rate": 0.00014760285396327532,
      "loss": 0.3277,
      "mean_token_accuracy": 0.9081737406551837,
      "num_tokens": 21269871.0,
      "step": 670
    },
    {
      "entropy": 0.30594254843890667,
      "epoch": 1.1039382866423062,
      "grad_norm": 0.140625,
      "learning_rate": 0.000146053871095824,
      "loss": 0.3029,
      "mean_token_accuracy": 0.9145903818309307,
      "num_tokens": 21586356.0,
      "step": 680
    },
    {
      "entropy": 0.31971859056502583,
      "epoch": 1.1201786439301664,
      "grad_norm": 0.107421875,
      "learning_rate": 0.00014449073424266837,
      "loss": 0.3133,
      "mean_token_accuracy": 0.9111780665814877,
      "num_tokens": 21905514.0,
      "step": 690
    },
    {
      "entropy": 0.32340758945792913,
      "epoch": 1.1364190012180269,
      "grad_norm": 0.1005859375,
      "learning_rate": 0.0001429139238111259,
      "loss": 0.3199,
      "mean_token_accuracy": 0.910588438808918,
      "num_tokens": 22224116.0,
      "step": 700
    },
    {
      "entropy": 0.30627013817429544,
      "epoch": 1.152659358505887,
      "grad_norm": 0.11474609375,
      "learning_rate": 0.00014132392441088898,
      "loss": 0.3062,
      "mean_token_accuracy": 0.9141925357282161,
      "num_tokens": 22541518.0,
      "step": 710
    },
    {
      "entropy": 0.34387709144502876,
      "epoch": 1.1688997157937475,
      "grad_norm": 0.10986328125,
      "learning_rate": 0.00013972122470508726,
      "loss": 0.3388,
      "mean_token_accuracy": 0.9059014208614826,
      "num_tokens": 22857809.0,
      "step": 720
    },
    {
      "entropy": 0.3105178466066718,
      "epoch": 1.1851400730816077,
      "grad_norm": 0.103515625,
      "learning_rate": 0.00013810631726010405,
      "loss": 0.3113,
      "mean_token_accuracy": 0.912415674328804,
      "num_tokens": 23175061.0,
      "step": 730
    },
    {
      "entropy": 0.32021520137786863,
      "epoch": 1.2013804303694682,
      "grad_norm": 0.10205078125,
      "learning_rate": 0.00013647969839419334,
      "loss": 0.3166,
      "mean_token_accuracy": 0.9124397613108158,
      "num_tokens": 23492422.0,
      "step": 740
    },
    {
      "entropy": 0.33215807750821114,
      "epoch": 1.2176207876573284,
      "grad_norm": 0.10693359375,
      "learning_rate": 0.00013484186802494345,
      "loss": 0.3288,
      "mean_token_accuracy": 0.9079324699938297,
      "num_tokens": 23811312.0,
      "step": 750
    },
    {
      "entropy": 0.3269189100712538,
      "epoch": 1.2338611449451888,
      "grad_norm": 0.12353515625,
      "learning_rate": 0.00013319332951563495,
      "loss": 0.3229,
      "mean_token_accuracy": 0.9100485563278198,
      "num_tokens": 24127363.0,
      "step": 760
    },
    {
      "entropy": 0.32415517419576645,
      "epoch": 1.250101502233049,
      "grad_norm": 0.10498046875,
      "learning_rate": 0.0001315345895205389,
      "loss": 0.3218,
      "mean_token_accuracy": 0.9100560195744037,
      "num_tokens": 24443515.0,
      "step": 770
    },
    {
      "entropy": 0.3075957763940096,
      "epoch": 1.2663418595209095,
      "grad_norm": 0.107421875,
      "learning_rate": 0.0001298661578292044,
      "loss": 0.3085,
      "mean_token_accuracy": 0.9134799301624298,
      "num_tokens": 24759656.0,
      "step": 780
    },
    {
      "entropy": 0.33256804049015043,
      "epoch": 1.2825822168087697,
      "grad_norm": 0.11572265625,
      "learning_rate": 0.00012818854720978196,
      "loss": 0.3283,
      "mean_token_accuracy": 0.9092446401715278,
      "num_tokens": 25076124.0,
      "step": 790
    },
    {
      "entropy": 0.3044602788053453,
      "epoch": 1.2988225740966302,
      "grad_norm": 0.109375,
      "learning_rate": 0.00012650227325143191,
      "loss": 0.2998,
      "mean_token_accuracy": 0.9158783234655857,
      "num_tokens": 25394550.0,
      "step": 800
    },
    {
      "entropy": 0.3089112024754286,
      "epoch": 1.3150629313844906,
      "grad_norm": 0.1181640625,
      "learning_rate": 0.0001248078542058653,
      "loss": 0.3065,
      "mean_token_accuracy": 0.9139430224895477,
      "num_tokens": 25713937.0,
      "step": 810
    },
    {
      "entropy": 0.3199151481501758,
      "epoch": 1.3313032886723508,
      "grad_norm": 0.1064453125,
      "learning_rate": 0.00012310581082806713,
      "loss": 0.3153,
      "mean_token_accuracy": 0.9130744747817516,
      "num_tokens": 26033627.0,
      "step": 820
    },
    {
      "entropy": 0.3134147599339485,
      "epoch": 1.347543645960211,
      "grad_norm": 0.10888671875,
      "learning_rate": 0.0001213966662162496,
      "loss": 0.3158,
      "mean_token_accuracy": 0.9115599945187569,
      "num_tokens": 26351334.0,
      "step": 830
    },
    {
      "entropy": 0.30595332104712725,
      "epoch": 1.3637840032480715,
      "grad_norm": 0.11376953125,
      "learning_rate": 0.00011968094565108572,
      "loss": 0.2998,
      "mean_token_accuracy": 0.9158065438270568,
      "num_tokens": 26670167.0,
      "step": 840
    },
    {
      "entropy": 0.3209634754806757,
      "epoch": 1.380024360535932,
      "grad_norm": 0.10693359375,
      "learning_rate": 0.00011795917643427179,
      "loss": 0.3185,
      "mean_token_accuracy": 0.9116993598639965,
      "num_tokens": 26987963.0,
      "step": 850
    },
    {
      "entropy": 0.31424548048526046,
      "epoch": 1.3962647178237921,
      "grad_norm": 0.1171875,
      "learning_rate": 0.0001162318877264691,
      "loss": 0.3089,
      "mean_token_accuracy": 0.9129889853298664,
      "num_tokens": 27308192.0,
      "step": 860
    },
    {
      "entropy": 0.33518767151981593,
      "epoch": 1.4125050751116524,
      "grad_norm": 0.11572265625,
      "learning_rate": 0.00011449961038467389,
      "loss": 0.3334,
      "mean_token_accuracy": 0.907250489294529,
      "num_tokens": 27627277.0,
      "step": 870
    },
    {
      "entropy": 0.32196133993566034,
      "epoch": 1.4287454323995128,
      "grad_norm": 0.11083984375,
      "learning_rate": 0.00011276287679906639,
      "loss": 0.3235,
      "mean_token_accuracy": 0.9093112558126449,
      "num_tokens": 27943368.0,
      "step": 880
    },
    {
      "entropy": 0.3061803586781025,
      "epoch": 1.4449857896873732,
      "grad_norm": 0.115234375,
      "learning_rate": 0.00011102222072938832,
      "loss": 0.3011,
      "mean_token_accuracy": 0.915228334069252,
      "num_tokens": 28259836.0,
      "step": 890
    },
    {
      "entropy": 0.3232524123042822,
      "epoch": 1.4612261469752335,
      "grad_norm": 0.111328125,
      "learning_rate": 0.00010927817714089973,
      "loss": 0.3191,
      "mean_token_accuracy": 0.9117089517414569,
      "num_tokens": 28580230.0,
      "step": 900
    },
    {
      "entropy": 0.33082296065986155,
      "epoch": 1.4774665042630937,
      "grad_norm": 0.11962890625,
      "learning_rate": 0.00010753128203996519,
      "loss": 0.3269,
      "mean_token_accuracy": 0.9086541675031186,
      "num_tokens": 28898617.0,
      "step": 910
    },
    {
      "entropy": 0.3250410893931985,
      "epoch": 1.4937068615509541,
      "grad_norm": 0.150390625,
      "learning_rate": 0.00010578207230932,
      "loss": 0.319,
      "mean_token_accuracy": 0.9112100295722485,
      "num_tokens": 29215055.0,
      "step": 920
    },
    {
      "entropy": 0.3142668510787189,
      "epoch": 1.5099472188388146,
      "grad_norm": 0.1123046875,
      "learning_rate": 0.00010403108554306717,
      "loss": 0.3122,
      "mean_token_accuracy": 0.9117408633232117,
      "num_tokens": 29529393.0,
      "step": 930
    },
    {
      "entropy": 0.3135885909199715,
      "epoch": 1.5261875761266748,
      "grad_norm": 0.11572265625,
      "learning_rate": 0.00010227885988145563,
      "loss": 0.3075,
      "mean_token_accuracy": 0.9139442838728428,
      "num_tokens": 29851116.0,
      "step": 940
    },
    {
      "entropy": 0.3046269157901406,
      "epoch": 1.542427933414535,
      "grad_norm": 0.10693359375,
      "learning_rate": 0.00010052593384549082,
      "loss": 0.3034,
      "mean_token_accuracy": 0.9146199978888034,
      "num_tokens": 30169222.0,
      "step": 950
    },
    {
      "entropy": 0.31176882088184354,
      "epoch": 1.5586682907023954,
      "grad_norm": 0.11767578125,
      "learning_rate": 9.877284617142802e-05,
      "loss": 0.3083,
      "mean_token_accuracy": 0.9138757094740868,
      "num_tokens": 30484117.0,
      "step": 960
    },
    {
      "entropy": 0.31525961998850105,
      "epoch": 1.5749086479902559,
      "grad_norm": 0.11328125,
      "learning_rate": 9.702013564519954e-05,
      "loss": 0.311,
      "mean_token_accuracy": 0.9124061703681946,
      "num_tokens": 30804341.0,
      "step": 970
    },
    {
      "entropy": 0.3192242424935102,
      "epoch": 1.591149005278116,
      "grad_norm": 0.1123046875,
      "learning_rate": 9.526834093682685e-05,
      "loss": 0.3172,
      "mean_token_accuracy": 0.9116800054907799,
      "num_tokens": 31119505.0,
      "step": 980
    },
    {
      "entropy": 0.30036033764481546,
      "epoch": 1.6073893625659763,
      "grad_norm": 0.11376953125,
      "learning_rate": 9.351800043486823e-05,
      "loss": 0.298,
      "mean_token_accuracy": 0.9167301289737224,
      "num_tokens": 31433788.0,
      "step": 990
    },
    {
      "entropy": 0.3045342108234763,
      "epoch": 1.6236297198538368,
      "grad_norm": 0.11572265625,
      "learning_rate": 9.176965208095265e-05,
      "loss": 0.3011,
      "mean_token_accuracy": 0.915463775396347,
      "num_tokens": 31754135.0,
      "step": 1000
    },
    {
      "epoch": 1.6236297198538368,
      "eval_entropy": 0.3258335756436559,
      "eval_loss": 0.3386901617050171,
      "eval_mean_token_accuracy": 0.9073542472515398,
      "eval_num_tokens": 31754135.0,
      "eval_runtime": 177.024,
      "eval_samples_per_second": 2.954,
      "eval_steps_per_second": 1.48,
      "step": 1000
    },
    {
      "entropy": 0.3127696432173252,
      "epoch": 1.6398700771416972,
      "grad_norm": 0.11328125,
      "learning_rate": 9.002383320445163e-05,
      "loss": 0.3115,
      "mean_token_accuracy": 0.9123898334801197,
      "num_tokens": 32071093.0,
      "step": 1010
    },
    {
      "entropy": 0.32986372373998163,
      "epoch": 1.6561104344295574,
      "grad_norm": 0.11962890625,
      "learning_rate": 8.82810803573385e-05,
      "loss": 0.3244,
      "mean_token_accuracy": 0.9100102588534356,
      "num_tokens": 32391792.0,
      "step": 1020
    },
    {
      "entropy": 0.2965856045484543,
      "epoch": 1.6723507917174176,
      "grad_norm": 0.11279296875,
      "learning_rate": 8.654192914928739e-05,
      "loss": 0.2979,
      "mean_token_accuracy": 0.9166141480207444,
      "num_tokens": 32710598.0,
      "step": 1030
    },
    {
      "entropy": 0.2905145835131407,
      "epoch": 1.688591149005278,
      "grad_norm": 0.11376953125,
      "learning_rate": 8.480691408306097e-05,
      "loss": 0.2837,
      "mean_token_accuracy": 0.9194815665483475,
      "num_tokens": 33028070.0,
      "step": 1040
    },
    {
      "entropy": 0.2960927043110132,
      "epoch": 1.7048315062931385,
      "grad_norm": 0.1142578125,
      "learning_rate": 8.307656839023909e-05,
      "loss": 0.2971,
      "mean_token_accuracy": 0.9165702179074288,
      "num_tokens": 33345066.0,
      "step": 1050
    },
    {
      "entropy": 0.3149249626323581,
      "epoch": 1.7210718635809987,
      "grad_norm": 0.115234375,
      "learning_rate": 8.135142386733794e-05,
      "loss": 0.3076,
      "mean_token_accuracy": 0.9137652173638344,
      "num_tokens": 33662026.0,
      "step": 1060
    },
    {
      "entropy": 0.30231469254940746,
      "epoch": 1.7373122208688592,
      "grad_norm": 0.1181640625,
      "learning_rate": 7.963201071236971e-05,
      "loss": 0.302,
      "mean_token_accuracy": 0.9156768411397934,
      "num_tokens": 33978146.0,
      "step": 1070
    },
    {
      "entropy": 0.3091453406959772,
      "epoch": 1.7535525781567194,
      "grad_norm": 0.10400390625,
      "learning_rate": 7.791885736189447e-05,
      "loss": 0.3013,
      "mean_token_accuracy": 0.9143429882824421,
      "num_tokens": 34296547.0,
      "step": 1080
    },
    {
      "entropy": 0.2943760992027819,
      "epoch": 1.7697929354445798,
      "grad_norm": 0.12255859375,
      "learning_rate": 7.621249032861249e-05,
      "loss": 0.2944,
      "mean_token_accuracy": 0.9172896653413772,
      "num_tokens": 34613598.0,
      "step": 1090
    },
    {
      "entropy": 0.29523692447692157,
      "epoch": 1.7860332927324403,
      "grad_norm": 0.11669921875,
      "learning_rate": 7.451343403954856e-05,
      "loss": 0.2896,
      "mean_token_accuracy": 0.9178669437766075,
      "num_tokens": 34930737.0,
      "step": 1100
    },
    {
      "entropy": 0.30031131571158765,
      "epoch": 1.8022736500203005,
      "grad_norm": 0.11669921875,
      "learning_rate": 7.282221067487673e-05,
      "loss": 0.3018,
      "mean_token_accuracy": 0.9155903697013855,
      "num_tokens": 35247543.0,
      "step": 1110
    },
    {
      "entropy": 0.31066682981327176,
      "epoch": 1.8185140073081607,
      "grad_norm": 0.11181640625,
      "learning_rate": 7.113934000743598e-05,
      "loss": 0.3016,
      "mean_token_accuracy": 0.9159741207957268,
      "num_tokens": 35563846.0,
      "step": 1120
    },
    {
      "entropy": 0.3058769850060344,
      "epoch": 1.8347543645960211,
      "grad_norm": 0.11865234375,
      "learning_rate": 6.946533924298566e-05,
      "loss": 0.3064,
      "mean_token_accuracy": 0.9143878504633903,
      "num_tokens": 35880192.0,
      "step": 1130
    },
    {
      "entropy": 0.3041055051609874,
      "epoch": 1.8509947218838816,
      "grad_norm": 0.1279296875,
      "learning_rate": 6.78007228612497e-05,
      "loss": 0.3005,
      "mean_token_accuracy": 0.9166514202952385,
      "num_tokens": 36199306.0,
      "step": 1140
    },
    {
      "entropy": 0.2977755956351757,
      "epoch": 1.8672350791717418,
      "grad_norm": 0.11865234375,
      "learning_rate": 6.614600245779894e-05,
      "loss": 0.293,
      "mean_token_accuracy": 0.9174539044499397,
      "num_tokens": 36513771.0,
      "step": 1150
    },
    {
      "entropy": 0.29172705616801975,
      "epoch": 1.883475436459602,
      "grad_norm": 0.12890625,
      "learning_rate": 6.45016865868195e-05,
      "loss": 0.2892,
      "mean_token_accuracy": 0.9188766203820705,
      "num_tokens": 36831018.0,
      "step": 1160
    },
    {
      "entropy": 0.3056895272806287,
      "epoch": 1.8997157937474625,
      "grad_norm": 0.11083984375,
      "learning_rate": 6.286828060481626e-05,
      "loss": 0.3017,
      "mean_token_accuracy": 0.9152774572372436,
      "num_tokens": 37150959.0,
      "step": 1170
    },
    {
      "entropy": 0.3092532352544367,
      "epoch": 1.915956151035323,
      "grad_norm": 0.1240234375,
      "learning_rate": 6.124628651529875e-05,
      "loss": 0.3056,
      "mean_token_accuracy": 0.9151738248765469,
      "num_tokens": 37466849.0,
      "step": 1180
    },
    {
      "entropy": 0.29272988215088847,
      "epoch": 1.9321965083231831,
      "grad_norm": 0.126953125,
      "learning_rate": 5.963620281449778e-05,
      "loss": 0.2913,
      "mean_token_accuracy": 0.9184561900794506,
      "num_tokens": 37786024.0,
      "step": 1190
    },
    {
      "entropy": 0.28097219932824374,
      "epoch": 1.9484368656110433,
      "grad_norm": 0.1240234375,
      "learning_rate": 5.80385243381599e-05,
      "loss": 0.2751,
      "mean_token_accuracy": 0.9222856566309929,
      "num_tokens": 38104138.0,
      "step": 1200
    },
    {
      "entropy": 0.31009797360748054,
      "epoch": 1.9646772228989038,
      "grad_norm": 0.111328125,
      "learning_rate": 5.645374210946674e-05,
      "loss": 0.303,
      "mean_token_accuracy": 0.9149694032967091,
      "num_tokens": 38420179.0,
      "step": 1210
    },
    {
      "entropy": 0.28405070351436734,
      "epoch": 1.9809175801867642,
      "grad_norm": 0.12353515625,
      "learning_rate": 5.488234318812636e-05,
      "loss": 0.2821,
      "mean_token_accuracy": 0.9190818406641483,
      "num_tokens": 38738629.0,
      "step": 1220
    },
    {
      "entropy": 0.2817179733887315,
      "epoch": 1.9971579374746244,
      "grad_norm": 0.11083984375,
      "learning_rate": 5.332481052068243e-05,
      "loss": 0.278,
      "mean_token_accuracy": 0.9208778738975525,
      "num_tokens": 39056765.0,
      "step": 1230
    },
    {
      "entropy": 0.31614991583121127,
      "epoch": 2.0129922858302884,
      "grad_norm": 0.1162109375,
      "learning_rate": 5.1781622792087735e-05,
      "loss": 0.3034,
      "mean_token_accuracy": 0.915187330582203,
      "num_tokens": 39369100.0,
      "step": 1240
    },
    {
      "entropy": 0.2791068171150982,
      "epoch": 2.0292326431181484,
      "grad_norm": 0.1259765625,
      "learning_rate": 5.0253254278587195e-05,
      "loss": 0.2719,
      "mean_token_accuracy": 0.9229367971420288,
      "num_tokens": 39685618.0,
      "step": 1250
    },
    {
      "entropy": 0.26613821778446434,
      "epoch": 2.045473000406009,
      "grad_norm": 0.11962890625,
      "learning_rate": 4.8740174701956085e-05,
      "loss": 0.2589,
      "mean_token_accuracy": 0.9257113888859749,
      "num_tokens": 40006044.0,
      "step": 1260
    },
    {
      "entropy": 0.26739490162581203,
      "epoch": 2.0617133576938693,
      "grad_norm": 0.1298828125,
      "learning_rate": 4.724284908513802e-05,
      "loss": 0.258,
      "mean_token_accuracy": 0.9269713960587979,
      "num_tokens": 40324361.0,
      "step": 1270
    },
    {
      "entropy": 0.2743425130844116,
      "epoch": 2.0779537149817298,
      "grad_norm": 0.11865234375,
      "learning_rate": 4.576173760932658e-05,
      "loss": 0.2659,
      "mean_token_accuracy": 0.923577631264925,
      "num_tokens": 40641432.0,
      "step": 1280
    },
    {
      "entropy": 0.2544120085425675,
      "epoch": 2.0941940722695898,
      "grad_norm": 0.12109375,
      "learning_rate": 4.429729547253574e-05,
      "loss": 0.2481,
      "mean_token_accuracy": 0.9286975994706154,
      "num_tokens": 40961580.0,
      "step": 1290
    },
    {
      "entropy": 0.26631462909281256,
      "epoch": 2.11043442955745,
      "grad_norm": 0.11962890625,
      "learning_rate": 4.2849972749701104e-05,
      "loss": 0.2616,
      "mean_token_accuracy": 0.9257244259119034,
      "num_tokens": 41280252.0,
      "step": 1300
    },
    {
      "entropy": 0.2599586082622409,
      "epoch": 2.1266747868453106,
      "grad_norm": 0.1171875,
      "learning_rate": 4.142021425435612e-05,
      "loss": 0.2498,
      "mean_token_accuracy": 0.9278856895864009,
      "num_tokens": 41599525.0,
      "step": 1310
    },
    {
      "entropy": 0.2707246173173189,
      "epoch": 2.142915144133171,
      "grad_norm": 0.125,
      "learning_rate": 4.0008459401924936e-05,
      "loss": 0.2634,
      "mean_token_accuracy": 0.9242307640612125,
      "num_tokens": 41915594.0,
      "step": 1320
    },
    {
      "entropy": 0.2711412087082863,
      "epoch": 2.159155501421031,
      "grad_norm": 0.126953125,
      "learning_rate": 3.8615142074674625e-05,
      "loss": 0.2641,
      "mean_token_accuracy": 0.9243660770356655,
      "num_tokens": 42229552.0,
      "step": 1330
    },
    {
      "entropy": 0.26722599640488626,
      "epoch": 2.1753958587088915,
      "grad_norm": 0.126953125,
      "learning_rate": 3.7240690488367833e-05,
      "loss": 0.2595,
      "mean_token_accuracy": 0.9257352128624916,
      "num_tokens": 42547446.0,
      "step": 1340
    },
    {
      "entropy": 0.260900554433465,
      "epoch": 2.191636215996752,
      "grad_norm": 0.12890625,
      "learning_rate": 3.588552706065672e-05,
      "loss": 0.2532,
      "mean_token_accuracy": 0.9262578003108501,
      "num_tokens": 42866068.0,
      "step": 1350
    },
    {
      "entropy": 0.26855619475245474,
      "epoch": 2.2078765732846124,
      "grad_norm": 0.125,
      "learning_rate": 3.4550068281259295e-05,
      "loss": 0.2627,
      "mean_token_accuracy": 0.9253914162516594,
      "num_tokens": 43186333.0,
      "step": 1360
    },
    {
      "entropy": 0.25198941631242633,
      "epoch": 2.2241169305724724,
      "grad_norm": 0.1142578125,
      "learning_rate": 3.323472458395712e-05,
      "loss": 0.2422,
      "mean_token_accuracy": 0.9293296724557877,
      "num_tokens": 43502312.0,
      "step": 1370
    },
    {
      "entropy": 0.2605569550767541,
      "epoch": 2.240357287860333,
      "grad_norm": 0.115234375,
      "learning_rate": 3.19399002204547e-05,
      "loss": 0.2497,
      "mean_token_accuracy": 0.9273073241114617,
      "num_tokens": 43820947.0,
      "step": 1380
    },
    {
      "entropy": 0.2586898487061262,
      "epoch": 2.2565976451481933,
      "grad_norm": 0.1279296875,
      "learning_rate": 3.066599313613849e-05,
      "loss": 0.2539,
      "mean_token_accuracy": 0.9264877527952194,
      "num_tokens": 44138605.0,
      "step": 1390
    },
    {
      "entropy": 0.26510731065645815,
      "epoch": 2.2728380024360537,
      "grad_norm": 0.1220703125,
      "learning_rate": 2.9413394847774178e-05,
      "loss": 0.2613,
      "mean_token_accuracy": 0.9252329200506211,
      "num_tokens": 44454341.0,
      "step": 1400
    },
    {
      "entropy": 0.2784864826127887,
      "epoch": 2.2890783597239137,
      "grad_norm": 0.134765625,
      "learning_rate": 2.818249032317981e-05,
      "loss": 0.2675,
      "mean_token_accuracy": 0.9228313736617565,
      "num_tokens": 44770720.0,
      "step": 1410
    },
    {
      "entropy": 0.2637157242745161,
      "epoch": 2.305318717011774,
      "grad_norm": 0.126953125,
      "learning_rate": 2.6973657862911418e-05,
      "loss": 0.2574,
      "mean_token_accuracy": 0.9266756564378739,
      "num_tokens": 45089446.0,
      "step": 1420
    },
    {
      "entropy": 0.2700337437912822,
      "epoch": 2.3215590742996346,
      "grad_norm": 0.130859375,
      "learning_rate": 2.578726898399799e-05,
      "loss": 0.2635,
      "mean_token_accuracy": 0.9248986080288887,
      "num_tokens": 45408829.0,
      "step": 1430
    },
    {
      "entropy": 0.25822389777749777,
      "epoch": 2.337799431587495,
      "grad_norm": 0.12255859375,
      "learning_rate": 2.4623688305761005e-05,
      "loss": 0.2474,
      "mean_token_accuracy": 0.9288143701851368,
      "num_tokens": 45725486.0,
      "step": 1440
    },
    {
      "entropy": 0.25613431744277476,
      "epoch": 2.3540397888753555,
      "grad_norm": 0.1328125,
      "learning_rate": 2.3483273437754107e-05,
      "loss": 0.2473,
      "mean_token_accuracy": 0.9285800620913506,
      "num_tokens": 46043580.0,
      "step": 1450
    },
    {
      "entropy": 0.2738852068781853,
      "epoch": 2.3702801461632155,
      "grad_norm": 0.1572265625,
      "learning_rate": 2.2366374869856998e-05,
      "loss": 0.2696,
      "mean_token_accuracy": 0.9233844071626663,
      "num_tokens": 46364834.0,
      "step": 1460
    },
    {
      "entropy": 0.2616811253130436,
      "epoch": 2.386520503451076,
      "grad_norm": 0.140625,
      "learning_rate": 2.12733358645574e-05,
      "loss": 0.2551,
      "mean_token_accuracy": 0.926591832190752,
      "num_tokens": 46682176.0,
      "step": 1470
    },
    {
      "entropy": 0.2675771150738001,
      "epoch": 2.4027608607389364,
      "grad_norm": 0.1318359375,
      "learning_rate": 2.0204492351454472e-05,
      "loss": 0.2583,
      "mean_token_accuracy": 0.9257503561675549,
      "num_tokens": 47001783.0,
      "step": 1480
    },
    {
      "entropy": 0.2594972148537636,
      "epoch": 2.4190012180267964,
      "grad_norm": 0.11962890625,
      "learning_rate": 1.9160172824015586e-05,
      "loss": 0.2523,
      "mean_token_accuracy": 0.9283341206610203,
      "num_tokens": 47318361.0,
      "step": 1490
    },
    {
      "entropy": 0.25459160562604666,
      "epoch": 2.435241575314657,
      "grad_norm": 0.12451171875,
      "learning_rate": 1.8140698238618846e-05,
      "loss": 0.2449,
      "mean_token_accuracy": 0.9285047873854637,
      "num_tokens": 47636931.0,
      "step": 1500
    },
    {
      "epoch": 2.435241575314657,
      "eval_entropy": 0.2933734593102495,
      "eval_loss": 0.3246920108795166,
      "eval_mean_token_accuracy": 0.911526195193065,
      "eval_num_tokens": 47636931.0,
      "eval_runtime": 176.9613,
      "eval_samples_per_second": 2.955,
      "eval_steps_per_second": 1.481,
      "step": 1500
    },
    {
      "entropy": 0.26183086801320316,
      "epoch": 2.4514819326025172,
      "grad_norm": 0.1259765625,
      "learning_rate": 1.7146381915911624e-05,
      "loss": 0.2547,
      "mean_token_accuracy": 0.9276451498270035,
      "num_tokens": 47956999.0,
      "step": 1510
    },
    {
      "entropy": 0.2695474956184626,
      "epoch": 2.4677222898903777,
      "grad_norm": 0.1328125,
      "learning_rate": 1.6177529444516194e-05,
      "loss": 0.2635,
      "mean_token_accuracy": 0.9245093256235123,
      "num_tokens": 48274856.0,
      "step": 1520
    },
    {
      "entropy": 0.2680878189392388,
      "epoch": 2.483962647178238,
      "grad_norm": 0.12109375,
      "learning_rate": 1.5234438587111433e-05,
      "loss": 0.2625,
      "mean_token_accuracy": 0.9249465495347977,
      "num_tokens": 48592891.0,
      "step": 1530
    },
    {
      "entropy": 0.27772825080901387,
      "epoch": 2.500203004466098,
      "grad_norm": 0.12890625,
      "learning_rate": 1.4317399188919767e-05,
      "loss": 0.2699,
      "mean_token_accuracy": 0.9224151849746705,
      "num_tokens": 48910313.0,
      "step": 1540
    },
    {
      "entropy": 0.2515021483413875,
      "epoch": 2.5164433617539586,
      "grad_norm": 0.12451171875,
      "learning_rate": 1.342669308862764e-05,
      "loss": 0.2438,
      "mean_token_accuracy": 0.9300386533141136,
      "num_tokens": 49229459.0,
      "step": 1550
    },
    {
      "entropy": 0.2689725374802947,
      "epoch": 2.532683719041819,
      "grad_norm": 0.1328125,
      "learning_rate": 1.2562594031766262e-05,
      "loss": 0.2632,
      "mean_token_accuracy": 0.9254089832305908,
      "num_tokens": 49543124.0,
      "step": 1560
    },
    {
      "entropy": 0.2733833186328411,
      "epoch": 2.548924076329679,
      "grad_norm": 0.1240234375,
      "learning_rate": 1.1725367586580161e-05,
      "loss": 0.2677,
      "mean_token_accuracy": 0.9235794551670551,
      "num_tokens": 49860038.0,
      "step": 1570
    },
    {
      "entropy": 0.26735376939177513,
      "epoch": 2.5651644336175394,
      "grad_norm": 0.125,
      "learning_rate": 1.0915271062408428e-05,
      "loss": 0.2587,
      "mean_token_accuracy": 0.9263471320271492,
      "num_tokens": 50174575.0,
      "step": 1580
    },
    {
      "entropy": 0.2635248651728034,
      "epoch": 2.5814047909054,
      "grad_norm": 0.1171875,
      "learning_rate": 1.0132553430604608e-05,
      "loss": 0.2566,
      "mean_token_accuracy": 0.9271455019712448,
      "num_tokens": 50489544.0,
      "step": 1590
    },
    {
      "entropy": 0.2556127179414034,
      "epoch": 2.5976451481932603,
      "grad_norm": 0.12060546875,
      "learning_rate": 9.377455248018963e-06,
      "loss": 0.2482,
      "mean_token_accuracy": 0.9281990304589272,
      "num_tokens": 50806996.0,
      "step": 1600
    },
    {
      "entropy": 0.26483127316460014,
      "epoch": 2.6138855054811208,
      "grad_norm": 0.1220703125,
      "learning_rate": 8.650208583066689e-06,
      "loss": 0.257,
      "mean_token_accuracy": 0.9260235637426376,
      "num_tokens": 51122784.0,
      "step": 1610
    },
    {
      "entropy": 0.25958297261968255,
      "epoch": 2.630125862768981,
      "grad_norm": 0.12451171875,
      "learning_rate": 7.951036944405287e-06,
      "loss": 0.2545,
      "mean_token_accuracy": 0.9273005492985249,
      "num_tokens": 51440528.0,
      "step": 1620
    },
    {
      "entropy": 0.2685284251347184,
      "epoch": 2.646366220056841,
      "grad_norm": 0.130859375,
      "learning_rate": 7.2801552122422454e-06,
      "loss": 0.262,
      "mean_token_accuracy": 0.9255671858787536,
      "num_tokens": 51758667.0,
      "step": 1630
    },
    {
      "entropy": 0.27078196927905085,
      "epoch": 2.6626065773447016,
      "grad_norm": 0.13671875,
      "learning_rate": 6.637769572294905e-06,
      "loss": 0.2671,
      "mean_token_accuracy": 0.9235647596418858,
      "num_tokens": 52074499.0,
      "step": 1640
    },
    {
      "entropy": 0.2435209064744413,
      "epoch": 2.678846934632562,
      "grad_norm": 0.123046875,
      "learning_rate": 6.024077452422128e-06,
      "loss": 0.2355,
      "mean_token_accuracy": 0.9319050885736943,
      "num_tokens": 52390967.0,
      "step": 1650
    },
    {
      "entropy": 0.26512936893850564,
      "epoch": 2.695087291920422,
      "grad_norm": 0.1259765625,
      "learning_rate": 5.439267461947883e-06,
      "loss": 0.2566,
      "mean_token_accuracy": 0.9259013153612614,
      "num_tokens": 52705695.0,
      "step": 1660
    },
    {
      "entropy": 0.25156018473207953,
      "epoch": 2.7113276492082825,
      "grad_norm": 0.126953125,
      "learning_rate": 4.883519333694742e-06,
      "loss": 0.2439,
      "mean_token_accuracy": 0.9301516726613045,
      "num_tokens": 53023475.0,
      "step": 1670
    },
    {
      "entropy": 0.2691020904108882,
      "epoch": 2.727568006496143,
      "grad_norm": 0.1279296875,
      "learning_rate": 4.3570038687458125e-06,
      "loss": 0.2606,
      "mean_token_accuracy": 0.9248066917061806,
      "num_tokens": 53343151.0,
      "step": 1680
    },
    {
      "entropy": 0.2650392297655344,
      "epoch": 2.7438083637840034,
      "grad_norm": 0.12060546875,
      "learning_rate": 3.859882883951371e-06,
      "loss": 0.2582,
      "mean_token_accuracy": 0.9258100286126136,
      "num_tokens": 53660043.0,
      "step": 1690
    },
    {
      "entropy": 0.2701218418776989,
      "epoch": 2.760048721071864,
      "grad_norm": 0.1279296875,
      "learning_rate": 3.3923091621968493e-06,
      "loss": 0.2625,
      "mean_token_accuracy": 0.9250252448022366,
      "num_tokens": 53974228.0,
      "step": 1700
    },
    {
      "entropy": 0.26398087330162523,
      "epoch": 2.776289078359724,
      "grad_norm": 0.12109375,
      "learning_rate": 2.954426405447297e-06,
      "loss": 0.2567,
      "mean_token_accuracy": 0.9267565242946147,
      "num_tokens": 54294390.0,
      "step": 1710
    },
    {
      "entropy": 0.25762436566874386,
      "epoch": 2.7925294356475843,
      "grad_norm": 0.1279296875,
      "learning_rate": 2.546369190582576e-06,
      "loss": 0.2513,
      "mean_token_accuracy": 0.9276721946895122,
      "num_tokens": 54606801.0,
      "step": 1720
    },
    {
      "entropy": 0.25800341702997687,
      "epoch": 2.8087697929354447,
      "grad_norm": 0.1298828125,
      "learning_rate": 2.1682629280372456e-06,
      "loss": 0.2501,
      "mean_token_accuracy": 0.927888036519289,
      "num_tokens": 54926091.0,
      "step": 1730
    },
    {
      "entropy": 0.2676691495813429,
      "epoch": 2.8250101502233047,
      "grad_norm": 0.11767578125,
      "learning_rate": 1.820223823257372e-06,
      "loss": 0.2588,
      "mean_token_accuracy": 0.926097498089075,
      "num_tokens": 55243114.0,
      "step": 1740
    },
    {
      "entropy": 0.2512422949075699,
      "epoch": 2.841250507511165,
      "grad_norm": 0.1259765625,
      "learning_rate": 1.502358840986562e-06,
      "loss": 0.2415,
      "mean_token_accuracy": 0.9298646509647369,
      "num_tokens": 55560960.0,
      "step": 1750
    },
    {
      "entropy": 0.25036826338618995,
      "epoch": 2.8574908647990256,
      "grad_norm": 0.11572265625,
      "learning_rate": 1.2147656723918821e-06,
      "loss": 0.2444,
      "mean_token_accuracy": 0.9300106823444366,
      "num_tokens": 55878679.0,
      "step": 1760
    },
    {
      "entropy": 0.2511869052425027,
      "epoch": 2.873731222086886,
      "grad_norm": 0.12890625,
      "learning_rate": 9.575327050398875e-07,
      "loss": 0.2445,
      "mean_token_accuracy": 0.9300001263618469,
      "num_tokens": 56198045.0,
      "step": 1770
    },
    {
      "entropy": 0.26052912287414076,
      "epoch": 2.8899715793747465,
      "grad_norm": 0.12109375,
      "learning_rate": 7.307389957320276e-07,
      "loss": 0.254,
      "mean_token_accuracy": 0.9268272012472153,
      "num_tokens": 56516117.0,
      "step": 1780
    },
    {
      "entropy": 0.28557793945074084,
      "epoch": 2.9062119366626065,
      "grad_norm": 0.1298828125,
      "learning_rate": 5.344542462076496e-07,
      "loss": 0.2782,
      "mean_token_accuracy": 0.921804615855217,
      "num_tokens": 56835068.0,
      "step": 1790
    },
    {
      "entropy": 0.26550282556563615,
      "epoch": 2.922452293950467,
      "grad_norm": 0.12451171875,
      "learning_rate": 3.687387817221999e-07,
      "loss": 0.2607,
      "mean_token_accuracy": 0.9259473696351052,
      "num_tokens": 57150238.0,
      "step": 1800
    },
    {
      "entropy": 0.2448538973927498,
      "epoch": 2.9386926512383273,
      "grad_norm": 0.11376953125,
      "learning_rate": 2.3364353250716619e-07,
      "loss": 0.2381,
      "mean_token_accuracy": 0.9314554944634438,
      "num_tokens": 57465824.0,
      "step": 1810
    },
    {
      "entropy": 0.2839592194184661,
      "epoch": 2.9549330085261873,
      "grad_norm": 0.12353515625,
      "learning_rate": 1.292100181173872e-07,
      "loss": 0.2776,
      "mean_token_accuracy": 0.921784307807684,
      "num_tokens": 57785604.0,
      "step": 1820
    },
    {
      "entropy": 0.2578106375411153,
      "epoch": 2.971173365814048,
      "grad_norm": 0.126953125,
      "learning_rate": 5.547033467060425e-08,
      "loss": 0.2512,
      "mean_token_accuracy": 0.9276329085230828,
      "num_tokens": 58101643.0,
      "step": 1830
    },
    {
      "entropy": 0.2721929314546287,
      "epoch": 2.9874137231019082,
      "grad_norm": 0.12109375,
      "learning_rate": 1.2447144983229742e-08,
      "loss": 0.2666,
      "mean_token_accuracy": 0.9238130748271942,
      "num_tokens": 58420389.0,
      "step": 1840
    }
  ],
  "logging_steps": 10,
  "max_steps": 1848,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 2.382555575899939e+18,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}