{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.9987174005985464,
  "eval_steps": 500,
  "global_step": 73,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.013681060282171868,
      "grad_norm": 7.548368406170154,
      "learning_rate": 0.0,
      "loss": 1.5136,
      "step": 1
    },
    {
      "epoch": 0.027362120564343735,
      "grad_norm": 7.827536257097371,
      "learning_rate": 6.666666666666667e-07,
      "loss": 1.4973,
      "step": 2
    },
    {
      "epoch": 0.041043180846515606,
      "grad_norm": 7.398147035041813,
      "learning_rate": 1.3333333333333334e-06,
      "loss": 1.4526,
      "step": 3
    },
    {
      "epoch": 0.05472424112868747,
      "grad_norm": 6.621467563471504,
      "learning_rate": 2.0000000000000003e-06,
      "loss": 1.3913,
      "step": 4
    },
    {
      "epoch": 0.06840530141085935,
      "grad_norm": 5.273700898837342,
      "learning_rate": 2.666666666666667e-06,
      "loss": 1.3428,
      "step": 5
    },
    {
      "epoch": 0.08208636169303121,
      "grad_norm": 3.9736038530372646,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 1.1501,
      "step": 6
    },
    {
      "epoch": 0.09576742197520308,
      "grad_norm": 3.5059552761650705,
      "learning_rate": 4.000000000000001e-06,
      "loss": 1.2428,
      "step": 7
    },
    {
      "epoch": 0.10944848225737494,
      "grad_norm": 3.6945698874338477,
      "learning_rate": 4.666666666666667e-06,
      "loss": 1.3626,
      "step": 8
    },
    {
      "epoch": 0.12312954253954682,
      "grad_norm": 2.9010276135864124,
      "learning_rate": 5.333333333333334e-06,
      "loss": 1.0364,
      "step": 9
    },
    {
      "epoch": 0.1368106028217187,
      "grad_norm": 3.6634274177297486,
      "learning_rate": 6e-06,
      "loss": 1.1989,
      "step": 10
    },
    {
      "epoch": 0.15049166310389056,
      "grad_norm": 3.3162811107208,
      "learning_rate": 6.666666666666667e-06,
      "loss": 1.074,
      "step": 11
    },
    {
      "epoch": 0.16417272338606242,
      "grad_norm": 3.4273757924875965,
      "learning_rate": 7.333333333333333e-06,
      "loss": 0.9367,
      "step": 12
    },
    {
      "epoch": 0.1778537836682343,
      "grad_norm": 3.712520280275751,
      "learning_rate": 8.000000000000001e-06,
      "loss": 0.9526,
      "step": 13
    },
    {
      "epoch": 0.19153484395040615,
      "grad_norm": 3.489057750483782,
      "learning_rate": 8.666666666666668e-06,
      "loss": 1.124,
      "step": 14
    },
    {
      "epoch": 0.20521590423257802,
      "grad_norm": 2.588795249616481,
      "learning_rate": 9.333333333333334e-06,
      "loss": 0.8115,
      "step": 15
    },
    {
      "epoch": 0.21889696451474988,
      "grad_norm": 2.5083608844278915,
      "learning_rate": 1e-05,
      "loss": 0.8998,
      "step": 16
    },
    {
      "epoch": 0.23257802479692177,
      "grad_norm": 1.733597300225938,
      "learning_rate": 9.998562273265786e-06,
      "loss": 0.7702,
      "step": 17
    },
    {
      "epoch": 0.24625908507909364,
      "grad_norm": 1.91140737255229,
      "learning_rate": 9.994249919886402e-06,
      "loss": 0.8392,
      "step": 18
    },
    {
      "epoch": 0.2599401453612655,
      "grad_norm": 2.2610549095712646,
      "learning_rate": 9.98706541985615e-06,
      "loss": 0.9605,
      "step": 19
    },
    {
      "epoch": 0.2736212056434374,
      "grad_norm": 1.820469178389298,
      "learning_rate": 9.977012904914133e-06,
      "loss": 0.8495,
      "step": 20
    },
    {
      "epoch": 0.28730226592560926,
      "grad_norm": 1.8986542607595096,
      "learning_rate": 9.964098156168143e-06,
      "loss": 0.8255,
      "step": 21
    },
    {
      "epoch": 0.3009833262077811,
      "grad_norm": 1.8732046088429473,
      "learning_rate": 9.948328600769996e-06,
      "loss": 0.8314,
      "step": 22
    },
    {
      "epoch": 0.314664386489953,
      "grad_norm": 2.022587424257079,
      "learning_rate": 9.929713307644245e-06,
      "loss": 0.9394,
      "step": 23
    },
    {
      "epoch": 0.32834544677212485,
      "grad_norm": 2.185047520868034,
      "learning_rate": 9.908262982272724e-06,
      "loss": 0.9943,
      "step": 24
    },
    {
      "epoch": 0.3420265070542967,
      "grad_norm": 1.858153572034805,
      "learning_rate": 9.883989960537934e-06,
      "loss": 0.919,
      "step": 25
    },
    {
      "epoch": 0.3557075673364686,
      "grad_norm": 1.6533796703245207,
      "learning_rate": 9.85690820162878e-06,
      "loss": 0.9009,
      "step": 26
    },
    {
      "epoch": 0.36938862761864044,
      "grad_norm": 1.5392129172041236,
      "learning_rate": 9.827033280012783e-06,
      "loss": 0.8536,
      "step": 27
    },
    {
      "epoch": 0.3830696879008123,
      "grad_norm": 1.2934191579255045,
      "learning_rate": 9.794382376479334e-06,
      "loss": 0.7038,
      "step": 28
    },
    {
      "epoch": 0.39675074818298417,
      "grad_norm": 1.5922900609334563,
      "learning_rate": 9.7589742682592e-06,
      "loss": 0.8082,
      "step": 29
    },
    {
      "epoch": 0.41043180846515603,
      "grad_norm": 1.3349784246999898,
      "learning_rate": 9.720829318225897e-06,
      "loss": 0.7657,
      "step": 30
    },
    {
      "epoch": 0.4241128687473279,
      "grad_norm": 1.3968084241612435,
      "learning_rate": 9.6799694631852e-06,
      "loss": 0.7583,
      "step": 31
    },
    {
      "epoch": 0.43779392902949976,
      "grad_norm": 1.4438050306391899,
      "learning_rate": 9.63641820125949e-06,
      "loss": 0.8281,
      "step": 32
    },
    {
      "epoch": 0.4514749893116717,
      "grad_norm": 1.5653751821202764,
      "learning_rate": 9.590200578374198e-06,
      "loss": 0.8164,
      "step": 33
    },
    {
      "epoch": 0.46515604959384355,
      "grad_norm": 1.4428794862229695,
      "learning_rate": 9.541343173854128e-06,
      "loss": 0.7563,
      "step": 34
    },
    {
      "epoch": 0.4788371098760154,
      "grad_norm": 1.424184980221859,
      "learning_rate": 9.48987408513794e-06,
      "loss": 0.8012,
      "step": 35
    },
    {
      "epoch": 0.4925181701581873,
      "grad_norm": 1.4959423264612097,
      "learning_rate": 9.435822911619564e-06,
      "loss": 0.7733,
      "step": 36
    },
    {
      "epoch": 0.5061992304403591,
      "grad_norm": 1.5145765101418414,
      "learning_rate": 9.379220737625877e-06,
      "loss": 0.8185,
      "step": 37
    },
    {
      "epoch": 0.519880290722531,
      "grad_norm": 1.4205405113619893,
      "learning_rate": 9.320100114540382e-06,
      "loss": 0.7728,
      "step": 38
    },
    {
      "epoch": 0.5335613510047028,
      "grad_norm": 1.3618866165050287,
      "learning_rate": 9.258495042083222e-06,
      "loss": 0.7188,
      "step": 39
    },
    {
      "epoch": 0.5472424112868748,
      "grad_norm": 1.3210682200708475,
      "learning_rate": 9.19444094875825e-06,
      "loss": 0.7462,
      "step": 40
    },
    {
      "epoch": 0.5609234715690466,
      "grad_norm": 1.5209903146284076,
      "learning_rate": 9.127974671478432e-06,
      "loss": 0.8553,
      "step": 41
    },
    {
      "epoch": 0.5746045318512185,
      "grad_norm": 1.320169467583813,
      "learning_rate": 9.059134434381274e-06,
      "loss": 0.7292,
      "step": 42
    },
    {
      "epoch": 0.5882855921333904,
      "grad_norm": 1.3832163792888312,
      "learning_rate": 8.987959826846479e-06,
      "loss": 0.7342,
      "step": 43
    },
    {
      "epoch": 0.6019666524155622,
      "grad_norm": 1.3410523808663086,
      "learning_rate": 8.914491780728471e-06,
      "loss": 0.7135,
      "step": 44
    },
    {
      "epoch": 0.6156477126977341,
      "grad_norm": 1.7453570328171104,
      "learning_rate": 8.838772546816857e-06,
      "loss": 0.895,
      "step": 45
    },
    {
      "epoch": 0.629328772979906,
      "grad_norm": 1.3236234248181074,
      "learning_rate": 8.760845670538387e-06,
      "loss": 0.7094,
      "step": 46
    },
    {
      "epoch": 0.6430098332620778,
      "grad_norm": 1.3474642609149368,
      "learning_rate": 8.6807559669144e-06,
      "loss": 0.7463,
      "step": 47
    },
    {
      "epoch": 0.6566908935442497,
      "grad_norm": 1.3020601607506088,
      "learning_rate": 8.598549494788111e-06,
      "loss": 0.7731,
      "step": 48
    },
    {
      "epoch": 0.6703719538264216,
      "grad_norm": 1.5137293508647862,
      "learning_rate": 8.5142735303366e-06,
      "loss": 0.877,
      "step": 49
    },
    {
      "epoch": 0.6840530141085934,
      "grad_norm": 1.5403773102256209,
      "learning_rate": 8.427976539882725e-06,
      "loss": 0.8081,
      "step": 50
    },
    {
      "epoch": 0.6977340743907653,
      "grad_norm": 1.4219674838583494,
      "learning_rate": 8.339708152022586e-06,
      "loss": 0.7865,
      "step": 51
    },
    {
      "epoch": 0.7114151346729372,
      "grad_norm": 1.3635386118244723,
      "learning_rate": 8.24951912908459e-06,
      "loss": 0.7789,
      "step": 52
    },
    {
      "epoch": 0.725096194955109,
      "grad_norm": 1.2536175575645903,
      "learning_rate": 8.157461337936506e-06,
      "loss": 0.7167,
      "step": 53
    },
    {
      "epoch": 0.7387772552372809,
      "grad_norm": 1.399259576609449,
      "learning_rate": 8.063587720157298e-06,
      "loss": 0.7842,
      "step": 54
    },
    {
      "epoch": 0.7524583155194527,
      "grad_norm": 1.3669785990529744,
      "learning_rate": 7.967952261590936e-06,
      "loss": 0.8057,
      "step": 55
    },
    {
      "epoch": 0.7661393758016246,
      "grad_norm": 1.2116543523109478,
      "learning_rate": 7.870609961299627e-06,
      "loss": 0.71,
      "step": 56
    },
    {
      "epoch": 0.7798204360837965,
      "grad_norm": 1.496951541567372,
      "learning_rate": 7.771616799934372e-06,
      "loss": 0.9227,
      "step": 57
    },
    {
      "epoch": 0.7935014963659683,
      "grad_norm": 1.4409586905300167,
      "learning_rate": 7.67102970754101e-06,
      "loss": 0.7771,
      "step": 58
    },
    {
      "epoch": 0.8071825566481402,
      "grad_norm": 1.3689064689741741,
      "learning_rate": 7.568906530820281e-06,
      "loss": 0.7964,
      "step": 59
    },
    {
      "epoch": 0.8208636169303121,
      "grad_norm": 1.4659172981797304,
      "learning_rate": 7.465305999860728e-06,
      "loss": 0.7944,
      "step": 60
    },
    {
      "epoch": 0.8345446772124839,
      "grad_norm": 1.5394174805450438,
      "learning_rate": 7.360287694363566e-06,
      "loss": 0.8467,
      "step": 61
    },
    {
      "epoch": 0.8482257374946558,
      "grad_norm": 1.577909399946238,
      "learning_rate": 7.253912009378953e-06,
      "loss": 0.8211,
      "step": 62
    },
    {
      "epoch": 0.8619067977768277,
      "grad_norm": 1.1486399152909672,
      "learning_rate": 7.146240120573358e-06,
      "loss": 0.6165,
      "step": 63
    },
    {
      "epoch": 0.8755878580589995,
      "grad_norm": 1.3636010924515631,
      "learning_rate": 7.037333949048005e-06,
      "loss": 0.7381,
      "step": 64
    },
    {
      "epoch": 0.8892689183411714,
      "grad_norm": 1.275910605216143,
      "learning_rate": 6.927256125728624e-06,
      "loss": 0.7358,
      "step": 65
    },
    {
      "epoch": 0.9029499786233434,
      "grad_norm": 1.29369042675824,
      "learning_rate": 6.816069955346986e-06,
      "loss": 0.6921,
      "step": 66
    },
    {
      "epoch": 0.9166310389055152,
      "grad_norm": 1.4240273029942732,
      "learning_rate": 6.703839380034945e-06,
      "loss": 0.779,
      "step": 67
    },
    {
      "epoch": 0.9303120991876871,
      "grad_norm": 1.3292854453457577,
      "learning_rate": 6.590628942551909e-06,
      "loss": 0.7228,
      "step": 68
    },
    {
      "epoch": 0.943993159469859,
      "grad_norm": 1.496692141287322,
      "learning_rate": 6.476503749166903e-06,
      "loss": 0.8125,
      "step": 69
    },
    {
      "epoch": 0.9576742197520308,
      "grad_norm": 1.5176216507128955,
      "learning_rate": 6.36152943221656e-06,
      "loss": 0.8534,
      "step": 70
    },
    {
      "epoch": 0.9713552800342027,
      "grad_norm": 1.302742485236075,
      "learning_rate": 6.245772112360568e-06,
      "loss": 0.6825,
      "step": 71
    },
    {
      "epoch": 0.9850363403163745,
      "grad_norm": 1.6413121562783546,
      "learning_rate": 6.129298360556304e-06,
      "loss": 0.9217,
      "step": 72
    },
    {
      "epoch": 0.9987174005985464,
      "grad_norm": 1.3894377677004774,
      "learning_rate": 6.012175159774488e-06,
      "loss": 0.8585,
      "step": 73
    }
  ],
  "logging_steps": 1,
  "max_steps": 146,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 73,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 9628983689216.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}