{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.4868131868131869,
  "eval_steps": 36,
  "global_step": 213,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.007032967032967033,
      "grad_norm": 35.26959420627133,
      "learning_rate": 3.7037037037037036e-07,
      "loss": 1.2446,
      "step": 1
    },
    {
      "epoch": 0.007032967032967033,
      "eval_loss": 2.0324625968933105,
      "eval_runtime": 226.4201,
      "eval_samples_per_second": 13.705,
      "eval_steps_per_second": 3.427,
      "step": 1
    },
    {
      "epoch": 0.014065934065934066,
      "grad_norm": 36.83848200011581,
      "learning_rate": 7.407407407407407e-07,
      "loss": 1.2517,
      "step": 2
    },
    {
      "epoch": 0.0210989010989011,
      "grad_norm": 34.19087139357936,
      "learning_rate": 1.111111111111111e-06,
      "loss": 1.2471,
      "step": 3
    },
    {
      "epoch": 0.028131868131868132,
      "grad_norm": 27.541225332943558,
      "learning_rate": 1.4814814814814815e-06,
      "loss": 1.2362,
      "step": 4
    },
    {
      "epoch": 0.035164835164835165,
      "grad_norm": 15.57856703468476,
      "learning_rate": 1.8518518518518519e-06,
      "loss": 1.2332,
      "step": 5
    },
    {
      "epoch": 0.0421978021978022,
      "grad_norm": 8.528472835565562,
      "learning_rate": 2.222222222222222e-06,
      "loss": 1.2307,
      "step": 6
    },
    {
      "epoch": 0.04923076923076923,
      "grad_norm": 16.759402720703044,
      "learning_rate": 2.5925925925925925e-06,
      "loss": 1.199,
      "step": 7
    },
    {
      "epoch": 0.056263736263736264,
      "grad_norm": 18.137822554727045,
      "learning_rate": 2.962962962962963e-06,
      "loss": 1.1867,
      "step": 8
    },
    {
      "epoch": 0.0632967032967033,
      "grad_norm": 10.608949122260702,
      "learning_rate": 3.3333333333333333e-06,
      "loss": 1.1342,
      "step": 9
    },
    {
      "epoch": 0.07032967032967033,
      "grad_norm": 9.138194583762399,
      "learning_rate": 3.7037037037037037e-06,
      "loss": 1.1253,
      "step": 10
    },
    {
      "epoch": 0.07736263736263736,
      "grad_norm": 7.585351194968479,
      "learning_rate": 4.074074074074074e-06,
      "loss": 1.0929,
      "step": 11
    },
    {
      "epoch": 0.0843956043956044,
      "grad_norm": 3.1314432257964384,
      "learning_rate": 4.444444444444444e-06,
      "loss": 1.1159,
      "step": 12
    },
    {
      "epoch": 0.09142857142857143,
      "grad_norm": 1.9754939241031118,
      "learning_rate": 4.814814814814815e-06,
      "loss": 1.0872,
      "step": 13
    },
    {
      "epoch": 0.09846153846153846,
      "grad_norm": 2.333789509418066,
      "learning_rate": 5.185185185185185e-06,
      "loss": 1.0829,
      "step": 14
    },
    {
      "epoch": 0.1054945054945055,
      "grad_norm": 2.0991930564058823,
      "learning_rate": 5.555555555555557e-06,
      "loss": 1.0723,
      "step": 15
    },
    {
      "epoch": 0.11252747252747253,
      "grad_norm": 1.6926854903696387,
      "learning_rate": 5.925925925925926e-06,
      "loss": 1.0419,
      "step": 16
    },
    {
      "epoch": 0.11956043956043956,
      "grad_norm": 2.1909718155790214,
      "learning_rate": 6.296296296296297e-06,
      "loss": 1.0298,
      "step": 17
    },
    {
      "epoch": 0.1265934065934066,
      "grad_norm": 1.3067568935177465,
      "learning_rate": 6.666666666666667e-06,
      "loss": 1.0151,
      "step": 18
    },
    {
      "epoch": 0.13362637362637364,
      "grad_norm": 1.2377225828827996,
      "learning_rate": 7.0370370370370375e-06,
      "loss": 0.999,
      "step": 19
    },
    {
      "epoch": 0.14065934065934066,
      "grad_norm": 1.18450679388531,
      "learning_rate": 7.4074074074074075e-06,
      "loss": 1.0112,
      "step": 20
    },
    {
      "epoch": 0.1476923076923077,
      "grad_norm": 1.1838798074824592,
      "learning_rate": 7.77777777777778e-06,
      "loss": 1.0103,
      "step": 21
    },
    {
      "epoch": 0.15472527472527473,
      "grad_norm": 1.6373782903630898,
      "learning_rate": 8.148148148148148e-06,
      "loss": 0.9852,
      "step": 22
    },
    {
      "epoch": 0.16175824175824177,
      "grad_norm": 1.0047054227944252,
      "learning_rate": 8.518518518518519e-06,
      "loss": 0.9733,
      "step": 23
    },
    {
      "epoch": 0.1687912087912088,
      "grad_norm": 1.0931935152651058,
      "learning_rate": 8.888888888888888e-06,
      "loss": 0.9684,
      "step": 24
    },
    {
      "epoch": 0.17582417582417584,
      "grad_norm": 1.7486444656061693,
      "learning_rate": 9.25925925925926e-06,
      "loss": 0.9604,
      "step": 25
    },
    {
      "epoch": 0.18285714285714286,
      "grad_norm": 1.0797697063478033,
      "learning_rate": 9.62962962962963e-06,
      "loss": 0.9545,
      "step": 26
    },
    {
      "epoch": 0.1898901098901099,
      "grad_norm": 1.33357617818566,
      "learning_rate": 1e-05,
      "loss": 0.952,
      "step": 27
    },
    {
      "epoch": 0.19692307692307692,
      "grad_norm": 1.3658902210874033,
      "learning_rate": 9.999626433348664e-06,
      "loss": 0.9361,
      "step": 28
    },
    {
      "epoch": 0.20395604395604397,
      "grad_norm": 0.9917827240952933,
      "learning_rate": 9.998505789215469e-06,
      "loss": 0.9421,
      "step": 29
    },
    {
      "epoch": 0.210989010989011,
      "grad_norm": 1.1470622779223378,
      "learning_rate": 9.996638235054527e-06,
      "loss": 0.931,
      "step": 30
    },
    {
      "epoch": 0.21802197802197804,
      "grad_norm": 1.0091725798819224,
      "learning_rate": 9.994024049928222e-06,
      "loss": 0.923,
      "step": 31
    },
    {
      "epoch": 0.22505494505494505,
      "grad_norm": 1.1504273652433235,
      "learning_rate": 9.990663624465504e-06,
      "loss": 0.912,
      "step": 32
    },
    {
      "epoch": 0.2320879120879121,
      "grad_norm": 0.9664429874093261,
      "learning_rate": 9.986557460803527e-06,
      "loss": 0.896,
      "step": 33
    },
    {
      "epoch": 0.23912087912087912,
      "grad_norm": 1.1282340407259235,
      "learning_rate": 9.98170617251262e-06,
      "loss": 0.8983,
      "step": 34
    },
    {
      "epoch": 0.24615384615384617,
      "grad_norm": 0.8113157988650623,
      "learning_rate": 9.976110484504587e-06,
      "loss": 0.9127,
      "step": 35
    },
    {
      "epoch": 0.2531868131868132,
      "grad_norm": 0.8682114462819939,
      "learning_rate": 9.969771232924404e-06,
      "loss": 0.8915,
      "step": 36
    },
    {
      "epoch": 0.2531868131868132,
      "eval_loss": 1.6713504791259766,
      "eval_runtime": 226.2942,
      "eval_samples_per_second": 13.712,
      "eval_steps_per_second": 3.429,
      "step": 36
    },
    {
      "epoch": 0.2602197802197802,
      "grad_norm": 0.9962271463149754,
      "learning_rate": 9.962689365025259e-06,
      "loss": 0.8931,
      "step": 37
    },
    {
      "epoch": 0.2672527472527473,
      "grad_norm": 1.0334672110868601,
      "learning_rate": 9.954865939027028e-06,
      "loss": 0.8686,
      "step": 38
    },
    {
      "epoch": 0.2742857142857143,
      "grad_norm": 1.0817099930756773,
      "learning_rate": 9.94630212395813e-06,
      "loss": 0.8723,
      "step": 39
    },
    {
      "epoch": 0.2813186813186813,
      "grad_norm": 0.9550865674827331,
      "learning_rate": 9.936999199480854e-06,
      "loss": 0.8652,
      "step": 40
    },
    {
      "epoch": 0.28835164835164834,
      "grad_norm": 0.721743556234875,
      "learning_rate": 9.926958555700134e-06,
      "loss": 0.8811,
      "step": 41
    },
    {
      "epoch": 0.2953846153846154,
      "grad_norm": 0.8648576858639633,
      "learning_rate": 9.916181692955841e-06,
      "loss": 0.8679,
      "step": 42
    },
    {
      "epoch": 0.30241758241758243,
      "grad_norm": 0.9727121051173584,
      "learning_rate": 9.90467022159859e-06,
      "loss": 0.8833,
      "step": 43
    },
    {
      "epoch": 0.30945054945054945,
      "grad_norm": 0.8209899450978613,
      "learning_rate": 9.8924258617491e-06,
      "loss": 0.8692,
      "step": 44
    },
    {
      "epoch": 0.31648351648351647,
      "grad_norm": 0.8102558598398344,
      "learning_rate": 9.879450443041172e-06,
      "loss": 0.8649,
      "step": 45
    },
    {
      "epoch": 0.32351648351648354,
      "grad_norm": 0.7780994681889157,
      "learning_rate": 9.865745904348296e-06,
      "loss": 0.8616,
      "step": 46
    },
    {
      "epoch": 0.33054945054945056,
      "grad_norm": 0.846186796164239,
      "learning_rate": 9.851314293493923e-06,
      "loss": 0.8537,
      "step": 47
    },
    {
      "epoch": 0.3375824175824176,
      "grad_norm": 0.888533727866688,
      "learning_rate": 9.836157766945467e-06,
      "loss": 0.8521,
      "step": 48
    },
    {
      "epoch": 0.3446153846153846,
      "grad_norm": 0.7390076763494808,
      "learning_rate": 9.820278589492076e-06,
      "loss": 0.8634,
      "step": 49
    },
    {
      "epoch": 0.3516483516483517,
      "grad_norm": 0.7427563819683852,
      "learning_rate": 9.80367913390621e-06,
      "loss": 0.8497,
      "step": 50
    },
    {
      "epoch": 0.3586813186813187,
      "grad_norm": 0.8828003502820779,
      "learning_rate": 9.786361880589084e-06,
      "loss": 0.8411,
      "step": 51
    },
    {
      "epoch": 0.3657142857142857,
      "grad_norm": 0.8404598366983553,
      "learning_rate": 9.768329417200029e-06,
      "loss": 0.8503,
      "step": 52
    },
    {
      "epoch": 0.37274725274725273,
      "grad_norm": 0.7409532940009589,
      "learning_rate": 9.749584438269833e-06,
      "loss": 0.8472,
      "step": 53
    },
    {
      "epoch": 0.3797802197802198,
      "grad_norm": 0.704480780825874,
      "learning_rate": 9.730129744798096e-06,
      "loss": 0.8481,
      "step": 54
    },
    {
      "epoch": 0.3868131868131868,
      "grad_norm": 0.8434855833377527,
      "learning_rate": 9.709968243834698e-06,
      "loss": 0.839,
      "step": 55
    },
    {
      "epoch": 0.39384615384615385,
      "grad_norm": 0.9683384695522282,
      "learning_rate": 9.689102948045398e-06,
      "loss": 0.8313,
      "step": 56
    },
    {
      "epoch": 0.40087912087912086,
      "grad_norm": 0.9186046603340778,
      "learning_rate": 9.667536975261667e-06,
      "loss": 0.8233,
      "step": 57
    },
    {
      "epoch": 0.40791208791208794,
      "grad_norm": 0.7511022110825254,
      "learning_rate": 9.6452735480148e-06,
      "loss": 0.8241,
      "step": 58
    },
    {
      "epoch": 0.41494505494505496,
      "grad_norm": 0.7183393918795576,
      "learning_rate": 9.622315993054384e-06,
      "loss": 0.8248,
      "step": 59
    },
    {
      "epoch": 0.421978021978022,
      "grad_norm": 0.6656476000626029,
      "learning_rate": 9.598667740851187e-06,
      "loss": 0.8231,
      "step": 60
    },
    {
      "epoch": 0.429010989010989,
      "grad_norm": 0.6850840786935145,
      "learning_rate": 9.574332325084564e-06,
      "loss": 0.8183,
      "step": 61
    },
    {
      "epoch": 0.43604395604395607,
      "grad_norm": 0.8678005280431648,
      "learning_rate": 9.549313382114427e-06,
      "loss": 0.8266,
      "step": 62
    },
    {
      "epoch": 0.4430769230769231,
      "grad_norm": 0.778515033629181,
      "learning_rate": 9.523614650437876e-06,
      "loss": 0.831,
      "step": 63
    },
    {
      "epoch": 0.4501098901098901,
      "grad_norm": 0.7952968005831371,
      "learning_rate": 9.497239970130561e-06,
      "loss": 0.8277,
      "step": 64
    },
    {
      "epoch": 0.45714285714285713,
      "grad_norm": 0.6755106613138295,
      "learning_rate": 9.470193282272886e-06,
      "loss": 0.8193,
      "step": 65
    },
    {
      "epoch": 0.4641758241758242,
      "grad_norm": 0.6472446322999437,
      "learning_rate": 9.442478628361098e-06,
      "loss": 0.8191,
      "step": 66
    },
    {
      "epoch": 0.4712087912087912,
      "grad_norm": 0.6592803480610206,
      "learning_rate": 9.414100149703373e-06,
      "loss": 0.8092,
      "step": 67
    },
    {
      "epoch": 0.47824175824175824,
      "grad_norm": 0.6147517002023887,
      "learning_rate": 9.385062086801013e-06,
      "loss": 0.8207,
      "step": 68
    },
    {
      "epoch": 0.48527472527472526,
      "grad_norm": 0.7006129357637451,
      "learning_rate": 9.355368778714784e-06,
      "loss": 0.8088,
      "step": 69
    },
    {
      "epoch": 0.49230769230769234,
      "grad_norm": 0.8466485623047939,
      "learning_rate": 9.325024662416553e-06,
      "loss": 0.8079,
      "step": 70
    },
    {
      "epoch": 0.49934065934065935,
      "grad_norm": 0.6702085323484384,
      "learning_rate": 9.294034272126286e-06,
      "loss": 0.8058,
      "step": 71
    },
    {
      "epoch": 0.5063736263736264,
      "grad_norm": 0.6407728805954488,
      "learning_rate": 9.262402238634514e-06,
      "loss": 0.8262,
      "step": 72
    },
    {
      "epoch": 0.5063736263736264,
      "eval_loss": 1.599279522895813,
      "eval_runtime": 226.9414,
      "eval_samples_per_second": 13.673,
      "eval_steps_per_second": 3.419,
      "step": 72
    },
    {
      "epoch": 0.5134065934065934,
      "grad_norm": 0.7583031052152963,
      "learning_rate": 9.230133288610366e-06,
      "loss": 0.816,
      "step": 73
    },
    {
      "epoch": 0.5204395604395604,
      "grad_norm": 0.6318364253543153,
      "learning_rate": 9.197232243895285e-06,
      "loss": 0.8071,
      "step": 74
    },
    {
      "epoch": 0.5274725274725275,
      "grad_norm": 0.6568566221486245,
      "learning_rate": 9.163704020782507e-06,
      "loss": 0.8073,
      "step": 75
    },
    {
      "epoch": 0.5345054945054946,
      "grad_norm": 0.7717955800373636,
      "learning_rate": 9.129553629282448e-06,
      "loss": 0.8139,
      "step": 76
    },
    {
      "epoch": 0.5415384615384615,
      "grad_norm": 0.6843074458720456,
      "learning_rate": 9.094786172374066e-06,
      "loss": 0.7947,
      "step": 77
    },
    {
      "epoch": 0.5485714285714286,
      "grad_norm": 0.5785589869406929,
      "learning_rate": 9.059406845242343e-06,
      "loss": 0.8037,
      "step": 78
    },
    {
      "epoch": 0.5556043956043956,
      "grad_norm": 0.7307405632179867,
      "learning_rate": 9.023420934501981e-06,
      "loss": 0.791,
      "step": 79
    },
    {
      "epoch": 0.5626373626373626,
      "grad_norm": 0.624688349109228,
      "learning_rate": 8.98683381740745e-06,
      "loss": 0.8101,
      "step": 80
    },
    {
      "epoch": 0.5696703296703297,
      "grad_norm": 0.6529550895634888,
      "learning_rate": 8.949650961049479e-06,
      "loss": 0.795,
      "step": 81
    },
    {
      "epoch": 0.5767032967032967,
      "grad_norm": 0.7439504174496507,
      "learning_rate": 8.911877921538117e-06,
      "loss": 0.7953,
      "step": 82
    },
    {
      "epoch": 0.5837362637362637,
      "grad_norm": 0.607525650080481,
      "learning_rate": 8.87352034317252e-06,
      "loss": 0.8085,
      "step": 83
    },
    {
      "epoch": 0.5907692307692308,
      "grad_norm": 0.6363605072935006,
      "learning_rate": 8.83458395759753e-06,
      "loss": 0.8009,
      "step": 84
    },
    {
      "epoch": 0.5978021978021978,
      "grad_norm": 0.80304305040893,
      "learning_rate": 8.795074582947214e-06,
      "loss": 0.7937,
      "step": 85
    },
    {
      "epoch": 0.6048351648351649,
      "grad_norm": 0.7186872256432066,
      "learning_rate": 8.754998122975489e-06,
      "loss": 0.8059,
      "step": 86
    },
    {
      "epoch": 0.6118681318681318,
      "grad_norm": 0.7583738305755567,
      "learning_rate": 8.714360566173932e-06,
      "loss": 0.8056,
      "step": 87
    },
    {
      "epoch": 0.6189010989010989,
      "grad_norm": 0.6845388006016243,
      "learning_rate": 8.67316798487695e-06,
      "loss": 0.7946,
      "step": 88
    },
    {
      "epoch": 0.625934065934066,
      "grad_norm": 0.52893400556635,
      "learning_rate": 8.631426534354404e-06,
      "loss": 0.7991,
      "step": 89
    },
    {
      "epoch": 0.6329670329670329,
      "grad_norm": 0.562141119911408,
      "learning_rate": 8.589142451891849e-06,
      "loss": 0.7895,
      "step": 90
    },
    {
      "epoch": 0.64,
      "grad_norm": 0.5986754251442403,
      "learning_rate": 8.546322055858526e-06,
      "loss": 0.7997,
      "step": 91
    },
    {
      "epoch": 0.6470329670329671,
      "grad_norm": 0.585680730864744,
      "learning_rate": 8.502971744763216e-06,
      "loss": 0.7942,
      "step": 92
    },
    {
      "epoch": 0.654065934065934,
      "grad_norm": 0.5372905314311949,
      "learning_rate": 8.459097996298137e-06,
      "loss": 0.7997,
      "step": 93
    },
    {
      "epoch": 0.6610989010989011,
      "grad_norm": 0.5670305768190042,
      "learning_rate": 8.414707366371006e-06,
      "loss": 0.7929,
      "step": 94
    },
    {
      "epoch": 0.6681318681318681,
      "grad_norm": 0.5573782301950155,
      "learning_rate": 8.369806488125418e-06,
      "loss": 0.7854,
      "step": 95
    },
    {
      "epoch": 0.6751648351648352,
      "grad_norm": 0.5677519493613411,
      "learning_rate": 8.324402070949658e-06,
      "loss": 0.7914,
      "step": 96
    },
    {
      "epoch": 0.6821978021978022,
      "grad_norm": 0.5914729175606039,
      "learning_rate": 8.278500899474162e-06,
      "loss": 0.787,
      "step": 97
    },
    {
      "epoch": 0.6892307692307692,
      "grad_norm": 0.5233289657421258,
      "learning_rate": 8.232109832557696e-06,
      "loss": 0.7848,
      "step": 98
    },
    {
      "epoch": 0.6962637362637363,
      "grad_norm": 0.5547427081220171,
      "learning_rate": 8.18523580226247e-06,
      "loss": 0.7707,
      "step": 99
    },
    {
      "epoch": 0.7032967032967034,
      "grad_norm": 0.6955676830431037,
      "learning_rate": 8.137885812818296e-06,
      "loss": 0.7728,
      "step": 100
    },
    {
      "epoch": 0.7103296703296703,
      "grad_norm": 0.7045349043398937,
      "learning_rate": 8.090066939575972e-06,
      "loss": 0.7715,
      "step": 101
    },
    {
      "epoch": 0.7173626373626374,
      "grad_norm": 0.6443893217574687,
      "learning_rate": 8.041786327950037e-06,
      "loss": 0.777,
      "step": 102
    },
    {
      "epoch": 0.7243956043956044,
      "grad_norm": 0.6026841629687064,
      "learning_rate": 7.993051192351056e-06,
      "loss": 0.7925,
      "step": 103
    },
    {
      "epoch": 0.7314285714285714,
      "grad_norm": 0.5850830356296551,
      "learning_rate": 7.943868815107594e-06,
      "loss": 0.7644,
      "step": 104
    },
    {
      "epoch": 0.7384615384615385,
      "grad_norm": 0.7999343109870123,
      "learning_rate": 7.894246545378037e-06,
      "loss": 0.7754,
      "step": 105
    },
    {
      "epoch": 0.7454945054945055,
      "grad_norm": 0.7343984497545941,
      "learning_rate": 7.844191798052438e-06,
      "loss": 0.7904,
      "step": 106
    },
    {
      "epoch": 0.7525274725274725,
      "grad_norm": 0.5774072895167719,
      "learning_rate": 7.793712052644535e-06,
      "loss": 0.7851,
      "step": 107
    },
    {
      "epoch": 0.7595604395604396,
      "grad_norm": 0.6957073884866434,
      "learning_rate": 7.742814852174112e-06,
      "loss": 0.7705,
      "step": 108
    },
    {
      "epoch": 0.7595604395604396,
      "eval_loss": 1.568657398223877,
      "eval_runtime": 225.5453,
      "eval_samples_per_second": 13.758,
      "eval_steps_per_second": 3.441,
      "step": 108
    },
    {
      "epoch": 0.7665934065934066,
      "grad_norm": 0.7848099301993303,
      "learning_rate": 7.691507802039861e-06,
      "loss": 0.7667,
      "step": 109
    },
    {
      "epoch": 0.7736263736263737,
      "grad_norm": 0.5890076385548152,
      "learning_rate": 7.639798568882947e-06,
      "loss": 0.7654,
      "step": 110
    },
    {
      "epoch": 0.7806593406593406,
      "grad_norm": 0.66395733723176,
      "learning_rate": 7.5876948794414015e-06,
      "loss": 0.7891,
      "step": 111
    },
    {
      "epoch": 0.7876923076923077,
      "grad_norm": 0.6066531044825103,
      "learning_rate": 7.535204519395538e-06,
      "loss": 0.7876,
      "step": 112
    },
    {
      "epoch": 0.7947252747252748,
      "grad_norm": 0.6726394793172733,
      "learning_rate": 7.482335332204568e-06,
      "loss": 0.7623,
      "step": 113
    },
    {
      "epoch": 0.8017582417582417,
      "grad_norm": 0.6147140261019698,
      "learning_rate": 7.429095217934578e-06,
      "loss": 0.7723,
      "step": 114
    },
    {
      "epoch": 0.8087912087912088,
      "grad_norm": 0.5959989276123557,
      "learning_rate": 7.375492132078051e-06,
      "loss": 0.7689,
      "step": 115
    },
    {
      "epoch": 0.8158241758241759,
      "grad_norm": 0.7546232716676239,
      "learning_rate": 7.321534084365101e-06,
      "loss": 0.7789,
      "step": 116
    },
    {
      "epoch": 0.8228571428571428,
      "grad_norm": 0.4958344733247894,
      "learning_rate": 7.267229137566607e-06,
      "loss": 0.7766,
      "step": 117
    },
    {
      "epoch": 0.8298901098901099,
      "grad_norm": 0.6580768669811061,
      "learning_rate": 7.2125854062894184e-06,
      "loss": 0.7741,
      "step": 118
    },
    {
      "epoch": 0.8369230769230769,
      "grad_norm": 0.582649581214651,
      "learning_rate": 7.15761105576382e-06,
      "loss": 0.77,
      "step": 119
    },
    {
      "epoch": 0.843956043956044,
      "grad_norm": 0.7084036185253777,
      "learning_rate": 7.102314300623425e-06,
      "loss": 0.7556,
      "step": 120
    },
    {
      "epoch": 0.850989010989011,
      "grad_norm": 0.6176038580158096,
      "learning_rate": 7.0467034036776945e-06,
      "loss": 0.7624,
      "step": 121
    },
    {
      "epoch": 0.858021978021978,
      "grad_norm": 0.6193127391409796,
      "learning_rate": 6.990786674677246e-06,
      "loss": 0.7699,
      "step": 122
    },
    {
      "epoch": 0.8650549450549451,
      "grad_norm": 0.5136851770278229,
      "learning_rate": 6.934572469072163e-06,
      "loss": 0.7586,
      "step": 123
    },
    {
      "epoch": 0.8720879120879121,
      "grad_norm": 0.5414980080981208,
      "learning_rate": 6.878069186763466e-06,
      "loss": 0.7647,
      "step": 124
    },
    {
      "epoch": 0.8791208791208791,
      "grad_norm": 0.5298901704966885,
      "learning_rate": 6.821285270847934e-06,
      "loss": 0.7638,
      "step": 125
    },
    {
      "epoch": 0.8861538461538462,
      "grad_norm": 0.536579104438463,
      "learning_rate": 6.764229206356498e-06,
      "loss": 0.7718,
      "step": 126
    },
    {
      "epoch": 0.8931868131868131,
      "grad_norm": 0.5513753693058956,
      "learning_rate": 6.706909518986341e-06,
      "loss": 0.7557,
      "step": 127
    },
    {
      "epoch": 0.9002197802197802,
      "grad_norm": 0.541397328359122,
      "learning_rate": 6.649334773826924e-06,
      "loss": 0.7519,
      "step": 128
    },
    {
      "epoch": 0.9072527472527473,
      "grad_norm": 0.4984745030205108,
      "learning_rate": 6.591513574080152e-06,
      "loss": 0.7877,
      "step": 129
    },
    {
      "epoch": 0.9142857142857143,
      "grad_norm": 0.5218531200041907,
      "learning_rate": 6.5334545597748075e-06,
      "loss": 0.7713,
      "step": 130
    },
    {
      "epoch": 0.9213186813186813,
      "grad_norm": 0.5317339972640247,
      "learning_rate": 6.475166406475515e-06,
      "loss": 0.7455,
      "step": 131
    },
    {
      "epoch": 0.9283516483516484,
      "grad_norm": 0.5131963532051536,
      "learning_rate": 6.41665782398637e-06,
      "loss": 0.7582,
      "step": 132
    },
    {
      "epoch": 0.9353846153846154,
      "grad_norm": 0.5202136619940336,
      "learning_rate": 6.357937555049465e-06,
      "loss": 0.7484,
      "step": 133
    },
    {
      "epoch": 0.9424175824175824,
      "grad_norm": 0.5191886467339242,
      "learning_rate": 6.299014374038493e-06,
      "loss": 0.772,
      "step": 134
    },
    {
      "epoch": 0.9494505494505494,
      "grad_norm": 0.5650396432233695,
      "learning_rate": 6.239897085647624e-06,
      "loss": 0.7372,
      "step": 135
    },
    {
      "epoch": 0.9564835164835165,
      "grad_norm": 0.5009481153808558,
      "learning_rate": 6.180594523575838e-06,
      "loss": 0.7615,
      "step": 136
    },
    {
      "epoch": 0.9635164835164836,
      "grad_norm": 0.5009820597229956,
      "learning_rate": 6.12111554920695e-06,
      "loss": 0.7549,
      "step": 137
    },
    {
      "epoch": 0.9705494505494505,
      "grad_norm": 0.504199654644018,
      "learning_rate": 6.061469050285469e-06,
      "loss": 0.7624,
      "step": 138
    },
    {
      "epoch": 0.9775824175824176,
      "grad_norm": 0.4908461913624682,
      "learning_rate": 6.0016639395885424e-06,
      "loss": 0.7444,
      "step": 139
    },
    {
      "epoch": 0.9846153846153847,
      "grad_norm": 0.5554236899312124,
      "learning_rate": 5.941709153594146e-06,
      "loss": 0.757,
      "step": 140
    },
    {
      "epoch": 0.9916483516483516,
      "grad_norm": 0.627089978818849,
      "learning_rate": 5.881613651145732e-06,
      "loss": 0.7465,
      "step": 141
    },
    {
      "epoch": 0.9986813186813187,
      "grad_norm": 0.5613844035445381,
      "learning_rate": 5.821386412113546e-06,
      "loss": 0.7643,
      "step": 142
    },
    {
      "epoch": 1.0057142857142858,
      "grad_norm": 0.481627058589778,
      "learning_rate": 5.761036436052788e-06,
      "loss": 0.7515,
      "step": 143
    },
    {
      "epoch": 1.0015384615384615,
      "grad_norm": 0.5657262479926654,
      "learning_rate": 5.700572740858847e-06,
      "loss": 0.7547,
      "step": 144
    },
    {
      "epoch": 1.0015384615384615,
      "eval_loss": 1.5540971755981445,
      "eval_runtime": 225.9682,
      "eval_samples_per_second": 13.732,
      "eval_steps_per_second": 3.434,
      "step": 144
    },
    {
      "epoch": 1.0085714285714287,
      "grad_norm": 0.7044866764167006,
      "learning_rate": 5.640004361419776e-06,
      "loss": 0.6997,
      "step": 145
    },
    {
      "epoch": 1.0156043956043956,
      "grad_norm": 0.7035496400173301,
      "learning_rate": 5.579340348266251e-06,
      "loss": 0.7128,
      "step": 146
    },
    {
      "epoch": 1.0226373626373626,
      "grad_norm": 0.7064639323234874,
      "learning_rate": 5.518589766219173e-06,
      "loss": 0.7144,
      "step": 147
    },
    {
      "epoch": 1.0296703296703296,
      "grad_norm": 0.7971240631744954,
      "learning_rate": 5.457761693035139e-06,
      "loss": 0.6987,
      "step": 148
    },
    {
      "epoch": 1.0367032967032968,
      "grad_norm": 0.558357704286495,
      "learning_rate": 5.396865218049995e-06,
      "loss": 0.6993,
      "step": 149
    },
    {
      "epoch": 1.0437362637362637,
      "grad_norm": 0.756627096578572,
      "learning_rate": 5.335909440820635e-06,
      "loss": 0.6944,
      "step": 150
    },
    {
      "epoch": 1.0507692307692307,
      "grad_norm": 0.6399997539019588,
      "learning_rate": 5.27490346976529e-06,
      "loss": 0.7065,
      "step": 151
    },
    {
      "epoch": 1.0578021978021979,
      "grad_norm": 0.671365696382851,
      "learning_rate": 5.21385642080249e-06,
      "loss": 0.6901,
      "step": 152
    },
    {
      "epoch": 1.0648351648351648,
      "grad_norm": 0.7118806590752661,
      "learning_rate": 5.152777415988894e-06,
      "loss": 0.6958,
      "step": 153
    },
    {
      "epoch": 1.0718681318681318,
      "grad_norm": 0.6792290355353818,
      "learning_rate": 5.091675582156224e-06,
      "loss": 0.696,
      "step": 154
    },
    {
      "epoch": 1.078901098901099,
      "grad_norm": 0.6848137162218305,
      "learning_rate": 5.0305600495474586e-06,
      "loss": 0.7088,
      "step": 155
    },
    {
      "epoch": 1.085934065934066,
      "grad_norm": 0.7159849638970576,
      "learning_rate": 4.969439950452543e-06,
      "loss": 0.7031,
      "step": 156
    },
    {
      "epoch": 1.092967032967033,
      "grad_norm": 0.6300333557911728,
      "learning_rate": 4.908324417843779e-06,
      "loss": 0.7007,
      "step": 157
    },
    {
      "epoch": 1.1,
      "grad_norm": 0.7043936127658559,
      "learning_rate": 4.847222584011107e-06,
      "loss": 0.6994,
      "step": 158
    },
    {
      "epoch": 1.107032967032967,
      "grad_norm": 0.6867413622419056,
      "learning_rate": 4.7861435791975124e-06,
      "loss": 0.6935,
      "step": 159
    },
    {
      "epoch": 1.114065934065934,
      "grad_norm": 0.6538416822081471,
      "learning_rate": 4.72509653023471e-06,
      "loss": 0.6941,
      "step": 160
    },
    {
      "epoch": 1.1210989010989012,
      "grad_norm": 0.7020985463183896,
      "learning_rate": 4.664090559179367e-06,
      "loss": 0.7048,
      "step": 161
    },
    {
      "epoch": 1.1281318681318682,
      "grad_norm": 0.5772539183951623,
      "learning_rate": 4.603134781950007e-06,
      "loss": 0.6935,
      "step": 162
    },
    {
      "epoch": 1.1351648351648351,
      "grad_norm": 0.6904805292612487,
      "learning_rate": 4.542238306964863e-06,
      "loss": 0.6833,
      "step": 163
    },
    {
      "epoch": 1.1421978021978023,
      "grad_norm": 0.5106506921363563,
      "learning_rate": 4.48141023378083e-06,
      "loss": 0.6853,
      "step": 164
    },
    {
      "epoch": 1.1492307692307693,
      "grad_norm": 0.666628487493406,
      "learning_rate": 4.420659651733751e-06,
      "loss": 0.6989,
      "step": 165
    },
    {
      "epoch": 1.1562637362637362,
      "grad_norm": 0.5494302202195188,
      "learning_rate": 4.359995638580226e-06,
      "loss": 0.6958,
      "step": 166
    },
    {
      "epoch": 1.1632967032967032,
      "grad_norm": 0.6245450783892555,
      "learning_rate": 4.299427259141155e-06,
      "loss": 0.7012,
      "step": 167
    },
    {
      "epoch": 1.1703296703296704,
      "grad_norm": 0.5884535676395213,
      "learning_rate": 4.238963563947212e-06,
      "loss": 0.6869,
      "step": 168
    },
    {
      "epoch": 1.1773626373626374,
      "grad_norm": 0.5775278896688378,
      "learning_rate": 4.178613587886455e-06,
      "loss": 0.6977,
      "step": 169
    },
    {
      "epoch": 1.1843956043956043,
      "grad_norm": 0.5884501150840619,
      "learning_rate": 4.1183863488542686e-06,
      "loss": 0.6896,
      "step": 170
    },
    {
      "epoch": 1.1914285714285715,
      "grad_norm": 0.6243380627891476,
      "learning_rate": 4.058290846405856e-06,
      "loss": 0.6769,
      "step": 171
    },
    {
      "epoch": 1.1984615384615385,
      "grad_norm": 0.5198772958280654,
      "learning_rate": 3.998336060411459e-06,
      "loss": 0.6819,
      "step": 172
    },
    {
      "epoch": 1.2054945054945054,
      "grad_norm": 0.5794957559059861,
      "learning_rate": 3.938530949714533e-06,
      "loss": 0.6954,
      "step": 173
    },
    {
      "epoch": 1.2125274725274726,
      "grad_norm": 0.5092522893722937,
      "learning_rate": 3.878884450793053e-06,
      "loss": 0.6992,
      "step": 174
    },
    {
      "epoch": 1.2195604395604396,
      "grad_norm": 0.5343196689876678,
      "learning_rate": 3.819405476424164e-06,
      "loss": 0.6959,
      "step": 175
    },
    {
      "epoch": 1.2265934065934065,
      "grad_norm": 0.4933833699856012,
      "learning_rate": 3.7601029143523767e-06,
      "loss": 0.6938,
      "step": 176
    },
    {
      "epoch": 1.2336263736263735,
      "grad_norm": 0.5014086855443777,
      "learning_rate": 3.7009856259615074e-06,
      "loss": 0.6892,
      "step": 177
    },
    {
      "epoch": 1.2406593406593407,
      "grad_norm": 0.5523527929810598,
      "learning_rate": 3.642062444950537e-06,
      "loss": 0.7017,
      "step": 178
    },
    {
      "epoch": 1.2476923076923077,
      "grad_norm": 0.4818738912562937,
      "learning_rate": 3.5833421760136323e-06,
      "loss": 0.6774,
      "step": 179
    },
    {
      "epoch": 1.2547252747252746,
      "grad_norm": 0.5461147310455838,
      "learning_rate": 3.524833593524487e-06,
      "loss": 0.6979,
      "step": 180
    },
    {
      "epoch": 1.2547252747252746,
      "eval_loss": 1.5613536834716797,
      "eval_runtime": 227.3007,
      "eval_samples_per_second": 13.652,
      "eval_steps_per_second": 3.414,
      "step": 180
    },
    {
      "epoch": 1.2617582417582418,
      "grad_norm": 0.5109365510573675,
      "learning_rate": 3.4665454402251937e-06,
      "loss": 0.6852,
      "step": 181
    },
    {
      "epoch": 1.2687912087912088,
      "grad_norm": 0.4944349858439552,
      "learning_rate": 3.40848642591985e-06,
      "loss": 0.6878,
      "step": 182
    },
    {
      "epoch": 1.2758241758241757,
      "grad_norm": 0.5159952929170575,
      "learning_rate": 3.350665226173078e-06,
      "loss": 0.687,
      "step": 183
    },
    {
      "epoch": 1.282857142857143,
      "grad_norm": 0.5549596460552958,
      "learning_rate": 3.293090481013661e-06,
      "loss": 0.6846,
      "step": 184
    },
    {
      "epoch": 1.2898901098901099,
      "grad_norm": 0.4726962925967871,
      "learning_rate": 3.2357707936435013e-06,
      "loss": 0.7052,
      "step": 185
    },
    {
      "epoch": 1.2969230769230768,
      "grad_norm": 0.5871155312495273,
      "learning_rate": 3.1787147291520675e-06,
      "loss": 0.6913,
      "step": 186
    },
    {
      "epoch": 1.303956043956044,
      "grad_norm": 0.5365998516217266,
      "learning_rate": 3.1219308132365365e-06,
      "loss": 0.6875,
      "step": 187
    },
    {
      "epoch": 1.310989010989011,
      "grad_norm": 0.5631340881658085,
      "learning_rate": 3.0654275309278382e-06,
      "loss": 0.6957,
      "step": 188
    },
    {
      "epoch": 1.318021978021978,
      "grad_norm": 0.5382778843347812,
      "learning_rate": 3.0092133253227563e-06,
      "loss": 0.6882,
      "step": 189
    },
    {
      "epoch": 1.3250549450549451,
      "grad_norm": 0.5205915810175865,
      "learning_rate": 2.9532965963223076e-06,
      "loss": 0.6925,
      "step": 190
    },
    {
      "epoch": 1.332087912087912,
      "grad_norm": 0.5089202754962188,
      "learning_rate": 2.8976856993765766e-06,
      "loss": 0.6946,
      "step": 191
    },
    {
      "epoch": 1.339120879120879,
      "grad_norm": 0.5241959505519166,
      "learning_rate": 2.8423889442361797e-06,
      "loss": 0.6821,
      "step": 192
    },
    {
      "epoch": 1.3461538461538463,
      "grad_norm": 0.4791744200405188,
      "learning_rate": 2.787414593710583e-06,
      "loss": 0.6703,
      "step": 193
    },
    {
      "epoch": 1.3531868131868132,
      "grad_norm": 0.5338167716156891,
      "learning_rate": 2.7327708624333936e-06,
      "loss": 0.6917,
      "step": 194
    },
    {
      "epoch": 1.3602197802197802,
      "grad_norm": 0.5221883229297136,
      "learning_rate": 2.678465915634899e-06,
      "loss": 0.6767,
      "step": 195
    },
    {
      "epoch": 1.3672527472527474,
      "grad_norm": 0.4931589375431747,
      "learning_rate": 2.6245078679219503e-06,
      "loss": 0.6779,
      "step": 196
    },
    {
      "epoch": 1.3742857142857143,
      "grad_norm": 0.517354019882498,
      "learning_rate": 2.5709047820654236e-06,
      "loss": 0.6802,
      "step": 197
    },
    {
      "epoch": 1.3813186813186813,
      "grad_norm": 0.49471704157814983,
      "learning_rate": 2.517664667795434e-06,
      "loss": 0.685,
      "step": 198
    },
    {
      "epoch": 1.3883516483516485,
      "grad_norm": 0.4837616496269841,
      "learning_rate": 2.4647954806044633e-06,
      "loss": 0.6735,
      "step": 199
    },
    {
      "epoch": 1.3953846153846154,
      "grad_norm": 0.5172749197596505,
      "learning_rate": 2.412305120558599e-06,
      "loss": 0.6832,
      "step": 200
    },
    {
      "epoch": 1.4024175824175824,
      "grad_norm": 0.49124469230673196,
      "learning_rate": 2.3602014311170524e-06,
      "loss": 0.6935,
      "step": 201
    },
    {
      "epoch": 1.4094505494505494,
      "grad_norm": 0.5064492540961577,
      "learning_rate": 2.308492197960141e-06,
      "loss": 0.6874,
      "step": 202
    },
    {
      "epoch": 1.4164835164835166,
      "grad_norm": 0.4856449960670854,
      "learning_rate": 2.2571851478258903e-06,
      "loss": 0.679,
      "step": 203
    },
    {
      "epoch": 1.4235164835164835,
      "grad_norm": 0.4952752928736292,
      "learning_rate": 2.2062879473554654e-06,
      "loss": 0.7041,
      "step": 204
    },
    {
      "epoch": 1.4305494505494505,
      "grad_norm": 0.4638341342791591,
      "learning_rate": 2.155808201947563e-06,
      "loss": 0.6998,
      "step": 205
    },
    {
      "epoch": 1.4375824175824174,
      "grad_norm": 0.4785344340695752,
      "learning_rate": 2.105753454621966e-06,
      "loss": 0.6985,
      "step": 206
    },
    {
      "epoch": 1.4446153846153846,
      "grad_norm": 0.5069994126640873,
      "learning_rate": 2.0561311848924082e-06,
      "loss": 0.6786,
      "step": 207
    },
    {
      "epoch": 1.4516483516483516,
      "grad_norm": 0.45914925243384214,
      "learning_rate": 2.0069488076489445e-06,
      "loss": 0.6974,
      "step": 208
    },
    {
      "epoch": 1.4586813186813186,
      "grad_norm": 0.47163331174683165,
      "learning_rate": 1.958213672049964e-06,
      "loss": 0.6977,
      "step": 209
    },
    {
      "epoch": 1.4657142857142857,
      "grad_norm": 0.4899597697854046,
      "learning_rate": 1.909933060424029e-06,
      "loss": 0.7008,
      "step": 210
    },
    {
      "epoch": 1.4727472527472527,
      "grad_norm": 0.4558417478252458,
      "learning_rate": 1.862114187181705e-06,
      "loss": 0.6876,
      "step": 211
    },
    {
      "epoch": 1.4797802197802197,
      "grad_norm": 0.47448115453479683,
      "learning_rate": 1.8147641977375313e-06,
      "loss": 0.6988,
      "step": 212
    },
    {
      "epoch": 1.4868131868131869,
      "grad_norm": 0.4556522922174643,
      "learning_rate": 1.7678901674423044e-06,
      "loss": 0.7004,
      "step": 213
    }
  ],
  "logging_steps": 1,
  "max_steps": 284,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 71,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.1174316342194995e+17,
  "train_batch_size": 2,
  "trial_name": null,
  "trial_params": null
}