| { |
| "best_metric": null, |
| "best_model_checkpoint": null, |
| "epoch": 1.9497607655502391, |
| "eval_steps": 500, |
| "global_step": 208, |
| "is_hyper_param_search": false, |
| "is_local_process_zero": true, |
| "is_world_process_zero": true, |
| "log_history": [ |
| { |
| "epoch": 0.01, |
| "learning_rate": 2e-05, |
| "loss": 2.3634, |
| "step": 1 |
| }, |
| { |
| "epoch": 0.02, |
| "learning_rate": 4e-05, |
| "loss": 2.4605, |
| "step": 2 |
| }, |
| { |
| "epoch": 0.03, |
| "learning_rate": 6e-05, |
| "loss": 2.3783, |
| "step": 3 |
| }, |
| { |
| "epoch": 0.04, |
| "learning_rate": 8e-05, |
| "loss": 2.5611, |
| "step": 4 |
| }, |
| { |
| "epoch": 0.05, |
| "learning_rate": 0.0001, |
| "loss": 2.525, |
| "step": 5 |
| }, |
| { |
| "epoch": 0.06, |
| "learning_rate": 0.00012, |
| "loss": 2.4104, |
| "step": 6 |
| }, |
| { |
| "epoch": 0.07, |
| "learning_rate": 0.00014, |
| "loss": 2.3589, |
| "step": 7 |
| }, |
| { |
| "epoch": 0.08, |
| "learning_rate": 0.00016, |
| "loss": 2.4133, |
| "step": 8 |
| }, |
| { |
| "epoch": 0.09, |
| "learning_rate": 0.00018, |
| "loss": 2.1582, |
| "step": 9 |
| }, |
| { |
| "epoch": 0.1, |
| "learning_rate": 0.0002, |
| "loss": 2.1394, |
| "step": 10 |
| }, |
| { |
| "epoch": 0.11, |
| "learning_rate": 0.00019998741276738754, |
| "loss": 2.1466, |
| "step": 11 |
| }, |
| { |
| "epoch": 0.11, |
| "learning_rate": 0.00019994965423831854, |
| "loss": 2.0672, |
| "step": 12 |
| }, |
| { |
| "epoch": 0.12, |
| "learning_rate": 0.0001998867339183008, |
| "loss": 2.1323, |
| "step": 13 |
| }, |
| { |
| "epoch": 0.13, |
| "learning_rate": 0.00019979866764718843, |
| "loss": 2.0887, |
| "step": 14 |
| }, |
| { |
| "epoch": 0.14, |
| "learning_rate": 0.00019968547759519425, |
| "loss": 2.1057, |
| "step": 15 |
| }, |
| { |
| "epoch": 0.15, |
| "learning_rate": 0.00019954719225730847, |
| "loss": 2.0863, |
| "step": 16 |
| }, |
| { |
| "epoch": 0.16, |
| "learning_rate": 0.00019938384644612543, |
| "loss": 2.09, |
| "step": 17 |
| }, |
| { |
| "epoch": 0.17, |
| "learning_rate": 0.00019919548128307954, |
| "loss": 2.0731, |
| "step": 18 |
| }, |
| { |
| "epoch": 0.18, |
| "learning_rate": 0.0001989821441880933, |
| "loss": 2.1503, |
| "step": 19 |
| }, |
| { |
| "epoch": 0.19, |
| "learning_rate": 0.00019874388886763944, |
| "loss": 1.9755, |
| "step": 20 |
| }, |
| { |
| "epoch": 0.2, |
| "learning_rate": 0.00019848077530122083, |
| "loss": 2.117, |
| "step": 21 |
| }, |
| { |
| "epoch": 0.21, |
| "learning_rate": 0.00019819286972627066, |
| "loss": 1.9935, |
| "step": 22 |
| }, |
| { |
| "epoch": 0.22, |
| "learning_rate": 0.00019788024462147788, |
| "loss": 2.0685, |
| "step": 23 |
| }, |
| { |
| "epoch": 0.23, |
| "learning_rate": 0.00019754297868854073, |
| "loss": 2.0086, |
| "step": 24 |
| }, |
| { |
| "epoch": 0.24, |
| "learning_rate": 0.00019718115683235417, |
| "loss": 2.0013, |
| "step": 25 |
| }, |
| { |
| "epoch": 0.25, |
| "learning_rate": 0.00019679487013963564, |
| "loss": 2.0066, |
| "step": 26 |
| }, |
| { |
| "epoch": 0.26, |
| "learning_rate": 0.00019638421585599423, |
| "loss": 1.9823, |
| "step": 27 |
| }, |
| { |
| "epoch": 0.27, |
| "learning_rate": 0.00019594929736144976, |
| "loss": 2.0394, |
| "step": 28 |
| }, |
| { |
| "epoch": 0.28, |
| "learning_rate": 0.0001954902241444074, |
| "loss": 2.0804, |
| "step": 29 |
| }, |
| { |
| "epoch": 0.29, |
| "learning_rate": 0.00019500711177409454, |
| "loss": 1.997, |
| "step": 30 |
| }, |
| { |
| "epoch": 0.3, |
| "learning_rate": 0.00019450008187146684, |
| "loss": 2.0794, |
| "step": 31 |
| }, |
| { |
| "epoch": 0.31, |
| "learning_rate": 0.00019396926207859084, |
| "loss": 1.9769, |
| "step": 32 |
| }, |
| { |
| "epoch": 0.32, |
| "learning_rate": 0.00019341478602651069, |
| "loss": 1.9643, |
| "step": 33 |
| }, |
| { |
| "epoch": 0.33, |
| "learning_rate": 0.00019283679330160726, |
| "loss": 2.041, |
| "step": 34 |
| }, |
| { |
| "epoch": 0.33, |
| "learning_rate": 0.00019223542941045817, |
| "loss": 1.9873, |
| "step": 35 |
| }, |
| { |
| "epoch": 0.34, |
| "learning_rate": 0.00019161084574320696, |
| "loss": 2.0358, |
| "step": 36 |
| }, |
| { |
| "epoch": 0.35, |
| "learning_rate": 0.00019096319953545185, |
| "loss": 2.021, |
| "step": 37 |
| }, |
| { |
| "epoch": 0.36, |
| "learning_rate": 0.00019029265382866214, |
| "loss": 2.0764, |
| "step": 38 |
| }, |
| { |
| "epoch": 0.37, |
| "learning_rate": 0.00018959937742913359, |
| "loss": 2.1639, |
| "step": 39 |
| }, |
| { |
| "epoch": 0.38, |
| "learning_rate": 0.00018888354486549237, |
| "loss": 2.0892, |
| "step": 40 |
| }, |
| { |
| "epoch": 0.39, |
| "learning_rate": 0.00018814533634475822, |
| "loss": 1.9749, |
| "step": 41 |
| }, |
| { |
| "epoch": 0.4, |
| "learning_rate": 0.00018738493770697852, |
| "loss": 2.0521, |
| "step": 42 |
| }, |
| { |
| "epoch": 0.41, |
| "learning_rate": 0.00018660254037844388, |
| "loss": 2.0751, |
| "step": 43 |
| }, |
| { |
| "epoch": 0.42, |
| "learning_rate": 0.00018579834132349772, |
| "loss": 2.0852, |
| "step": 44 |
| }, |
| { |
| "epoch": 0.43, |
| "learning_rate": 0.00018497254299495146, |
| "loss": 1.9871, |
| "step": 45 |
| }, |
| { |
| "epoch": 0.44, |
| "learning_rate": 0.00018412535328311814, |
| "loss": 1.9199, |
| "step": 46 |
| }, |
| { |
| "epoch": 0.45, |
| "learning_rate": 0.00018325698546347715, |
| "loss": 1.9627, |
| "step": 47 |
| }, |
| { |
| "epoch": 0.46, |
| "learning_rate": 0.0001823676581429833, |
| "loss": 2.0465, |
| "step": 48 |
| }, |
| { |
| "epoch": 0.47, |
| "learning_rate": 0.00018145759520503358, |
| "loss": 2.0033, |
| "step": 49 |
| }, |
| { |
| "epoch": 0.48, |
| "learning_rate": 0.00018052702575310588, |
| "loss": 1.8835, |
| "step": 50 |
| }, |
| { |
| "epoch": 0.49, |
| "learning_rate": 0.00017957618405308324, |
| "loss": 1.9823, |
| "step": 51 |
| }, |
| { |
| "epoch": 0.5, |
| "learning_rate": 0.00017860530947427875, |
| "loss": 2.0887, |
| "step": 52 |
| }, |
| { |
| "epoch": 0.51, |
| "learning_rate": 0.0001776146464291757, |
| "loss": 2.0936, |
| "step": 53 |
| }, |
| { |
| "epoch": 0.52, |
| "learning_rate": 0.0001766044443118978, |
| "loss": 2.0576, |
| "step": 54 |
| }, |
| { |
| "epoch": 0.53, |
| "learning_rate": 0.00017557495743542585, |
| "loss": 1.8788, |
| "step": 55 |
| }, |
| { |
| "epoch": 0.54, |
| "learning_rate": 0.0001745264449675755, |
| "loss": 2.0452, |
| "step": 56 |
| }, |
| { |
| "epoch": 0.55, |
| "learning_rate": 0.00017345917086575332, |
| "loss": 2.0411, |
| "step": 57 |
| }, |
| { |
| "epoch": 0.56, |
| "learning_rate": 0.00017237340381050703, |
| "loss": 2.0834, |
| "step": 58 |
| }, |
| { |
| "epoch": 0.56, |
| "learning_rate": 0.00017126941713788632, |
| "loss": 2.113, |
| "step": 59 |
| }, |
| { |
| "epoch": 0.57, |
| "learning_rate": 0.00017014748877063214, |
| "loss": 2.0797, |
| "step": 60 |
| }, |
| { |
| "epoch": 0.58, |
| "learning_rate": 0.00016900790114821122, |
| "loss": 2.0783, |
| "step": 61 |
| }, |
| { |
| "epoch": 0.59, |
| "learning_rate": 0.00016785094115571322, |
| "loss": 1.9576, |
| "step": 62 |
| }, |
| { |
| "epoch": 0.6, |
| "learning_rate": 0.00016667690005162916, |
| "loss": 1.9651, |
| "step": 63 |
| }, |
| { |
| "epoch": 0.61, |
| "learning_rate": 0.00016548607339452853, |
| "loss": 1.9927, |
| "step": 64 |
| }, |
| { |
| "epoch": 0.62, |
| "learning_rate": 0.00016427876096865394, |
| "loss": 2.0967, |
| "step": 65 |
| }, |
| { |
| "epoch": 0.63, |
| "learning_rate": 0.00016305526670845226, |
| "loss": 2.0661, |
| "step": 66 |
| }, |
| { |
| "epoch": 0.64, |
| "learning_rate": 0.00016181589862206052, |
| "loss": 1.9413, |
| "step": 67 |
| }, |
| { |
| "epoch": 0.65, |
| "learning_rate": 0.00016056096871376667, |
| "loss": 1.8223, |
| "step": 68 |
| }, |
| { |
| "epoch": 0.66, |
| "learning_rate": 0.00015929079290546408, |
| "loss": 2.0162, |
| "step": 69 |
| }, |
| { |
| "epoch": 0.67, |
| "learning_rate": 0.00015800569095711982, |
| "loss": 1.9682, |
| "step": 70 |
| }, |
| { |
| "epoch": 0.68, |
| "learning_rate": 0.00015670598638627706, |
| "loss": 1.8836, |
| "step": 71 |
| }, |
| { |
| "epoch": 0.69, |
| "learning_rate": 0.00015539200638661104, |
| "loss": 1.9973, |
| "step": 72 |
| }, |
| { |
| "epoch": 0.7, |
| "learning_rate": 0.00015406408174555976, |
| "loss": 2.1492, |
| "step": 73 |
| }, |
| { |
| "epoch": 0.71, |
| "learning_rate": 0.00015272254676105025, |
| "loss": 1.9856, |
| "step": 74 |
| }, |
| { |
| "epoch": 0.72, |
| "learning_rate": 0.00015136773915734066, |
| "loss": 1.9407, |
| "step": 75 |
| }, |
| { |
| "epoch": 0.73, |
| "learning_rate": 0.00015000000000000001, |
| "loss": 2.0559, |
| "step": 76 |
| }, |
| { |
| "epoch": 0.74, |
| "learning_rate": 0.00014861967361004687, |
| "loss": 1.9553, |
| "step": 77 |
| }, |
| { |
| "epoch": 0.75, |
| "learning_rate": 0.0001472271074772683, |
| "loss": 1.996, |
| "step": 78 |
| }, |
| { |
| "epoch": 0.76, |
| "learning_rate": 0.00014582265217274104, |
| "loss": 2.2306, |
| "step": 79 |
| }, |
| { |
| "epoch": 0.77, |
| "learning_rate": 0.00014440666126057744, |
| "loss": 2.0002, |
| "step": 80 |
| }, |
| { |
| "epoch": 0.78, |
| "learning_rate": 0.00014297949120891718, |
| "loss": 2.068, |
| "step": 81 |
| }, |
| { |
| "epoch": 0.78, |
| "learning_rate": 0.00014154150130018866, |
| "loss": 2.0615, |
| "step": 82 |
| }, |
| { |
| "epoch": 0.79, |
| "learning_rate": 0.00014009305354066137, |
| "loss": 2.0083, |
| "step": 83 |
| }, |
| { |
| "epoch": 0.8, |
| "learning_rate": 0.00013863451256931287, |
| "loss": 1.9979, |
| "step": 84 |
| }, |
| { |
| "epoch": 0.81, |
| "learning_rate": 0.00013716624556603274, |
| "loss": 1.8525, |
| "step": 85 |
| }, |
| { |
| "epoch": 0.82, |
| "learning_rate": 0.00013568862215918717, |
| "loss": 2.045, |
| "step": 86 |
| }, |
| { |
| "epoch": 0.83, |
| "learning_rate": 0.00013420201433256689, |
| "loss": 1.9144, |
| "step": 87 |
| }, |
| { |
| "epoch": 0.84, |
| "learning_rate": 0.00013270679633174218, |
| "loss": 1.9353, |
| "step": 88 |
| }, |
| { |
| "epoch": 0.85, |
| "learning_rate": 0.0001312033445698487, |
| "loss": 2.1257, |
| "step": 89 |
| }, |
| { |
| "epoch": 0.86, |
| "learning_rate": 0.0001296920375328275, |
| "loss": 1.906, |
| "step": 90 |
| }, |
| { |
| "epoch": 0.87, |
| "learning_rate": 0.00012817325568414297, |
| "loss": 1.9963, |
| "step": 91 |
| }, |
| { |
| "epoch": 0.88, |
| "learning_rate": 0.00012664738136900348, |
| "loss": 2.0734, |
| "step": 92 |
| }, |
| { |
| "epoch": 0.89, |
| "learning_rate": 0.0001251147987181079, |
| "loss": 2.0182, |
| "step": 93 |
| }, |
| { |
| "epoch": 0.9, |
| "learning_rate": 0.00012357589355094275, |
| "loss": 1.9574, |
| "step": 94 |
| }, |
| { |
| "epoch": 0.91, |
| "learning_rate": 0.00012203105327865407, |
| "loss": 2.1562, |
| "step": 95 |
| }, |
| { |
| "epoch": 0.92, |
| "learning_rate": 0.00012048066680651908, |
| "loss": 2.082, |
| "step": 96 |
| }, |
| { |
| "epoch": 0.93, |
| "learning_rate": 0.00011892512443604102, |
| "loss": 1.9546, |
| "step": 97 |
| }, |
| { |
| "epoch": 0.94, |
| "learning_rate": 0.00011736481776669306, |
| "loss": 2.0957, |
| "step": 98 |
| }, |
| { |
| "epoch": 0.95, |
| "learning_rate": 0.000115800139597335, |
| "loss": 1.9348, |
| "step": 99 |
| }, |
| { |
| "epoch": 0.96, |
| "learning_rate": 0.00011423148382732853, |
| "loss": 2.0772, |
| "step": 100 |
| }, |
| { |
| "epoch": 0.97, |
| "learning_rate": 0.00011265924535737493, |
| "loss": 2.0298, |
| "step": 101 |
| }, |
| { |
| "epoch": 0.98, |
| "learning_rate": 0.00011108381999010111, |
| "loss": 2.0272, |
| "step": 102 |
| }, |
| { |
| "epoch": 0.99, |
| "learning_rate": 0.00010950560433041826, |
| "loss": 2.0293, |
| "step": 103 |
| }, |
| { |
| "epoch": 1.0, |
| "learning_rate": 0.00010792499568567884, |
| "loss": 1.8576, |
| "step": 104 |
| }, |
| { |
| "epoch": 1.0, |
| "learning_rate": 0.00010634239196565646, |
| "loss": 1.9792, |
| "step": 105 |
| }, |
| { |
| "epoch": 1.01, |
| "learning_rate": 0.00010475819158237425, |
| "loss": 1.8629, |
| "step": 106 |
| }, |
| { |
| "epoch": 1.02, |
| "learning_rate": 0.00010317279334980678, |
| "loss": 2.0062, |
| "step": 107 |
| }, |
| { |
| "epoch": 1.03, |
| "learning_rate": 0.00010158659638348081, |
| "loss": 1.937, |
| "step": 108 |
| }, |
| { |
| "epoch": 1.0, |
| "learning_rate": 0.0001, |
| "loss": 2.0037, |
| "step": 109 |
| }, |
| { |
| "epoch": 1.01, |
| "learning_rate": 9.84134036165192e-05, |
| "loss": 1.996, |
| "step": 110 |
| }, |
| { |
| "epoch": 1.02, |
| "learning_rate": 9.682720665019325e-05, |
| "loss": 1.9546, |
| "step": 111 |
| }, |
| { |
| "epoch": 1.03, |
| "learning_rate": 9.524180841762577e-05, |
| "loss": 1.7773, |
| "step": 112 |
| }, |
| { |
| "epoch": 1.04, |
| "learning_rate": 9.365760803434355e-05, |
| "loss": 1.9081, |
| "step": 113 |
| }, |
| { |
| "epoch": 1.05, |
| "learning_rate": 9.207500431432115e-05, |
| "loss": 1.9541, |
| "step": 114 |
| }, |
| { |
| "epoch": 1.06, |
| "learning_rate": 9.049439566958175e-05, |
| "loss": 1.8794, |
| "step": 115 |
| }, |
| { |
| "epoch": 1.07, |
| "learning_rate": 8.891618000989891e-05, |
| "loss": 1.8764, |
| "step": 116 |
| }, |
| { |
| "epoch": 1.08, |
| "learning_rate": 8.734075464262507e-05, |
| "loss": 1.9087, |
| "step": 117 |
| }, |
| { |
| "epoch": 1.09, |
| "learning_rate": 8.57685161726715e-05, |
| "loss": 1.9862, |
| "step": 118 |
| }, |
| { |
| "epoch": 1.1, |
| "learning_rate": 8.4199860402665e-05, |
| "loss": 2.0019, |
| "step": 119 |
| }, |
| { |
| "epoch": 1.11, |
| "learning_rate": 8.263518223330697e-05, |
| "loss": 1.9839, |
| "step": 120 |
| }, |
| { |
| "epoch": 1.12, |
| "learning_rate": 8.107487556395901e-05, |
| "loss": 1.8805, |
| "step": 121 |
| }, |
| { |
| "epoch": 1.13, |
| "learning_rate": 7.951933319348095e-05, |
| "loss": 1.9964, |
| "step": 122 |
| }, |
| { |
| "epoch": 1.14, |
| "learning_rate": 7.796894672134594e-05, |
| "loss": 1.9832, |
| "step": 123 |
| }, |
| { |
| "epoch": 1.15, |
| "learning_rate": 7.642410644905726e-05, |
| "loss": 1.8678, |
| "step": 124 |
| }, |
| { |
| "epoch": 1.16, |
| "learning_rate": 7.488520128189209e-05, |
| "loss": 1.9307, |
| "step": 125 |
| }, |
| { |
| "epoch": 1.17, |
| "learning_rate": 7.335261863099651e-05, |
| "loss": 1.9972, |
| "step": 126 |
| }, |
| { |
| "epoch": 1.17, |
| "learning_rate": 7.182674431585704e-05, |
| "loss": 1.9596, |
| "step": 127 |
| }, |
| { |
| "epoch": 1.18, |
| "learning_rate": 7.030796246717255e-05, |
| "loss": 1.8535, |
| "step": 128 |
| }, |
| { |
| "epoch": 1.19, |
| "learning_rate": 6.87966554301513e-05, |
| "loss": 1.9233, |
| "step": 129 |
| }, |
| { |
| "epoch": 1.2, |
| "learning_rate": 6.729320366825784e-05, |
| "loss": 1.9672, |
| "step": 130 |
| }, |
| { |
| "epoch": 1.21, |
| "learning_rate": 6.579798566743314e-05, |
| "loss": 1.9245, |
| "step": 131 |
| }, |
| { |
| "epoch": 1.22, |
| "learning_rate": 6.431137784081282e-05, |
| "loss": 1.9376, |
| "step": 132 |
| }, |
| { |
| "epoch": 1.23, |
| "learning_rate": 6.283375443396726e-05, |
| "loss": 1.957, |
| "step": 133 |
| }, |
| { |
| "epoch": 1.24, |
| "learning_rate": 6.136548743068713e-05, |
| "loss": 1.9098, |
| "step": 134 |
| }, |
| { |
| "epoch": 1.25, |
| "learning_rate": 5.9906946459338656e-05, |
| "loss": 1.8468, |
| "step": 135 |
| }, |
| { |
| "epoch": 1.26, |
| "learning_rate": 5.845849869981137e-05, |
| "loss": 1.9754, |
| "step": 136 |
| }, |
| { |
| "epoch": 1.27, |
| "learning_rate": 5.702050879108284e-05, |
| "loss": 2.0446, |
| "step": 137 |
| }, |
| { |
| "epoch": 1.28, |
| "learning_rate": 5.559333873942259e-05, |
| "loss": 1.959, |
| "step": 138 |
| }, |
| { |
| "epoch": 1.29, |
| "learning_rate": 5.417734782725896e-05, |
| "loss": 2.0479, |
| "step": 139 |
| }, |
| { |
| "epoch": 1.3, |
| "learning_rate": 5.277289252273174e-05, |
| "loss": 2.0104, |
| "step": 140 |
| }, |
| { |
| "epoch": 1.31, |
| "learning_rate": 5.138032638995315e-05, |
| "loss": 1.8287, |
| "step": 141 |
| }, |
| { |
| "epoch": 1.32, |
| "learning_rate": 5.000000000000002e-05, |
| "loss": 2.0405, |
| "step": 142 |
| }, |
| { |
| "epoch": 1.33, |
| "learning_rate": 4.8632260842659393e-05, |
| "loss": 1.7512, |
| "step": 143 |
| }, |
| { |
| "epoch": 1.34, |
| "learning_rate": 4.727745323894976e-05, |
| "loss": 1.9735, |
| "step": 144 |
| }, |
| { |
| "epoch": 1.35, |
| "learning_rate": 4.593591825444028e-05, |
| "loss": 1.9186, |
| "step": 145 |
| }, |
| { |
| "epoch": 1.36, |
| "learning_rate": 4.4607993613388976e-05, |
| "loss": 1.9525, |
| "step": 146 |
| }, |
| { |
| "epoch": 1.37, |
| "learning_rate": 4.329401361372294e-05, |
| "loss": 1.9657, |
| "step": 147 |
| }, |
| { |
| "epoch": 1.38, |
| "learning_rate": 4.19943090428802e-05, |
| "loss": 1.884, |
| "step": 148 |
| }, |
| { |
| "epoch": 1.39, |
| "learning_rate": 4.070920709453597e-05, |
| "loss": 1.8609, |
| "step": 149 |
| }, |
| { |
| "epoch": 1.39, |
| "learning_rate": 3.943903128623335e-05, |
| "loss": 1.9839, |
| "step": 150 |
| }, |
| { |
| "epoch": 1.4, |
| "learning_rate": 3.8184101377939476e-05, |
| "loss": 2.133, |
| "step": 151 |
| }, |
| { |
| "epoch": 1.41, |
| "learning_rate": 3.694473329154778e-05, |
| "loss": 1.9601, |
| "step": 152 |
| }, |
| { |
| "epoch": 1.42, |
| "learning_rate": 3.5721239031346066e-05, |
| "loss": 1.9665, |
| "step": 153 |
| }, |
| { |
| "epoch": 1.43, |
| "learning_rate": 3.45139266054715e-05, |
| "loss": 1.9298, |
| "step": 154 |
| }, |
| { |
| "epoch": 1.44, |
| "learning_rate": 3.332309994837085e-05, |
| "loss": 1.9882, |
| "step": 155 |
| }, |
| { |
| "epoch": 1.45, |
| "learning_rate": 3.21490588442868e-05, |
| "loss": 1.9381, |
| "step": 156 |
| }, |
| { |
| "epoch": 1.46, |
| "learning_rate": 3.099209885178882e-05, |
| "loss": 1.9314, |
| "step": 157 |
| }, |
| { |
| "epoch": 1.47, |
| "learning_rate": 2.9852511229367865e-05, |
| "loss": 2.0151, |
| "step": 158 |
| }, |
| { |
| "epoch": 1.48, |
| "learning_rate": 2.8730582862113742e-05, |
| "loss": 1.8828, |
| "step": 159 |
| }, |
| { |
| "epoch": 1.49, |
| "learning_rate": 2.7626596189492983e-05, |
| "loss": 1.9468, |
| "step": 160 |
| }, |
| { |
| "epoch": 1.5, |
| "learning_rate": 2.654082913424668e-05, |
| "loss": 1.9597, |
| "step": 161 |
| }, |
| { |
| "epoch": 1.51, |
| "learning_rate": 2.5473555032424533e-05, |
| "loss": 2.0673, |
| "step": 162 |
| }, |
| { |
| "epoch": 1.52, |
| "learning_rate": 2.4425042564574184e-05, |
| "loss": 1.8841, |
| "step": 163 |
| }, |
| { |
| "epoch": 1.53, |
| "learning_rate": 2.339555568810221e-05, |
| "loss": 1.8973, |
| "step": 164 |
| }, |
| { |
| "epoch": 1.54, |
| "learning_rate": 2.2385353570824308e-05, |
| "loss": 2.0362, |
| "step": 165 |
| }, |
| { |
| "epoch": 1.55, |
| "learning_rate": 2.139469052572127e-05, |
| "loss": 1.8493, |
| "step": 166 |
| }, |
| { |
| "epoch": 1.56, |
| "learning_rate": 2.042381594691678e-05, |
| "loss": 1.9594, |
| "step": 167 |
| }, |
| { |
| "epoch": 1.57, |
| "learning_rate": 1.947297424689414e-05, |
| "loss": 1.8966, |
| "step": 168 |
| }, |
| { |
| "epoch": 1.58, |
| "learning_rate": 1.854240479496643e-05, |
| "loss": 1.881, |
| "step": 169 |
| }, |
| { |
| "epoch": 1.59, |
| "learning_rate": 1.763234185701673e-05, |
| "loss": 1.9359, |
| "step": 170 |
| }, |
| { |
| "epoch": 1.6, |
| "learning_rate": 1.6743014536522873e-05, |
| "loss": 2.0055, |
| "step": 171 |
| }, |
| { |
| "epoch": 1.61, |
| "learning_rate": 1.587464671688187e-05, |
| "loss": 1.8821, |
| "step": 172 |
| }, |
| { |
| "epoch": 1.61, |
| "learning_rate": 1.5027457005048573e-05, |
| "loss": 1.7915, |
| "step": 173 |
| }, |
| { |
| "epoch": 1.62, |
| "learning_rate": 1.4201658676502294e-05, |
| "loss": 1.9249, |
| "step": 174 |
| }, |
| { |
| "epoch": 1.63, |
| "learning_rate": 1.339745962155613e-05, |
| "loss": 1.9576, |
| "step": 175 |
| }, |
| { |
| "epoch": 1.64, |
| "learning_rate": 1.2615062293021507e-05, |
| "loss": 1.9853, |
| "step": 176 |
| }, |
| { |
| "epoch": 1.65, |
| "learning_rate": 1.1854663655241805e-05, |
| "loss": 1.9205, |
| "step": 177 |
| }, |
| { |
| "epoch": 1.66, |
| "learning_rate": 1.1116455134507664e-05, |
| "loss": 1.8719, |
| "step": 178 |
| }, |
| { |
| "epoch": 1.67, |
| "learning_rate": 1.0400622570866425e-05, |
| "loss": 1.9103, |
| "step": 179 |
| }, |
| { |
| "epoch": 1.68, |
| "learning_rate": 9.707346171337894e-06, |
| "loss": 1.8143, |
| "step": 180 |
| }, |
| { |
| "epoch": 1.69, |
| "learning_rate": 9.036800464548157e-06, |
| "loss": 2.0218, |
| "step": 181 |
| }, |
| { |
| "epoch": 1.7, |
| "learning_rate": 8.38915425679304e-06, |
| "loss": 2.02, |
| "step": 182 |
| }, |
| { |
| "epoch": 1.71, |
| "learning_rate": 7.764570589541875e-06, |
| "loss": 2.0398, |
| "step": 183 |
| }, |
| { |
| "epoch": 1.72, |
| "learning_rate": 7.163206698392744e-06, |
| "loss": 1.8979, |
| "step": 184 |
| }, |
| { |
| "epoch": 1.73, |
| "learning_rate": 6.585213973489335e-06, |
| "loss": 1.9078, |
| "step": 185 |
| }, |
| { |
| "epoch": 1.74, |
| "learning_rate": 6.030737921409169e-06, |
| "loss": 1.9421, |
| "step": 186 |
| }, |
| { |
| "epoch": 1.75, |
| "learning_rate": 5.499918128533155e-06, |
| "loss": 2.0254, |
| "step": 187 |
| }, |
| { |
| "epoch": 1.76, |
| "learning_rate": 4.992888225905468e-06, |
| "loss": 2.0184, |
| "step": 188 |
| }, |
| { |
| "epoch": 1.77, |
| "learning_rate": 4.509775855592613e-06, |
| "loss": 2.2174, |
| "step": 189 |
| }, |
| { |
| "epoch": 1.78, |
| "learning_rate": 4.050702638550275e-06, |
| "loss": 2.0188, |
| "step": 190 |
| }, |
| { |
| "epoch": 1.79, |
| "learning_rate": 3.615784144005796e-06, |
| "loss": 2.0355, |
| "step": 191 |
| }, |
| { |
| "epoch": 1.8, |
| "learning_rate": 3.2051298603643753e-06, |
| "loss": 1.8947, |
| "step": 192 |
| }, |
| { |
| "epoch": 1.81, |
| "learning_rate": 2.818843167645835e-06, |
| "loss": 1.952, |
| "step": 193 |
| }, |
| { |
| "epoch": 1.82, |
| "learning_rate": 2.4570213114592954e-06, |
| "loss": 1.9703, |
| "step": 194 |
| }, |
| { |
| "epoch": 1.83, |
| "learning_rate": 2.119755378522137e-06, |
| "loss": 1.9051, |
| "step": 195 |
| }, |
| { |
| "epoch": 1.83, |
| "learning_rate": 1.8071302737293295e-06, |
| "loss": 1.9829, |
| "step": 196 |
| }, |
| { |
| "epoch": 1.84, |
| "learning_rate": 1.5192246987791981e-06, |
| "loss": 2.1096, |
| "step": 197 |
| }, |
| { |
| "epoch": 1.85, |
| "learning_rate": 1.2561111323605712e-06, |
| "loss": 1.9162, |
| "step": 198 |
| }, |
| { |
| "epoch": 1.86, |
| "learning_rate": 1.0178558119067315e-06, |
| "loss": 1.944, |
| "step": 199 |
| }, |
| { |
| "epoch": 1.87, |
| "learning_rate": 8.04518716920466e-07, |
| "loss": 1.9719, |
| "step": 200 |
| }, |
| { |
| "epoch": 1.88, |
| "learning_rate": 6.161535538745878e-07, |
| "loss": 1.8808, |
| "step": 201 |
| }, |
| { |
| "epoch": 1.89, |
| "learning_rate": 4.5280774269154115e-07, |
| "loss": 1.9213, |
| "step": 202 |
| }, |
| { |
| "epoch": 1.9, |
| "learning_rate": 3.145224048057727e-07, |
| "loss": 1.9598, |
| "step": 203 |
| }, |
| { |
| "epoch": 1.91, |
| "learning_rate": 2.0133235281156736e-07, |
| "loss": 1.9825, |
| "step": 204 |
| }, |
| { |
| "epoch": 1.92, |
| "learning_rate": 1.1326608169920372e-07, |
| "loss": 1.9515, |
| "step": 205 |
| }, |
| { |
| "epoch": 1.93, |
| "learning_rate": 5.0345761681491746e-08, |
| "loss": 1.7554, |
| "step": 206 |
| }, |
| { |
| "epoch": 1.94, |
| "learning_rate": 1.2587232612493172e-08, |
| "loss": 1.8226, |
| "step": 207 |
| }, |
| { |
| "epoch": 1.95, |
| "learning_rate": 0.0, |
| "loss": 1.9169, |
| "step": 208 |
| } |
| ], |
| "logging_steps": 1, |
| "max_steps": 208, |
| "num_input_tokens_seen": 0, |
| "num_train_epochs": 2, |
| "save_steps": 500, |
| "total_flos": 8.124538453921628e+17, |
| "train_batch_size": 2, |
| "trial_name": null, |
| "trial_params": null |
| } |