{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 0.028070175438596492,
  "eval_steps": 500,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0002807017543859649,
      "grad_norm": 0.7608377933502197,
      "learning_rate": 0.0,
      "loss": 1.7214,
      "step": 1
    },
    {
      "epoch": 0.0005614035087719298,
      "grad_norm": 0.8957514762878418,
      "learning_rate": 4e-05,
      "loss": 1.9895,
      "step": 2
    },
    {
      "epoch": 0.0008421052631578948,
      "grad_norm": 1.089669942855835,
      "learning_rate": 8e-05,
      "loss": 2.072,
      "step": 3
    },
    {
      "epoch": 0.0011228070175438596,
      "grad_norm": 1.0289334058761597,
      "learning_rate": 0.00012,
      "loss": 1.8603,
      "step": 4
    },
    {
      "epoch": 0.0014035087719298245,
      "grad_norm": 0.767257809638977,
      "learning_rate": 0.00016,
      "loss": 1.4284,
      "step": 5
    },
    {
      "epoch": 0.0016842105263157896,
      "grad_norm": 0.5978246331214905,
      "learning_rate": 0.0002,
      "loss": 1.2488,
      "step": 6
    },
    {
      "epoch": 0.0019649122807017545,
      "grad_norm": 0.4895220100879669,
      "learning_rate": 0.00019789473684210526,
      "loss": 0.8206,
      "step": 7
    },
    {
      "epoch": 0.002245614035087719,
      "grad_norm": 0.4384584426879883,
      "learning_rate": 0.00019578947368421054,
      "loss": 0.7915,
      "step": 8
    },
    {
      "epoch": 0.0025263157894736842,
      "grad_norm": 0.44429001212120056,
      "learning_rate": 0.0001936842105263158,
      "loss": 0.9091,
      "step": 9
    },
    {
      "epoch": 0.002807017543859649,
      "grad_norm": 0.3982425332069397,
      "learning_rate": 0.00019157894736842104,
      "loss": 1.0326,
      "step": 10
    },
    {
      "epoch": 0.003087719298245614,
      "grad_norm": 0.386045902967453,
      "learning_rate": 0.00018947368421052632,
      "loss": 0.8021,
      "step": 11
    },
    {
      "epoch": 0.003368421052631579,
      "grad_norm": 0.22769981622695923,
      "learning_rate": 0.0001873684210526316,
      "loss": 0.7192,
      "step": 12
    },
    {
      "epoch": 0.003649122807017544,
      "grad_norm": 0.2643820643424988,
      "learning_rate": 0.00018526315789473685,
      "loss": 0.8537,
      "step": 13
    },
    {
      "epoch": 0.003929824561403509,
      "grad_norm": 0.29013004899024963,
      "learning_rate": 0.0001831578947368421,
      "loss": 0.7323,
      "step": 14
    },
    {
      "epoch": 0.004210526315789474,
      "grad_norm": 0.28618916869163513,
      "learning_rate": 0.00018105263157894739,
      "loss": 0.8345,
      "step": 15
    },
    {
      "epoch": 0.004491228070175438,
      "grad_norm": 0.2927887439727783,
      "learning_rate": 0.00017894736842105264,
      "loss": 0.6711,
      "step": 16
    },
    {
      "epoch": 0.004771929824561404,
      "grad_norm": 0.3290823996067047,
      "learning_rate": 0.0001768421052631579,
      "loss": 0.9138,
      "step": 17
    },
    {
      "epoch": 0.0050526315789473685,
      "grad_norm": 0.4104742109775543,
      "learning_rate": 0.00017473684210526317,
      "loss": 0.7287,
      "step": 18
    },
    {
      "epoch": 0.005333333333333333,
      "grad_norm": 0.3225669860839844,
      "learning_rate": 0.00017263157894736842,
      "loss": 0.7856,
      "step": 19
    },
    {
      "epoch": 0.005614035087719298,
      "grad_norm": 0.2503960430622101,
      "learning_rate": 0.0001705263157894737,
      "loss": 0.7148,
      "step": 20
    },
    {
      "epoch": 0.005894736842105263,
      "grad_norm": 0.3453228771686554,
      "learning_rate": 0.00016842105263157895,
      "loss": 0.7775,
      "step": 21
    },
    {
      "epoch": 0.006175438596491228,
      "grad_norm": 0.26591363549232483,
      "learning_rate": 0.00016631578947368423,
      "loss": 0.6831,
      "step": 22
    },
    {
      "epoch": 0.006456140350877193,
      "grad_norm": 0.41043636202812195,
      "learning_rate": 0.00016421052631578948,
      "loss": 0.57,
      "step": 23
    },
    {
      "epoch": 0.006736842105263158,
      "grad_norm": 0.21183069050312042,
      "learning_rate": 0.00016210526315789473,
      "loss": 0.7367,
      "step": 24
    },
    {
      "epoch": 0.007017543859649123,
      "grad_norm": 0.22407041490077972,
      "learning_rate": 0.00016,
      "loss": 0.5179,
      "step": 25
    },
    {
      "epoch": 0.007298245614035088,
      "grad_norm": 0.2489241659641266,
      "learning_rate": 0.00015789473684210527,
      "loss": 0.5823,
      "step": 26
    },
    {
      "epoch": 0.007578947368421052,
      "grad_norm": 0.2461841106414795,
      "learning_rate": 0.00015578947368421052,
      "loss": 0.6216,
      "step": 27
    },
    {
      "epoch": 0.007859649122807018,
      "grad_norm": 0.22744964063167572,
      "learning_rate": 0.0001536842105263158,
      "loss": 0.4537,
      "step": 28
    },
    {
      "epoch": 0.008140350877192983,
      "grad_norm": 0.27331238985061646,
      "learning_rate": 0.00015157894736842108,
      "loss": 0.5216,
      "step": 29
    },
    {
      "epoch": 0.008421052631578947,
      "grad_norm": 0.21308669447898865,
      "learning_rate": 0.00014947368421052633,
      "loss": 0.6252,
      "step": 30
    },
    {
      "epoch": 0.008701754385964912,
      "grad_norm": 0.26308220624923706,
      "learning_rate": 0.00014736842105263158,
      "loss": 0.7632,
      "step": 31
    },
    {
      "epoch": 0.008982456140350877,
      "grad_norm": 0.23341232538223267,
      "learning_rate": 0.00014526315789473686,
      "loss": 0.7243,
      "step": 32
    },
    {
      "epoch": 0.009263157894736843,
      "grad_norm": 0.328726589679718,
      "learning_rate": 0.0001431578947368421,
      "loss": 0.7539,
      "step": 33
    },
    {
      "epoch": 0.009543859649122808,
      "grad_norm": 0.2569667398929596,
      "learning_rate": 0.00014105263157894736,
      "loss": 0.7572,
      "step": 34
    },
    {
      "epoch": 0.009824561403508772,
      "grad_norm": 0.23884397745132446,
      "learning_rate": 0.00013894736842105264,
      "loss": 0.6237,
      "step": 35
    },
    {
      "epoch": 0.010105263157894737,
      "grad_norm": 0.19555659592151642,
      "learning_rate": 0.0001368421052631579,
      "loss": 0.6168,
      "step": 36
    },
    {
      "epoch": 0.010385964912280702,
      "grad_norm": 0.18646731972694397,
      "learning_rate": 0.00013473684210526317,
      "loss": 0.5538,
      "step": 37
    },
    {
      "epoch": 0.010666666666666666,
      "grad_norm": 0.22244583070278168,
      "learning_rate": 0.00013263157894736842,
      "loss": 0.695,
      "step": 38
    },
    {
      "epoch": 0.010947368421052631,
      "grad_norm": 0.1918996125459671,
      "learning_rate": 0.0001305263157894737,
      "loss": 0.4743,
      "step": 39
    },
    {
      "epoch": 0.011228070175438596,
      "grad_norm": 0.2257566899061203,
      "learning_rate": 0.00012842105263157895,
      "loss": 0.6041,
      "step": 40
    },
    {
      "epoch": 0.011508771929824562,
      "grad_norm": 0.23808154463768005,
      "learning_rate": 0.0001263157894736842,
      "loss": 0.6772,
      "step": 41
    },
    {
      "epoch": 0.011789473684210527,
      "grad_norm": 0.16062161326408386,
      "learning_rate": 0.00012421052631578949,
      "loss": 0.4744,
      "step": 42
    },
    {
      "epoch": 0.012070175438596491,
      "grad_norm": 0.27486950159072876,
      "learning_rate": 0.00012210526315789474,
      "loss": 0.6789,
      "step": 43
    },
    {
      "epoch": 0.012350877192982456,
      "grad_norm": 0.22151979804039001,
      "learning_rate": 0.00012,
      "loss": 0.5758,
      "step": 44
    },
    {
      "epoch": 0.01263157894736842,
      "grad_norm": 0.25173574686050415,
      "learning_rate": 0.00011789473684210525,
      "loss": 0.5524,
      "step": 45
    },
    {
      "epoch": 0.012912280701754385,
      "grad_norm": 0.18759620189666748,
      "learning_rate": 0.00011578947368421053,
      "loss": 0.5261,
      "step": 46
    },
    {
      "epoch": 0.01319298245614035,
      "grad_norm": 0.1941239982843399,
      "learning_rate": 0.0001136842105263158,
      "loss": 0.7774,
      "step": 47
    },
    {
      "epoch": 0.013473684210526317,
      "grad_norm": 0.21030020713806152,
      "learning_rate": 0.00011157894736842105,
      "loss": 0.61,
      "step": 48
    },
    {
      "epoch": 0.013754385964912281,
      "grad_norm": 0.20546339452266693,
      "learning_rate": 0.00010947368421052633,
      "loss": 0.605,
      "step": 49
    },
    {
      "epoch": 0.014035087719298246,
      "grad_norm": 0.1955367475748062,
      "learning_rate": 0.00010736842105263158,
      "loss": 0.4951,
      "step": 50
    },
    {
      "epoch": 0.01431578947368421,
      "grad_norm": 0.2012670934200287,
      "learning_rate": 0.00010526315789473685,
      "loss": 0.5174,
      "step": 51
    },
    {
      "epoch": 0.014596491228070175,
      "grad_norm": 0.21794946491718292,
      "learning_rate": 0.00010315789473684211,
      "loss": 0.6387,
      "step": 52
    },
    {
      "epoch": 0.01487719298245614,
      "grad_norm": 0.1851382851600647,
      "learning_rate": 0.00010105263157894738,
      "loss": 0.4906,
      "step": 53
    },
    {
      "epoch": 0.015157894736842105,
      "grad_norm": 0.19877484440803528,
      "learning_rate": 9.894736842105263e-05,
      "loss": 0.5981,
      "step": 54
    },
    {
      "epoch": 0.015438596491228071,
      "grad_norm": 0.28672927618026733,
      "learning_rate": 9.68421052631579e-05,
      "loss": 0.7185,
      "step": 55
    },
    {
      "epoch": 0.015719298245614036,
      "grad_norm": 0.24410443007946014,
      "learning_rate": 9.473684210526316e-05,
      "loss": 0.4882,
      "step": 56
    },
    {
      "epoch": 0.016,
      "grad_norm": 0.2676307260990143,
      "learning_rate": 9.263157894736843e-05,
      "loss": 0.628,
      "step": 57
    },
    {
      "epoch": 0.016280701754385965,
      "grad_norm": 0.2523461878299713,
      "learning_rate": 9.052631578947369e-05,
      "loss": 0.478,
      "step": 58
    },
    {
      "epoch": 0.01656140350877193,
      "grad_norm": 0.2071600705385208,
      "learning_rate": 8.842105263157894e-05,
      "loss": 0.5096,
      "step": 59
    },
    {
      "epoch": 0.016842105263157894,
      "grad_norm": 0.20321086049079895,
      "learning_rate": 8.631578947368421e-05,
      "loss": 0.5864,
      "step": 60
    },
    {
      "epoch": 0.01712280701754386,
      "grad_norm": 0.20558391511440277,
      "learning_rate": 8.421052631578948e-05,
      "loss": 0.6181,
      "step": 61
    },
    {
      "epoch": 0.017403508771929824,
      "grad_norm": 0.22859832644462585,
      "learning_rate": 8.210526315789474e-05,
      "loss": 0.8814,
      "step": 62
    },
    {
      "epoch": 0.01768421052631579,
      "grad_norm": 0.20681747794151306,
      "learning_rate": 8e-05,
      "loss": 0.7479,
      "step": 63
    },
    {
      "epoch": 0.017964912280701753,
      "grad_norm": 0.26385411620140076,
      "learning_rate": 7.789473684210526e-05,
      "loss": 0.5815,
      "step": 64
    },
    {
      "epoch": 0.018245614035087718,
      "grad_norm": 0.21943461894989014,
      "learning_rate": 7.578947368421054e-05,
      "loss": 0.657,
      "step": 65
    },
    {
      "epoch": 0.018526315789473686,
      "grad_norm": 0.24849221110343933,
      "learning_rate": 7.368421052631579e-05,
      "loss": 0.5341,
      "step": 66
    },
    {
      "epoch": 0.01880701754385965,
      "grad_norm": 0.2595747411251068,
      "learning_rate": 7.157894736842105e-05,
      "loss": 0.5888,
      "step": 67
    },
    {
      "epoch": 0.019087719298245615,
      "grad_norm": 0.23266763985157013,
      "learning_rate": 6.947368421052632e-05,
      "loss": 0.4899,
      "step": 68
    },
    {
      "epoch": 0.01936842105263158,
      "grad_norm": 0.2767358720302582,
      "learning_rate": 6.736842105263159e-05,
      "loss": 0.6119,
      "step": 69
    },
    {
      "epoch": 0.019649122807017545,
      "grad_norm": 0.23197737336158752,
      "learning_rate": 6.526315789473685e-05,
      "loss": 0.6986,
      "step": 70
    },
    {
      "epoch": 0.01992982456140351,
      "grad_norm": 0.218978613615036,
      "learning_rate": 6.31578947368421e-05,
      "loss": 0.582,
      "step": 71
    },
    {
      "epoch": 0.020210526315789474,
      "grad_norm": 0.2475346177816391,
      "learning_rate": 6.105263157894737e-05,
      "loss": 0.615,
      "step": 72
    },
    {
      "epoch": 0.02049122807017544,
      "grad_norm": 0.21986079216003418,
      "learning_rate": 5.894736842105263e-05,
      "loss": 0.6171,
      "step": 73
    },
    {
      "epoch": 0.020771929824561403,
      "grad_norm": 0.2430456131696701,
      "learning_rate": 5.68421052631579e-05,
      "loss": 0.6572,
      "step": 74
    },
    {
      "epoch": 0.021052631578947368,
      "grad_norm": 0.20442380011081696,
      "learning_rate": 5.4736842105263165e-05,
      "loss": 0.5898,
      "step": 75
    },
    {
      "epoch": 0.021333333333333333,
      "grad_norm": 0.2875303328037262,
      "learning_rate": 5.2631578947368424e-05,
      "loss": 0.7135,
      "step": 76
    },
    {
      "epoch": 0.021614035087719297,
      "grad_norm": 0.26638996601104736,
      "learning_rate": 5.052631578947369e-05,
      "loss": 0.564,
      "step": 77
    },
    {
      "epoch": 0.021894736842105262,
      "grad_norm": 0.23086141049861908,
      "learning_rate": 4.842105263157895e-05,
      "loss": 0.5978,
      "step": 78
    },
    {
      "epoch": 0.022175438596491227,
      "grad_norm": 0.24883733689785004,
      "learning_rate": 4.6315789473684214e-05,
      "loss": 0.7299,
      "step": 79
    },
    {
      "epoch": 0.02245614035087719,
      "grad_norm": 0.1697564423084259,
      "learning_rate": 4.421052631578947e-05,
      "loss": 0.4933,
      "step": 80
    },
    {
      "epoch": 0.02273684210526316,
      "grad_norm": 0.20351547002792358,
      "learning_rate": 4.210526315789474e-05,
      "loss": 0.5148,
      "step": 81
    },
    {
      "epoch": 0.023017543859649124,
      "grad_norm": 0.1920584887266159,
      "learning_rate": 4e-05,
      "loss": 0.4831,
      "step": 82
    },
    {
      "epoch": 0.02329824561403509,
      "grad_norm": 0.1843676120042801,
      "learning_rate": 3.789473684210527e-05,
      "loss": 0.5108,
      "step": 83
    },
    {
      "epoch": 0.023578947368421053,
      "grad_norm": 0.2634964883327484,
      "learning_rate": 3.578947368421053e-05,
      "loss": 0.6499,
      "step": 84
    },
    {
      "epoch": 0.023859649122807018,
      "grad_norm": 0.19457034766674042,
      "learning_rate": 3.368421052631579e-05,
      "loss": 0.4723,
      "step": 85
    },
    {
      "epoch": 0.024140350877192983,
      "grad_norm": 0.2747279405593872,
      "learning_rate": 3.157894736842105e-05,
      "loss": 0.5834,
      "step": 86
    },
    {
      "epoch": 0.024421052631578948,
      "grad_norm": 0.24927914142608643,
      "learning_rate": 2.9473684210526314e-05,
      "loss": 0.7072,
      "step": 87
    },
    {
      "epoch": 0.024701754385964912,
      "grad_norm": 0.1812397688627243,
      "learning_rate": 2.7368421052631583e-05,
      "loss": 0.4519,
      "step": 88
    },
    {
      "epoch": 0.024982456140350877,
      "grad_norm": 0.1891927719116211,
      "learning_rate": 2.5263157894736845e-05,
      "loss": 0.4875,
      "step": 89
    },
    {
      "epoch": 0.02526315789473684,
      "grad_norm": 0.1999116688966751,
      "learning_rate": 2.3157894736842107e-05,
      "loss": 0.6227,
      "step": 90
    },
    {
      "epoch": 0.025543859649122806,
      "grad_norm": 0.1899036467075348,
      "learning_rate": 2.105263157894737e-05,
      "loss": 0.6303,
      "step": 91
    },
    {
      "epoch": 0.02582456140350877,
      "grad_norm": 0.20942147076129913,
      "learning_rate": 1.8947368421052634e-05,
      "loss": 0.6452,
      "step": 92
    },
    {
      "epoch": 0.026105263157894736,
      "grad_norm": 0.19382309913635254,
      "learning_rate": 1.6842105263157896e-05,
      "loss": 0.5471,
      "step": 93
    },
    {
      "epoch": 0.0263859649122807,
      "grad_norm": 0.24067223072052002,
      "learning_rate": 1.4736842105263157e-05,
      "loss": 0.6691,
      "step": 94
    },
    {
      "epoch": 0.02666666666666667,
      "grad_norm": 0.15286678075790405,
      "learning_rate": 1.2631578947368422e-05,
      "loss": 0.4751,
      "step": 95
    },
    {
      "epoch": 0.026947368421052633,
      "grad_norm": 0.3723171055316925,
      "learning_rate": 1.0526315789473684e-05,
      "loss": 0.554,
      "step": 96
    },
    {
      "epoch": 0.027228070175438598,
      "grad_norm": 0.22701109945774078,
      "learning_rate": 8.421052631578948e-06,
      "loss": 0.6832,
      "step": 97
    },
    {
      "epoch": 0.027508771929824562,
      "grad_norm": 0.2766507565975189,
      "learning_rate": 6.315789473684211e-06,
      "loss": 0.6373,
      "step": 98
    },
    {
      "epoch": 0.027789473684210527,
      "grad_norm": 0.24072766304016113,
      "learning_rate": 4.210526315789474e-06,
      "loss": 0.4333,
      "step": 99
    },
    {
      "epoch": 0.028070175438596492,
      "grad_norm": 0.2390160858631134,
      "learning_rate": 2.105263157894737e-06,
      "loss": 0.6499,
      "step": 100
    }
  ],
  "logging_steps": 1,
  "max_steps": 100,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 7364055070973952.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}