{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.2749003984063745,
  "eval_steps": 50,
  "global_step": 100,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.01,
      "learning_rate": 6.666666666666667e-06,
      "loss": 1.1496,
      "step": 1
    },
    {
      "epoch": 0.03,
      "learning_rate": 1.3333333333333333e-05,
      "loss": 1.1986,
      "step": 2
    },
    {
      "epoch": 0.04,
      "learning_rate": 2e-05,
      "loss": 1.1561,
      "step": 3
    },
    {
      "epoch": 0.05,
      "learning_rate": 1.9999075218579184e-05,
      "loss": 0.9038,
      "step": 4
    },
    {
      "epoch": 0.06,
      "learning_rate": 1.9996301045360874e-05,
      "loss": 0.899,
      "step": 5
    },
    {
      "epoch": 0.08,
      "learning_rate": 1.9991677993445832e-05,
      "loss": 0.8235,
      "step": 6
    },
    {
      "epoch": 0.09,
      "learning_rate": 1.9985206917896563e-05,
      "loss": 0.8104,
      "step": 7
    },
    {
      "epoch": 0.1,
      "learning_rate": 1.9976889015579167e-05,
      "loss": 0.7751,
      "step": 8
    },
    {
      "epoch": 0.11,
      "learning_rate": 1.9966725824941933e-05,
      "loss": 0.7499,
      "step": 9
    },
    {
      "epoch": 0.13,
      "learning_rate": 1.9954719225730847e-05,
      "loss": 0.7429,
      "step": 10
    },
    {
      "epoch": 0.14,
      "learning_rate": 1.994087143864188e-05,
      "loss": 0.7482,
      "step": 11
    },
    {
      "epoch": 0.15,
      "learning_rate": 1.992518502491028e-05,
      "loss": 0.7385,
      "step": 12
    },
    {
      "epoch": 0.17,
      "learning_rate": 1.9907662885836836e-05,
      "loss": 0.6971,
      "step": 13
    },
    {
      "epoch": 0.18,
      "learning_rate": 1.9888308262251286e-05,
      "loss": 0.705,
      "step": 14
    },
    {
      "epoch": 0.19,
      "learning_rate": 1.986712473391289e-05,
      "loss": 0.6891,
      "step": 15
    },
    {
      "epoch": 0.2,
      "learning_rate": 1.9844116218848335e-05,
      "loss": 0.7004,
      "step": 16
    },
    {
      "epoch": 0.22,
      "learning_rate": 1.9819286972627066e-05,
      "loss": 0.6712,
      "step": 17
    },
    {
      "epoch": 0.23,
      "learning_rate": 1.9792641587574212e-05,
      "loss": 0.6944,
      "step": 18
    },
    {
      "epoch": 0.24,
      "learning_rate": 1.9764184991921178e-05,
      "loss": 0.6836,
      "step": 19
    },
    {
      "epoch": 0.25,
      "learning_rate": 1.973392244889415e-05,
      "loss": 0.6736,
      "step": 20
    },
    {
      "epoch": 0.27,
      "learning_rate": 1.9701859555740647e-05,
      "loss": 0.6823,
      "step": 21
    },
    {
      "epoch": 0.28,
      "learning_rate": 1.966800224269424e-05,
      "loss": 0.6733,
      "step": 22
    },
    {
      "epoch": 0.29,
      "learning_rate": 1.9632356771877735e-05,
      "loss": 0.6527,
      "step": 23
    },
    {
      "epoch": 0.31,
      "learning_rate": 1.9594929736144978e-05,
      "loss": 0.664,
      "step": 24
    },
    {
      "epoch": 0.32,
      "learning_rate": 1.955572805786141e-05,
      "loss": 0.6484,
      "step": 25
    },
    {
      "epoch": 0.33,
      "learning_rate": 1.9514758987623784e-05,
      "loss": 0.6465,
      "step": 26
    },
    {
      "epoch": 0.34,
      "learning_rate": 1.9472030102919102e-05,
      "loss": 0.6533,
      "step": 27
    },
    {
      "epoch": 0.36,
      "learning_rate": 1.94275493067231e-05,
      "loss": 0.6473,
      "step": 28
    },
    {
      "epoch": 0.37,
      "learning_rate": 1.938132482603856e-05,
      "loss": 0.6254,
      "step": 29
    },
    {
      "epoch": 0.38,
      "learning_rate": 1.9333365210373668e-05,
      "loss": 0.6211,
      "step": 30
    },
    {
      "epoch": 0.4,
      "learning_rate": 1.9283679330160726e-05,
      "loss": 0.6017,
      "step": 31
    },
    {
      "epoch": 0.41,
      "learning_rate": 1.9232276375115517e-05,
      "loss": 0.6232,
      "step": 32
    },
    {
      "epoch": 0.42,
      "learning_rate": 1.9179165852537596e-05,
      "loss": 0.6031,
      "step": 33
    },
    {
      "epoch": 0.43,
      "learning_rate": 1.9124357585551872e-05,
      "loss": 0.6083,
      "step": 34
    },
    {
      "epoch": 0.45,
      "learning_rate": 1.9067861711291744e-05,
      "loss": 0.6143,
      "step": 35
    },
    {
      "epoch": 0.46,
      "learning_rate": 1.900968867902419e-05,
      "loss": 0.596,
      "step": 36
    },
    {
      "epoch": 0.47,
      "learning_rate": 1.89498492482171e-05,
      "loss": 0.5904,
      "step": 37
    },
    {
      "epoch": 0.48,
      "learning_rate": 1.8888354486549238e-05,
      "loss": 0.5805,
      "step": 38
    },
    {
      "epoch": 0.5,
      "learning_rate": 1.8825215767863215e-05,
      "loss": 0.5741,
      "step": 39
    },
    {
      "epoch": 0.51,
      "learning_rate": 1.876044477006183e-05,
      "loss": 0.5856,
      "step": 40
    },
    {
      "epoch": 0.52,
      "learning_rate": 1.8694053472948154e-05,
      "loss": 0.5808,
      "step": 41
    },
    {
      "epoch": 0.54,
      "learning_rate": 1.8626054156009807e-05,
      "loss": 0.5673,
      "step": 42
    },
    {
      "epoch": 0.55,
      "learning_rate": 1.8556459396147777e-05,
      "loss": 0.5792,
      "step": 43
    },
    {
      "epoch": 0.56,
      "learning_rate": 1.8485282065350237e-05,
      "loss": 0.5631,
      "step": 44
    },
    {
      "epoch": 0.57,
      "learning_rate": 1.8412535328311813e-05,
      "loss": 0.5808,
      "step": 45
    },
    {
      "epoch": 0.59,
      "learning_rate": 1.8338232639998672e-05,
      "loss": 0.5732,
      "step": 46
    },
    {
      "epoch": 0.6,
      "learning_rate": 1.826238774315995e-05,
      "loss": 0.5631,
      "step": 47
    },
    {
      "epoch": 0.61,
      "learning_rate": 1.8185014665785936e-05,
      "loss": 0.5682,
      "step": 48
    },
    {
      "epoch": 0.62,
      "learning_rate": 1.810612771851352e-05,
      "loss": 0.569,
      "step": 49
    },
    {
      "epoch": 0.64,
      "learning_rate": 1.8025741491979326e-05,
      "loss": 0.5659,
      "step": 50
    },
    {
      "epoch": 0.64,
      "eval_loss": 0.5658291578292847,
      "eval_runtime": 19.2393,
      "eval_samples_per_second": 42.205,
      "eval_steps_per_second": 0.676,
      "step": 50
    },
    {
      "epoch": 0.65,
      "learning_rate": 1.7943870854121126e-05,
      "loss": 0.5567,
      "step": 51
    },
    {
      "epoch": 0.66,
      "learning_rate": 1.7860530947427878e-05,
      "loss": 0.5498,
      "step": 52
    },
    {
      "epoch": 0.68,
      "learning_rate": 1.777573718613904e-05,
      "loss": 0.557,
      "step": 53
    },
    {
      "epoch": 0.69,
      "learning_rate": 1.768950525339362e-05,
      "loss": 0.5646,
      "step": 54
    },
    {
      "epoch": 0.7,
      "learning_rate": 1.7601851098329484e-05,
      "loss": 0.55,
      "step": 55
    },
    {
      "epoch": 0.71,
      "learning_rate": 1.7512790933133435e-05,
      "loss": 0.5467,
      "step": 56
    },
    {
      "epoch": 0.73,
      "learning_rate": 1.74223412300427e-05,
      "loss": 0.5596,
      "step": 57
    },
    {
      "epoch": 0.74,
      "learning_rate": 1.7330518718298263e-05,
      "loss": 0.552,
      "step": 58
    },
    {
      "epoch": 0.75,
      "learning_rate": 1.72373403810507e-05,
      "loss": 0.541,
      "step": 59
    },
    {
      "epoch": 0.76,
      "learning_rate": 1.7142823452219036e-05,
      "loss": 0.5476,
      "step": 60
    },
    {
      "epoch": 0.78,
      "learning_rate": 1.7046985413303215e-05,
      "loss": 0.5569,
      "step": 61
    },
    {
      "epoch": 0.79,
      "learning_rate": 1.6949843990150798e-05,
      "loss": 0.5527,
      "step": 62
    },
    {
      "epoch": 0.8,
      "learning_rate": 1.6851417149678442e-05,
      "loss": 0.5657,
      "step": 63
    },
    {
      "epoch": 0.82,
      "learning_rate": 1.6751723096548834e-05,
      "loss": 0.5468,
      "step": 64
    },
    {
      "epoch": 0.83,
      "learning_rate": 1.6650780269803587e-05,
      "loss": 0.5526,
      "step": 65
    },
    {
      "epoch": 0.84,
      "learning_rate": 1.6548607339452853e-05,
      "loss": 0.5525,
      "step": 66
    },
    {
      "epoch": 0.85,
      "learning_rate": 1.644522320302217e-05,
      "loss": 0.5489,
      "step": 67
    },
    {
      "epoch": 0.87,
      "learning_rate": 1.634064698205725e-05,
      "loss": 0.5491,
      "step": 68
    },
    {
      "epoch": 0.88,
      "learning_rate": 1.6234898018587336e-05,
      "loss": 0.5502,
      "step": 69
    },
    {
      "epoch": 0.89,
      "learning_rate": 1.612799587154777e-05,
      "loss": 0.545,
      "step": 70
    },
    {
      "epoch": 0.91,
      "learning_rate": 1.6019960313162436e-05,
      "loss": 0.5307,
      "step": 71
    },
    {
      "epoch": 0.92,
      "learning_rate": 1.5910811325286768e-05,
      "loss": 0.5437,
      "step": 72
    },
    {
      "epoch": 0.93,
      "learning_rate": 1.5800569095711983e-05,
      "loss": 0.5483,
      "step": 73
    },
    {
      "epoch": 0.94,
      "learning_rate": 1.5689254014431225e-05,
      "loss": 0.5466,
      "step": 74
    },
    {
      "epoch": 0.96,
      "learning_rate": 1.5576886669868297e-05,
      "loss": 0.534,
      "step": 75
    },
    {
      "epoch": 0.97,
      "learning_rate": 1.5463487845069708e-05,
      "loss": 0.5488,
      "step": 76
    },
    {
      "epoch": 0.98,
      "learning_rate": 1.5349078513860728e-05,
      "loss": 0.5506,
      "step": 77
    },
    {
      "epoch": 0.99,
      "learning_rate": 1.5233679836966122e-05,
      "loss": 0.5438,
      "step": 78
    },
    {
      "epoch": 1.01,
      "learning_rate": 1.5117313158096371e-05,
      "loss": 0.4824,
      "step": 79
    },
    {
      "epoch": 1.02,
      "learning_rate": 1.5000000000000002e-05,
      "loss": 0.4288,
      "step": 80
    },
    {
      "epoch": 1.03,
      "learning_rate": 1.4881762060482814e-05,
      "loss": 0.4267,
      "step": 81
    },
    {
      "epoch": 1.05,
      "learning_rate": 1.476262120839475e-05,
      "loss": 0.4343,
      "step": 82
    },
    {
      "epoch": 1.06,
      "learning_rate": 1.4642599479585106e-05,
      "loss": 0.449,
      "step": 83
    },
    {
      "epoch": 1.07,
      "learning_rate": 1.4521719072826858e-05,
      "loss": 0.4231,
      "step": 84
    },
    {
      "epoch": 1.08,
      "learning_rate": 1.4400002345710871e-05,
      "loss": 0.4136,
      "step": 85
    },
    {
      "epoch": 1.1,
      "learning_rate": 1.427747181051071e-05,
      "loss": 0.4262,
      "step": 86
    },
    {
      "epoch": 1.11,
      "learning_rate": 1.4154150130018867e-05,
      "loss": 0.4185,
      "step": 87
    },
    {
      "epoch": 1.12,
      "learning_rate": 1.4030060113355118e-05,
      "loss": 0.4338,
      "step": 88
    },
    {
      "epoch": 1.13,
      "learning_rate": 1.3905224711747844e-05,
      "loss": 0.4227,
      "step": 89
    },
    {
      "epoch": 1.15,
      "learning_rate": 1.3779667014289067e-05,
      "loss": 0.4179,
      "step": 90
    },
    {
      "epoch": 1.16,
      "learning_rate": 1.3653410243663953e-05,
      "loss": 0.415,
      "step": 91
    },
    {
      "epoch": 1.17,
      "learning_rate": 1.3526477751855645e-05,
      "loss": 0.4049,
      "step": 92
    },
    {
      "epoch": 1.19,
      "learning_rate": 1.3398893015826166e-05,
      "loss": 0.4007,
      "step": 93
    },
    {
      "epoch": 1.2,
      "learning_rate": 1.3270679633174219e-05,
      "loss": 0.4147,
      "step": 94
    },
    {
      "epoch": 1.21,
      "learning_rate": 1.3141861317770628e-05,
      "loss": 0.4081,
      "step": 95
    },
    {
      "epoch": 1.22,
      "learning_rate": 1.3012461895372343e-05,
      "loss": 0.4071,
      "step": 96
    },
    {
      "epoch": 1.24,
      "learning_rate": 1.2882505299215711e-05,
      "loss": 0.3992,
      "step": 97
    },
    {
      "epoch": 1.25,
      "learning_rate": 1.2752015565589852e-05,
      "loss": 0.4061,
      "step": 98
    },
    {
      "epoch": 1.26,
      "learning_rate": 1.2621016829391022e-05,
      "loss": 0.4243,
      "step": 99
    },
    {
      "epoch": 1.27,
      "learning_rate": 1.2489533319658703e-05,
      "loss": 0.4143,
      "step": 100
    },
    {
      "epoch": 1.27,
      "eval_loss": 0.5516753792762756,
      "eval_runtime": 19.1839,
      "eval_samples_per_second": 42.327,
      "eval_steps_per_second": 0.678,
      "step": 100
    }
  ],
  "logging_steps": 1.0,
  "max_steps": 234,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 50,
  "total_flos": 212668549038080.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}