{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 9.878419452887538,
"eval_steps": 500,
"global_step": 13000,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.3799392097264438,
"grad_norm": 0.3757433295249939,
"learning_rate": 1.9240121580547112e-05,
"loss": 0.2367,
"step": 500
},
{
"epoch": 0.7598784194528876,
"grad_norm": 0.4858400225639343,
"learning_rate": 1.8480243161094226e-05,
"loss": 0.1529,
"step": 1000
},
{
"epoch": 1.1398176291793314,
"grad_norm": 0.4924967288970947,
"learning_rate": 1.772036474164134e-05,
"loss": 0.1399,
"step": 1500
},
{
"epoch": 1.5197568389057752,
"grad_norm": 0.4734797775745392,
"learning_rate": 1.6960486322188453e-05,
"loss": 0.1275,
"step": 2000
},
{
"epoch": 1.8996960486322187,
"grad_norm": 0.6022694110870361,
"learning_rate": 1.6200607902735563e-05,
"loss": 0.1247,
"step": 2500
},
{
"epoch": 2.2796352583586628,
"grad_norm": 0.6316580772399902,
"learning_rate": 1.5440729483282677e-05,
"loss": 0.1151,
"step": 3000
},
{
"epoch": 2.6595744680851063,
"grad_norm": 0.37824785709381104,
"learning_rate": 1.4680851063829789e-05,
"loss": 0.1107,
"step": 3500
},
{
"epoch": 3.0395136778115504,
"grad_norm": 0.58962082862854,
"learning_rate": 1.39209726443769e-05,
"loss": 0.1074,
"step": 4000
},
{
"epoch": 3.419452887537994,
"grad_norm": 0.710283637046814,
"learning_rate": 1.3161094224924014e-05,
"loss": 0.0996,
"step": 4500
},
{
"epoch": 3.7993920972644375,
"grad_norm": 0.7615976333618164,
"learning_rate": 1.2401215805471124e-05,
"loss": 0.0987,
"step": 5000
},
{
"epoch": 4.179331306990881,
"grad_norm": 0.5415639877319336,
"learning_rate": 1.1641337386018238e-05,
"loss": 0.0923,
"step": 5500
},
{
"epoch": 4.5592705167173255,
"grad_norm": 0.765820324420929,
"learning_rate": 1.088145896656535e-05,
"loss": 0.0887,
"step": 6000
},
{
"epoch": 4.939209726443769,
"grad_norm": 0.4675578773021698,
"learning_rate": 1.0121580547112462e-05,
"loss": 0.0887,
"step": 6500
},
{
"epoch": 5.319148936170213,
"grad_norm": 0.5862268805503845,
"learning_rate": 9.361702127659576e-06,
"loss": 0.0819,
"step": 7000
},
{
"epoch": 5.699088145896656,
"grad_norm": 0.7189024090766907,
"learning_rate": 8.601823708206687e-06,
"loss": 0.0779,
"step": 7500
},
{
"epoch": 6.079027355623101,
"grad_norm": 0.48072344064712524,
"learning_rate": 7.841945288753801e-06,
"loss": 0.077,
"step": 8000
},
{
"epoch": 6.458966565349544,
"grad_norm": 0.524259090423584,
"learning_rate": 7.082066869300912e-06,
"loss": 0.0726,
"step": 8500
},
{
"epoch": 6.838905775075988,
"grad_norm": 0.659052312374115,
"learning_rate": 6.322188449848025e-06,
"loss": 0.072,
"step": 9000
},
{
"epoch": 7.218844984802431,
"grad_norm": 0.7663692831993103,
"learning_rate": 5.562310030395137e-06,
"loss": 0.0687,
"step": 9500
},
{
"epoch": 7.598784194528875,
"grad_norm": 0.5136072635650635,
"learning_rate": 4.80243161094225e-06,
"loss": 0.0645,
"step": 10000
},
{
"epoch": 7.9787234042553195,
"grad_norm": 0.8392360806465149,
"learning_rate": 4.042553191489362e-06,
"loss": 0.068,
"step": 10500
},
{
"epoch": 8.358662613981762,
"grad_norm": 0.6674140095710754,
"learning_rate": 3.2826747720364744e-06,
"loss": 0.062,
"step": 11000
},
{
"epoch": 8.738601823708207,
"grad_norm": 0.6431790590286255,
"learning_rate": 2.5227963525835868e-06,
"loss": 0.0635,
"step": 11500
},
{
"epoch": 9.118541033434651,
"grad_norm": 0.8986673355102539,
"learning_rate": 1.7629179331306991e-06,
"loss": 0.0604,
"step": 12000
},
{
"epoch": 9.498480243161094,
"grad_norm": 0.5945561528205872,
"learning_rate": 1.0030395136778117e-06,
"loss": 0.0595,
"step": 12500
},
{
"epoch": 9.878419452887538,
"grad_norm": 0.7079329490661621,
"learning_rate": 2.4316109422492405e-07,
"loss": 0.0593,
"step": 13000
}
],
"logging_steps": 500,
"max_steps": 13160,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 500,
"total_flos": 2.755897861208064e+16,
"train_batch_size": 16,
"trial_name": null,
"trial_params": null
}