{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 10.0,
"eval_steps": 500,
"global_step": 60,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.17,
"grad_norm": 10.550161397700021,
"learning_rate": 3.3333333333333333e-06,
"loss": 1.8491,
"step": 1
},
{
"epoch": 0.83,
"grad_norm": 8.369449500494765,
"learning_rate": 1.6666666666666667e-05,
"loss": 1.8197,
"step": 5
},
{
"epoch": 1.0,
"eval_loss": 2.1416637897491455,
"eval_runtime": 2.3253,
"eval_samples_per_second": 5.161,
"eval_steps_per_second": 0.43,
"step": 6
},
{
"epoch": 1.67,
"grad_norm": 6.008460274326708,
"learning_rate": 1.973044870579824e-05,
"loss": 1.3836,
"step": 10
},
{
"epoch": 2.0,
"eval_loss": 1.7507458925247192,
"eval_runtime": 2.1866,
"eval_samples_per_second": 5.488,
"eval_steps_per_second": 0.457,
"step": 12
},
{
"epoch": 2.5,
"grad_norm": 7.347536654652698,
"learning_rate": 1.866025403784439e-05,
"loss": 0.9111,
"step": 15
},
{
"epoch": 3.0,
"eval_loss": 2.1440508365631104,
"eval_runtime": 2.1965,
"eval_samples_per_second": 5.463,
"eval_steps_per_second": 0.455,
"step": 18
},
{
"epoch": 3.33,
"grad_norm": 8.924017881530585,
"learning_rate": 1.686241637868734e-05,
"loss": 0.4865,
"step": 20
},
{
"epoch": 4.0,
"eval_loss": 2.5978963375091553,
"eval_runtime": 2.1946,
"eval_samples_per_second": 5.468,
"eval_steps_per_second": 0.456,
"step": 24
},
{
"epoch": 4.17,
"grad_norm": 3.8795868824604973,
"learning_rate": 1.4487991802004625e-05,
"loss": 0.2128,
"step": 25
},
{
"epoch": 5.0,
"grad_norm": 2.3312651987420017,
"learning_rate": 1.1736481776669307e-05,
"loss": 0.0827,
"step": 30
},
{
"epoch": 5.0,
"eval_loss": 2.817784547805786,
"eval_runtime": 2.1833,
"eval_samples_per_second": 5.496,
"eval_steps_per_second": 0.458,
"step": 30
},
{
"epoch": 5.83,
"grad_norm": 1.668395846222473,
"learning_rate": 8.839070858747697e-06,
"loss": 0.0365,
"step": 35
},
{
"epoch": 6.0,
"eval_loss": 3.2582452297210693,
"eval_runtime": 2.1864,
"eval_samples_per_second": 5.489,
"eval_steps_per_second": 0.457,
"step": 36
},
{
"epoch": 6.67,
"grad_norm": 1.082911988071084,
"learning_rate": 6.039202339608432e-06,
"loss": 0.019,
"step": 40
},
{
"epoch": 7.0,
"eval_loss": 3.6268527507781982,
"eval_runtime": 2.176,
"eval_samples_per_second": 5.515,
"eval_steps_per_second": 0.46,
"step": 42
},
{
"epoch": 7.5,
"grad_norm": 0.6033031953021625,
"learning_rate": 3.5721239031346067e-06,
"loss": 0.0127,
"step": 45
},
{
"epoch": 8.0,
"eval_loss": 3.825711250305176,
"eval_runtime": 2.1746,
"eval_samples_per_second": 5.518,
"eval_steps_per_second": 0.46,
"step": 48
},
{
"epoch": 8.33,
"grad_norm": 0.39765335521647993,
"learning_rate": 1.6451218858706374e-06,
"loss": 0.0092,
"step": 50
},
{
"epoch": 9.0,
"eval_loss": 3.902661085128784,
"eval_runtime": 2.1697,
"eval_samples_per_second": 5.531,
"eval_steps_per_second": 0.461,
"step": 54
},
{
"epoch": 9.17,
"grad_norm": 0.42322353991647504,
"learning_rate": 4.2010487684511105e-07,
"loss": 0.0077,
"step": 55
},
{
"epoch": 10.0,
"grad_norm": 0.39922154152767036,
"learning_rate": 0.0,
"loss": 0.0071,
"step": 60
},
{
"epoch": 10.0,
"eval_loss": 3.917660713195801,
"eval_runtime": 2.1754,
"eval_samples_per_second": 5.516,
"eval_steps_per_second": 0.46,
"step": 60
},
{
"epoch": 10.0,
"step": 60,
"total_flos": 12562779340800.0,
"train_loss": 0.41619996732721726,
"train_runtime": 395.7925,
"train_samples_per_second": 18.09,
"train_steps_per_second": 0.152
}
],
"logging_steps": 5,
"max_steps": 60,
"num_input_tokens_seen": 0,
"num_train_epochs": 10,
"save_steps": 1000000000,
"total_flos": 12562779340800.0,
"train_batch_size": 8,
"trial_name": null,
"trial_params": null
}