{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 0.9995270376793316,
"eval_steps": 500,
"global_step": 12680,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.03941352672237112,
"grad_norm": 9.434146881103516,
"learning_rate": 1.9840000000000003e-05,
"loss": 3.5972,
"step": 500
},
{
"epoch": 0.07882705344474224,
"grad_norm": 8.484919548034668,
"learning_rate": 1.973587517972203e-05,
"loss": 3.0731,
"step": 1000
},
{
"epoch": 0.11824058016711335,
"grad_norm": 8.578750610351562,
"learning_rate": 1.9469620320570853e-05,
"loss": 2.9624,
"step": 1500
},
{
"epoch": 0.15765410688948447,
"grad_norm": 6.2477827072143555,
"learning_rate": 1.920336546141967e-05,
"loss": 2.8351,
"step": 2000
},
{
"epoch": 0.19706763361185559,
"grad_norm": 6.6669602394104,
"learning_rate": 1.8937110602268492e-05,
"loss": 2.817,
"step": 2500
},
{
"epoch": 0.2364811603342267,
"grad_norm": 4.857083797454834,
"learning_rate": 1.8670855743117314e-05,
"loss": 2.7781,
"step": 3000
},
{
"epoch": 0.2758946870565978,
"grad_norm": 5.961228847503662,
"learning_rate": 1.8404600883966135e-05,
"loss": 2.7681,
"step": 3500
},
{
"epoch": 0.31530821377896895,
"grad_norm": 3.896094560623169,
"learning_rate": 1.8138346024814953e-05,
"loss": 2.6833,
"step": 4000
},
{
"epoch": 0.35472174050134003,
"grad_norm": 4.19826078414917,
"learning_rate": 1.7872091165663774e-05,
"loss": 2.7317,
"step": 4500
},
{
"epoch": 0.39413526722371117,
"grad_norm": 4.363683223724365,
"learning_rate": 1.7605836306512596e-05,
"loss": 2.6456,
"step": 5000
},
{
"epoch": 0.4335487939460823,
"grad_norm": 4.608914852142334,
"learning_rate": 1.7339581447361414e-05,
"loss": 2.6575,
"step": 5500
},
{
"epoch": 0.4729623206684534,
"grad_norm": 5.031886577606201,
"learning_rate": 1.707332658821024e-05,
"loss": 2.6423,
"step": 6000
},
{
"epoch": 0.5123758473908245,
"grad_norm": 4.503154277801514,
"learning_rate": 1.6807071729059057e-05,
"loss": 2.6062,
"step": 6500
},
{
"epoch": 0.5517893741131956,
"grad_norm": 3.5765438079833984,
"learning_rate": 1.654134937962618e-05,
"loss": 2.6487,
"step": 7000
},
{
"epoch": 0.5912029008355668,
"grad_norm": 3.3804619312286377,
"learning_rate": 1.6275094520475e-05,
"loss": 2.6378,
"step": 7500
},
{
"epoch": 0.6306164275579379,
"grad_norm": 4.173604965209961,
"learning_rate": 1.600883966132382e-05,
"loss": 2.5821,
"step": 8000
},
{
"epoch": 0.670029954280309,
"grad_norm": 3.0175459384918213,
"learning_rate": 1.5743117311890943e-05,
"loss": 2.5756,
"step": 8500
},
{
"epoch": 0.7094434810026801,
"grad_norm": 4.371723651885986,
"learning_rate": 1.5476862452739764e-05,
"loss": 2.5365,
"step": 9000
},
{
"epoch": 0.7488570077250513,
"grad_norm": 3.797525644302368,
"learning_rate": 1.5210607593588584e-05,
"loss": 2.5734,
"step": 9500
},
{
"epoch": 0.7882705344474223,
"grad_norm": 2.7907519340515137,
"learning_rate": 1.4944352734437404e-05,
"loss": 2.5801,
"step": 10000
},
{
"epoch": 0.8276840611697934,
"grad_norm": 3.6856374740600586,
"learning_rate": 1.4678097875286225e-05,
"loss": 2.5198,
"step": 10500
},
{
"epoch": 0.8670975878921646,
"grad_norm": 3.692336320877075,
"learning_rate": 1.4411843016135045e-05,
"loss": 2.5294,
"step": 11000
},
{
"epoch": 0.9065111146145357,
"grad_norm": 3.4679853916168213,
"learning_rate": 1.4145588156983866e-05,
"loss": 2.5818,
"step": 11500
},
{
"epoch": 0.9459246413369068,
"grad_norm": 3.318463087081909,
"learning_rate": 1.3879333297832686e-05,
"loss": 2.5145,
"step": 12000
},
{
"epoch": 0.985338168059278,
"grad_norm": 3.9721946716308594,
"learning_rate": 1.361361094839981e-05,
"loss": 2.5793,
"step": 12500
}
],
"logging_steps": 500,
"max_steps": 38058,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 1268,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": false
},
"attributes": {}
}
},
"total_flos": 3313182965760000.0,
"train_batch_size": 4,
"trial_name": null,
"trial_params": null
}