{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0303030303030303,
"eval_steps": 500,
"global_step": 200,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.15151515151515152,
"grad_norm": 1.0779532194137573,
"learning_rate": 9e-05,
"loss": 0.5809,
"step": 10
},
{
"epoch": 0.30303030303030304,
"grad_norm": 1.257666826248169,
"learning_rate": 9.944739353007344e-05,
"loss": 0.2898,
"step": 20
},
{
"epoch": 0.45454545454545453,
"grad_norm": 1.1526721715927124,
"learning_rate": 9.755282581475769e-05,
"loss": 0.1944,
"step": 30
},
{
"epoch": 0.6060606060606061,
"grad_norm": 0.740468442440033,
"learning_rate": 9.43611409721806e-05,
"loss": 0.1489,
"step": 40
},
{
"epoch": 0.7575757575757576,
"grad_norm": 0.5122345089912415,
"learning_rate": 8.995939984474624e-05,
"loss": 0.1241,
"step": 50
},
{
"epoch": 0.9090909090909091,
"grad_norm": 0.3294401168823242,
"learning_rate": 8.44676704559283e-05,
"loss": 0.1162,
"step": 60
},
{
"epoch": 1.0606060606060606,
"grad_norm": 0.3713489770889282,
"learning_rate": 7.803575286758364e-05,
"loss": 0.105,
"step": 70
},
{
"epoch": 1.2121212121212122,
"grad_norm": 0.38999056816101074,
"learning_rate": 7.083909302476453e-05,
"loss": 0.102,
"step": 80
},
{
"epoch": 1.3636363636363638,
"grad_norm": 0.313165545463562,
"learning_rate": 6.307399704769099e-05,
"loss": 0.0917,
"step": 90
},
{
"epoch": 1.5151515151515151,
"grad_norm": 0.2271764725446701,
"learning_rate": 5.495227651252315e-05,
"loss": 0.0896,
"step": 100
},
{
"epoch": 1.6666666666666665,
"grad_norm": 0.2582443356513977,
"learning_rate": 4.669547078371504e-05,
"loss": 0.0906,
"step": 110
},
{
"epoch": 1.8181818181818183,
"grad_norm": 0.28969135880470276,
"learning_rate": 3.852880399766243e-05,
"loss": 0.0898,
"step": 120
},
{
"epoch": 1.9696969696969697,
"grad_norm": 0.21474944055080414,
"learning_rate": 3.0675041535377405e-05,
"loss": 0.0858,
"step": 130
},
{
"epoch": 2.121212121212121,
"grad_norm": 0.17501652240753174,
"learning_rate": 2.3348413563600325e-05,
"loss": 0.0827,
"step": 140
},
{
"epoch": 2.2727272727272725,
"grad_norm": 0.17531634867191315,
"learning_rate": 1.6748771394307585e-05,
"loss": 0.0835,
"step": 150
},
{
"epoch": 2.4242424242424243,
"grad_norm": 0.1865883618593216,
"learning_rate": 1.1056136061894384e-05,
"loss": 0.0795,
"step": 160
},
{
"epoch": 2.5757575757575757,
"grad_norm": 0.23194383084774017,
"learning_rate": 6.425787818636131e-06,
"loss": 0.0871,
"step": 170
},
{
"epoch": 2.7272727272727275,
"grad_norm": 0.15936115384101868,
"learning_rate": 2.9840304941919415e-06,
"loss": 0.0838,
"step": 180
},
{
"epoch": 2.878787878787879,
"grad_norm": 0.18423740565776825,
"learning_rate": 8.247462563808817e-07,
"loss": 0.0791,
"step": 190
},
{
"epoch": 3.0303030303030303,
"grad_norm": 0.13376280665397644,
"learning_rate": 6.834750376549792e-09,
"loss": 0.0796,
"step": 200
}
],
"logging_steps": 10,
"max_steps": 200,
"num_input_tokens_seen": 0,
"num_train_epochs": 4,
"save_steps": 40,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 0.0,
"train_batch_size": 128,
"trial_name": null,
"trial_params": null
}