{
"best_global_step": null,
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 3.0,
"eval_steps": 500,
"global_step": 375,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.16,
"grad_norm": 1.7216755151748657,
"learning_rate": 5e-05,
"loss": 2.4397,
"step": 20
},
{
"epoch": 0.32,
"grad_norm": 1.6662434339523315,
"learning_rate": 9.999782741484788e-05,
"loss": 2.2225,
"step": 40
},
{
"epoch": 0.48,
"grad_norm": 1.1107380390167236,
"learning_rate": 9.904493906342123e-05,
"loss": 1.9769,
"step": 60
},
{
"epoch": 0.64,
"grad_norm": 1.3425688743591309,
"learning_rate": 9.639210244594334e-05,
"loss": 1.8957,
"step": 80
},
{
"epoch": 0.8,
"grad_norm": 1.6660774946212769,
"learning_rate": 9.213126762075088e-05,
"loss": 1.9327,
"step": 100
},
{
"epoch": 0.96,
"grad_norm": 1.0810869932174683,
"learning_rate": 8.641011952560371e-05,
"loss": 1.902,
"step": 120
},
{
"epoch": 1.12,
"grad_norm": 1.5443850755691528,
"learning_rate": 7.94269590651393e-05,
"loss": 1.8109,
"step": 140
},
{
"epoch": 1.28,
"grad_norm": 1.0120549201965332,
"learning_rate": 7.142382979661386e-05,
"loss": 1.7861,
"step": 160
},
{
"epoch": 1.44,
"grad_norm": 1.5919784307479858,
"learning_rate": 6.26781284501043e-05,
"loss": 1.8054,
"step": 180
},
{
"epoch": 1.6,
"grad_norm": 1.353637933731079,
"learning_rate": 5.3492990071209806e-05,
"loss": 1.7685,
"step": 200
},
{
"epoch": 1.76,
"grad_norm": 1.2411092519760132,
"learning_rate": 4.418678104714214e-05,
"loss": 1.8407,
"step": 220
},
{
"epoch": 1.92,
"grad_norm": 1.9999362230300903,
"learning_rate": 3.5082064198777e-05,
"loss": 1.7761,
"step": 240
},
{
"epoch": 2.08,
"grad_norm": 1.3810490369796753,
"learning_rate": 2.6494418419978482e-05,
"loss": 1.7177,
"step": 260
},
{
"epoch": 2.24,
"grad_norm": 1.3220276832580566,
"learning_rate": 1.872150038705015e-05,
"loss": 1.6397,
"step": 280
},
{
"epoch": 2.4,
"grad_norm": 1.6852960586547852,
"learning_rate": 1.203272747076598e-05,
"loss": 1.7622,
"step": 300
},
{
"epoch": 2.56,
"grad_norm": 1.4293591976165771,
"learning_rate": 6.659939451910341e-06,
"loss": 1.6719,
"step": 320
},
{
"epoch": 2.7199999999999998,
"grad_norm": 1.658665657043457,
"learning_rate": 2.7893627149161716e-06,
"loss": 1.6602,
"step": 340
},
{
"epoch": 2.88,
"grad_norm": 1.8376392126083374,
"learning_rate": 5.551554489528432e-07,
"loss": 1.7036,
"step": 360
},
{
"epoch": 3.0,
"step": 375,
"total_flos": 1382710662660096.0,
"train_loss": 1.8455538279215495,
"train_runtime": 977.2087,
"train_samples_per_second": 3.07,
"train_steps_per_second": 0.384
}
],
"logging_steps": 20,
"max_steps": 375,
"num_input_tokens_seen": 0,
"num_train_epochs": 3,
"save_steps": 500,
"stateful_callbacks": {
"TrainerControl": {
"args": {
"should_epoch_stop": false,
"should_evaluate": false,
"should_log": false,
"should_save": true,
"should_training_stop": true
},
"attributes": {}
}
},
"total_flos": 1382710662660096.0,
"train_batch_size": 1,
"trial_name": null,
"trial_params": null
}