Upload 8 files

Files changed:
- README.md (+8, -8)
- all_results.json (+9, -9)
- eval_results.json (+5, -5)
- model.safetensors (+1, -1)
- train_results.json (+4, -4)
- trainer_state.json (+838, -271)
- training_args.bin (+1, -1)
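The changed files above can be pulled down locally for inspection. A minimal sketch, assuming the repository id matches the model name used in the card below (this is an inference, not stated in the commit) and that huggingface_hub is installed:

```python
# Hypothetical repo id inferred from the model name in the README; adjust if it differs.
from huggingface_hub import snapshot_download

local_dir = snapshot_download("agentlans/multilingual-e5-small-aligned-transformed-readability")
print(local_dir)  # should contain README.md, model.safetensors, trainer_state.json, ...
```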
README.md
CHANGED
@@ -4,19 +4,19 @@ base_model: agentlans/multilingual-e5-small-aligned
 tags:
 - generated_from_trainer
 model-index:
-- name: multilingual-e5-small-aligned-readability
+- name: multilingual-e5-small-aligned-transformed-readability
   results: []
 ---

 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 should probably proofread and complete it, then remove this comment. -->

-# multilingual-e5-small-aligned-readability
+# multilingual-e5-small-aligned-transformed-readability

 This model is a fine-tuned version of [agentlans/multilingual-e5-small-aligned](https://huggingface.co/agentlans/multilingual-e5-small-aligned) on an unknown dataset.
 It achieves the following results on the evaluation set:
-- Loss:
-- Mse:
+- Loss: 0.1989
+- Mse: 0.1989

 ## Model description

@@ -36,7 +36,7 @@ More information needed

 The following hyperparameters were used during training:
 - learning_rate: 5e-05
-- train_batch_size:
+- train_batch_size: 32
 - eval_batch_size: 8
 - seed: 42
 - optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
@@ -47,9 +47,9 @@ The following hyperparameters were used during training:

 | Training Loss | Epoch | Step  | Validation Loss | Mse    |
 |:-------------:|:-----:|:-----:|:---------------:|:------:|
-
-
-
+| 0.2104        | 1.0   | 27096 | 0.2061          | 0.2061 |
+| 0.1718        | 2.0   | 54192 | 0.2066          | 0.2066 |
+| 0.141         | 3.0   | 81288 | 0.1989          | 0.1989 |


 ### Framework versions
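The card reports Loss and Mse as the same number, which is what the Trainer produces for a single-output regression head. A minimal usage sketch under that assumption (regression head with num_labels=1; repo id inferred from the model name, neither is stated in the diff):

```python
# Assumes the checkpoint is a one-output regression model (consistent with the MSE metric above)
# and that the repo id matches the model name; both are inferences, not facts from the card.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

repo = "agentlans/multilingual-e5-small-aligned-transformed-readability"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForSequenceClassification.from_pretrained(repo)
model.eval()

texts = [
    "The cat sat on the mat.",
    "Notwithstanding the aforementioned stipulations, the lessee shall indemnify the lessor.",
]
batch = tokenizer(texts, padding=True, truncation=True, return_tensors="pt")
with torch.no_grad():
    scores = model(**batch).logits.squeeze(-1)  # one readability score per text
print(scores.tolist())
```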
all_results.json
CHANGED
@@ -1,15 +1,15 @@
 {
     "epoch": 3.0,
-    "eval_loss":
-    "eval_mse":
-    "eval_runtime":
+    "eval_loss": 0.19889499247074127,
+    "eval_mse": 0.19889500241300032,
+    "eval_runtime": 57.9433,
     "eval_samples": 96338,
-    "eval_samples_per_second":
-    "eval_steps_per_second":
+    "eval_samples_per_second": 1662.626,
+    "eval_steps_per_second": 207.841,
     "total_flos": 4.283504864539085e+16,
-    "train_loss":
-    "train_runtime":
+    "train_loss": 0.17857247165732115,
+    "train_runtime": 4471.1905,
     "train_samples": 867042,
-    "train_samples_per_second":
-    "train_steps_per_second":
+    "train_samples_per_second": 581.752,
+    "train_steps_per_second": 18.18
 }
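The throughput fields in all_results.json follow directly from the counts and runtimes in the same file. A quick arithmetic check (my own, not repo code; the total step count 81288 and eval batch size 8 come from the card above):

```python
# Arithmetic sanity check of the *_per_second fields; input values copied from the JSON above.
import math

eval_samples, eval_runtime, eval_batch_size = 96338, 57.9433, 8
train_samples, train_runtime, num_epochs, total_steps = 867042, 4471.1905, 3, 81288

print(eval_samples / eval_runtime)                               # ~1662.6  eval_samples_per_second
print(math.ceil(eval_samples / eval_batch_size) / eval_runtime)  # ~207.8   eval_steps_per_second
print(train_samples * num_epochs / train_runtime)                # ~581.8   train_samples_per_second
print(total_steps / train_runtime)                               # ~18.2    train_steps_per_second
```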
eval_results.json
CHANGED
@@ -1,9 +1,9 @@
 {
     "epoch": 3.0,
-    "eval_loss":
-    "eval_mse":
-    "eval_runtime":
+    "eval_loss": 0.19889499247074127,
+    "eval_mse": 0.19889500241300032,
+    "eval_runtime": 57.9433,
     "eval_samples": 96338,
-    "eval_samples_per_second":
-    "eval_steps_per_second":
+    "eval_samples_per_second": 1662.626,
+    "eval_steps_per_second": 207.841
 }
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:ab2e42c2ac7fbb26e46c050038c5d4e57d29fb8cc3a58380747ebb7b05714bd2
 size 470640124
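Only the Git LFS pointer changes here; the pointer records the SHA-256 and byte size of the real weights file, so a downloaded copy can be checked against it. A small sketch, assuming model.safetensors has already been fetched to the working directory:

```python
# Verifies a downloaded model.safetensors against the LFS pointer values shown above.
import hashlib
import os

path = "model.safetensors"  # hypothetical local path
expected_oid = "ab2e42c2ac7fbb26e46c050038c5d4e57d29fb8cc3a58380747ebb7b05714bd2"
expected_size = 470640124

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert os.path.getsize(path) == expected_size
assert h.hexdigest() == expected_oid
print("pointer matches file")
```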
train_results.json
CHANGED
@@ -1,9 +1,9 @@
 {
     "epoch": 3.0,
     "total_flos": 4.283504864539085e+16,
-    "train_loss":
-    "train_runtime":
+    "train_loss": 0.17857247165732115,
+    "train_runtime": 4471.1905,
     "train_samples": 867042,
-    "train_samples_per_second":
-    "train_steps_per_second":
+    "train_samples_per_second": 581.752,
+    "train_steps_per_second": 18.18
 }
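train_results.json reflects the run configured in the card above (learning rate 5e-05, train batch 32, eval batch 8, seed 42, 3 epochs, adamw_torch; logging and checkpointing every 500 steps per trainer_state.json below). A sketch of TrainingArguments consistent with those values; the output_dir and any field not shown in this commit are assumptions:

```python
# Reconstructed from the hyperparameters listed in the card; fields not shown there are guesses.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="multilingual-e5-small-aligned-transformed-readability",  # assumed
    learning_rate=5e-5,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=8,
    num_train_epochs=3,
    seed=42,
    optim="adamw_torch",
    logging_steps=500,
    save_steps=500,
)
```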
trainer_state.json
CHANGED
@@ -1,619 +1,1186 @@
 {
+    "best_metric": 0.19889499247074127,
+    "best_model_checkpoint": "multilingual-e5-small-aligned-transformed-readability/checkpoint-81288",
     "epoch": 3.0,
     "eval_steps": 500,
+    "global_step": 81288,
     "is_hyper_param_search": false,
     "is_local_process_zero": true,
     "is_world_process_zero": true,
     "log_history": [
+        {
+            "epoch": 0.018452908178328904,
+            "grad_norm": 2.7571725845336914,
+            "learning_rate": 4.969245153036119e-05,
+            "loss": 0.3765,
+            "step": 500
+        },
| 18 |
{
|
| 19 |
"epoch": 0.03690581635665781,
|
| 20 |
+
"grad_norm": 2.832648515701294,
|
| 21 |
"learning_rate": 4.938490306072237e-05,
|
| 22 |
+
"loss": 0.2708,
|
| 23 |
+
"step": 1000
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"epoch": 0.05535872453498671,
|
| 27 |
+
"grad_norm": 1.4104365110397339,
|
| 28 |
+
"learning_rate": 4.907735459108356e-05,
|
| 29 |
+
"loss": 0.2557,
|
| 30 |
+
"step": 1500
|
| 31 |
},
|
| 32 |
{
|
| 33 |
"epoch": 0.07381163271331562,
|
| 34 |
+
"grad_norm": 1.8531866073608398,
|
| 35 |
"learning_rate": 4.876980612144474e-05,
|
| 36 |
+
"loss": 0.2635,
|
| 37 |
+
"step": 2000
|
| 38 |
+
},
|
| 39 |
+
{
|
| 40 |
+
"epoch": 0.09226454089164453,
|
| 41 |
+
"grad_norm": 1.649173378944397,
|
| 42 |
+
"learning_rate": 4.846225765180593e-05,
|
| 43 |
+
"loss": 0.2558,
|
| 44 |
+
"step": 2500
|
| 45 |
},
|
| 46 |
{
|
| 47 |
"epoch": 0.11071744906997343,
|
| 48 |
+
"grad_norm": 1.7052029371261597,
|
| 49 |
"learning_rate": 4.815470918216711e-05,
|
| 50 |
+
"loss": 0.2514,
|
| 51 |
+
"step": 3000
|
| 52 |
+
},
|
| 53 |
+
{
|
| 54 |
+
"epoch": 0.12917035724830234,
|
| 55 |
+
"grad_norm": 3.926635980606079,
|
| 56 |
+
"learning_rate": 4.78471607125283e-05,
|
| 57 |
+
"loss": 0.252,
|
| 58 |
+
"step": 3500
|
| 59 |
},
|
| 60 |
{
|
| 61 |
"epoch": 0.14762326542663123,
|
| 62 |
+
"grad_norm": 3.181887626647949,
|
| 63 |
"learning_rate": 4.7539612242889484e-05,
|
| 64 |
+
"loss": 0.2541,
|
| 65 |
+
"step": 4000
|
| 66 |
+
},
|
| 67 |
+
{
|
| 68 |
+
"epoch": 0.16607617360496013,
|
| 69 |
+
"grad_norm": 4.0558180809021,
|
| 70 |
+
"learning_rate": 4.723206377325067e-05,
|
| 71 |
+
"loss": 0.2421,
|
| 72 |
+
"step": 4500
|
| 73 |
},
|
| 74 |
{
|
| 75 |
"epoch": 0.18452908178328906,
|
| 76 |
+
"grad_norm": 1.432974934577942,
|
| 77 |
"learning_rate": 4.692451530361185e-05,
|
| 78 |
+
"loss": 0.2362,
|
| 79 |
+
"step": 5000
|
| 80 |
+
},
|
| 81 |
+
{
|
| 82 |
+
"epoch": 0.20298198996161795,
|
| 83 |
+
"grad_norm": 3.173771858215332,
|
| 84 |
+
"learning_rate": 4.661696683397304e-05,
|
| 85 |
+
"loss": 0.2443,
|
| 86 |
+
"step": 5500
|
| 87 |
},
|
| 88 |
{
|
| 89 |
"epoch": 0.22143489813994685,
|
| 90 |
+
"grad_norm": 2.175633668899536,
|
| 91 |
"learning_rate": 4.6309418364334224e-05,
|
| 92 |
+
"loss": 0.2329,
|
| 93 |
+
"step": 6000
|
| 94 |
+
},
|
| 95 |
+
{
|
| 96 |
+
"epoch": 0.23988780631827575,
|
| 97 |
+
"grad_norm": 4.211012840270996,
|
| 98 |
+
"learning_rate": 4.60018698946954e-05,
|
| 99 |
+
"loss": 0.2303,
|
| 100 |
+
"step": 6500
|
| 101 |
},
|
| 102 |
{
|
| 103 |
"epoch": 0.2583407144966047,
|
| 104 |
+
"grad_norm": 1.5053297281265259,
|
| 105 |
"learning_rate": 4.5694321425056594e-05,
|
| 106 |
+
"loss": 0.2272,
|
| 107 |
+
"step": 7000
|
| 108 |
+
},
|
| 109 |
+
{
|
| 110 |
+
"epoch": 0.27679362267493357,
|
| 111 |
+
"grad_norm": 2.2658045291900635,
|
| 112 |
+
"learning_rate": 4.538677295541778e-05,
|
| 113 |
+
"loss": 0.2309,
|
| 114 |
+
"step": 7500
|
| 115 |
},
|
| 116 |
{
|
| 117 |
"epoch": 0.29524653085326247,
|
| 118 |
+
"grad_norm": 3.0872204303741455,
|
| 119 |
"learning_rate": 4.507922448577896e-05,
|
| 120 |
+
"loss": 0.228,
|
| 121 |
+
"step": 8000
|
| 122 |
+
},
|
| 123 |
+
{
|
| 124 |
+
"epoch": 0.31369943903159137,
|
| 125 |
+
"grad_norm": 1.5754343271255493,
|
| 126 |
+
"learning_rate": 4.477167601614014e-05,
|
| 127 |
+
"loss": 0.2344,
|
| 128 |
+
"step": 8500
|
| 129 |
},
|
| 130 |
{
|
| 131 |
"epoch": 0.33215234720992026,
|
| 132 |
+
"grad_norm": 8.282055854797363,
|
| 133 |
"learning_rate": 4.4464127546501335e-05,
|
| 134 |
+
"loss": 0.2235,
|
| 135 |
+
"step": 9000
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"epoch": 0.3506052553882492,
|
| 139 |
+
"grad_norm": 2.818925619125366,
|
| 140 |
+
"learning_rate": 4.415657907686251e-05,
|
| 141 |
+
"loss": 0.225,
|
| 142 |
+
"step": 9500
|
| 143 |
},
|
| 144 |
{
|
| 145 |
"epoch": 0.3690581635665781,
|
| 146 |
+
"grad_norm": 4.582856178283691,
|
| 147 |
"learning_rate": 4.38490306072237e-05,
|
| 148 |
+
"loss": 0.2195,
|
| 149 |
+
"step": 10000
|
| 150 |
+
},
|
| 151 |
+
{
|
| 152 |
+
"epoch": 0.387511071744907,
|
| 153 |
+
"grad_norm": 4.176349639892578,
|
| 154 |
+
"learning_rate": 4.354148213758489e-05,
|
| 155 |
+
"loss": 0.2249,
|
| 156 |
+
"step": 10500
|
| 157 |
},
|
| 158 |
{
|
| 159 |
"epoch": 0.4059639799232359,
|
| 160 |
+
"grad_norm": 1.69513738155365,
|
| 161 |
"learning_rate": 4.323393366794607e-05,
|
| 162 |
+
"loss": 0.2227,
|
| 163 |
+
"step": 11000
|
| 164 |
+
},
|
| 165 |
+
{
|
| 166 |
+
"epoch": 0.4244168881015648,
|
| 167 |
+
"grad_norm": 2.0948939323425293,
|
| 168 |
+
"learning_rate": 4.2926385198307254e-05,
|
| 169 |
+
"loss": 0.2248,
|
| 170 |
+
"step": 11500
|
| 171 |
},
|
| 172 |
{
|
| 173 |
"epoch": 0.4428697962798937,
|
| 174 |
+
"grad_norm": 2.4989616870880127,
|
| 175 |
"learning_rate": 4.261883672866844e-05,
|
| 176 |
+
"loss": 0.2194,
|
| 177 |
+
"step": 12000
|
| 178 |
+
},
|
| 179 |
+
{
|
| 180 |
+
"epoch": 0.4613227044582226,
|
| 181 |
+
"grad_norm": 1.1772059202194214,
|
| 182 |
+
"learning_rate": 4.2311288259029624e-05,
|
| 183 |
+
"loss": 0.2232,
|
| 184 |
+
"step": 12500
|
| 185 |
},
|
| 186 |
{
|
| 187 |
"epoch": 0.4797756126365515,
|
| 188 |
+
"grad_norm": 5.26480770111084,
|
| 189 |
"learning_rate": 4.200373978939081e-05,
|
| 190 |
+
"loss": 0.2199,
|
| 191 |
+
"step": 13000
|
| 192 |
+
},
|
| 193 |
+
{
|
| 194 |
+
"epoch": 0.49822852081488045,
|
| 195 |
+
"grad_norm": 1.3563578128814697,
|
| 196 |
+
"learning_rate": 4.1696191319751994e-05,
|
| 197 |
+
"loss": 0.2264,
|
| 198 |
+
"step": 13500
|
| 199 |
},
|
| 200 |
{
|
| 201 |
"epoch": 0.5166814289932093,
|
| 202 |
+
"grad_norm": 1.2438708543777466,
|
| 203 |
"learning_rate": 4.138864285011318e-05,
|
| 204 |
+
"loss": 0.2239,
|
| 205 |
+
"step": 14000
|
| 206 |
+
},
|
| 207 |
+
{
|
| 208 |
+
"epoch": 0.5351343371715382,
|
| 209 |
+
"grad_norm": 2.229975700378418,
|
| 210 |
+
"learning_rate": 4.1081094380474365e-05,
|
| 211 |
+
"loss": 0.211,
|
| 212 |
+
"step": 14500
|
| 213 |
},
|
| 214 |
{
|
| 215 |
"epoch": 0.5535872453498671,
|
| 216 |
+
"grad_norm": 1.4763661623001099,
|
| 217 |
"learning_rate": 4.077354591083555e-05,
|
| 218 |
+
"loss": 0.2176,
|
| 219 |
+
"step": 15000
|
| 220 |
+
},
|
| 221 |
+
{
|
| 222 |
+
"epoch": 0.572040153528196,
|
| 223 |
+
"grad_norm": 2.88029408454895,
|
| 224 |
+
"learning_rate": 4.0465997441196735e-05,
|
| 225 |
+
"loss": 0.2229,
|
| 226 |
+
"step": 15500
|
| 227 |
},
|
| 228 |
{
|
| 229 |
"epoch": 0.5904930617065249,
|
| 230 |
+
"grad_norm": 0.7661384344100952,
|
| 231 |
"learning_rate": 4.015844897155792e-05,
|
| 232 |
+
"loss": 0.2195,
|
| 233 |
+
"step": 16000
|
| 234 |
+
},
|
| 235 |
+
{
|
| 236 |
+
"epoch": 0.6089459698848538,
|
| 237 |
+
"grad_norm": 2.0358428955078125,
|
| 238 |
+
"learning_rate": 3.9850900501919105e-05,
|
| 239 |
+
"loss": 0.2161,
|
| 240 |
+
"step": 16500
|
| 241 |
},
|
| 242 |
{
|
| 243 |
"epoch": 0.6273988780631827,
|
| 244 |
+
"grad_norm": 1.9549895524978638,
|
| 245 |
"learning_rate": 3.954335203228029e-05,
|
| 246 |
+
"loss": 0.2193,
|
| 247 |
+
"step": 17000
|
| 248 |
+
},
|
| 249 |
+
{
|
| 250 |
+
"epoch": 0.6458517862415116,
|
| 251 |
+
"grad_norm": 2.1742184162139893,
|
| 252 |
+
"learning_rate": 3.9235803562641475e-05,
|
| 253 |
+
"loss": 0.2171,
|
| 254 |
+
"step": 17500
|
| 255 |
},
|
| 256 |
{
|
| 257 |
"epoch": 0.6643046944198405,
|
| 258 |
+
"grad_norm": 1.1012811660766602,
|
| 259 |
"learning_rate": 3.892825509300266e-05,
|
| 260 |
+
"loss": 0.2246,
|
| 261 |
+
"step": 18000
|
| 262 |
+
},
|
| 263 |
+
{
|
| 264 |
+
"epoch": 0.6827576025981694,
|
| 265 |
+
"grad_norm": 2.7291996479034424,
|
| 266 |
+
"learning_rate": 3.8620706623363846e-05,
|
| 267 |
+
"loss": 0.2114,
|
| 268 |
+
"step": 18500
|
| 269 |
},
|
| 270 |
{
|
| 271 |
"epoch": 0.7012105107764984,
|
| 272 |
+
"grad_norm": 1.3418771028518677,
|
| 273 |
"learning_rate": 3.8313158153725024e-05,
|
| 274 |
+
"loss": 0.2173,
|
| 275 |
+
"step": 19000
|
| 276 |
+
},
|
| 277 |
+
{
|
| 278 |
+
"epoch": 0.7196634189548273,
|
| 279 |
+
"grad_norm": 2.7479825019836426,
|
| 280 |
+
"learning_rate": 3.8005609684086216e-05,
|
| 281 |
+
"loss": 0.2163,
|
| 282 |
+
"step": 19500
|
| 283 |
},
|
| 284 |
{
|
| 285 |
"epoch": 0.7381163271331562,
|
| 286 |
+
"grad_norm": 1.7314202785491943,
|
| 287 |
"learning_rate": 3.76980612144474e-05,
|
| 288 |
+
"loss": 0.2142,
|
| 289 |
+
"step": 20000
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"epoch": 0.7565692353114851,
|
| 293 |
+
"grad_norm": 1.5135014057159424,
|
| 294 |
+
"learning_rate": 3.739051274480858e-05,
|
| 295 |
+
"loss": 0.2156,
|
| 296 |
+
"step": 20500
|
| 297 |
},
|
| 298 |
{
|
| 299 |
"epoch": 0.775022143489814,
|
| 300 |
+
"grad_norm": 0.9992055296897888,
|
| 301 |
"learning_rate": 3.708296427516977e-05,
|
| 302 |
+
"loss": 0.2136,
|
| 303 |
+
"step": 21000
|
| 304 |
+
},
|
| 305 |
+
{
|
| 306 |
+
"epoch": 0.7934750516681429,
|
| 307 |
+
"grad_norm": 1.2363203763961792,
|
| 308 |
+
"learning_rate": 3.6775415805530957e-05,
|
| 309 |
+
"loss": 0.2134,
|
| 310 |
+
"step": 21500
|
| 311 |
},
|
| 312 |
{
|
| 313 |
"epoch": 0.8119279598464718,
|
| 314 |
+
"grad_norm": 1.8317536115646362,
|
| 315 |
"learning_rate": 3.6467867335892135e-05,
|
| 316 |
+
"loss": 0.217,
|
| 317 |
+
"step": 22000
|
| 318 |
+
},
|
| 319 |
+
{
|
| 320 |
+
"epoch": 0.8303808680248007,
|
| 321 |
+
"grad_norm": 1.7996548414230347,
|
| 322 |
+
"learning_rate": 3.616031886625332e-05,
|
| 323 |
+
"loss": 0.213,
|
| 324 |
+
"step": 22500
|
| 325 |
},
|
| 326 |
{
|
| 327 |
"epoch": 0.8488337762031296,
|
| 328 |
+
"grad_norm": 1.1373772621154785,
|
| 329 |
"learning_rate": 3.585277039661451e-05,
|
| 330 |
+
"loss": 0.2249,
|
| 331 |
+
"step": 23000
|
| 332 |
+
},
|
| 333 |
+
{
|
| 334 |
+
"epoch": 0.8672866843814585,
|
| 335 |
+
"grad_norm": 1.2996028661727905,
|
| 336 |
+
"learning_rate": 3.554522192697569e-05,
|
| 337 |
+
"loss": 0.207,
|
| 338 |
+
"step": 23500
|
| 339 |
},
|
| 340 |
{
|
| 341 |
"epoch": 0.8857395925597874,
|
| 342 |
+
"grad_norm": 1.505035638809204,
|
| 343 |
"learning_rate": 3.5237673457336876e-05,
|
| 344 |
+
"loss": 0.2119,
|
| 345 |
+
"step": 24000
|
| 346 |
+
},
|
| 347 |
+
{
|
| 348 |
+
"epoch": 0.9041925007381163,
|
| 349 |
+
"grad_norm": 1.2497526407241821,
|
| 350 |
+
"learning_rate": 3.493012498769807e-05,
|
| 351 |
+
"loss": 0.2096,
|
| 352 |
+
"step": 24500
|
| 353 |
},
|
| 354 |
{
|
| 355 |
"epoch": 0.9226454089164452,
|
| 356 |
+
"grad_norm": 2.1352574825286865,
|
| 357 |
"learning_rate": 3.4622576518059246e-05,
|
| 358 |
+
"loss": 0.2143,
|
| 359 |
+
"step": 25000
|
| 360 |
+
},
|
| 361 |
+
{
|
| 362 |
+
"epoch": 0.9410983170947741,
|
| 363 |
+
"grad_norm": 1.664171576499939,
|
| 364 |
+
"learning_rate": 3.431502804842043e-05,
|
| 365 |
+
"loss": 0.2036,
|
| 366 |
+
"step": 25500
|
| 367 |
},
|
| 368 |
{
|
| 369 |
"epoch": 0.959551225273103,
|
| 370 |
+
"grad_norm": 2.7897629737854004,
|
| 371 |
"learning_rate": 3.400747957878162e-05,
|
| 372 |
+
"loss": 0.2118,
|
| 373 |
+
"step": 26000
|
| 374 |
+
},
|
| 375 |
+
{
|
| 376 |
+
"epoch": 0.978004133451432,
|
| 377 |
+
"grad_norm": 1.0113285779953003,
|
| 378 |
+
"learning_rate": 3.36999311091428e-05,
|
| 379 |
+
"loss": 0.2175,
|
| 380 |
+
"step": 26500
|
| 381 |
},
|
| 382 |
{
|
| 383 |
"epoch": 0.9964570416297609,
|
| 384 |
+
"grad_norm": 2.9997363090515137,
|
| 385 |
"learning_rate": 3.3392382639503986e-05,
|
| 386 |
+
"loss": 0.2104,
|
| 387 |
+
"step": 27000
|
| 388 |
},
         {
             "epoch": 1.0,
+            "eval_loss": 0.20612339675426483,
+            "eval_mse": 0.20612340591663994,
+            "eval_runtime": 57.193,
+            "eval_samples_per_second": 1684.438,
+            "eval_steps_per_second": 210.568,
+            "step": 27096
+        },
|
| 398 |
+
{
|
| 399 |
+
"epoch": 1.0149099498080898,
|
| 400 |
+
"grad_norm": 1.4240479469299316,
|
| 401 |
+
"learning_rate": 3.308483416986517e-05,
|
| 402 |
+
"loss": 0.1821,
|
| 403 |
+
"step": 27500
|
| 404 |
},
|
| 405 |
{
|
| 406 |
"epoch": 1.0333628579864187,
|
| 407 |
+
"grad_norm": 1.0634160041809082,
|
| 408 |
"learning_rate": 3.277728570022636e-05,
|
| 409 |
+
"loss": 0.1745,
|
| 410 |
+
"step": 28000
|
| 411 |
+
},
|
| 412 |
+
{
|
| 413 |
+
"epoch": 1.0518157661647476,
|
| 414 |
+
"grad_norm": 1.9994093179702759,
|
| 415 |
+
"learning_rate": 3.246973723058754e-05,
|
| 416 |
+
"loss": 0.1712,
|
| 417 |
+
"step": 28500
|
| 418 |
},
|
| 419 |
{
|
| 420 |
"epoch": 1.0702686743430765,
|
| 421 |
+
"grad_norm": 0.736122727394104,
|
| 422 |
"learning_rate": 3.216218876094873e-05,
|
| 423 |
+
"loss": 0.1738,
|
| 424 |
+
"step": 29000
|
| 425 |
+
},
|
| 426 |
+
{
|
| 427 |
+
"epoch": 1.0887215825214054,
|
| 428 |
+
"grad_norm": 1.7938990592956543,
|
| 429 |
+
"learning_rate": 3.185464029130991e-05,
|
| 430 |
+
"loss": 0.1698,
|
| 431 |
+
"step": 29500
|
| 432 |
},
|
| 433 |
{
|
| 434 |
"epoch": 1.1071744906997343,
|
| 435 |
+
"grad_norm": 1.9040451049804688,
|
| 436 |
"learning_rate": 3.15470918216711e-05,
|
| 437 |
+
"loss": 0.1734,
|
| 438 |
+
"step": 30000
|
| 439 |
+
},
|
| 440 |
+
{
|
| 441 |
+
"epoch": 1.1256273988780632,
|
| 442 |
+
"grad_norm": 1.222025990486145,
|
| 443 |
+
"learning_rate": 3.123954335203228e-05,
|
| 444 |
+
"loss": 0.1715,
|
| 445 |
+
"step": 30500
|
| 446 |
},
|
| 447 |
{
|
| 448 |
"epoch": 1.144080307056392,
|
| 449 |
+
"grad_norm": 1.4371784925460815,
|
| 450 |
"learning_rate": 3.093199488239347e-05,
|
| 451 |
+
"loss": 0.1688,
|
| 452 |
+
"step": 31000
|
| 453 |
+
},
|
| 454 |
+
{
|
| 455 |
+
"epoch": 1.162533215234721,
|
| 456 |
+
"grad_norm": 5.807870864868164,
|
| 457 |
+
"learning_rate": 3.062444641275465e-05,
|
| 458 |
+
"loss": 0.179,
|
| 459 |
+
"step": 31500
|
| 460 |
},
|
| 461 |
{
|
| 462 |
"epoch": 1.1809861234130499,
|
| 463 |
+
"grad_norm": 1.3887362480163574,
|
| 464 |
"learning_rate": 3.0316897943115834e-05,
|
| 465 |
+
"loss": 0.179,
|
| 466 |
+
"step": 32000
|
| 467 |
+
},
|
| 468 |
+
{
|
| 469 |
+
"epoch": 1.1994390315913788,
|
| 470 |
+
"grad_norm": 2.2503085136413574,
|
| 471 |
+
"learning_rate": 3.0009349473477023e-05,
|
| 472 |
+
"loss": 0.1738,
|
| 473 |
+
"step": 32500
|
| 474 |
},
|
| 475 |
{
|
| 476 |
"epoch": 1.2178919397697077,
|
| 477 |
+
"grad_norm": 2.3477783203125,
|
| 478 |
"learning_rate": 2.9701801003838208e-05,
|
| 479 |
+
"loss": 0.1722,
|
| 480 |
+
"step": 33000
|
| 481 |
+
},
|
| 482 |
+
{
|
| 483 |
+
"epoch": 1.2363448479480366,
|
| 484 |
+
"grad_norm": 2.7416176795959473,
|
| 485 |
+
"learning_rate": 2.939425253419939e-05,
|
| 486 |
+
"loss": 0.1786,
|
| 487 |
+
"step": 33500
|
| 488 |
},
|
| 489 |
{
|
| 490 |
"epoch": 1.2547977561263655,
|
| 491 |
+
"grad_norm": 0.7052303552627563,
|
| 492 |
"learning_rate": 2.9086704064560578e-05,
|
| 493 |
+
"loss": 0.1728,
|
| 494 |
+
"step": 34000
|
| 495 |
+
},
|
| 496 |
+
{
|
| 497 |
+
"epoch": 1.2732506643046944,
|
| 498 |
+
"grad_norm": 2.529670000076294,
|
| 499 |
+
"learning_rate": 2.877915559492176e-05,
|
| 500 |
+
"loss": 0.1741,
|
| 501 |
+
"step": 34500
|
| 502 |
},
|
| 503 |
{
|
| 504 |
"epoch": 1.2917035724830233,
|
| 505 |
+
"grad_norm": 1.9189903736114502,
|
| 506 |
"learning_rate": 2.8471607125282945e-05,
|
| 507 |
+
"loss": 0.1762,
|
| 508 |
+
"step": 35000
|
| 509 |
+
},
|
| 510 |
+
{
|
| 511 |
+
"epoch": 1.3101564806613522,
|
| 512 |
+
"grad_norm": 2.1008570194244385,
|
| 513 |
+
"learning_rate": 2.8164058655644134e-05,
|
| 514 |
+
"loss": 0.1719,
|
| 515 |
+
"step": 35500
|
| 516 |
},
|
| 517 |
{
|
| 518 |
"epoch": 1.328609388839681,
|
| 519 |
+
"grad_norm": 2.663116216659546,
|
| 520 |
"learning_rate": 2.7856510186005312e-05,
|
| 521 |
+
"loss": 0.1739,
|
| 522 |
+
"step": 36000
|
| 523 |
+
},
|
| 524 |
+
{
|
| 525 |
+
"epoch": 1.34706229701801,
|
| 526 |
+
"grad_norm": 3.453697443008423,
|
| 527 |
+
"learning_rate": 2.75489617163665e-05,
|
| 528 |
+
"loss": 0.1691,
|
| 529 |
+
"step": 36500
|
| 530 |
},
|
| 531 |
{
|
| 532 |
"epoch": 1.3655152051963388,
|
| 533 |
+
"grad_norm": 5.848513603210449,
|
| 534 |
"learning_rate": 2.7241413246727686e-05,
|
| 535 |
+
"loss": 0.1674,
|
| 536 |
+
"step": 37000
|
| 537 |
+
},
|
| 538 |
+
{
|
| 539 |
+
"epoch": 1.3839681133746677,
|
| 540 |
+
"grad_norm": 1.1454991102218628,
|
| 541 |
+
"learning_rate": 2.6933864777088867e-05,
|
| 542 |
+
"loss": 0.1764,
|
| 543 |
+
"step": 37500
|
| 544 |
},
|
| 545 |
{
|
| 546 |
"epoch": 1.4024210215529966,
|
| 547 |
+
"grad_norm": 0.9938109517097473,
|
| 548 |
"learning_rate": 2.6626316307450056e-05,
|
| 549 |
+
"loss": 0.1706,
|
| 550 |
+
"step": 38000
|
| 551 |
+
},
|
| 552 |
+
{
|
| 553 |
+
"epoch": 1.4208739297313255,
|
| 554 |
+
"grad_norm": 2.252068042755127,
|
| 555 |
+
"learning_rate": 2.631876783781124e-05,
|
| 556 |
+
"loss": 0.1665,
|
| 557 |
+
"step": 38500
|
| 558 |
},
|
| 559 |
{
|
| 560 |
"epoch": 1.4393268379096544,
|
| 561 |
+
"grad_norm": 1.9789129495620728,
|
| 562 |
"learning_rate": 2.6011219368172423e-05,
|
| 563 |
+
"loss": 0.1746,
|
| 564 |
+
"step": 39000
|
| 565 |
+
},
|
| 566 |
+
{
|
| 567 |
+
"epoch": 1.4577797460879833,
|
| 568 |
+
"grad_norm": 1.5638952255249023,
|
| 569 |
+
"learning_rate": 2.570367089853361e-05,
|
| 570 |
+
"loss": 0.1699,
|
| 571 |
+
"step": 39500
|
| 572 |
},
|
| 573 |
{
|
| 574 |
"epoch": 1.4762326542663124,
|
| 575 |
+
"grad_norm": 2.094984292984009,
|
| 576 |
"learning_rate": 2.5396122428894797e-05,
|
| 577 |
+
"loss": 0.1715,
|
| 578 |
+
"step": 40000
|
| 579 |
+
},
|
| 580 |
+
{
|
| 581 |
+
"epoch": 1.4946855624446413,
|
| 582 |
+
"grad_norm": 2.625145435333252,
|
| 583 |
+
"learning_rate": 2.508857395925598e-05,
|
| 584 |
+
"loss": 0.1708,
|
| 585 |
+
"step": 40500
|
| 586 |
},
|
| 587 |
{
|
| 588 |
"epoch": 1.51313847062297,
|
| 589 |
+
"grad_norm": 1.2873293161392212,
|
| 590 |
"learning_rate": 2.4781025489617167e-05,
|
| 591 |
+
"loss": 0.1721,
|
| 592 |
+
"step": 41000
|
| 593 |
+
},
|
| 594 |
+
{
|
| 595 |
+
"epoch": 1.531591378801299,
|
| 596 |
+
"grad_norm": 2.8465254306793213,
|
| 597 |
+
"learning_rate": 2.447347701997835e-05,
|
| 598 |
+
"loss": 0.1761,
|
| 599 |
+
"step": 41500
|
| 600 |
},
|
| 601 |
{
|
| 602 |
"epoch": 1.550044286979628,
|
| 603 |
+
"grad_norm": 1.4705593585968018,
|
| 604 |
"learning_rate": 2.4165928550339534e-05,
|
| 605 |
+
"loss": 0.1756,
|
| 606 |
+
"step": 42000
|
| 607 |
+
},
|
| 608 |
+
{
|
| 609 |
+
"epoch": 1.568497195157957,
|
| 610 |
+
"grad_norm": 0.9254862666130066,
|
| 611 |
+
"learning_rate": 2.3858380080700722e-05,
|
| 612 |
+
"loss": 0.1759,
|
| 613 |
+
"step": 42500
|
| 614 |
},
|
| 615 |
{
|
| 616 |
"epoch": 1.5869501033362858,
|
| 617 |
+
"grad_norm": 1.8685784339904785,
|
| 618 |
"learning_rate": 2.3550831611061904e-05,
|
| 619 |
+
"loss": 0.167,
|
| 620 |
+
"step": 43000
|
| 621 |
+
},
|
| 622 |
+
{
|
| 623 |
+
"epoch": 1.6054030115146147,
|
| 624 |
+
"grad_norm": 1.4468207359313965,
|
| 625 |
+
"learning_rate": 2.324328314142309e-05,
|
| 626 |
+
"loss": 0.1776,
|
| 627 |
+
"step": 43500
|
| 628 |
},
|
| 629 |
{
|
| 630 |
"epoch": 1.6238559196929436,
|
| 631 |
+
"grad_norm": 2.477450132369995,
|
| 632 |
"learning_rate": 2.2935734671784274e-05,
|
| 633 |
+
"loss": 0.174,
|
| 634 |
+
"step": 44000
|
| 635 |
+
},
|
| 636 |
+
{
|
| 637 |
+
"epoch": 1.6423088278712725,
|
| 638 |
+
"grad_norm": 11.740235328674316,
|
| 639 |
+
"learning_rate": 2.262818620214546e-05,
|
| 640 |
+
"loss": 0.1652,
|
| 641 |
+
"step": 44500
|
| 642 |
},
|
| 643 |
{
|
| 644 |
"epoch": 1.6607617360496014,
|
| 645 |
+
"grad_norm": 2.253143548965454,
|
| 646 |
"learning_rate": 2.2320637732506645e-05,
|
| 647 |
+
"loss": 0.1776,
|
| 648 |
+
"step": 45000
|
| 649 |
+
},
|
| 650 |
+
{
|
| 651 |
+
"epoch": 1.6792146442279303,
|
| 652 |
+
"grad_norm": 4.1611151695251465,
|
| 653 |
+
"learning_rate": 2.201308926286783e-05,
|
| 654 |
+
"loss": 0.1643,
|
| 655 |
+
"step": 45500
|
| 656 |
},
|
| 657 |
{
|
| 658 |
"epoch": 1.6976675524062592,
|
| 659 |
+
"grad_norm": 3.693655252456665,
|
| 660 |
"learning_rate": 2.1705540793229015e-05,
|
| 661 |
+
"loss": 0.1705,
|
| 662 |
+
"step": 46000
|
| 663 |
+
},
|
| 664 |
+
{
|
| 665 |
+
"epoch": 1.7161204605845881,
|
| 666 |
+
"grad_norm": 3.8450114727020264,
|
| 667 |
+
"learning_rate": 2.13979923235902e-05,
|
| 668 |
+
"loss": 0.1715,
|
| 669 |
+
"step": 46500
|
| 670 |
},
|
| 671 |
{
|
| 672 |
"epoch": 1.734573368762917,
|
| 673 |
+
"grad_norm": 3.296321392059326,
|
| 674 |
"learning_rate": 2.1090443853951382e-05,
|
| 675 |
+
"loss": 0.1642,
|
| 676 |
+
"step": 47000
|
| 677 |
+
},
|
| 678 |
+
{
|
| 679 |
+
"epoch": 1.753026276941246,
|
| 680 |
+
"grad_norm": 2.0819671154022217,
|
| 681 |
+
"learning_rate": 2.0782895384312567e-05,
|
| 682 |
+
"loss": 0.1624,
|
| 683 |
+
"step": 47500
|
| 684 |
},
|
| 685 |
{
|
| 686 |
"epoch": 1.7714791851195748,
|
| 687 |
+
"grad_norm": 0.8893182873725891,
|
| 688 |
"learning_rate": 2.0475346914673755e-05,
|
| 689 |
+
"loss": 0.1678,
|
| 690 |
+
"step": 48000
|
| 691 |
+
},
|
| 692 |
+
{
|
| 693 |
+
"epoch": 1.7899320932979037,
|
| 694 |
+
"grad_norm": 2.971529960632324,
|
| 695 |
+
"learning_rate": 2.0167798445034937e-05,
|
| 696 |
+
"loss": 0.1664,
|
| 697 |
+
"step": 48500
|
| 698 |
},
|
| 699 |
{
|
| 700 |
"epoch": 1.8083850014762326,
|
| 701 |
+
"grad_norm": 2.0590310096740723,
|
| 702 |
"learning_rate": 1.9860249975396122e-05,
|
| 703 |
+
"loss": 0.1779,
|
| 704 |
+
"step": 49000
|
| 705 |
+
},
|
| 706 |
+
{
|
| 707 |
+
"epoch": 1.8268379096545617,
|
| 708 |
+
"grad_norm": 2.0498523712158203,
|
| 709 |
+
"learning_rate": 1.955270150575731e-05,
|
| 710 |
+
"loss": 0.1695,
|
| 711 |
+
"step": 49500
|
| 712 |
},
|
| 713 |
{
|
| 714 |
"epoch": 1.8452908178328906,
|
| 715 |
+
"grad_norm": 1.6503143310546875,
|
| 716 |
"learning_rate": 1.9245153036118493e-05,
|
| 717 |
+
"loss": 0.1678,
|
| 718 |
+
"step": 50000
|
| 719 |
+
},
|
| 720 |
+
{
|
| 721 |
+
"epoch": 1.8637437260112195,
|
| 722 |
+
"grad_norm": 1.0318537950515747,
|
| 723 |
+
"learning_rate": 1.8937604566479678e-05,
|
| 724 |
+
"loss": 0.1644,
|
| 725 |
+
"step": 50500
|
| 726 |
},
|
| 727 |
{
|
| 728 |
"epoch": 1.8821966341895484,
|
| 729 |
+
"grad_norm": 1.936584711074829,
|
| 730 |
"learning_rate": 1.8630056096840863e-05,
|
| 731 |
+
"loss": 0.1697,
|
| 732 |
+
"step": 51000
|
| 733 |
+
},
|
| 734 |
+
{
|
| 735 |
+
"epoch": 1.9006495423678773,
|
| 736 |
+
"grad_norm": 2.5828168392181396,
|
| 737 |
+
"learning_rate": 1.8322507627202048e-05,
|
| 738 |
+
"loss": 0.1696,
|
| 739 |
+
"step": 51500
|
| 740 |
},
|
| 741 |
{
|
| 742 |
"epoch": 1.9191024505462062,
|
| 743 |
+
"grad_norm": 3.156874895095825,
|
| 744 |
"learning_rate": 1.8014959157563233e-05,
|
| 745 |
+
"loss": 0.1673,
|
| 746 |
+
"step": 52000
|
| 747 |
+
},
|
| 748 |
+
{
|
| 749 |
+
"epoch": 1.937555358724535,
|
| 750 |
+
"grad_norm": 3.178074836730957,
|
| 751 |
+
"learning_rate": 1.7707410687924418e-05,
|
| 752 |
+
"loss": 0.176,
|
| 753 |
+
"step": 52500
|
| 754 |
},
|
| 755 |
{
|
| 756 |
"epoch": 1.956008266902864,
|
| 757 |
+
"grad_norm": 1.48374342918396,
|
| 758 |
"learning_rate": 1.7399862218285603e-05,
|
| 759 |
+
"loss": 0.1677,
|
| 760 |
+
"step": 53000
|
| 761 |
+
},
|
| 762 |
+
{
|
| 763 |
+
"epoch": 1.974461175081193,
|
| 764 |
+
"grad_norm": 3.43747878074646,
|
| 765 |
+
"learning_rate": 1.709231374864679e-05,
|
| 766 |
+
"loss": 0.1696,
|
| 767 |
+
"step": 53500
|
| 768 |
},
|
| 769 |
{
|
| 770 |
"epoch": 1.9929140832595218,
|
| 771 |
+
"grad_norm": 1.6862876415252686,
|
| 772 |
"learning_rate": 1.678476527900797e-05,
|
| 773 |
+
"loss": 0.1718,
|
| 774 |
+
"step": 54000
|
| 775 |
},
         {
             "epoch": 2.0,
+            "eval_loss": 0.20655478537082672,
+            "eval_mse": 0.2065547745621827,
+            "eval_runtime": 52.3234,
+            "eval_samples_per_second": 1841.202,
+            "eval_steps_per_second": 230.165,
+            "step": 54192
+        },
|
| 785 |
+
{
|
| 786 |
+
"epoch": 2.0113669914378507,
|
| 787 |
+
"grad_norm": 0.8314543962478638,
|
| 788 |
+
"learning_rate": 1.647721680936916e-05,
|
| 789 |
+
"loss": 0.1449,
|
| 790 |
+
"step": 54500
|
| 791 |
},
|
| 792 |
{
|
| 793 |
"epoch": 2.0298198996161796,
|
| 794 |
+
"grad_norm": 1.8953380584716797,
|
| 795 |
"learning_rate": 1.6169668339730344e-05,
|
| 796 |
+
"loss": 0.1357,
|
| 797 |
+
"step": 55000
|
| 798 |
+
},
|
| 799 |
+
{
|
| 800 |
+
"epoch": 2.0482728077945085,
|
| 801 |
+
"grad_norm": 0.7893266081809998,
|
| 802 |
+
"learning_rate": 1.5862119870091526e-05,
|
| 803 |
+
"loss": 0.138,
|
| 804 |
+
"step": 55500
|
| 805 |
},
|
| 806 |
{
|
| 807 |
"epoch": 2.0667257159728374,
|
| 808 |
+
"grad_norm": 1.337292194366455,
|
| 809 |
"learning_rate": 1.555457140045271e-05,
|
| 810 |
+
"loss": 0.1407,
|
| 811 |
+
"step": 56000
|
| 812 |
+
},
|
| 813 |
+
{
|
| 814 |
+
"epoch": 2.0851786241511663,
|
| 815 |
+
"grad_norm": 1.6890192031860352,
|
| 816 |
+
"learning_rate": 1.5247022930813898e-05,
|
| 817 |
+
"loss": 0.1406,
|
| 818 |
+
"step": 56500
|
| 819 |
},
|
| 820 |
{
|
| 821 |
"epoch": 2.103631532329495,
|
| 822 |
+
"grad_norm": 2.1817214488983154,
|
| 823 |
"learning_rate": 1.4939474461175081e-05,
|
| 824 |
+
"loss": 0.1332,
|
| 825 |
+
"step": 57000
|
| 826 |
+
},
|
| 827 |
+
{
|
| 828 |
+
"epoch": 2.122084440507824,
|
| 829 |
+
"grad_norm": 1.477333664894104,
|
| 830 |
+
"learning_rate": 1.4631925991536266e-05,
|
| 831 |
+
"loss": 0.1415,
|
| 832 |
+
"step": 57500
|
| 833 |
},
|
| 834 |
{
|
| 835 |
"epoch": 2.140537348686153,
|
| 836 |
+
"grad_norm": 3.889193534851074,
|
| 837 |
"learning_rate": 1.4324377521897453e-05,
|
| 838 |
+
"loss": 0.1399,
|
| 839 |
+
"step": 58000
|
| 840 |
+
},
|
| 841 |
+
{
|
| 842 |
+
"epoch": 2.158990256864482,
|
| 843 |
+
"grad_norm": 11.35392951965332,
|
| 844 |
+
"learning_rate": 1.4016829052258637e-05,
|
| 845 |
+
"loss": 0.1345,
|
| 846 |
+
"step": 58500
|
| 847 |
},
|
| 848 |
{
|
| 849 |
"epoch": 2.1774431650428108,
|
| 850 |
+
"grad_norm": 2.2750699520111084,
|
| 851 |
"learning_rate": 1.3709280582619822e-05,
|
| 852 |
+
"loss": 0.1347,
|
| 853 |
+
"step": 59000
|
| 854 |
+
},
|
| 855 |
+
{
|
| 856 |
+
"epoch": 2.1958960732211397,
|
| 857 |
+
"grad_norm": 4.66851282119751,
|
| 858 |
+
"learning_rate": 1.3401732112981005e-05,
|
| 859 |
+
"loss": 0.1359,
|
| 860 |
+
"step": 59500
|
| 861 |
},
|
| 862 |
{
|
| 863 |
"epoch": 2.2143489813994686,
|
| 864 |
+
"grad_norm": 1.2594196796417236,
|
| 865 |
"learning_rate": 1.3094183643342192e-05,
|
| 866 |
+
"loss": 0.135,
|
| 867 |
+
"step": 60000
|
| 868 |
+
},
|
| 869 |
+
{
|
| 870 |
+
"epoch": 2.2328018895777975,
|
| 871 |
+
"grad_norm": 0.6602271199226379,
|
| 872 |
+
"learning_rate": 1.2786635173703375e-05,
|
| 873 |
+
"loss": 0.1381,
|
| 874 |
+
"step": 60500
|
| 875 |
},
|
| 876 |
{
|
| 877 |
"epoch": 2.2512547977561264,
|
| 878 |
+
"grad_norm": 0.8580902814865112,
|
| 879 |
"learning_rate": 1.2479086704064562e-05,
|
| 880 |
+
"loss": 0.1308,
|
| 881 |
+
"step": 61000
|
| 882 |
+
},
|
| 883 |
+
{
|
| 884 |
+
"epoch": 2.2697077059344553,
|
| 885 |
+
"grad_norm": 0.8672662377357483,
|
| 886 |
+
"learning_rate": 1.2171538234425746e-05,
|
| 887 |
+
"loss": 0.1395,
|
| 888 |
+
"step": 61500
|
| 889 |
},
|
| 890 |
{
|
| 891 |
"epoch": 2.288160614112784,
|
| 892 |
+
"grad_norm": 1.646864891052246,
|
| 893 |
"learning_rate": 1.186398976478693e-05,
|
| 894 |
+
"loss": 0.1419,
|
| 895 |
+
"step": 62000
|
| 896 |
+
},
|
| 897 |
+
{
|
| 898 |
+
"epoch": 2.306613522291113,
|
| 899 |
+
"grad_norm": 4.04207181930542,
|
| 900 |
+
"learning_rate": 1.1556441295148116e-05,
|
| 901 |
+
"loss": 0.1337,
|
| 902 |
+
"step": 62500
|
| 903 |
},
|
| 904 |
{
|
| 905 |
"epoch": 2.325066430469442,
|
| 906 |
+
"grad_norm": 5.613555431365967,
|
| 907 |
"learning_rate": 1.1248892825509301e-05,
|
| 908 |
+
"loss": 0.1429,
|
| 909 |
+
"step": 63000
|
| 910 |
+
},
|
| 911 |
+
{
|
| 912 |
+
"epoch": 2.343519338647771,
|
| 913 |
+
"grad_norm": 1.977729082107544,
|
| 914 |
+
"learning_rate": 1.0941344355870485e-05,
|
| 915 |
+
"loss": 0.1323,
|
| 916 |
+
"step": 63500
|
| 917 |
},
|
| 918 |
{
|
| 919 |
"epoch": 2.3619722468260997,
|
| 920 |
+
"grad_norm": 1.2868248224258423,
|
| 921 |
"learning_rate": 1.0633795886231671e-05,
|
| 922 |
+
"loss": 0.1383,
|
| 923 |
+
"step": 64000
|
| 924 |
+
},
|
| 925 |
+
{
|
| 926 |
+
"epoch": 2.3804251550044286,
|
| 927 |
+
"grad_norm": 1.098742961883545,
|
| 928 |
+
"learning_rate": 1.0326247416592857e-05,
|
| 929 |
+
"loss": 0.1387,
|
| 930 |
+
"step": 64500
|
| 931 |
},
|
| 932 |
{
|
| 933 |
"epoch": 2.3988780631827575,
|
| 934 |
+
"grad_norm": 2.9264678955078125,
|
| 935 |
"learning_rate": 1.001869894695404e-05,
|
| 936 |
+
"loss": 0.1386,
|
| 937 |
+
"step": 65000
|
| 938 |
+
},
|
| 939 |
+
{
|
| 940 |
+
"epoch": 2.4173309713610864,
|
| 941 |
+
"grad_norm": 3.179082155227661,
|
| 942 |
+
"learning_rate": 9.711150477315225e-06,
|
| 943 |
+
"loss": 0.1444,
|
| 944 |
+
"step": 65500
|
| 945 |
},
|
| 946 |
{
|
| 947 |
"epoch": 2.4357838795394153,
|
| 948 |
+
"grad_norm": 1.5083171129226685,
|
| 949 |
"learning_rate": 9.40360200767641e-06,
|
| 950 |
+
"loss": 0.1351,
|
| 951 |
+
"step": 66000
|
| 952 |
+
},
|
| 953 |
+
{
|
| 954 |
+
"epoch": 2.4542367877177442,
|
| 955 |
+
"grad_norm": 1.590307354927063,
|
| 956 |
+
"learning_rate": 9.096053538037595e-06,
|
| 957 |
+
"loss": 0.1379,
|
| 958 |
+
"step": 66500
|
| 959 |
},
|
| 960 |
{
|
| 961 |
"epoch": 2.472689695896073,
|
| 962 |
+
"grad_norm": 1.490502953529358,
|
| 963 |
"learning_rate": 8.78850506839878e-06,
|
| 964 |
+
"loss": 0.1285,
|
| 965 |
+
"step": 67000
|
| 966 |
+
},
|
| 967 |
+
{
|
| 968 |
+
"epoch": 2.491142604074402,
|
| 969 |
+
"grad_norm": 2.0561413764953613,
|
| 970 |
+
"learning_rate": 8.480956598759966e-06,
|
| 971 |
+
"loss": 0.1396,
|
| 972 |
+
"step": 67500
|
| 973 |
},
|
| 974 |
{
|
| 975 |
"epoch": 2.509595512252731,
|
| 976 |
+
"grad_norm": 1.0588093996047974,
|
| 977 |
"learning_rate": 8.17340812912115e-06,
|
| 978 |
+
"loss": 0.1367,
|
| 979 |
+
"step": 68000
|
| 980 |
+
},
|
| 981 |
+
{
|
| 982 |
+
"epoch": 2.52804842043106,
|
| 983 |
+
"grad_norm": 0.8184725046157837,
|
| 984 |
+
"learning_rate": 7.865859659482334e-06,
|
| 985 |
+
"loss": 0.1322,
|
| 986 |
+
"step": 68500
|
| 987 |
},
|
| 988 |
{
|
| 989 |
"epoch": 2.5465013286093887,
|
| 990 |
+
"grad_norm": 1.3976045846939087,
|
| 991 |
"learning_rate": 7.55831118984352e-06,
|
| 992 |
+
"loss": 0.1332,
|
| 993 |
+
"step": 69000
|
| 994 |
+
},
|
| 995 |
+
{
|
| 996 |
+
"epoch": 2.5649542367877176,
|
| 997 |
+
"grad_norm": 2.417647361755371,
|
| 998 |
+
"learning_rate": 7.250762720204704e-06,
|
| 999 |
+
"loss": 0.1342,
|
| 1000 |
+
"step": 69500
|
| 1001 |
},
|
| 1002 |
{
|
| 1003 |
"epoch": 2.5834071449660465,
|
| 1004 |
+
"grad_norm": 4.064483165740967,
|
| 1005 |
"learning_rate": 6.94321425056589e-06,
|
| 1006 |
+
"loss": 0.1355,
|
| 1007 |
+
"step": 70000
|
| 1008 |
+
},
|
| 1009 |
+
{
|
| 1010 |
+
"epoch": 2.6018600531443754,
|
| 1011 |
+
"grad_norm": 2.23105788230896,
|
| 1012 |
+
"learning_rate": 6.635665780927075e-06,
|
| 1013 |
+
"loss": 0.1315,
|
| 1014 |
+
"step": 70500
|
| 1015 |
},
|
| 1016 |
{
|
| 1017 |
"epoch": 2.6203129613227043,
|
| 1018 |
+
"grad_norm": 2.205604076385498,
|
| 1019 |
"learning_rate": 6.328117311288259e-06,
|
| 1020 |
+
"loss": 0.1379,
|
| 1021 |
+
"step": 71000
|
| 1022 |
+
},
|
| 1023 |
+
{
|
| 1024 |
+
"epoch": 2.638765869501033,
|
| 1025 |
+
"grad_norm": 2.5101168155670166,
|
| 1026 |
+
"learning_rate": 6.020568841649444e-06,
|
| 1027 |
+
"loss": 0.142,
|
| 1028 |
+
"step": 71500
|
| 1029 |
},
|
| 1030 |
{
|
| 1031 |
"epoch": 2.657218777679362,
|
| 1032 |
+
"grad_norm": 11.855621337890625,
|
| 1033 |
"learning_rate": 5.713020372010629e-06,
|
| 1034 |
+
"loss": 0.1359,
|
| 1035 |
+
"step": 72000
|
| 1036 |
+
},
|
| 1037 |
+
{
|
| 1038 |
+
"epoch": 2.675671685857691,
|
| 1039 |
+
"grad_norm": 1.7274291515350342,
|
| 1040 |
+
"learning_rate": 5.4054719023718145e-06,
|
| 1041 |
+
"loss": 0.1386,
|
| 1042 |
+
"step": 72500
|
| 1043 |
},
|
| 1044 |
{
|
| 1045 |
"epoch": 2.69412459403602,
|
| 1046 |
+
"grad_norm": 1.0947271585464478,
|
| 1047 |
"learning_rate": 5.097923432732999e-06,
|
| 1048 |
+
"loss": 0.1393,
|
| 1049 |
+
"step": 73000
|
| 1050 |
+
},
|
| 1051 |
+
{
|
| 1052 |
+
"epoch": 2.712577502214349,
|
| 1053 |
+
"grad_norm": 1.6208831071853638,
|
| 1054 |
+
"learning_rate": 4.790374963094184e-06,
|
| 1055 |
+
"loss": 0.1276,
|
| 1056 |
+
"step": 73500
|
| 1057 |
},
|
| 1058 |
{
|
| 1059 |
"epoch": 2.7310304103926777,
|
| 1060 |
+
"grad_norm": 1.5204744338989258,
|
| 1061 |
"learning_rate": 4.482826493455368e-06,
|
| 1062 |
+
"loss": 0.1297,
|
| 1063 |
+
"step": 74000
|
| 1064 |
+
},
|
| 1065 |
+
{
|
| 1066 |
+
"epoch": 2.7494833185710066,
|
| 1067 |
+
"grad_norm": 4.482317924499512,
|
| 1068 |
+
"learning_rate": 4.175278023816553e-06,
|
| 1069 |
+
"loss": 0.1303,
|
| 1070 |
+
"step": 74500
|
| 1071 |
},
|
| 1072 |
{
|
| 1073 |
"epoch": 2.7679362267493355,
|
| 1074 |
+
"grad_norm": 9.054340362548828,
|
| 1075 |
"learning_rate": 3.8677295541777385e-06,
|
| 1076 |
+
"loss": 0.1319,
|
| 1077 |
+
"step": 75000
|
| 1078 |
+
},
|
| 1079 |
+
{
|
| 1080 |
+
"epoch": 2.7863891349276644,
|
| 1081 |
+
"grad_norm": 1.8670865297317505,
|
| 1082 |
+
"learning_rate": 3.5601810845389237e-06,
|
| 1083 |
+
"loss": 0.1301,
|
| 1084 |
+
"step": 75500
|
| 1085 |
},
|
| 1086 |
{
|
| 1087 |
"epoch": 2.8048420431059933,
|
| 1088 |
+
"grad_norm": 1.451202154159546,
|
| 1089 |
"learning_rate": 3.2526326149001084e-06,
|
| 1090 |
+
"loss": 0.1309,
|
| 1091 |
+
"step": 76000
|
| 1092 |
+
},
|
| 1093 |
+
{
|
| 1094 |
+
"epoch": 2.823294951284322,
|
| 1095 |
+
"grad_norm": 3.281291961669922,
|
| 1096 |
+
"learning_rate": 2.945084145261293e-06,
|
| 1097 |
+
"loss": 0.1407,
|
| 1098 |
+
"step": 76500
|
| 1099 |
},
|
| 1100 |
{
|
| 1101 |
"epoch": 2.841747859462651,
|
| 1102 |
+
"grad_norm": 3.273066997528076,
|
| 1103 |
"learning_rate": 2.6375356756224782e-06,
|
| 1104 |
+
"loss": 0.1267,
|
| 1105 |
+
"step": 77000
|
| 1106 |
+
},
|
| 1107 |
+
{
|
| 1108 |
+
"epoch": 2.86020076764098,
|
| 1109 |
+
"grad_norm": 8.522459030151367,
|
| 1110 |
+
"learning_rate": 2.3299872059836634e-06,
|
| 1111 |
+
"loss": 0.1304,
|
| 1112 |
+
"step": 77500
|
| 1113 |
},
|
| 1114 |
{
|
| 1115 |
"epoch": 2.878653675819309,
|
| 1116 |
+
"grad_norm": 1.6981911659240723,
|
| 1117 |
"learning_rate": 2.022438736344848e-06,
|
| 1118 |
+
"loss": 0.1436,
|
| 1119 |
+
"step": 78000
|
| 1120 |
+
},
|
| 1121 |
+
{
|
| 1122 |
+
"epoch": 2.8971065839976378,
|
| 1123 |
+
"grad_norm": 2.415241003036499,
|
| 1124 |
+
"learning_rate": 1.7148902667060328e-06,
|
| 1125 |
+
"loss": 0.1297,
|
| 1126 |
+
"step": 78500
|
| 1127 |
},
|
| 1128 |
{
|
| 1129 |
"epoch": 2.9155594921759667,
|
| 1130 |
+
"grad_norm": 1.65168035030365,
|
| 1131 |
"learning_rate": 1.4073417970672177e-06,
|
| 1132 |
+
"loss": 0.138,
|
| 1133 |
+
"step": 79000
|
| 1134 |
+
},
|
| 1135 |
+
{
|
| 1136 |
+
"epoch": 2.934012400354296,
|
| 1137 |
+
"grad_norm": 1.9556164741516113,
|
| 1138 |
+
"learning_rate": 1.0997933274284029e-06,
|
| 1139 |
+
"loss": 0.1346,
|
| 1140 |
+
"step": 79500
|
| 1141 |
},
|
| 1142 |
{
|
| 1143 |
"epoch": 2.952465308532625,
|
| 1144 |
+
"grad_norm": 2.9853076934814453,
|
| 1145 |
"learning_rate": 7.922448577895876e-07,
|
| 1146 |
+
"loss": 0.1371,
|
| 1147 |
+
"step": 80000
|
| 1148 |
+
},
|
| 1149 |
+
{
|
| 1150 |
+
"epoch": 2.970918216710954,
|
| 1151 |
+
"grad_norm": 2.885925054550171,
|
| 1152 |
+
"learning_rate": 4.846963881507725e-07,
|
| 1153 |
+
"loss": 0.1342,
|
| 1154 |
+
"step": 80500
|
| 1155 |
},
|
| 1156 |
{
|
| 1157 |
"epoch": 2.9893711248892827,
|
| 1158 |
+
"grad_norm": 2.020306348800659,
|
| 1159 |
"learning_rate": 1.771479185119575e-07,
|
| 1160 |
+
"loss": 0.141,
|
| 1161 |
+
"step": 81000
|
| 1162 |
},
         {
             "epoch": 3.0,
+            "eval_loss": 0.19889499247074127,
+            "eval_mse": 0.19889500241300032,
+            "eval_runtime": 55.3999,
+            "eval_samples_per_second": 1738.955,
+            "eval_steps_per_second": 217.383,
+            "step": 81288
         },
         {
             "epoch": 3.0,
+            "step": 81288,
             "total_flos": 4.283504864539085e+16,
+            "train_loss": 0.17857247165732115,
+            "train_runtime": 4471.1905,
+            "train_samples_per_second": 581.752,
+            "train_steps_per_second": 18.18
         }
     ],
     "logging_steps": 500,
+    "max_steps": 81288,
     "num_input_tokens_seen": 0,
     "num_train_epochs": 3,
     "save_steps": 500,
@@ -630,7 +1197,7 @@
         }
     },
     "total_flos": 4.283504864539085e+16,
+    "train_batch_size": 32,
     "trial_name": null,
     "trial_params": null
 }
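A sketch for reading the trainer_state.json shown above once downloaded: it lists the per-epoch eval losses, echoes the best checkpoint, and checks that the step/epoch bookkeeping is self-consistent (27096 steps per epoch is just ceil(867042 / 32)).

```python
# Assumes trainer_state.json is in the working directory; prints the eval history recorded above.
import json
import math

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f"epoch {entry['epoch']:.1f}  step {entry['step']}  eval_loss {entry['eval_loss']:.4f}")

print("best:", state["best_metric"], "at", state["best_model_checkpoint"])

# Bookkeeping check: steps per epoch times epochs equals max_steps / global_step.
steps_per_epoch = math.ceil(867042 / 32)          # 27096
assert steps_per_epoch * 3 == state["max_steps"]  # 81288
```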
training_args.bin
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:f7846a1e21a9a7ecb44d6f53ed2f2bbd4dbcdeca0216b7cbfc52b373edb803d5
 size 5368
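training_args.bin is the pickled TrainingArguments object the Trainer saves alongside the model. A sketch for inspecting it, assuming transformers is installed (the pickle references its classes); recent torch releases need weights_only=False for non-tensor pickles:

```python
# Loads the pickled TrainingArguments; weights_only=False is required on newer torch versions.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)  # TrainingArguments
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs, args.seed)
```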