Baselhany committed on
Commit
baa2953
·
verified ·
1 Parent(s): 07eba8a

Training in progress, step 2400, checkpoint

Browse files
last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:cb04c63682b4786c42d9e8bcd69c2b68a0037931f97a599af7cbf1129ff15e31
3
  size 223144592
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b3b114ec9aae5f56c896dc809331fe15ed8ccb9c22a63d8091cf763b49c85e3d
3
  size 223144592
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:108be5a941549c57ad5b41aeea576ba2ac85a3d6695d48c5bb5811e64b3badba
3
  size 281574266
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:edcd52e16b677aaaa2c66be498a66202ce1cc335af39cbbad43ce8a2ad734b75
3
  size 281574266
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:52d892b0d38f04ef62cc89aabaa03d55447e3a9c0b3878a8dd9be38b5a4c5433
3
  size 14244
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:87f3e0f4d936e870e79e3be44a85542644bf76c70c4bbcaa7cf95399760c586a
3
  size 14244
last-checkpoint/scaler.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:887c9ba23b35d52f7d9ec04d9717eb1e05e155aaf6ef15078b7968f60c8f51be
3
  size 988
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df198c0270152417dac5be430450a9752660b128bc2ee48e34c53ddc72b40e6e
3
  size 988
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:0930051c8643f83c104fb653cb7c51b35919ee5fd0154c43b4ca0dc5de1c8c3d
3
  size 1064
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d75303d12b74496ad035c93f7f537a1c9eccd4f5cc765415d2bf1655e9477134
3
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,10 +1,10 @@
1
  {
2
- "best_global_step": 2000,
3
- "best_metric": 0.2625994694960212,
4
- "best_model_checkpoint": "./distil-whisper/checkpoint-2000",
5
- "epoch": 1.424728309282024,
6
  "eval_steps": 400,
7
- "global_step": 2000,
8
  "is_hyper_param_search": false,
9
  "is_local_process_zero": true,
10
  "is_world_process_zero": true,
@@ -193,6 +193,43 @@
193
  "eval_steps_per_second": 0.398,
194
  "eval_wer": 0.2625994694960212,
195
  "step": 2000
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
196
  }
197
  ],
198
  "logging_steps": 100,
@@ -212,7 +249,7 @@
212
  "attributes": {}
213
  }
214
  },
215
- "total_flos": 2.60046401568768e+18,
216
  "train_batch_size": 8,
217
  "trial_name": null,
218
  "trial_params": null
 
1
  {
2
+ "best_global_step": 2400,
3
+ "best_metric": 0.2371058060713233,
4
+ "best_model_checkpoint": "./distil-whisper/checkpoint-2400",
5
+ "epoch": 1.7097808658471405,
6
  "eval_steps": 400,
7
+ "global_step": 2400,
8
  "is_hyper_param_search": false,
9
  "is_local_process_zero": true,
10
  "is_world_process_zero": true,
 
193
  "eval_steps_per_second": 0.398,
194
  "eval_wer": 0.2625994694960212,
195
  "step": 2000
196
+ },
197
+ {
198
+ "epoch": 1.495991448423303,
199
+ "grad_norm": 60.051639556884766,
200
+ "learning_rate": 5.7266109463467245e-05,
201
+ "loss": 6.4429,
202
+ "step": 2100
203
+ },
204
+ {
205
+ "epoch": 1.5672545875645822,
206
+ "grad_norm": 107.0939712524414,
207
+ "learning_rate": 5.456996495012133e-05,
208
+ "loss": 6.7403,
209
+ "step": 2200
210
+ },
211
+ {
212
+ "epoch": 1.6385177267058615,
213
+ "grad_norm": 52.28662109375,
214
+ "learning_rate": 5.187382043677541e-05,
215
+ "loss": 6.4136,
216
+ "step": 2300
217
+ },
218
+ {
219
+ "epoch": 1.7097808658471405,
220
+ "grad_norm": 115.394287109375,
221
+ "learning_rate": 4.91776759234295e-05,
222
+ "loss": 5.6497,
223
+ "step": 2400
224
+ },
225
+ {
226
+ "epoch": 1.7097808658471405,
227
+ "eval_loss": 0.11455892771482468,
228
+ "eval_runtime": 156.1161,
229
+ "eval_samples_per_second": 3.203,
230
+ "eval_steps_per_second": 0.404,
231
+ "eval_wer": 0.2371058060713233,
232
+ "step": 2400
233
  }
234
  ],
235
  "logging_steps": 100,
 
249
  "attributes": {}
250
  }
251
  },
252
+ "total_flos": 3.12079259271168e+18,
253
  "train_batch_size": 8,
254
  "trial_name": null,
255
  "trial_params": null